Searched refs:zone_end_pfn (Results 1 – 12 of 12) sorted by relevance
/linux/mm/

mm_init.c
    876   unsigned long start_pfn, unsigned long zone_end_pfn,    in memmap_init_range() argument
    913   if (defer_init(nid, pfn, zone_end_pfn)) {    in memmap_init_range()
    950   unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;    in memmap_init_zone_range() local
    953   start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);    in memmap_init_zone_range()
    954   end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);    in memmap_init_zone_range()
    960   zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE,    in memmap_init_zone_range()
   1168   unsigned long *zone_end_pfn)    in adjust_zone_range_for_zone_movable() argument
   1175   *zone_end_pfn = min(node_end_pfn,    in adjust_zone_range_for_zone_movable()
   1181   *zone_end_pfn > zone_movable_pfn[nid]) {    in adjust_zone_range_for_zone_movable()
   1182   *zone_end_pfn = zone_movable_pfn[nid];    in adjust_zone_range_for_zone_movable()
   [all …]

shuffle.c
     84   unsigned long end_pfn = zone_end_pfn(z);    in __shuffle_zone()

compaction.c
    202   pageblock_start_pfn(zone_end_pfn(zone) - 1);    in reset_cached_positions()
    317   block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);    in __reset_isolation_pfn()
    352   unsigned long free_pfn = zone_end_pfn(zone) - 1;    in __reset_isolation_suitable()
   1473   end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));    in fast_isolate_around()
   1649   zone_end_pfn(cc->zone)),    in fast_isolate_freepages()
   1706   zone_end_pfn(zone));    in isolate_freepages()
   2515   unsigned long end_pfn = zone_end_pfn(cc->zone);    in compact_zone()

memory_hotplug.c
    493   zone_end_pfn(zone));    in shrink_zone_span()
    495   zone->spanned_pages = zone_end_pfn(zone) - pfn;    in shrink_zone_span()
    501   } else if (zone_end_pfn(zone) == end_pfn) {    in shrink_zone_span()
    526   unsigned long end_pfn = zone_end_pfn(zone);    in update_pgdat_span()
    706   unsigned long old_end_pfn = zone_end_pfn(zone);    in resize_zone_range()

page_owner.c
    423   unsigned long end_pfn = zone_end_pfn(zone);    in pagetypeinfo_showmixedcount_print()
    766   unsigned long end_pfn = zone_end_pfn(zone);    in init_pages_in_zone()

kmemleak.c
   1756   unsigned long end_pfn = zone_end_pfn(zone);    in kmemleak_scan()

vmstat.c
   1645   unsigned long end_pfn = zone_end_pfn(zone);    in pagetypeinfo_showblockcount_print()

huge_memory.c
   4259   max_zone_pfn = zone_end_pfn(zone);    in split_huge_pages_all()

/linux/arch/arm64/kernel/

hibernate.c
    266   max_zone_pfn = zone_end_pfn(zone);    in swsusp_mte_save_tags()

/linux/kernel/power/

snapshot.c
    639   zone_end = zone_end_pfn(zone);    in create_mem_extents()
   1256   max_zone_pfn = zone_end_pfn(zone);    in mark_free_pages()
   1356   max_zone_pfn = zone_end_pfn(zone);    in count_highmem_pages()
   1418   max_zone_pfn = zone_end_pfn(zone);    in count_data_pages()
   1531   max_zone_pfn = zone_end_pfn(zone);    in copy_data_pages()
/linux/include/linux/

mmzone.h
   1121   static inline unsigned long zone_end_pfn(const struct zone *zone)    in zone_end_pfn() function
   1128   return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);    in zone_spans_pfn()
   1281   if (start_pfn >= zone_end_pfn(zone) ||    in zone_intersects()
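
The mmzone.h hits above are the definition site. As a minimal sketch (not the verbatim in-tree code), the three helpers can be reconstructed from the snippets and from the spanned_pages = zone_end_pfn - zone_start_pfn relation documented in physical_memory.rst below; the _sketch suffix is only there to avoid clashing with the real declarations, and the in-tree versions carry extra checks (for example an empty-zone test in zone_intersects()):

```c
#include <linux/mmzone.h>

/* One past the last PFN the zone spans (the span may contain holes). */
static inline unsigned long zone_end_pfn_sketch(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* True if @pfn lies in [zone_start_pfn, zone_end_pfn). */
static inline bool zone_spans_pfn_sketch(const struct zone *zone,
					 unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn_sketch(zone);
}

/* True if [start_pfn, start_pfn + nr_pages) overlaps the zone's span. */
static inline bool zone_intersects_sketch(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long nr_pages)
{
	if (start_pfn >= zone_end_pfn_sketch(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;
	return true;
}
```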
/linux/Documentation/mm/

physical_memory.rst
    473   ``spanned_pages`` = ``zone_end_pfn`` - ``zone_start_pfn``. It is initialized
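
Nearly all of the callers in the hits above share the same shape: read zone_end_pfn() once, then walk the span starting at zone->zone_start_pfn, validating each PFN because spanned_pages counts holes as well as present pages. A rough sketch of that pattern follows; the walker name is hypothetical, and the real callers (kmemleak_scan(), mark_free_pages(), pagetypeinfo_showblockcount_print(), ...) open-code the loop and add their own per-page work:

```c
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Hypothetical walker illustrating the common zone_end_pfn() caller pattern. */
static void walk_zone_pfns_sketch(struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);	/* one past the last spanned PFN */

	for (; pfn < end_pfn; pfn++) {
		struct page *page;

		/* The span can contain holes; skip PFNs with no memmap. */
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Zones of different nodes can overlap; keep only this zone's pages. */
		if (page_zone(page) != zone)
			continue;

		/* ... per-page work goes here ... */
	}
}
```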