Searched refs: page_zone (results 1 – 16 of 16, sorted by relevance)
/linux/mm/

page_isolation.c
      37  struct zone *zone = page_zone(page);                        in has_unmovable_pages()
     157  struct zone *zone = page_zone(page);                        in set_migratetype_isolate()
     220  zone = page_zone(page);                                     in unset_migratetype_isolate()
     336  zone = page_zone(pfn_to_page(isolate_pageblock));           in isolate_single_pageblock()
     632  zone = page_zone(page);                                     in test_pages_isolated()

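Most of these page_isolation.c hits share one idiom: resolve the zone from the page first, then take that zone's lock before inspecting or changing migratetype and free-area state. A minimal sketch of the idiom under a hypothetical function name (the real call sites do considerably more validation):

    /* Hypothetical sketch of the page -> zone -> zone->lock idiom. */
    static void example_update_zone_state(struct page *page)
    {
            struct zone *zone = page_zone(page);
            unsigned long flags;

            spin_lock_irqsave(&zone->lock, flags);
            /* ... read or update per-zone state covering this page ... */
            spin_unlock_irqrestore(&zone->lock, flags);
    }
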
zpdesc.h
     167  return page_zone(zpdesc_page(zpdesc));                      in zpdesc_zone()

shuffle.c
      51  if (page_zone(page) != zone)                                in shuffle_valid_page()

memory_hotplug.c
     445  if (zone != page_zone(pfn_to_page(start_pfn)))              in find_smallest_section_pfn()
     470  if (zone != page_zone(pfn_to_page(pfn)))                    in find_biggest_section_pfn()
    1072  struct zone *zone = page_zone(page);                        in adjust_present_page_count()
    1146  remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);  in mhp_deinit_memmap_on_memory()
    1947  if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone ||  in offline_pages()
    1948      page_zone(pfn_to_page(end_pfn - 1)) != zone)) {         in offline_pages()
    2322  if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)      in try_offline_memory_block()

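These hotplug helpers, like shuffle.c above and compaction.c and kmemleak.c further down, use page_zone() as a guard while walking PFN ranges: a pfn is only acted on if its page still maps to the expected zone, since a zone's span can contain holes and pages belonging to other zones. A hedged sketch of that guard (hypothetical function name):

    /* Hypothetical sketch: check that a PFN range lies entirely in one zone. */
    static bool example_pfn_range_in_zone(struct zone *zone,
                                          unsigned long start_pfn,
                                          unsigned long nr_pages)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                    if (!pfn_valid(pfn))
                            return false;
                    if (page_zone(pfn_to_page(pfn)) != zone)
                            return false;
            }
            return true;
    }
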
page_alloc.c
     342  return page_zone(page)->pageblock_flags;                    in get_pageblock_bitmap()
     351  pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);  in pfn_to_bitidx()
     374  VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);  in get_pfnblock_bitmap_bitidx()
     607  if (zone != page_zone(page))                                in bad_range()
    1569  struct zone *zone = page_zone(page);                        in __free_pages_ok()
    1606  atomic_long_add(nr_pages, &page_zone(page)->managed_pages);  in __free_pages_core()
    1663  if (page_zone(start_page) != zone)                          in __pageblock_pfn_to_page()
    1668  /* This gives a shorter code than deriving page_zone(end_page) */  in __pageblock_pfn_to_page()
    1954  VM_BUG_ON_PAGE(page_zone(page) != zone, page);              in __move_freepages_block()
    2905  zone = page_zone(page);                                     in __free_frozen_pages()
    [all...]

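The hits at 342 and 351 show the pageblock-flags machinery built on page_zone(): the bitmap hangs off the zone, and the bit index is the pfn's offset from zone_start_pfn scaled down to pageblock granularity. Roughly, as a FLATMEM-flavoured sketch (with SPARSEMEM the bitmap lives in the mem_section instead; NR_PAGEBLOCK_BITS is assumed from pageblock-flags.h):

    /* Sketch of the FLATMEM pfn -> pageblock bit-index derivation. */
    static unsigned long example_pfn_to_bitidx(const struct page *page,
                                               unsigned long pfn)
    {
            pfn -= pageblock_start_pfn(page_zone(page)->zone_start_pfn);
            return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
    }
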
vmstat.c
     417  * generated for page_zone(page) into the optimized functions.
     481  __inc_zone_state(page_zone(page), item);                    in __inc_zone_page_state()
     537  __dec_zone_state(page_zone(page), item);                    in __dec_zone_page_state()
     608  mod_zone_state(page_zone(page), item, 1, 1);                in inc_zone_page_state()
     614  mod_zone_state(page_zone(page), item, -1, -1);              in dec_zone_page_state()
     711  zone = page_zone(page);                                     in inc_zone_page_state()
    1653  if (page_zone(page) != zone)                                in pagetypeinfo_showblockcount_print()

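The vmstat hits are thin wrappers: the page-flavoured counter helpers resolve the zone once with page_zone(), which only decodes page->flags, and forward to the zone-flavoured primitives. Reassembled from the hit at line 481 (the vmstat.h entry below shows the same shape):

    static inline void __inc_zone_page_state(struct page *page,
                                             enum zone_stat_item item)
    {
            __inc_zone_state(page_zone(page), item);
    }
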
memremap.c
      96  remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),  in pageunmap_range()

memory-failure.c
     181  drain_all_pages(page_zone(page));                           in __page_handle_poison()
    1560  zone_pcp_disable(page_zone(p));                             in get_hwpoison_page()
    1565  zone_pcp_enable(page_zone(p));                              in get_hwpoison_page()

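memory-failure.c uses page_zone() to scope per-cpu page list (pcp) control to just the affected zone: get_hwpoison_page() brackets its attempt to pin the page with zone_pcp_disable()/zone_pcp_enable() so the page cannot sit hidden on a pcp free list in the meantime. In outline, per the hits at 1560 and 1565:

    /* Outline of the bracket around grabbing a hwpoisoned page. */
    zone_pcp_disable(page_zone(p));   /* drain and block this zone's pcp lists */
    /* ... try to take a reference on the poisoned page ... */
    zone_pcp_enable(page_zone(p));
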
mm_init.c
    2321  page_zone(page)->cma_pages += pageblock_nr_pages;           in init_cma_reserved_pageblock()
    2330  page_zone(page)->cma_pages += pageblock_nr_pages;           in init_cma_pageblock()

kmemleak.c
    1756  if (page_zone(page) != zone)                                in kmemleak_scan()

compaction.c
     286  if (zone != page_zone(page))                                in __reset_isolation_pfn()

mempolicy.c
    2424  __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);   in alloc_pages_mpol()

/linux/arch/powerpc/platforms/pseries/

cmm.c
     528  if (page_zone(page) != page_zone(newpage)) {                in cmm_migratepage()

/linux/include/linux/

vmstat.h
     362  __inc_zone_state(page_zone(page), item);                    in __inc_zone_page_state()
     375  __dec_zone_state(page_zone(page), item);                    in __dec_zone_page_state()

mm.h
    1735  static inline struct zone *page_zone(const struct page *page)  in page_zone() function
    1747  return page_zone(&folio->page);                             in folio_zone()
    2199  * Some inline functions in vmstat.h depend on page_zone()

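For reference, the definition behind the hit at line 1735 decodes fields cached in page->flags: the node id and the zone index together select an entry in that node's zone array. In mainline it looks essentially like this (a sketch; the exact form varies by version and config):

    static inline struct zone *page_zone(const struct page *page)
    {
            return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
    }

Being a few shifts and masks on page->flags plus an array index, it is cheap enough for the hot paths listed above.
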
/linux/drivers/virtio/

virtio_balloon.c
     853  page_zone(page) != page_zone(newpage)) {                    in virtballoon_migratepage()

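Both balloon drivers (cmm.c above and virtio_balloon.c here) make the same check in their migratepage callbacks: inflating the balloon adjusted the managed-page count of the zone the old page sat in, so a migration that lands in a different zone must rebalance both zones. The shape of that fixup, assuming the adjust_managed_page_count() helper the drivers use at inflate time (not visible in these hits):

    if (page_zone(page) != page_zone(newpage)) {
            adjust_managed_page_count(page, 1);      /* old zone gets a page back */
            adjust_managed_page_count(newpage, -1);  /* new zone gives one up */
    }
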