Lines matching "page" in mm/page_isolation.c

#include <linux/page-isolation.h>
/*
 * Returns a page without holding a reference. If the caller wants to
 * dereference that page (e.g., for dumping), it has to make sure that it
 * cannot get removed (e.g., via memory unplug) concurrently.
 */
static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
					int migratetype, int flags)
{
	struct page *page = pfn_to_page(start_pfn);
	struct zone *zone = page_zone(page);
	...
	if (is_migrate_cma_page(page)) {
		...
		return page;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		...
		if (PageReserved(page))
			return page;
		...
		/*
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page) || PageTransCompound(page)) {
			struct folio *folio = page_folio(page);
			unsigned int skip_pages;

			if (PageHuge(page)) {
				...
				return page;
			} else ... {
				return page;
			}

			skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
			pfn += skip_pages - 1;
			continue;
		}

		/*
		 * We can't use page_count() without pinning the page,
		 * because another CPU can free the compound page. This
		 * check already skips compound tails of THP
		 * because their page->_refcount is zero at all times.
		 */
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
				pfn += (1 << buddy_order(page)) - 1;
			continue;
		}

		/*
		 * A HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			continue;
		...
		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
			continue;

		if (__PageMovable(page) || PageLRU(page))
			continue;
		...
		return page;
	}
	return NULL;
}

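The pfn += (1 << buddy_order(page)) - 1 step works because free buddy blocks are naturally aligned to their order, so the scan can jump straight to the last page of the block and let the loop's pfn++ move past it. Below is a minimal userspace sketch of that arithmetic; fake_buddy_order() is a made-up stand-in for the kernel's buddy metadata, used purely for illustration:

	#include <stdio.h>

	/* Hypothetical stand-in for buddy_order(): pretend every pfn aligned
	 * to order 3 starts a free order-3 buddy block. */
	static unsigned int fake_buddy_order(unsigned long pfn)
	{
		return (pfn % 8 == 0) ? 3 : 0;
	}

	int main(void)
	{
		unsigned long pfn, scanned = 0;
		unsigned long start_pfn = 0, end_pfn = 32;

		/* Mirror the scan loop: the body jumps to the last page of a
		 * free block, and the loop's pfn++ then steps off it, so the
		 * tail pages of the block are never visited. */
		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			scanned++;
			if (fake_buddy_order(pfn))
				pfn += (1UL << fake_buddy_order(pfn)) - 1;
		}
		printf("visited %lu of %lu pfns\n", scanned, end_pfn - start_pfn);
		return 0;
	}

With the fake layout above this visits 4 of 32 pfns, one per order-3 block.
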
/*
 * This function sets the pageblock migratetype to MIGRATE_ISOLATE if no
 * unmovable page is present in [start_pfn, end_pfn). The pageblock must
 * intersect with [start_pfn, end_pfn).
 */
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
			unsigned long start_pfn, unsigned long end_pfn)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	...
	if (is_migrate_isolate_page(page)) {
		...
	}

	/*
	 * Pass the intersection of [start_pfn, end_pfn) and the page's pageblock
	 * to avoid redundant checks.
	 */
	check_unmovable_start = max(page_to_pfn(page), start_pfn);
	check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
				  end_pfn);
	...
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		...
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);
	...
		dump_page(unmovable, "unmovable page");

	return -EBUSY;
}

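The check_unmovable_start/check_unmovable_end clamping above is plain interval intersection: the checked span is the overlap of the caller's [start_pfn, end_pfn) with the pageblock containing the page. A small userspace model of that computation, assuming pageblock_order = 9 (4KB pages, 2MB pageblocks; the real value is configuration-dependent):

	#include <stdio.h>

	#define PAGEBLOCK_ORDER		9	/* assumed, config-dependent */
	#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

	/* round pfn up to the end of its pageblock */
	static unsigned long pageblock_end_pfn(unsigned long pfn)
	{
		return (pfn & ~(PAGEBLOCK_NR_PAGES - 1)) + PAGEBLOCK_NR_PAGES;
	}

	int main(void)
	{
		/* caller's range and the start pfn of the pageblock being isolated */
		unsigned long start_pfn = 700, end_pfn = 1200, block_pfn = 512;

		unsigned long check_start = block_pfn > start_pfn ? block_pfn : start_pfn;
		unsigned long check_end   = pageblock_end_pfn(block_pfn) < end_pfn ?
					    pageblock_end_pfn(block_pfn) : end_pfn;

		/* only [700, 1024) of pageblock [512, 1024) gets checked */
		printf("check [%lu, %lu)\n", check_start, check_end);
		return 0;
	}
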
static void unset_migratetype_isolate(struct page *page, int migratetype)
{
	struct zone *zone;
	...
	struct page *buddy;

	zone = page_zone(page);
	...
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page larger than pageblock_order in an isolated
	 * pageblock is restricted from merging due to the free page counting
	 * problem, it is possible that there is a free buddy page.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		...
		buddy = find_buddy_page_pfn(page, page_to_pfn(page),
					    order, NULL);
		...
		isolated_page = !!__isolate_free_page(page, order);
		/*
		 * Isolating a free page in an isolated pageblock
		 * is expected to always work as watermarks don't
		 * apply here.
		 */
		...
	}
	...
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
	...
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	...
}

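find_buddy_page_pfn() looks up the potential merge partner of a free block; underneath it is the classic buddy XOR on the pfn. A userspace sketch of just that arithmetic; the kernel additionally validates that the candidate really is a free page of the same order before merging:

	#include <stdio.h>

	/* Classic buddy arithmetic: a free block of size 2^order at pfn has
	 * its merge partner at pfn ^ (1 << order). */
	static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
	{
		return pfn ^ (1UL << order);
	}

	int main(void)
	{
		/* e.g. an order-9 (pageblock-sized) free block at pfn 1024 */
		printf("buddy of 1024 at order 9: %lu\n", buddy_pfn(1024, 9));
		printf("buddy of 1536 at order 9: %lu\n", buddy_pfn(1536, 9));
		return 0;
	}

The two calls are symmetric (1024 <-> 1536), which is exactly what lets two halves recombine into the next-higher order.
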
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * isolate_single_pageblock() -- tries to isolate a pageblock that might be
 * within a free or in-use page.
 * @boundary_pfn: pageblock-aligned pfn that a page might cross
 * ...
 * Free and in-use pages can be as big as MAX_PAGE_ORDER and contain more than
 * one pageblock. When not all pageblocks within a page are isolated at the same
 * time, free page accounting can go wrong. For example, in the case of
 * MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
 * pageblocks:
 * [      MAX_PAGE_ORDER page     ]
 * [  pageblock0  |  pageblock1   ]
 * When either pageblock is isolated, if it is a free page, the page is not
 * split into separate migratetype lists, as it is supposed to be; if it is an
 * in-use page and freed later, __free_one_page() does not split the free page
 * either. The function handles this by splitting the free page or migrating
 * the in-use page and then splitting the free page.
 */
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
			gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
			int migratetype)
{
	...
	/*
	 * Scan at the beginning of the MAX_ORDER_NR_PAGES-aligned range to
	 * avoid isolating a subblock of a larger free or in-use page. Also
	 * make sure all to-be-isolated pageblocks are within the same zone.
	 */
	...
	/*
	 * Bail out early when the to-be-isolated pageblock does not form
	 * a free or in-use page across boundary_pfn:
	 *
	 * 1. isolate before boundary_pfn: the page after is not online
	 * 2. isolate after boundary_pfn: the page before is not online
	 */
	...
	for (pfn = start_pfn; pfn < boundary_pfn;) {
		struct page *page = __first_valid_page(pfn, boundary_pfn - pfn);

		VM_BUG_ON(!page);
		pfn = page_to_pfn(page);
		/*
		 * start_pfn is MAX_ORDER_NR_PAGES aligned, so if there are any
		 * free pages in [start_pfn, boundary_pfn), their head page will
		 * always be in the range.
		 */
		if (PageBuddy(page)) {
			int order = buddy_order(page);
			...
			/* free page changed before split, check it again */
			if (split_free_page(page, order, boundary_pfn - pfn))
				continue;
			...
		}

		/*
		 * Migrate compound pages, then let the free page handling code
		 * above do the rest. If migration is not possible, just fail.
		 */
		if (PageCompound(page)) {
			struct page *head = compound_head(page);
			...
			if (PageHuge(page) || PageLRU(page) || __PageMovable(page)) {
				...
				int page_mt = get_pageblock_migratetype(page);
				bool isolate_page = !is_migrate_isolate_page(page);
				...
				/*
				 * XXX: mark the page as MIGRATE_ISOLATE so that
				 * no one else can grab the freed page after migration.
				 * Ideally, the page should be freed as two separate
				 * pages to be added to separate migratetype free
				 * lists.
				 */
				if (isolate_page) {
					ret = set_migratetype_isolate(page, page_mt,
							flags, head_pfn, head_pfn + nr_pages);
					...
				}
				...
				/*
				 * Restore the page's migratetype so that it can
				 * be split into separate migratetype free lists
				 * later.
				 */
				if (isolate_page)
					unset_migratetype_isolate(page, page_mt);
				...
				/*
				 * Reset pfn to the head of the free page, so
				 * that the free page handling code above can split
				 * the free page to the right migratetype list.
				 *
				 * head_pfn is not used here, as a hugetlb page order
				 * can be bigger than MAX_PAGE_ORDER, but after it is
				 * freed, the free page order is not. Use a pfn within
				 * the range to find the head of the free page.
				 */
				...
				/* stop if we cannot find the free page */
				...
			}
		}
		...
	}

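To see why splitting at boundary_pfn terminates, it helps to model split_free_page() as repeated halving that stops once no block straddles the boundary. A userspace sketch of that model, under an assumed pageblock_order of 9; the kernel's split_free_page() does the real work, including moving each piece onto the proper freelist:

	#include <stdio.h>

	#define PAGEBLOCK_ORDER	9	/* assumed, config-dependent */

	/* Split a free block [pfn, pfn + 2^order) so no resulting block
	 * crosses boundary_pfn: halve the block that still straddles the
	 * boundary, emit the other half, repeat. */
	static void split_at(unsigned long pfn, unsigned int order,
			     unsigned long boundary_pfn)
	{
		while (order && pfn + (1UL << order) > boundary_pfn &&
		       pfn < boundary_pfn) {
			order--;
			unsigned long half = pfn + (1UL << order);
			if (boundary_pfn <= half) {
				printf("free block [%lu, %lu) order %u\n",
				       half, half + (1UL << order), order);
				/* keep splitting the lower half */
			} else {
				printf("free block [%lu, %lu) order %u\n",
				       pfn, half, order);
				pfn = half;	/* keep splitting the upper half */
			}
		}
		printf("free block [%lu, %lu) order %u\n",
		       pfn, pfn + (1UL << order), order);
	}

	int main(void)
	{
		/* a MAX_PAGE_ORDER-sized free page spanning two pageblocks */
		split_at(0, PAGEBLOCK_ORDER + 1, 1UL << PAGEBLOCK_ORDER);
		return 0;
	}

For the two-pageblock case from the kernel-doc above, this yields exactly two order-9 blocks, one per pageblock, so each can sit on its own migratetype freelist.
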
/**
 * start_isolate_page_range() - mark page range MIGRATE_ISOLATE
 * ...
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated.
 * ...
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     int migratetype, int flags, gfp_t gfp_flags)
{
	...
	struct page *page;

	/* isolation is done at page block granularity */
	...
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page, migratetype, flags,
					start_pfn, end_pfn)) {
			...
			return -EBUSY;
		}
	...
}

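Callers such as alloc_contig_range() and memory offlining use this as the first step of an isolate -> migrate -> test -> undo sequence. A hedged sketch of that caller pattern follows; the exact signatures have changed across kernel versions, and the migration step is elided:

	#include <linux/gfp.h>
	#include <linux/page-isolation.h>

	static int isolate_then_claim(unsigned long start_pfn, unsigned long end_pfn)
	{
		int ret;

		/* 1. Mark the pageblocks MIGRATE_ISOLATE so no new allocations land. */
		ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
					       0, GFP_KERNEL);
		if (ret)
			return ret;

		/* 2. Migrate away any in-use pages (elided). */

		/* 3. Verify everything now sits on MIGRATE_ISOLATE freelists. */
		ret = test_pages_isolated(start_pfn, end_pfn, 0);

		/* 4. On failure (or when done), restore the original migratetype. */
		if (ret)
			undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
		return ret;
	}
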
/*
 * This finds every MIGRATE_ISOLATE page block in the given range
 * and switches it to @migratetype.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     int migratetype)
{
	...
	struct page *page;
	...
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	...
}

static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			pfn++;
		else
			break;
	}
	return pfn;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	...
	struct page *page;
	...
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page) {
		ret = -EBUSY;
		goto out;
	}

	/* Check whether all pages are on free lists */
	zone = page_zone(page);
	...