Lines matching full:page -- code-search excerpts from the kernel's mm/swap.c (LRU pagevec and page-release code). Line numbers and "in func()" annotations from the indexer have been stripped; elided source lines are marked with /* ... */ below.
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
}
static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page);
}
static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is meant for THPs, not for hugetlb: a
	 * hugetlb page never has PageLRU set (it is never put on any LRU
	 * list), so skip the release step for it.
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}
void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);
		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}
	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
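/*
 * For context, a hedged sketch of the caller side: in kernels of this
 * vintage the public put_page() (include/linux/mm.h) only reaches
 * __put_page() once the refcount actually drops to zero. Approximate
 * shape, not a verbatim copy:
 */
static inline void put_page(struct page *page)
{
	page = compound_head(page);
	if (page_is_devmap_managed(page)) {
		put_devmap_managed_page(page);	/* see end of this file */
		return;
	}
	if (put_page_testzero(page))		/* refcount hit zero? */
		__put_page(page);
}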
/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim = lru_to_page(pages);

		list_del(&victim->lru);
		put_page(victim);
	}
}
/*
 * get_kernel_pages(): returns the number of pages pinned; if no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		     struct page **pages)
{
	/* ... */
}
/**
 * get_kernel_page() - pin a kernel page in memory
 * @start: starting kernel address
 * @write: pinning for read/write, currently ignored
 * @pages: array that receives a pointer to the page pinned.
 *
 * Returns 1 if the page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	/* ... */
}
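/*
 * A minimal usage sketch (illustrative, not from this file): pin the
 * kernel page backing a buffer, then drop the reference. The function
 * name and 'buf' are hypothetical.
 */
static void example_pin_kernel_page(void *buf)
{
	struct page *pg[1];

	if (get_kernel_page((unsigned long)buf, 0, pg) == 1)
		put_page(pg[0]);	/* every pinned page must be put */
}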
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		/* ... if pagepgdat != pgdat, switch pgdat->lru_lock to it ... */
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	/* ... unlock, then release_pages() and reinit the pagevec ... */
}
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved) += thp_nr_pages(page);
	}
}
/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim. If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;

		get_page(page);
		/* ... pvec = this_cpu_ptr(&lru_rotate.pvec), IRQs off ... */
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		/* ... */
	}
}
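/*
 * The shape above repeats throughout this file: pin the page, stash it
 * in a per-CPU pagevec, and only touch the LRU lists under lru_lock
 * once the pagevec fills up (pagevec_add() returns 0) or the page is
 * compound. A hedged generic sketch, with hypothetical names:
 */
static void example_batched_lru_op(struct page *page)
{
	struct pagevec *pvec = this_cpu_ptr(&example_pvec);	/* hypothetical per-CPU pagevec */

	get_page(page);		/* keep the page alive while it waits in the batch */
	if (!pagevec_add(pvec, page) || PageCompound(page))
		pagevec_lru_move_fn(pvec, example_move_fn, NULL);	/* drain under lru_lock */
}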
void lru_note_cost_page(struct page *page)
{
	lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
		      page_is_file_lru(page), thp_nr_pages(page));
}
static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);
		/* ... count PGACTIVATE events (nr_pages) ... */
	}
}
static void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		get_page(page);
		/* ... pvec = this_cpu_ptr(&lru_pvecs.activate_page) ... */
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
	}
}
/* Without CONFIG_SMP, activate the page directly under the LRU lock. */
static void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed or migrated. Marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}
}
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);

	if (!PageReferenced(page)) {
		SetPageReferenced(page);
	} else if (PageUnevictable(page)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list, which
		 * is never rotated or maintained, so marking an unevictable
		 * page accessed has no effect.
		 */
	} else if (!PageActive(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
		 * pagevec, mark it active and it will be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		workingset_activation(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
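/*
 * Hedged illustration of the transitions documented above: two touches
 * promote a cold page (the function name is hypothetical).
 */
static void example_two_touches(struct page *page)
{
	mark_page_accessed(page); /* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page); /* inactive,referenced -> active,unreferenced */
}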
/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec;

	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);

	get_page(page);
	/* ... pvec = this_cpu_ptr(&lru_pvecs.lru_add) under lru_pvecs.lock ... */
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	/* ... */
}
/**
 * lru_cache_add_inactive_or_unevictable
 * @page: the page to be added to the LRU
 * @vma: vma in which the page is mapped, for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					   struct vm_area_struct *vma)
{
	bool unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		/* ... count UNEVICTABLE_PGMLOCKED events ... */
	}
	lru_cache_add(page);
}
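/*
 * Hedged caller sketch: the anonymous fault path adds a freshly faulted
 * page this way (shape only; exact helpers vary by kernel version).
 */
static void example_fault_add(struct page *page, struct vm_area_struct *vma,
			      unsigned long addr)
{
	page_add_new_anon_rmap(page, vma, addr, false);
	lru_cache_add_inactive_or_unevictable(page, vma);
}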
/*
 * If the page can not be invalidated, it is moved to the inactive list
 * to speed up its reclaim. It is moved to the head of the list, rather
 * than the tail, to give the flusher threads some time to write it out,
 * as this is much more effective than the single-page writeout from
 * reclaim.
 *
 * If the page isn't page_mapped() and is dirty or under writeback,
 * it can be reclaimed ASAP by using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean page -> inactive, tail
 * 6. others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by the flusher threads, which is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	bool active;
	int lru;
	int nr_pages = thp_nr_pages(page);

	if (!PageLRU(page))
		return;
	if (PageUnevictable(page))
		return;
	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/* Let writeback finish; PG_reclaim rotates the page at end of IO. */
		add_page_to_lru_list(page, lruvec, lru);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback already ended while it sat in the
		 * pagevec: move the page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec, lru);
		/* ... count PGROTATED (nr_pages) ... */
	}
	/* ... count PGDEACTIVATE if the page was active ... */
}
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);
		/* ... count PGDEACTIVATE (nr_pages) ... */
	}
}
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages; clearing
		 * PG_swapbacked distinguishes them from normal anon pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
		/* ... count PGLAZYFREE (nr_pages) ... */
	}
}
/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints to the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as mprotect),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec;

		/* ... pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file) ... */
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
	}
}
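/*
 * Hedged caller context: the invalidation path uses this hint. In
 * mm/truncate.c, invalidate_mapping_pages() calls deactivate_file_page()
 * on pages it failed to invalidate (typically because they are dirty or
 * under writeback), so that they at least age out of the LRU quickly.
 */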
/**
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the
 * active list and was not unevictable. This is done to accelerate the
 * reclaim of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		get_page(page);
		/* ... pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate) ... */
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
	}
}
/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		get_page(page);
		/* ... pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree) ... */
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
	}
}
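/*
 * Hedged context: mark_page_lazyfree() is the MADV_FREE path. From
 * userspace the chain starts with something like
 *
 *	madvise(addr, len, MADV_FREE);
 *
 * after which clean anonymous pages in the range are moved to the
 * inactive file list by this function and can later be reclaimed
 * without swap I/O, unless the process dirties them again.
 */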
/* (fragment of a comment in lru_add_drain_all(): pagevec counter stores
 * made by this CPU must be visible to other CPUs before loading the
 * current drain generation, below which drains the page vectors.)
 */
/*
 * release_pages - batched put_page(). Decrement the refcount on all the
 * pages in @pages; if it fell to zero, remove the page from the LRU and
 * free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		page = compound_head(page);
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			/* ... drop lru_lock if held ... */
			if (page_is_devmap_managed(page)) {
				put_devmap_managed_page(page);
				continue;
			}
		}

		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			/* ... drop lru_lock if held ... */
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			/* ... switch locked_pgdat's lru_lock if pgdat changed ... */
			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		__ClearPageWaiters(page);
		list_add(&page->lru, &pages_to_free);
	}
	/* ... unlock, memcg-uncharge and free everything on pages_to_free ... */
}
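/*
 * Hedged sketch of the payoff: compared with a plain put_page() loop,
 * the batched path above can keep one pgdat's lru_lock held across a
 * run of pages instead of relocking for every page.
 */
static void example_put_many(struct page **pages, int nr)
{
	/* naive alternative: for (i = 0; i < nr; i++) put_page(pages[i]); */
	release_pages(pages, nr);
}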
/*
 * The pages we are about to release may be sitting in the deferred
 * lru-addition queues; that would prevent them from really being freed
 * right now, which is correct but inefficient: those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 * So __pagevec_release() drains those queues first.
 */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		/*
		 * The head page has not yet been counted as an hpage, so
		 * account each tail subpage individually by adding it to
		 * the LRU in order.
		 */
		add_page_to_lru_list_tail(page_tail, lruvec,
					  page_lru(page_tail));
	}
}
static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = thp_nr_pages(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	/*
	 * Page becomes evictable in two ways:
	 * 1) Within the LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring the LRU lock to put the page on the correct LRU,
	 *    and then doing the PageLRU check with or without the lock.
	 *
	 * The lockless variant of (2) needs strict ordering: if the other side
	 * does not observe SetPageLRU() and fails isolation, the smp_mb()
	 * guarantees that the page_evictable() check will put the page on the
	 * correct LRU. Without smp_mb(), SetPageLRU() can be reordered after
	 * the PageMlocked() check, making the other CPU fail the isolation of
	 * the page whose Mlocked bit was just cleared (while also looking at
	 * the same page), and the evictable page would be stranded on an
	 * unevictable LRU.
	 */
	SetPageLRU(page);
	smp_mb();

	if (page_evictable(page)) {
		lru = page_lru(page);
		/* ... count UNEVICTABLE_PGRESCUED if was_unevictable ... */
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		/* ... count UNEVICTABLE_PGCULLED if !was_unevictable ... */
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}
/*
 * (from the pagevec_lookup_entries() doc:)
 * Only one subpage of a Transparent Huge Page is returned in one call.
 *
 * pagevec_remove_exceptionals() prunes the exceptional (xarray value)
 * entries that pagevec_lookup_entries() mixed in, so that the pagevec
 * holds only real pages and can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}
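/*
 * Hedged usage idiom (shape as in mm/truncate.c; exact signatures vary
 * by kernel version): look up a mix of pages and shadow entries, prune
 * the value entries, then use page-only helpers.
 *
 *	pagevec_lookup_entries(&pvec, mapping, index, nr, indices);
 *	pagevec_remove_exceptionals(&pvec);
 *	pagevec_release(&pvec);
 */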
/*
 * pagevec_lookup_range():
 * @start: The starting page index
 * @end: The final page index
 *
 * Returns a group of mapping-contiguous pages with ascending indexes;
 * there may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 */
void put_devmap_managed_page(struct page *page)
{
	int count;

	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
		return;

	count = page_ref_dec_return(page);

	/*
	 * devmap page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (count == 1)
		free_devmap_managed_page(page);
	else if (!count)
		__put_page(page);
}
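/*
 * Hedged worked example of the 1-based rule above: while the driver and
 * one other holder each have a reference, the refcount is 2; the final
 * put drops it to 1, which for a devmap page means "free", so the page
 * is handed back to its pgmap owner instead of the page allocator.
 */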