Lines Matching +full:inactive +full:-
1 // SPDX-License-Identifier: GPL-2.0-only
10 * Linux VM subsystem. Fine-tuning documentation can be found in
11 * Documentation/admin-guide/sysctl/vm.rst.
32 #include <linux/backing-dev.h>
84 * This path almost never happens for VM activity - pages are normally freed
85 * in batches. But it gets used by networking - and for compound pages.
112 free_frozen_pages(&folio->page, folio_order(folio)); in __folio_put()
143 * folio->mlock_count = !!folio_test_mlocked(folio)? in lru_add()
149 folio->mlock_count = 0; in lru_add()
165 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru()
227 * to the tail of the inactive list.
258 * Holding lruvec->lru_lock is safe here, since in lru_note_cost()
260 * 2) From a pre-LRU page during refault (which also holds the in lru_note_cost()
264 spin_lock_irq(&lruvec->lru_lock); in lru_note_cost()
267 lruvec->file_cost += cost; in lru_note_cost()
269 lruvec->anon_cost += cost; in lru_note_cost()
284 if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) { in lru_note_cost()
285 lruvec->file_cost /= 2; in lru_note_cost()
286 lruvec->anon_cost /= 2; in lru_note_cost()
288 spin_unlock_irq(&lruvec->lru_lock); in lru_note_cost()
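
A short userspace sketch of the cost bookkeeping shown above: lru_note_cost() charges the reclaim cost to either the file or the anon counter and, once their sum exceeds a quarter of the LRU size, halves both, so the counters decay exponentially rather than grow without bound. The names below (struct costs, note_cost) are invented for illustration only.

    #include <stdio.h>

    /* Hypothetical stand-in for the file/anon cost counters in struct lruvec. */
    struct costs {
    	unsigned long file_cost;
    	unsigned long anon_cost;
    };

    /* Model of the arithmetic in lru_note_cost(); no locking, no memcg walk. */
    static void note_cost(struct costs *c, int file, unsigned long cost,
    		      unsigned long lrusize)
    {
    	if (file)
    		c->file_cost += cost;
    	else
    		c->anon_cost += cost;

    	/* Keep the sum bounded so recent cost dominates old cost. */
    	if (c->file_cost + c->anon_cost > lrusize / 4) {
    		c->file_cost /= 2;
    		c->anon_cost /= 2;
    	}
    }

    int main(void)
    {
    	struct costs c = { 0, 0 };

    	for (int i = 0; i < 10; i++)
    		note_cost(&c, i & 1, 100, 1000);

    	printf("file=%lu anon=%lu\n", c.file_cost, c.anon_cost);
    	return 0;
    }
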
366 * a folio is marked active just after it is added to the inactive in __lru_cache_activate_folio()
369 for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) { in __lru_cache_activate_folio()
370 struct folio *batch_folio = fbatch->folios[i]; in __lru_cache_activate_folio()
385 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in lru_gen_inc_refs()
392 set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); in lru_gen_inc_refs()
404 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in lru_gen_inc_refs()
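
The do/while around try_cmpxchg() above is the usual lock-free read-modify-write loop: build the new flags word from the last value observed, and retry if another CPU changed the word in the meantime. A rough userspace analogue with C11 atomics; the mask and field layout below are made up for illustration and do not match the real folio flag bits.

    #include <stdatomic.h>
    #include <stdio.h>

    #define MY_REFS_MASK 0xffUL	/* hypothetical reference-count bit field */

    /* Bump a counter packed into a flags word, retrying on concurrent updates. */
    static void inc_refs(_Atomic unsigned long *flags)
    {
    	unsigned long old = atomic_load(flags);
    	unsigned long new;

    	do {
    		/* Overflow/saturation of the field is ignored in this sketch. */
    		new = (old & ~MY_REFS_MASK) | ((old & MY_REFS_MASK) + 1);
    	} while (!atomic_compare_exchange_weak(flags, &old, new));
    }

    int main(void)
    {
    	_Atomic unsigned long flags = 0;

    	inc_refs(&flags);
    	inc_refs(&flags);
    	printf("refs=%lu\n", flags & MY_REFS_MASK);
    	return 0;
    }
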
416 set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0); in lru_gen_clear_refs()
418 lrugen = &folio_lruvec(folio)->lrugen; in lru_gen_clear_refs()
420 return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type])); in lru_gen_clear_refs()
437 * folio_mark_accessed - Mark a folio as having seen activity.
442 * * inactive,unreferenced -> inactive,referenced
443 * * inactive,referenced -> active,unreferenced
444 * * active,unreferenced -> active,referenced
446 * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
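
The three transitions documented for folio_mark_accessed() form a small two-bit ladder: the first access only records a reference, and a further access while the folio is still marked referenced promotes it to the active list and clears the bit. A self-contained model of that ladder (types and names invented; the real code also batches the activation and behaves differently under MGLRU):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flattened view of the two per-folio bits described above. */
    struct folio_state {
    	bool active;
    	bool referenced;
    };

    /*
     * Model of the documented ladder:
     *   inactive,unreferenced -> inactive,referenced
     *   inactive,referenced   -> active,unreferenced
     *   active,unreferenced   -> active,referenced
     */
    static void mark_accessed(struct folio_state *f)
    {
    	if (!f->referenced) {
    		f->referenced = true;	/* first touch: remember it */
    	} else if (!f->active) {
    		f->active = true;	/* repeated touches: promote */
    		f->referenced = false;
    	}
    	/* active,referenced stays active,referenced */
    }

    int main(void)
    {
    	struct folio_state f = { false, false };

    	for (int i = 0; i < 3; i++) {
    		mark_accessed(&f);
    		printf("access %d: %s,%s\n", i + 1,
    		       f.active ? "active" : "inactive",
    		       f.referenced ? "referenced" : "unreferenced");
    	}
    	return 0;
    }
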
486 * folio_add_lru - Add a folio to an LRU list.
502 lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) in folio_add_lru()
510 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
521 if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)) in folio_add_lru_vma()
529 * inactive list to speed up its reclaim. It is moved to the
532 * effective than the single-page writeout from reclaim.
537 * 1. active, mapped folio -> none
538 * 2. active, dirty/writeback folio -> inactive, head, reclaim
539 * 3. inactive, mapped folio -> none
540 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
541 * 5. inactive, clean -> inactive, tail
542 * 6. Others -> none
544 * In 4, it moves to the head of the inactive list so the folio is
546 * than the single-page writeout from reclaim.
576 * We move that folio to the tail of the inactive list. in lru_deactivate_file()
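
The six cases listed above reduce to a short decision table: mapped folios are left alone, dirty or writeback folios go to the head of the inactive list with the reclaim flag so the flusher threads can write them, and clean inactive folios go straight to the tail. A sketch of that table with invented types; the real lru_deactivate_file() also clears the active/referenced flags, updates vm events, and operates on whole folio batches.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flattened view of the folio state the table refers to. */
    struct file_folio {
    	bool active;
    	bool mapped;
    	bool dirty_or_writeback;
    };

    enum action { LEAVE_ALONE, INACTIVE_HEAD_RECLAIM, INACTIVE_TAIL };

    /* Model of the cases documented above lru_deactivate_file(). */
    static enum action deactivate_file(const struct file_folio *f)
    {
    	if (f->mapped)
    		return LEAVE_ALONE;		/* cases 1 and 3 */
    	if (f->dirty_or_writeback)
    		return INACTIVE_HEAD_RECLAIM;	/* cases 2 and 4 */
    	if (!f->active)
    		return INACTIVE_TAIL;		/* case 5 */
    	return LEAVE_ALONE;			/* case 6 */
    }

    int main(void)
    {
    	struct file_folio f = { .active = false, .mapped = false,
    				.dirty_or_writeback = true };

    	printf("action=%d\n", deactivate_file(&f));	/* 1 == INACTIVE_HEAD_RECLAIM */
    	return 0;
    }
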
634 * disabled; or "cpu" is being hot-unplugged, and is already dead.
639 struct folio_batch *fbatch = &fbatches->lru_add; in lru_add_drain_cpu()
644 fbatch = &fbatches->lru_move_tail; in lru_add_drain_cpu()
655 fbatch = &fbatches->lru_deactivate_file; in lru_add_drain_cpu()
659 fbatch = &fbatches->lru_deactivate; in lru_add_drain_cpu()
663 fbatch = &fbatches->lru_lazyfree; in lru_add_drain_cpu()
671 * deactivate_file_folio() - Deactivate a file folio.
693 * folio_deactivate - deactivate a folio
696 * folio_deactivate() moves @folio to the inactive list if @folio was on the
712 * folio_mark_lazyfree - make an anon folio lazyfree
715 * folio_mark_lazyfree() moves @folio to the inactive file list.
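
folio_mark_lazyfree() is reached when userspace marks clean anonymous memory with MADV_FREE: the folio is parked on the inactive file list so reclaim can simply drop it instead of swapping it out, unless it is written to again first. A minimal userspace program that exercises that path (MADV_FREE needs Linux 4.5 or later; error handling kept short):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
    	size_t len = 16 * 4096;
    	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	if (p == MAP_FAILED) {
    		perror("mmap");
    		return 1;
    	}

    	memset(p, 0xaa, len);		/* dirty the anonymous pages */

    	/*
    	 * Tell the kernel the contents are disposable: under memory pressure
    	 * these pages may now be freed rather than swapped out.  Writing to
    	 * them again cancels the hint for the touched pages.
    	 */
    	if (madvise(p, len, MADV_FREE) != 0)
    		perror("madvise(MADV_FREE)");

    	munmap(p, len);
    	return 0;
    }
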
736 * It's called from per-cpu workqueue context in the SMP case so
773 return folio_batch_count(&fbatches->lru_add) || in cpu_needs_drain()
774 folio_batch_count(&fbatches->lru_move_tail) || in cpu_needs_drain()
775 folio_batch_count(&fbatches->lru_deactivate_file) || in cpu_needs_drain()
776 folio_batch_count(&fbatches->lru_deactivate) || in cpu_needs_drain()
777 folio_batch_count(&fbatches->lru_lazyfree) || in cpu_needs_drain()
778 folio_batch_count(&fbatches->lru_activate) || in cpu_needs_drain()
784 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
793 * lru_drain_gen - Global pages generation number in __lru_add_drain_all()
798 * This is an optimization for the highly-contended use case where a in __lru_add_drain_all()
852 * below and has already reached CPU #y's per-cpu data. CPU #x comes in __lru_add_drain_all()
853 * along, adds some pages to its per-cpu vectors, then calls in __lru_add_drain_all()
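
The generation counter mentioned above lets a caller of the global drain notice that someone else already completed a full drain after the point where its own pages could have been queued, so it can return without scheduling redundant per-CPU work. A rough single-process model of that check (the kernel's lru_drain_gen handling is more involved; the names below are only a sketch):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int drain_gen;		/* bumped once per completed drain */

    static void drain_everything(void)
    {
    	/* stand-in for queueing and flushing work on every CPU */
    	printf("draining all CPUs\n");
    }

    static void drain_all(void)
    {
    	/* Snapshot the generation before deciding whether work is needed. */
    	unsigned int gen_seen = __atomic_load_n(&drain_gen, __ATOMIC_ACQUIRE);

    	pthread_mutex_lock(&drain_lock);

    	/*
    	 * If a drain finished after our snapshot, everything we could have
    	 * queued before calling in has already been flushed: nothing to do.
    	 */
    	if (__atomic_load_n(&drain_gen, __ATOMIC_RELAXED) != gen_seen) {
    		pthread_mutex_unlock(&drain_lock);
    		return;
    	}

    	drain_everything();
    	__atomic_store_n(&drain_gen, gen_seen + 1, __ATOMIC_RELEASE);
    	pthread_mutex_unlock(&drain_lock);
    }

    int main(void)
    {
    	drain_all();
    	drain_all();
    	return 0;
    }
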
927 * folios_put_refs - Reduce the reference count on a batch of folios.
947 for (i = 0, j = 0; i < folios->nr; i++) { in folios_put_refs()
948 struct folio *folio = folios->folios[i]; in folios_put_refs()
980 folios->folios[j] = folio; in folios_put_refs()
990 folios->nr = j; in folios_put_refs()
997 * release_pages - batched put_page()
1020 /* Is our next entry actually "nr_pages" -> "nr_refs" ? */ in release_pages()
1037 * The folios which we're about to release may be in the deferred lru-addition
1039 * OK from a correctness point of view but is inefficient - those folios may be
1040 * cache-warm and we want to give them back to the page allocator ASAP.
1048 if (!fbatch->percpu_pvec_drained) { in __folio_batch_release()
1050 fbatch->percpu_pvec_drained = true; in __folio_batch_release()
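
The fragment above shows the percpu_pvec_drained flag at work: before freeing a batch, __folio_batch_release() drains the deferred per-CPU lru-addition queues so cache-warm folios go back to the allocator promptly, but it pays for that drain at most once per batch even when the batch is refilled and released in a loop. A tiny model of that drain-once pattern, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical miniature of a folio batch: a count plus the drain flag. */
    struct batch {
    	int nr;
    	bool percpu_drained;	/* models fbatch->percpu_pvec_drained */
    };

    static void drain_deferred_queues(void)
    {
    	/* stand-in for flushing the pending per-CPU LRU additions */
    	printf("draining deferred LRU additions\n");
    }

    /* Model of __folio_batch_release(): drain once, then free the entries. */
    static void batch_release(struct batch *b)
    {
    	if (!b->percpu_drained) {
    		drain_deferred_queues();
    		b->percpu_drained = true;
    	}
    	printf("freeing %d entries\n", b->nr);
    	b->nr = 0;
    }

    int main(void)
    {
    	struct batch b = { .nr = 8, .percpu_drained = false };

    	batch_release(&b);	/* drains, then frees */
    	b.nr = 4;		/* batch reused by the caller */
    	batch_release(&b);	/* flag already set: no second drain */
    	return 0;
    }
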
1057 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
1061 * entries. This function prunes all the non-folio entries from @fbatch
1062 * without leaving holes, so that it can be passed on to folio-only batch
1070 struct folio *folio = fbatch->folios[i]; in folio_batch_remove_exceptionals()
1072 fbatch->folios[j++] = folio; in folio_batch_remove_exceptionals()
1074 fbatch->nr = j; in folio_batch_remove_exceptionals()
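
The loop shown above is the standard in-place compaction idiom: a read index i visits every slot, a write index j advances only for entries worth keeping, and nr is trimmed to j at the end, so the surviving folios stay packed at the front with no holes. A generic userspace version of the same idiom, here pruning NULL placeholders from a small pointer batch (names are illustrative):

    #include <stdio.h>

    #define BATCH_SIZE 15

    /* Hypothetical batch of pointers, some of which are placeholder entries. */
    struct ptr_batch {
    	unsigned int nr;
    	void *slots[BATCH_SIZE];
    };

    /* Same compaction idiom as folio_batch_remove_exceptionals(). */
    static void batch_prune(struct ptr_batch *b)
    {
    	unsigned int i, j;

    	for (i = 0, j = 0; i < b->nr; i++) {
    		void *entry = b->slots[i];

    		if (entry)			/* "is this a real folio?" stand-in */
    			b->slots[j++] = entry;
    	}
    	b->nr = j;
    }

    int main(void)
    {
    	int a = 1, c = 3;
    	struct ptr_batch b = { .nr = 4, .slots = { &a, NULL, &c, NULL } };

    	batch_prune(&b);
    	printf("kept %u entries\n", b.nr);	/* kept 2 entries */
    	return 0;
    }
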
1079 .procname = "page-cluster",
1094 unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); in swap_setup()
1096 /* Use a smaller cluster for small-memory machines */ in swap_setup()
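
The shift in swap_setup() converts a page count into mebibytes: with 4 KiB pages PAGE_SHIFT is 12, so the total is divided by 2^(20-12) = 256 pages per MiB, and the result selects the initial value of the page-cluster sysctl declared above, i.e. the log2 of the swap readahead window. A userspace re-creation of that arithmetic; the 4 KiB page size and the 16 MiB threshold are assumptions hard-coded here for illustration.

    #include <stdio.h>

    #define PAGE_SHIFT 12			/* assumes 4 KiB pages */

    /*
     * Mirror of the sizing logic in swap_setup(): convert total pages to MiB,
     * then use a smaller swap readahead cluster (2^page_cluster pages) on
     * small-memory machines.
     */
    static int pick_page_cluster(unsigned long total_pages)
    {
    	unsigned long megs = total_pages >> (20 - PAGE_SHIFT);

    	return megs < 16 ? 2 : 3;
    }

    int main(void)
    {
    	unsigned long pages = 8UL << (30 - PAGE_SHIFT);	/* 8 GiB of 4 KiB pages */
    	int cluster = pick_page_cluster(pages);

    	printf("page-cluster = %d (readahead window %d pages)\n",
    	       cluster, 1 << cluster);
    	return 0;
    }
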