Lines Matching +full:ext +full:- +full:gen
1 // SPDX-License-Identifier: GPL-2.0
31 #include <linux/backing-dev.h>
45 #include <linux/memory-tiers.h>
174 if ((_folio)->lru.prev != _base) { \
177 prev = lru_to_folio(&(_folio->lru)); \
178 prefetchw(&prev->_field); \
195 return sc->target_mem_cgroup; in cgroup_reclaim()
204 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); in root_reclaim()
208 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
251 WARN_ON_ONCE(rs && task->reclaim_state); in set_task_reclaim_state()
253 /* Check for the nulling of an already-nulled member */ in set_task_reclaim_state()
254 WARN_ON_ONCE(!rs && !task->reclaim_state); in set_task_reclaim_state()
256 task->reclaim_state = rs; in set_task_reclaim_state()
260 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
261 * scan_control->nr_reclaimed.
266 * Currently, reclaim_state->reclaimed includes three types of pages in flush_reclaim_state()
273 * single memcg. For example, a memcg-aware shrinker can free one object in flush_reclaim_state()
276 * overestimating the reclaimed amount (potentially under-reclaiming). in flush_reclaim_state()
278 * Only count such pages for global reclaim to prevent under-reclaiming in flush_reclaim_state()
293 if (current->reclaim_state && root_reclaim(sc)) { in flush_reclaim_state()
294 sc->nr_reclaimed += current->reclaim_state->reclaimed; in flush_reclaim_state()
295 current->reclaim_state->reclaimed = 0; in flush_reclaim_state()
303 if (sc && sc->no_demotion) in can_demote()
317 * For non-memcg reclaim, is there in can_reclaim_anon_pages()
355 * lruvec_lru_size - Returns the number of pages on the given LRU list.
358 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
367 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
412 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
413 PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); in reclaimer_offset()
414 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
415 PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); in reclaimer_offset()
416 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
417 PGSCAN_DIRECT - PGSCAN_KSWAPD); in reclaimer_offset()
418 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
419 PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); in reclaimer_offset()
424 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; in reclaimer_offset()
425 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; in reclaimer_offset()
433 * private data at folio->private. in is_page_cache_freeable()
435 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
441 * -ENOSPC. We need to propagate that into the address_space for a subsequent
469 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
478 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
495 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
505 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { in reclaim_throttle()
513 * parallel reclaimers which is a short-lived event so the timeout is in reclaim_throttle()
515 * potentially long-lived events so use a longer timeout. This is shaky in reclaim_throttle()
524 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
525 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
555 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
557 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
558 jiffies_to_usecs(timeout - ret), in reclaim_throttle()
575 * This is an inaccurate read as the per-cpu deltas may not in __acct_reclaim_writeback()
581 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
582 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
585 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
602 * Calls ->writepage().
609 * will be non-blocking. To prevent this allocation from being in pageout()
628 * folio->mapping == NULL while being dirty with clean buffers. in pageout()
639 if (mapping->a_ops->writepage == NULL) in pageout()
654 res = mapping->a_ops->writepage(&folio->page, &wbc); in pageout()
688 spin_lock(&mapping->host->i_lock); in __remove_mapping()
689 xa_lock_irq(&mapping->i_pages); in __remove_mapping()
709 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags in __remove_mapping()
710 * load is not satisfied before that of folio->_refcount. in __remove_mapping()
725 swp_entry_t swap = folio->swap; in __remove_mapping()
731 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
736 free_folio = mapping->a_ops->free_folio; in __remove_mapping()
757 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
759 inode_add_lru(mapping->host); in __remove_mapping()
760 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
769 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
771 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
776 * remove_mapping() - Attempt to remove a folio from its mapping.
802 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
829 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
841 if (referenced_ptes == -1) in folio_check_references()
865 * Activate file-backed executable folios after first usage. in folio_check_references()
909 if (mapping && mapping->a_ops->is_dirty_writeback) in folio_check_dirty_writeback()
910 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
922 allowed_mask = mtc->nmask; in alloc_demote_folio()
932 mtc->nmask = NULL; in alloc_demote_folio()
933 mtc->gfp_mask |= __GFP_THISNODE; in alloc_demote_folio()
938 mtc->gfp_mask &= ~__GFP_THISNODE; in alloc_demote_folio()
939 mtc->nmask = allowed_mask; in alloc_demote_folio()
951 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
993 * We can "enter_fs" for swap-cache with only __GFP_IO in may_enter_fs()
995 * ->flags can be updated non-atomicially (scan_swap_map_slots), in may_enter_fs()
1019 do_demote_pass = can_demote(pgdat->node_id, sc); in shrink_folio_list()
1032 list_del(&folio->lru); in shrink_folio_list()
1042 sc->nr_scanned += nr_pages; in shrink_folio_list()
1047 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1062 stat->nr_dirty += nr_pages; in shrink_folio_list()
1065 stat->nr_unqueued_dirty += nr_pages; in shrink_folio_list()
1074 stat->nr_congested += nr_pages; in shrink_folio_list()
1124 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1125 stat->nr_immediate += nr_pages; in shrink_folio_list()
1131 !may_enter_fs(folio, sc->gfp_mask)) { in shrink_folio_list()
1133 * This is slightly racy - in shrink_folio_list()
1137 * interpreted as the readahead flag - but in shrink_folio_list()
1147 stat->nr_writeback += nr_pages; in shrink_folio_list()
1155 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1167 stat->nr_ref_keep += nr_pages; in shrink_folio_list()
1180 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1192 if (!(sc->gfp_mask & __GFP_IO)) in shrink_folio_list()
1238 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1255 stat->nr_unmap_fail += nr_pages; in shrink_folio_list()
1258 stat->nr_lazyfree_fail += nr_pages; in shrink_folio_list()
1278 * injecting inefficient single-folio I/O into in shrink_folio_list()
1289 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
1305 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
1307 if (!sc->may_writepage) in shrink_folio_list()
1322 stat->nr_pageout += nr_pages; in shrink_folio_list()
1330 * A synchronous write - probably a ramdisk. Go in shrink_folio_list()
1358 * and mark the folio clean - it can be freed. in shrink_folio_list()
1360 * Rarely, folios can have buffers and no ->mapping. in shrink_folio_list()
1369 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
1404 sc->target_mem_cgroup)) in shrink_folio_list()
1422 list_add(&folio->lru, &free_folios); in shrink_folio_list()
1431 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1443 stat->nr_activate[type] += nr_pages; in shrink_folio_list()
1449 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
1478 if (!sc->proactive) { in shrink_folio_list()
1484 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; in shrink_folio_list()
1516 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
1527 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
1532 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1533 -(long)nr_reclaimed); in reclaim_clean_pages_from_list()
1540 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, in reclaim_clean_pages_from_list()
1542 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1543 -(long)stat.nr_lazyfree_fail); in reclaim_clean_pages_from_list()
1560 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
1569 * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL
1574 gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && in skip_cma()
1587 * lruvec->lru_lock is heavily contended. Some of the functions that
1610 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
1630 if (folio_zonenum(folio) > sc->reclaim_idx || in isolate_lru_folios()
1648 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
1653 * sure the folio is not being freed elsewhere -- the in isolate_lru_folios()
1669 list_move(&folio->lru, move_to); in isolate_lru_folios()
1692 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_folios()
1699 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
1769 * won't get blocked by normal direct-reclaimers, forming a circular in too_many_isolated()
1772 if (gfp_has_io_fs(sc->gfp_mask)) in too_many_isolated()
1800 list_del(&folio->lru); in move_folios_to_lru()
1802 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1804 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1816 * list_add(&folio->lru,) in move_folios_to_lru()
1817 * list_add(&folio->lru,) in move_folios_to_lru()
1825 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1827 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1829 list_add(&folio->lru, &folios_to_free); in move_folios_to_lru()
1855 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
1861 return !(current->flags & PF_LOCAL_THROTTLE); in current_may_throttle()
1897 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1909 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1916 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1919 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
1925 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1927 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); in shrink_inactive_list()
1957 sc->nr.dirty += stat.nr_dirty; in shrink_inactive_list()
1958 sc->nr.congested += stat.nr_congested; in shrink_inactive_list()
1959 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in shrink_inactive_list()
1960 sc->nr.writeback += stat.nr_writeback; in shrink_inactive_list()
1961 sc->nr.immediate += stat.nr_immediate; in shrink_inactive_list()
1962 sc->nr.taken += nr_taken; in shrink_inactive_list()
1964 sc->nr.file_taken += nr_taken; in shrink_inactive_list()
1966 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
1967 nr_scanned, nr_reclaimed, &stat, sc->priority, file); in shrink_inactive_list()
1982 * It is safe to rely on the active flag against the non-LRU folios in here
1983 * because nobody will play with that bit on a non-LRU folio.
1985 * The downside is that we have to touch folio->_refcount against each folio.
1986 * But we had to alter folio->flags anyway.
2006 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2017 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2024 list_del(&folio->lru); in shrink_active_list()
2040 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2043 * Identify referenced, file-backed active folios and in shrink_active_list()
2047 * are not likely to be evicted by use-once streaming in shrink_active_list()
2053 list_add(&folio->lru, &l_active); in shrink_active_list()
2058 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2060 list_add(&folio->lru, &l_inactive); in shrink_active_list()
2066 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2076 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2077 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2083 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2084 nr_deactivate, nr_rotated, sc->priority, file); in shrink_active_list()
2104 list_del(&folio->lru); in reclaim_folio_list()
2129 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
2148 if (sc->may_deactivate & (1 << is_file_lru(lru))) in shrink_list()
2151 sc->skipped_deactivate = 1; in shrink_list()
2163 * to the established workingset on the scan-resistant active list,
2177 * -------------------------------------
2196 gb = (inactive + active) >> (30 - PAGE_SHIFT); in inactive_is_low()
2220 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_control()
2223 * Flush the memory cgroup stats, so that we read accurate per-memcg in prepare_scan_control()
2226 mem_cgroup_flush_stats(sc->target_mem_cgroup); in prepare_scan_control()
2231 spin_lock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2232 sc->anon_cost = target_lruvec->anon_cost; in prepare_scan_control()
2233 sc->file_cost = target_lruvec->file_cost; in prepare_scan_control()
2234 spin_unlock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2240 if (!sc->force_deactivate) { in prepare_scan_control()
2250 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || in prepare_scan_control()
2252 sc->may_deactivate |= DEACTIVATE_ANON; in prepare_scan_control()
2254 sc->may_deactivate &= ~DEACTIVATE_ANON; in prepare_scan_control()
2258 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || in prepare_scan_control()
2260 sc->may_deactivate |= DEACTIVATE_FILE; in prepare_scan_control()
2262 sc->may_deactivate &= ~DEACTIVATE_FILE; in prepare_scan_control()
2264 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; in prepare_scan_control()
2272 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) in prepare_scan_control()
2273 sc->cache_trim_mode = 1; in prepare_scan_control()
2275 sc->cache_trim_mode = 0; in prepare_scan_control()
2291 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_control()
2296 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_control()
2311 sc->file_is_tiny = in prepare_scan_control()
2313 !(sc->may_deactivate & DEACTIVATE_ANON) && in prepare_scan_control()
2314 anon >> sc->priority; in prepare_scan_control()
2339 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
2361 if (!sc->priority && swappiness) { in get_scan_count()
2367 * If the system is almost out of file pages, force-scan anon. in get_scan_count()
2369 if (sc->file_is_tiny) { in get_scan_count()
2378 if (sc->cache_trim_mode) { in get_scan_count()
2399 total_cost = sc->anon_cost + sc->file_cost; in get_scan_count()
2400 anon_cost = total_cost + sc->anon_cost; in get_scan_count()
2401 file_cost = total_cost + sc->file_cost; in get_scan_count()
2407 fp = (200 - swappiness) * (total_cost + 1); in get_scan_count()
2420 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
2421 mem_cgroup_protection(sc->target_mem_cgroup, memcg, in get_scan_count()
2431 * becomes extremely binary -- from nothing as we in get_scan_count()
2446 * the best-effort low protection. However, we still in get_scan_count()
2447 * ideally want to honor how well-behaved groups are in in get_scan_count()
2458 if (!sc->memcg_low_reclaim && low > min) { in get_scan_count()
2460 sc->memcg_low_skipped = 1; in get_scan_count()
2468 scan = lruvec_size - lruvec_size * protection / in get_scan_count()
2474 * sc->priority further than desirable. in get_scan_count()
2481 scan >>= sc->priority; in get_scan_count()
2500 * round-off error. in get_scan_count()
2534 return can_demote(pgdat->node_id, sc); in can_age_anon_pages()
2564 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2568 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
2569 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
2572 #define for_each_gen_type_zone(gen, type, zone) \ argument
2573 for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
2586 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec()
2589 if (!lruvec->pgdat) in get_lruvec()
2590 lruvec->pgdat = pgdat; in get_lruvec()
2597 return &pgdat->__lruvec; in get_lruvec()
2605 if (!sc->may_swap) in get_swappiness()
2608 if (!can_demote(pgdat->node_id, sc) && in get_swappiness()
2617 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
2639 * To get rid of non-leaf entries that no longer have enough leaf entries, the
2640 * aging uses the double-buffering technique to flip to the other filter each
2641 * time it produces a new generation. For non-leaf entries that have enough
2667 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); in get_item_key()
2676 int gen = filter_gen_from_seq(seq); in test_bloom_filter() local
2678 filter = READ_ONCE(mm_state->filters[gen]); in test_bloom_filter()
2692 int gen = filter_gen_from_seq(seq); in update_bloom_filter() local
2694 filter = READ_ONCE(mm_state->filters[gen]); in update_bloom_filter()
2709 int gen = filter_gen_from_seq(seq); in reset_bloom_filter() local
2711 filter = mm_state->filters[gen]; in reset_bloom_filter()
2719 WRITE_ONCE(mm_state->filters[gen], filter); in reset_bloom_filter()
2737 return &memcg->mm_list; in get_mm_list()
2746 return &lruvec->mm_state; in get_mm_state()
2753 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in get_next_mm()
2754 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in get_next_mm()
2756 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); in get_next_mm()
2757 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in get_next_mm()
2759 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) in get_next_mm()
2762 clear_bit(key, &mm->lru_gen.bitmap); in get_next_mm()
2773 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); in lru_gen_add_mm()
2775 VM_WARN_ON_ONCE(mm->lru_gen.memcg); in lru_gen_add_mm()
2776 mm->lru_gen.memcg = memcg; in lru_gen_add_mm()
2778 spin_lock(&mm_list->lock); in lru_gen_add_mm()
2785 if (mm_state->tail == &mm_list->fifo) in lru_gen_add_mm()
2786 mm_state->tail = &mm->lru_gen.list; in lru_gen_add_mm()
2789 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); in lru_gen_add_mm()
2791 spin_unlock(&mm_list->lock); in lru_gen_add_mm()
2800 if (list_empty(&mm->lru_gen.list)) in lru_gen_del_mm()
2804 memcg = mm->lru_gen.memcg; in lru_gen_del_mm()
2808 spin_lock(&mm_list->lock); in lru_gen_del_mm()
2815 if (mm_state->head == &mm->lru_gen.list) in lru_gen_del_mm()
2816 mm_state->head = mm_state->head->prev; in lru_gen_del_mm()
2819 if (mm_state->tail == &mm->lru_gen.list) in lru_gen_del_mm()
2820 mm_state->tail = mm_state->tail->next; in lru_gen_del_mm()
2823 list_del_init(&mm->lru_gen.list); in lru_gen_del_mm()
2825 spin_unlock(&mm_list->lock); in lru_gen_del_mm()
2828 mem_cgroup_put(mm->lru_gen.memcg); in lru_gen_del_mm()
2829 mm->lru_gen.memcg = NULL; in lru_gen_del_mm()
2837 struct task_struct *task = rcu_dereference_protected(mm->owner, true); in lru_gen_migrate_mm()
2839 VM_WARN_ON_ONCE(task->mm != mm); in lru_gen_migrate_mm()
2840 lockdep_assert_held(&task->alloc_lock); in lru_gen_migrate_mm()
2847 if (!mm->lru_gen.memcg) in lru_gen_migrate_mm()
2853 if (memcg == mm->lru_gen.memcg) in lru_gen_migrate_mm()
2856 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); in lru_gen_migrate_mm()
2888 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
2891 hist = lru_hist_from_seq(walk->max_seq); in reset_mm_stats()
2894 WRITE_ONCE(mm_state->stats[hist][i], in reset_mm_stats()
2895 mm_state->stats[hist][i] + walk->mm_stats[i]); in reset_mm_stats()
2896 walk->mm_stats[i] = 0; in reset_mm_stats()
2901 hist = lru_hist_from_seq(mm_state->seq + 1); in reset_mm_stats()
2904 WRITE_ONCE(mm_state->stats[hist][i], 0); in reset_mm_stats()
2919 * mm_state->seq is incremented after each iteration of mm_list. There in iterate_mm_list()
2928 spin_lock(&mm_list->lock); in iterate_mm_list()
2930 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); in iterate_mm_list()
2932 if (walk->max_seq <= mm_state->seq) in iterate_mm_list()
2935 if (!mm_state->head) in iterate_mm_list()
2936 mm_state->head = &mm_list->fifo; in iterate_mm_list()
2938 if (mm_state->head == &mm_list->fifo) in iterate_mm_list()
2942 mm_state->head = mm_state->head->next; in iterate_mm_list()
2943 if (mm_state->head == &mm_list->fifo) { in iterate_mm_list()
2944 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list()
2950 if (!mm_state->tail || mm_state->tail == mm_state->head) { in iterate_mm_list()
2951 mm_state->tail = mm_state->head->next; in iterate_mm_list()
2952 walk->force_scan = true; in iterate_mm_list()
2959 spin_unlock(&mm_list->lock); in iterate_mm_list()
2962 reset_bloom_filter(mm_state, walk->max_seq + 1); in iterate_mm_list()
2979 spin_lock(&mm_list->lock); in iterate_mm_list_nowalk()
2981 VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); in iterate_mm_list_nowalk()
2983 if (max_seq > mm_state->seq) { in iterate_mm_list_nowalk()
2984 mm_state->head = NULL; in iterate_mm_list_nowalk()
2985 mm_state->tail = NULL; in iterate_mm_list_nowalk()
2986 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list_nowalk()
2991 spin_unlock(&mm_list->lock); in iterate_mm_list_nowalk()
3001 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3016 * 1. The D term may discount the other two terms over time so that long-lived
3028 struct lru_gen_folio *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3029 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in read_ctrl_pos()
3031 pos->refaulted = lrugen->avg_refaulted[type][tier] + in read_ctrl_pos()
3032 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in read_ctrl_pos()
3033 pos->total = lrugen->avg_total[type][tier] + in read_ctrl_pos()
3034 atomic_long_read(&lrugen->evicted[hist][type][tier]); in read_ctrl_pos()
3036 pos->total += lrugen->protected[hist][type][tier - 1]; in read_ctrl_pos()
3037 pos->gain = gain; in read_ctrl_pos()
3043 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3045 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; in reset_ctrl_pos()
3047 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3058 sum = lrugen->avg_refaulted[type][tier] + in reset_ctrl_pos()
3059 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in reset_ctrl_pos()
3060 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); in reset_ctrl_pos()
3062 sum = lrugen->avg_total[type][tier] + in reset_ctrl_pos()
3063 atomic_long_read(&lrugen->evicted[hist][type][tier]); in reset_ctrl_pos()
3065 sum += lrugen->protected[hist][type][tier - 1]; in reset_ctrl_pos()
3066 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); in reset_ctrl_pos()
3070 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); in reset_ctrl_pos()
3071 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); in reset_ctrl_pos()
3073 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); in reset_ctrl_pos()
3084 return pv->refaulted < MIN_LRU_BATCH || in positive_ctrl_err()
3085 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= in positive_ctrl_err()
3086 (sp->refaulted + 1) * pv->total * pv->gain; in positive_ctrl_err()
3094 static int folio_update_gen(struct folio *folio, int gen) in folio_update_gen() argument
3096 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_update_gen()
3098 VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); in folio_update_gen()
3110 new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; in folio_update_gen()
3111 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_update_gen()
3113 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_update_gen()
3120 struct lru_gen_folio *lrugen = &lruvec->lrugen; in folio_inc_gen()
3121 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in folio_inc_gen()
3122 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_gen()
3127 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_inc_gen()
3139 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_gen()
3156 walk->batched++; in update_batch_size()
3158 walk->nr_pages[old_gen][type][zone] -= delta; in update_batch_size()
3159 walk->nr_pages[new_gen][type][zone] += delta; in update_batch_size()
3164 int gen, type, zone; in reset_batch_size() local
3165 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_batch_size()
3167 walk->batched = 0; in reset_batch_size()
3169 for_each_gen_type_zone(gen, type, zone) { in reset_batch_size()
3171 int delta = walk->nr_pages[gen][type][zone]; in reset_batch_size()
3176 walk->nr_pages[gen][type][zone] = 0; in reset_batch_size()
3177 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], in reset_batch_size()
3178 lrugen->nr_pages[gen][type][zone] + delta); in reset_batch_size()
3180 if (lru_gen_is_active(lruvec, gen)) in reset_batch_size()
3189 struct vm_area_struct *vma = args->vma; in should_skip_vma()
3190 struct lru_gen_mm_walk *walk = args->private; in should_skip_vma()
3201 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) in should_skip_vma()
3204 if (vma == get_gate_vma(vma->vm_mm)) in should_skip_vma()
3208 return !walk->can_swap; in should_skip_vma()
3210 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) in should_skip_vma()
3213 mapping = vma->vm_file->f_mapping; in should_skip_vma()
3218 return !walk->can_swap; in should_skip_vma()
3221 return !mapping->a_ops->read_folio; in should_skip_vma()
3225 * Some userspace memory allocators map many single-page VMAs. Instead of
3234 VMA_ITERATOR(vmi, args->mm, start); in get_next_vma()
3239 for_each_vma(vmi, args->vma) { in get_next_vma()
3240 if (end && end <= args->vma->vm_start) in get_next_vma()
3243 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) in get_next_vma()
3246 *vm_start = max(start, args->vma->vm_start); in get_next_vma()
3247 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; in get_next_vma()
3259 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pte_pfn()
3262 return -1; in get_pte_pfn()
3265 return -1; in get_pte_pfn()
3268 return -1; in get_pte_pfn()
3277 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pmd_pfn()
3280 return -1; in get_pmd_pfn()
3283 return -1; in get_pmd_pfn()
3286 return -1; in get_pmd_pfn()
3297 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pfn_folio()
3301 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
3331 struct lru_gen_mm_walk *walk = args->private; in walk_pte_range()
3332 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
3333 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
3334 int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); in walk_pte_range()
3336 pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); in walk_pte_range()
3352 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pte_range()
3354 pfn = get_pte_pfn(ptent, args->vma, addr); in walk_pte_range()
3355 if (pfn == -1) in walk_pte_range()
3359 walk->mm_stats[MM_LEAF_OLD]++; in walk_pte_range()
3363 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pte_range()
3367 if (!ptep_test_and_clear_young(args->vma, addr, pte + i)) in walk_pte_range()
3371 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pte_range()
3398 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range_locked()
3399 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
3400 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
3401 int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); in walk_pmd_range_locked()
3406 if (*first == -1) { in walk_pmd_range_locked()
3412 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); in walk_pmd_range_locked()
3414 __set_bit(i - 1, bitmap); in walk_pmd_range_locked()
3420 ptl = pmd_lockptr(args->mm, pmd); in walk_pmd_range_locked()
3434 if (pfn == -1) in walk_pmd_range_locked()
3443 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pmd_range_locked()
3450 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pmd_range_locked()
3467 *first = -1; in walk_pmd_range_locked()
3479 unsigned long first = -1; in walk_pmd_range()
3480 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range()
3481 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in walk_pmd_range()
3493 vma = args->vma; in walk_pmd_range()
3500 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3506 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
3508 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3511 walk->mm_stats[MM_LEAF_OLD]++; in walk_pmd_range()
3516 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in walk_pmd_range()
3523 walk->mm_stats[MM_NONLEAF_TOTAL]++; in walk_pmd_range()
3532 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->max_seq, pmd + i)) in walk_pmd_range()
3535 walk->mm_stats[MM_NONLEAF_FOUND]++; in walk_pmd_range()
3540 walk->mm_stats[MM_NONLEAF_ADDED]++; in walk_pmd_range()
3543 update_bloom_filter(mm_state, walk->max_seq + 1, pmd + i); in walk_pmd_range()
3546 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); in walk_pmd_range()
3559 struct lru_gen_mm_walk *walk = args->private; in walk_pud_range()
3575 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { in walk_pud_range()
3586 if (!end || !args->vma) in walk_pud_range()
3589 walk->next_addr = max(end, args->vma->vm_start); in walk_pud_range()
3591 return -EAGAIN; in walk_pud_range()
3605 walk->next_addr = FIRST_USER_ADDRESS; in walk_mm()
3610 err = -EBUSY; in walk_mm()
3613 if (walk->max_seq != max_seq) in walk_mm()
3622 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); in walk_mm()
3629 if (walk->batched) { in walk_mm()
3630 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
3632 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
3636 } while (err == -EAGAIN); in walk_mm()
3641 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in set_mm_walk()
3646 walk = &pgdat->mm_walk; in set_mm_walk()
3653 current->reclaim_state->mm_walk = walk; in set_mm_walk()
3660 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in clear_mm_walk()
3662 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); in clear_mm_walk()
3663 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); in clear_mm_walk()
3665 current->reclaim_state->mm_walk = NULL; in clear_mm_walk()
3675 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_min_seq()
3676 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in inc_min_seq()
3683 struct list_head *head = &lrugen->folios[old_gen][type][zone]; in inc_min_seq()
3694 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
3696 if (!--remaining) in inc_min_seq()
3702 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); in inc_min_seq()
3709 int gen, type, zone; in try_to_inc_min_seq() local
3711 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
3718 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { in try_to_inc_min_seq()
3719 gen = lru_gen_from_seq(min_seq[type]); in try_to_inc_min_seq()
3722 if (!list_empty(&lrugen->folios[gen][type][zone])) in try_to_inc_min_seq()
3735 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); in try_to_inc_min_seq()
3739 if (min_seq[type] == lrugen->min_seq[type]) in try_to_inc_min_seq()
3743 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); in try_to_inc_min_seq()
3756 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_max_seq()
3758 if (max_seq < READ_ONCE(lrugen->max_seq)) in inc_max_seq()
3761 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
3765 success = max_seq == lrugen->max_seq; in inc_max_seq()
3769 for (type = ANON_AND_FILE - 1; type >= 0; type--) { in inc_max_seq()
3778 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
3789 prev = lru_gen_from_seq(lrugen->max_seq - 1); in inc_max_seq()
3790 next = lru_gen_from_seq(lrugen->max_seq + 1); in inc_max_seq()
3795 long delta = lrugen->nr_pages[prev][type][zone] - in inc_max_seq()
3796 lrugen->nr_pages[next][type][zone]; in inc_max_seq()
3802 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
3809 WRITE_ONCE(lrugen->timestamps[next], jiffies); in inc_max_seq()
3811 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); in inc_max_seq()
3813 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
3824 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
3827 VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq)); in try_to_inc_max_seq()
3833 if (max_seq <= READ_ONCE(mm_state->seq)) in try_to_inc_max_seq()
3853 walk->lruvec = lruvec; in try_to_inc_max_seq()
3854 walk->max_seq = max_seq; in try_to_inc_max_seq()
3855 walk->can_swap = can_swap; in try_to_inc_max_seq()
3856 walk->force_scan = force_scan; in try_to_inc_max_seq()
3878 int gen, type, zone; in lruvec_is_sizable() local
3881 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lruvec_is_sizable()
3890 gen = lru_gen_from_seq(seq); in lruvec_is_sizable()
3893 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lruvec_is_sizable()
3898 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total; in lruvec_is_sizable()
3904 int gen; in lruvec_is_reclaimable() local
3910 gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); in lruvec_is_reclaimable()
3911 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lruvec_is_reclaimable()
3934 /* check the order to exclude compaction-induced reclaim */ in lru_gen_age_node()
3935 if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY) in lru_gen_age_node()
3957 .gfp_mask = sc->gfp_mask, in lru_gen_age_node()
3984 pte_t *pte = pvmw->pte; in lru_gen_look_around()
3985 unsigned long addr = pvmw->address; in lru_gen_look_around()
3986 struct vm_area_struct *vma = pvmw->vma; in lru_gen_look_around()
3987 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around()
3996 lockdep_assert_held(pvmw->ptl); in lru_gen_look_around()
3999 if (spin_is_contended(pvmw->ptl)) in lru_gen_look_around()
4003 if (vma->vm_flags & VM_SPECIAL) in lru_gen_look_around()
4007 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; in lru_gen_look_around()
4009 start = max(addr & PMD_MASK, vma->vm_start); in lru_gen_look_around()
4010 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; in lru_gen_look_around()
4012 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { in lru_gen_look_around()
4013 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4015 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4016 start = end - MIN_LRU_BATCH * PAGE_SIZE; in lru_gen_look_around()
4018 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; in lru_gen_look_around()
4029 pte -= (addr - start) / PAGE_SIZE; in lru_gen_look_around()
4036 if (pfn == -1) in lru_gen_look_around()
4076 update_bloom_filter(mm_state, max_seq, pvmw->pmd); in lru_gen_look_around()
4100 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4102 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_rotate_memcg()
4105 new = old = lruvec->lrugen.gen; in lru_gen_rotate_memcg()
4113 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4115 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4119 WRITE_ONCE(lruvec->lrugen.seg, seg); in lru_gen_rotate_memcg()
4120 WRITE_ONCE(lruvec->lrugen.gen, new); in lru_gen_rotate_memcg()
4122 hlist_nulls_del_rcu(&lruvec->lrugen.list); in lru_gen_rotate_memcg()
4125 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4127 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4129 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4130 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4132 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4133 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4135 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4142 int gen; in lru_gen_online_memcg() local
4150 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4152 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_online_memcg()
4154 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4156 lruvec->lrugen.gen = gen; in lru_gen_online_memcg()
4158 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4159 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4161 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4178 int gen; in lru_gen_release_memcg() local
4185 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4187 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) in lru_gen_release_memcg()
4190 gen = lruvec->lrugen.gen; in lru_gen_release_memcg()
4192 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); in lru_gen_release_memcg()
4193 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4195 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4196 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4198 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4207 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) in lru_gen_soft_reclaim()
4221 int gen = folio_lru_gen(folio); in sort_folio() local
4227 struct lru_gen_folio *lrugen = &lruvec->lrugen; in sort_folio()
4229 VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); in sort_folio()
4251 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { in sort_folio()
4252 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4258 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in sort_folio()
4260 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4261 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4263 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], in sort_folio()
4264 lrugen->protected[hist][type][tier - 1] + delta); in sort_folio()
4269 if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { in sort_folio()
4270 gen = folio_inc_gen(lruvec, folio, false); in sort_folio()
4271 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4278 gen = folio_inc_gen(lruvec, folio, true); in sort_folio()
4279 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4291 if (!(sc->gfp_mask & __GFP_IO) && in isolate_folio()
4308 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); in isolate_folio()
4324 int gen; in scan_folios() local
4331 struct lru_gen_folio *lrugen = &lruvec->lrugen; in scan_folios()
4339 gen = lru_gen_from_seq(lrugen->min_seq[type]); in scan_folios()
4341 for (i = MAX_NR_ZONES; i > 0; i--) { in scan_folios()
4344 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; in scan_folios()
4345 struct list_head *head = &lrugen->folios[gen][type][zone]; in scan_folios()
4361 list_add(&folio->lru, list); in scan_folios()
4364 list_move(&folio->lru, &moved); in scan_folios()
4368 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) in scan_folios()
4390 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, in scan_folios()
4418 return tier - 1; in get_tier_idx()
4425 int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; in get_type_to_scan()
4444 *tier_idx = tier - 1; in get_type_to_scan()
4455 int tier = -1; in isolate_folios()
4483 tier = -1; in isolate_folios()
4507 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4516 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4522 sc->nr_reclaimed += reclaimed; in evict_folios()
4523 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in evict_folios()
4524 scanned, reclaimed, &stat, sc->priority, in evict_folios()
4529 list_del(&folio->lru); in evict_folios()
4546 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, in evict_folios()
4552 list_move(&folio->lru, &clean); in evict_folios()
4553 sc->nr_scanned -= folio_nr_pages(folio); in evict_folios()
4556 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4560 walk = current->reclaim_state->mm_walk; in evict_folios()
4561 if (walk && walk->batched) in evict_folios()
4570 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4589 int gen, type, zone; in should_run_aging() local
4593 struct lru_gen_folio *lrugen = &lruvec->lrugen; in should_run_aging()
4609 gen = lru_gen_from_seq(seq); in should_run_aging()
4612 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in should_run_aging()
4628 *nr_to_scan = total >> sc->priority; in should_run_aging()
4664 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) in get_nr_to_scan()
4665 return -1; in get_nr_to_scan()
4671 if (sc->priority == DEF_PRIORITY) in get_nr_to_scan()
4675 return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0; in get_nr_to_scan()
4687 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) in should_abort_scan()
4690 /* check the order to exclude compaction-induced reclaim */ in should_abort_scan()
4691 if (!current_is_kswapd() || sc->order) in should_abort_scan()
4697 for (i = 0; i <= sc->reclaim_idx; i++) { in should_abort_scan()
4698 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
4701 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) in should_abort_scan()
4716 if (swappiness && !(sc->gfp_mask & __GFP_IO)) in try_to_shrink_lruvec()
4747 unsigned long scanned = sc->nr_scanned; in shrink_one()
4748 unsigned long reclaimed = sc->nr_reclaimed; in shrink_one()
4759 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) in shrink_one()
4767 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
4769 if (!sc->proactive) in shrink_one()
4770 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, in shrink_one()
4771 sc->nr_reclaimed - reclaimed); in shrink_one()
4782 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? in shrink_one()
4789 int gen; in shrink_many() local
4797 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
4805 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
4814 if (gen != READ_ONCE(lrugen->gen)) in shrink_many()
4847 if (gen != get_nulls_value(pos)) in shrink_many()
4861 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); in lru_gen_shrink_lruvec()
4867 set_mm_walk(NULL, sc->proactive); in lru_gen_shrink_lruvec()
4883 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) in set_initial_priority()
4894 /* round down reclaimable and round up sc->nr_to_reclaim */ in set_initial_priority()
4895 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); in set_initial_priority()
4897 sc->priority = clamp(priority, 0, DEF_PRIORITY); in set_initial_priority()
4903 unsigned long reclaimed = sc->nr_reclaimed; in lru_gen_shrink_node()
4912 if (!sc->may_writepage || !sc->may_unmap) in lru_gen_shrink_node()
4919 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
4924 sc->nr_reclaimed = 0; in lru_gen_shrink_node()
4927 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
4932 sc->nr_reclaimed += reclaimed; in lru_gen_shrink_node()
4939 pgdat->kswapd_failures = 0; in lru_gen_shrink_node()
4948 struct lru_gen_folio *lrugen = &lruvec->lrugen; in state_is_valid()
4950 if (lrugen->enabled) { in state_is_valid()
4954 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
4958 int gen, type, zone; in state_is_valid() local
4960 for_each_gen_type_zone(gen, type, zone) { in state_is_valid()
4961 if (!list_empty(&lrugen->folios[gen][type][zone])) in state_is_valid()
4977 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
4986 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
4992 if (!--remaining) in fill_evictable()
5002 int gen, type, zone; in drain_evictable() local
5005 for_each_gen_type_zone(gen, type, zone) { in drain_evictable()
5006 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
5021 if (!--remaining) in drain_evictable()
5055 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5060 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5063 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5065 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5068 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5089 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5096 return -EINVAL; in min_ttl_ms_store()
5121 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5131 caps = -1; in enabled_store()
5133 return -EINVAL; in enabled_store()
5171 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); in lru_gen_seq_start()
5172 if (!m->private) in lru_gen_seq_start()
5173 return ERR_PTR(-ENOMEM); in lru_gen_seq_start()
5180 if (!nr_to_skip--) in lru_gen_seq_start()
5193 kvfree(m->private); in lru_gen_seq_stop()
5194 m->private = NULL; in lru_gen_seq_stop()
5199 int nid = lruvec_pgdat(v)->node_id; in lru_gen_seq_next()
5223 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5234 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); in lru_gen_seq_show_full()
5235 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); in lru_gen_seq_show_full()
5238 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); in lru_gen_seq_show_full()
5239 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); in lru_gen_seq_show_full()
5241 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); in lru_gen_seq_show_full()
5260 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5263 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5271 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5275 bool full = !debugfs_real_fops(m->file)->write; in lru_gen_seq_show()
5277 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5278 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5284 const char *path = memcg ? m->private : ""; in lru_gen_seq_show()
5288 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); in lru_gen_seq_show()
5298 seq = max_seq - MAX_NR_GENS + 1; in lru_gen_seq_show()
5304 int gen = lru_gen_from_seq(seq); in lru_gen_seq_show() local
5305 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5307 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); in lru_gen_seq_show()
5314 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lru_gen_seq_show()
5345 return -EINVAL; in run_aging()
5347 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) in run_aging()
5348 return -ERANGE; in run_aging()
5361 return -EINVAL; in run_eviction()
5363 sc->nr_reclaimed = 0; in run_eviction()
5371 if (sc->nr_reclaimed >= nr_to_reclaim) in run_eviction()
5380 return -EINTR; in run_eviction()
5387 int err = -EINVAL; in run_cmd()
5391 return -EINVAL; in run_cmd()
5403 return -EINVAL; in run_cmd()
5420 case '-': in run_cmd()
5430 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5438 int err = -EINVAL; in lru_gen_seq_write()
5443 .reclaim_idx = MAX_NR_ZONES - 1, in lru_gen_seq_write()
5449 return -ENOMEM; in lru_gen_seq_write()
5453 return -EFAULT; in lru_gen_seq_write()
5460 err = -ENOMEM; in lru_gen_seq_write()
5474 unsigned int swappiness = -1; in lru_gen_seq_write()
5475 unsigned long opt = -1; in lru_gen_seq_write()
5484 err = -EINVAL; in lru_gen_seq_write()
5531 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
5535 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
5542 int gen, type, zone; in lru_gen_init_lruvec() local
5543 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
5546 lrugen->max_seq = MIN_NR_GENS + 1; in lru_gen_init_lruvec()
5547 lrugen->enabled = lru_gen_enabled(); in lru_gen_init_lruvec()
5550 lrugen->timestamps[i] = jiffies; in lru_gen_init_lruvec()
5552 for_each_gen_type_zone(gen, type, zone) in lru_gen_init_lruvec()
5553 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); in lru_gen_init_lruvec()
5556 mm_state->seq = MIN_NR_GENS; in lru_gen_init_lruvec()
5568 INIT_LIST_HEAD(&mm_list->fifo); in lru_gen_init_memcg()
5569 spin_lock_init(&mm_list->lock); in lru_gen_init_memcg()
5578 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); in lru_gen_exit_memcg()
5584 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
5585 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
5587 lruvec->lrugen.list.next = LIST_POISON1; in lru_gen_exit_memcg()
5593 bitmap_free(mm_state->filters[i]); in lru_gen_exit_memcg()
5594 mm_state->filters[i] = NULL; in lru_gen_exit_memcg()
5642 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
5668 sc->priority == DEF_PRIORITY); in shrink_lruvec()
5679 nr[lru] -= nr_to_scan; in shrink_lruvec()
5731 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5732 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5733 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5736 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5737 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5738 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5741 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
5756 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && in in_reclaim_compaction()
5757 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
5758 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
5765 * Reclaim/compaction is used for high-order allocation requests. It reclaims
5766 * order-0 pages before compacting the zone. should_continue_reclaim() returns
5789 * first, by assuming that zero delta of sc->nr_scanned means full LRU in should_continue_reclaim()
5791 * where always a non-zero amount of pages were scanned. in should_continue_reclaim()
5797 for (z = 0; z <= sc->reclaim_idx; z++) { in should_continue_reclaim()
5798 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
5803 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in should_continue_reclaim()
5804 sc->reclaim_idx, 0)) in should_continue_reclaim()
5807 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) in should_continue_reclaim()
5815 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
5817 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
5825 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; in shrink_node_memcgs()
5835 * This loop can become CPU-bound when target memcgs in shrink_node_memcgs()
5836 * aren't eligible for reclaim - either because they in shrink_node_memcgs()
5857 if (!sc->memcg_low_reclaim) { in shrink_node_memcgs()
5858 sc->memcg_low_skipped = 1; in shrink_node_memcgs()
5864 reclaimed = sc->nr_reclaimed; in shrink_node_memcgs()
5865 scanned = sc->nr_scanned; in shrink_node_memcgs()
5869 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
5870 sc->priority); in shrink_node_memcgs()
5873 if (!sc->proactive) in shrink_node_memcgs()
5874 vmpressure(sc->gfp_mask, memcg, false, in shrink_node_memcgs()
5875 sc->nr_scanned - scanned, in shrink_node_memcgs()
5876 sc->nr_reclaimed - reclaimed); in shrink_node_memcgs()
5892 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
5895 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
5897 nr_reclaimed = sc->nr_reclaimed; in shrink_node()
5898 nr_scanned = sc->nr_scanned; in shrink_node()
5906 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; in shrink_node()
5909 if (!sc->proactive) in shrink_node()
5910 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
5911 sc->nr_scanned - nr_scanned, nr_node_reclaimed); in shrink_node()
5919 * it implies that the long-lived page allocation rate in shrink_node()
5934 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) in shrink_node()
5935 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
5938 if (sc->nr.unqueued_dirty == sc->nr.file_taken) in shrink_node()
5939 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
5948 if (sc->nr.immediate) in shrink_node()
5959 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { in shrink_node()
5961 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); in shrink_node()
5964 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); in shrink_node()
5974 !sc->hibernation_mode && in shrink_node()
5975 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || in shrink_node()
5976 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) in shrink_node()
5989 pgdat->kswapd_failures = 0; in shrink_node()
5993 * Returns true if compaction should go ahead for a costly-order request, or
6001 if (!gfp_compaction_allowed(sc->gfp_mask)) in compaction_ready()
6005 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in compaction_ready()
6006 sc->reclaim_idx, 0)) in compaction_ready()
6010 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) in compaction_ready()
6022 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
6024 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
6033 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { in consider_reclaim_throttle()
6036 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6053 if (sc->priority == 1 && !sc->nr_reclaimed) in consider_reclaim_throttle()
6058 * This is the direct reclaim path, for page-allocating processes. We only
6080 orig_mask = sc->gfp_mask; in shrink_zones()
6082 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
6083 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
6087 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6101 * non-zero order, only frequent costly order in shrink_zones()
6107 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
6109 sc->compaction_ready = true; in shrink_zones()
6119 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6129 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
6130 sc->order, sc->gfp_mask, in shrink_zones()
6132 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
6133 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
6138 first_pgdat = zone->zone_pgdat; in shrink_zones()
6141 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6143 last_pgdat = zone->zone_pgdat; in shrink_zones()
6144 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
6154 sc->gfp_mask = orig_mask; in shrink_zones()
6167 target_lruvec->refaults[WORKINGSET_ANON] = refaults; in snapshot_refaults()
6169 target_lruvec->refaults[WORKINGSET_FILE] = refaults; in snapshot_refaults()
6179 * high - the zone may be full of dirty or under-writeback pages, which this
6191 int initial_priority = sc->priority; in do_try_to_free_pages()
6199 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); in do_try_to_free_pages()
6202 if (!sc->proactive) in do_try_to_free_pages()
6203 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
6204 sc->priority); in do_try_to_free_pages()
6205 sc->nr_scanned = 0; in do_try_to_free_pages()
6208 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
6211 if (sc->compaction_ready) in do_try_to_free_pages()
6218 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
6219 sc->may_writepage = 1; in do_try_to_free_pages()
6220 } while (--sc->priority >= 0); in do_try_to_free_pages()
6223 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
6224 sc->nodemask) { in do_try_to_free_pages()
6225 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
6227 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
6229 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
6234 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6235 zone->zone_pgdat); in do_try_to_free_pages()
6236 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6242 if (sc->nr_reclaimed) in do_try_to_free_pages()
6243 return sc->nr_reclaimed; in do_try_to_free_pages()
6246 if (sc->compaction_ready) in do_try_to_free_pages()
6258 if (sc->skipped_deactivate) { in do_try_to_free_pages()
6259 sc->priority = initial_priority; in do_try_to_free_pages()
6260 sc->force_deactivate = 1; in do_try_to_free_pages()
6261 sc->skipped_deactivate = 0; in do_try_to_free_pages()
6266 if (sc->memcg_low_skipped) { in do_try_to_free_pages()
6267 sc->priority = initial_priority; in do_try_to_free_pages()
6268 sc->force_deactivate = 0; in do_try_to_free_pages()
6269 sc->memcg_low_reclaim = 1; in do_try_to_free_pages()
6270 sc->memcg_low_skipped = 0; in do_try_to_free_pages()
6285 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6289 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
6307 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6308 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6309 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6311 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
6340 if (current->flags & PF_KTHREAD) in throttle_direct_reclaim()
6370 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
6386 * transaction in the case of a filesystem like ext[3|4]. In this case, in throttle_direct_reclaim()
6392 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6396 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6463 .reclaim_idx = MAX_NR_ZONES - 1, in mem_cgroup_shrink_node()
6467 WARN_ON_ONCE(!current->reclaim_state); in mem_cgroup_shrink_node()
6502 .reclaim_idx = MAX_NR_ZONES - 1, in try_to_free_mem_cgroup_pages()
6563 * Check for watermark boosts top-down as the higher zones in pgdat_watermark_boosted()
6569 for (i = highest_zoneidx; i >= 0; i--) { in pgdat_watermark_boosted()
6570 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
6574 if (zone->watermark_boost) in pgdat_watermark_boosted()
6588 unsigned long mark = -1; in pgdat_balanced()
6592 * Check watermarks bottom-up as lower zones are more likely to in pgdat_balanced()
6596 zone = pgdat->node_zones + i; in pgdat_balanced()
6611 * need balancing by definition. This can happen if a zone-restricted in pgdat_balanced()
6614 if (mark == -1) in pgdat_balanced()
6625 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6626 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6627 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
6628 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
6653 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
6654 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
6657 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
6683 sc->nr_to_reclaim = 0; in kswapd_shrink_node()
6684 for (z = 0; z <= sc->reclaim_idx; z++) { in kswapd_shrink_node()
6685 zone = pgdat->node_zones + z; in kswapd_shrink_node()
6689 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
6700 * high-order allocations. If twice the allocation size has been in kswapd_shrink_node()
6701 * reclaimed then recheck watermarks only at order-0 to prevent in kswapd_shrink_node()
6702 * excessive reclaim. Assume that a process requested a high-order in kswapd_shrink_node()
6705 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) in kswapd_shrink_node()
6706 sc->order = 0; in kswapd_shrink_node()
6708 return sc->nr_scanned >= sc->nr_to_reclaim; in kswapd_shrink_node()
6719 zone = pgdat->node_zones + i; in update_reclaim_active()
6725 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6727 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6750 * kswapd scans the zones in the highmem->normal->dma direction. It skips
6785 zone = pgdat->node_zones + i; in balance_pgdat()
6789 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
6790 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
6808 * purpose -- on 64-bit systems it is expected that in balance_pgdat()
6809 * buffer_heads are stripped during active rotation. On 32-bit in balance_pgdat()
6816 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { in balance_pgdat()
6817 zone = pgdat->node_zones + i; in balance_pgdat()
6831 * re-evaluate if boosting is required when kswapd next wakes. in balance_pgdat()
6848 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) in balance_pgdat()
6853 * intent is to relieve pressure not issue sub-optimal IO in balance_pgdat()
6871 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
6894 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
6896 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
6909 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; in balance_pgdat()
6910 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); in balance_pgdat()
6921 sc.priority--; in balance_pgdat()
6925 pgdat->kswapd_failures++; in balance_pgdat()
6939 zone = pgdat->node_zones + i; in balance_pgdat()
6940 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
6941 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
6942 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
6967 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
6976 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
6990 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7022 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7026 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7027 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7030 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7031 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7040 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7047 * per-cpu vmstat threshold while kswapd is awake and restore in kswapd_try_to_sleep()
7062 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7075 * If there are applications that are active memory-allocators
7081 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; in kswapd()
7084 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
7101 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; in kswapd()
7104 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7105 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7106 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7110 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7119 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7122 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7123 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7137 * Reclaim begins at the requested order but if a high-order in kswapd()
7139 * order-0. If that happens, kswapd will consider sleeping in kswapd()
7144 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7152 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); in kswapd()
7158 * A zone is low on free memory or too fragmented for high-order memory. If
7176 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7177 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7180 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7182 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7183 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7185 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7189 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7194 * fragmented for high-order allocations. Wake up kcompactd in wakeup_kswapd()
7204 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7206 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
7211 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7223 .reclaim_idx = MAX_NR_ZONES - 1, in shrink_all_memory()
7249 * This kswapd start function will be called by init and node-hot-add.
7256 if (!pgdat->kswapd) { in kswapd_run()
7257 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
7258 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7261 nid, PTR_ERR(pgdat->kswapd)); in kswapd_run()
7263 pgdat->kswapd = NULL; in kswapd_run()
7279 kswapd = pgdat->kswapd; in kswapd_stop()
7282 pgdat->kswapd = NULL; in kswapd_stop()
7303 * If non-zero call node_reclaim when the number of free pages falls below
7338 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; in node_unmapped_file_pages()
7366 return nr_pagecache_reclaimable - delta; in node_pagecache_reclaimable()
7390 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
7403 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
7404 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
7411 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __node_reclaim()
7439 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
7441 pgdat->min_slab_pages) in node_reclaim()
7447 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) in node_reclaim()
7456 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
7459 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
7463 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
7473 * check_move_unevictable_folios - Move evictable folios to appropriate zone
7488 for (i = 0; i < fbatch->nr; i++) { in check_move_unevictable_folios()
7489 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios()