Lines Matching +full:oc +full:- +full:delay +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
28 #include <linux/cgroup-defs.h>
39 #include <linux/page-flags.h>
40 #include <linux/backing-dev.h>
70 #include "memcontrol-v1.h"
105 (current->flags & PF_EXITING); in task_is_dying()
113 return &memcg->vmpressure; in memcg_to_vmpressure()
144 * objcg->nr_charged_bytes can't have an arbitrary byte value. in obj_cgroup_release()
148 * 1) CPU0: objcg == stock->cached_objcg in obj_cgroup_release()
153 * objcg->nr_charged_bytes = PAGE_SIZE - 92 in obj_cgroup_release()
155 * 92 bytes are added to stock->nr_bytes in obj_cgroup_release()
157 * 92 bytes are added to objcg->nr_charged_bytes in obj_cgroup_release()
162 nr_bytes = atomic_read(&objcg->nr_charged_bytes); in obj_cgroup_release()
163 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); in obj_cgroup_release()
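The matched comment at lines 144-163 explains why objcg->nr_charged_bytes holds a whole number of pages by the time obj_cgroup_release() runs: a sub-page remainder split between two CPUs recombines into a full page. Below is a minimal userspace model of that arithmetic only; the variable names, the 4096-byte page and the 92-byte object are taken from the comment's own example and are not kernel code.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long nr_charged_bytes = 0;     /* models objcg->nr_charged_bytes */
        unsigned long obj = 92;                 /* the 92-byte object from the comment */

        /* CPU1: the small allocation charges a whole page, the rest stays in its stock */
        unsigned long cpu1_stock = PAGE_SIZE - obj;

        /* CPU1: the stock is flushed for another memcg, the leftover goes to the objcg */
        nr_charged_bytes += cpu1_stock;

        /* CPU0: the object is freed, its bytes land in CPU0's stock ... */
        unsigned long cpu0_stock = obj;
        /* ... and are eventually flushed to the objcg as well */
        nr_charged_bytes += cpu0_stock;

        /* by release time the remainders have recombined into whole pages */
        assert((nr_charged_bytes & (PAGE_SIZE - 1)) == 0);
        printf("nr_charged_bytes = %lu (%lu page)\n",
               nr_charged_bytes, nr_charged_bytes / PAGE_SIZE);
        return 0;
}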
170 list_del(&objcg->list); in obj_cgroup_release()
186 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, in obj_cgroup_alloc()
192 INIT_LIST_HEAD(&objcg->list); in obj_cgroup_alloc()
201 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
206 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
208 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
209 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
211 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
215 percpu_ref_kill(&objcg->refcnt); in memcg_reparent_objcgs()
231 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
248 return &memcg->css; in mem_cgroup_css_from_folio()
252 * page_cgroup_ino - return inode number of the memcg a page is charged to
273 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
276 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
372 /* Non-hierarchical (CPU aggregated) state */
393 x = READ_ONCE(pn->lruvec_stats->state[i]); in lruvec_page_state()
416 x = READ_ONCE(pn->lruvec_stats->state_local[i]); in lruvec_page_state_local()
514 /* Non-hierarchical (CPU aggregated) page state & events */
572 return atomic64_read(&vmstats->stats_updates) > in memcg_vmstats_needs_flush()
585 cgroup_rstat_updated(memcg->css.cgroup, cpu); in memcg_rstat_updated()
586 statc = this_cpu_ptr(memcg->vmstats_percpu); in memcg_rstat_updated()
587 for (; statc; statc = statc->parent) { in memcg_rstat_updated()
588 stats_updates = READ_ONCE(statc->stats_updates) + abs(val); in memcg_rstat_updated()
589 WRITE_ONCE(statc->stats_updates, stats_updates); in memcg_rstat_updated()
594 * If @memcg is already flush-able, increasing stats_updates is in memcg_rstat_updated()
597 if (!memcg_vmstats_needs_flush(statc->vmstats)) in memcg_rstat_updated()
599 &statc->vmstats->stats_updates); in memcg_rstat_updated()
600 WRITE_ONCE(statc->stats_updates, 0); in memcg_rstat_updated()
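The memcg_rstat_updated() hits above (lines 585-600) accumulate the magnitude of each statistics update in a per-CPU counter and only fold it into the shared atomic once a batch threshold is reached; memcg_vmstats_needs_flush() (line 572) then compares that atomic against a flush threshold. The sketch below models the batching on a single level with made-up thresholds (the kernel walks the parent chain and scales the flush threshold by the number of online CPUs); it is an illustration, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

#define CHARGE_BATCH    64                      /* per-CPU batching threshold (stands in for MEMCG_CHARGE_BATCH) */
#define FLUSH_THRESHOLD (CHARGE_BATCH * 4)      /* pretend 4 CPUs */

static long pending;            /* models the per-CPU statc->stats_updates */
static long stats_updates;      /* models the shared atomic vmstats->stats_updates */

static int needs_flush(void)
{
        return stats_updates > FLUSH_THRESHOLD;
}

static void rstat_updated(int val)
{
        pending += labs(val);
        if (pending < CHARGE_BATCH)
                return;         /* cheap path: no shared-cacheline traffic */

        if (!needs_flush())
                stats_updates += pending;       /* atomic64_add() in the kernel */
        pending = 0;
}

int main(void)
{
        for (int i = 0; i < 1000; i++) {
                rstat_updated(+1);
                if (needs_flush()) {
                        printf("flush after %d updates\n", i + 1);
                        stats_updates = 0;      /* a flush resets the counter */
                }
        }
        return 0;
}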
606 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats); in __mem_cgroup_flush_stats()
608 trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates), in __mem_cgroup_flush_stats()
617 cgroup_rstat_flush(memcg->css.cgroup); in __mem_cgroup_flush_stats()
621 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
651 * in latency-sensitive paths is as cheap as possible. in flush_memcg_stats_dwork()
665 x = READ_ONCE(memcg->vmstats->state[i]); in memcg_page_state()
677 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
690 * __mod_memcg_state - update cgroup memory statistics
692 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
706 __this_cpu_add(memcg->vmstats_percpu->state[i], val); in __mod_memcg_state()
722 x = READ_ONCE(memcg->vmstats->state_local[i]); in memcg_page_state_local()
743 memcg = pn->memcg; in __mod_memcg_lruvec_state()
747 * update their counter from in-interrupt context. For these two in __mod_memcg_lruvec_state()
765 __this_cpu_add(memcg->vmstats_percpu->state[i], val); in __mod_memcg_lruvec_state()
768 __this_cpu_add(pn->lruvec_stats_percpu->state[i], val); in __mod_memcg_lruvec_state()
777 * __mod_lruvec_state - update lruvec memory statistics
784 * change of state at this level: per-node, per-cgroup, per-lruvec.
831 * when we free the slab object, we need to update the per-memcg in __mod_lruvec_kmem_state()
844 * __count_memcg_events - account VM events in a cgroup
861 __this_cpu_add(memcg->vmstats_percpu->events[i], count); in __count_memcg_events()
874 return READ_ONCE(memcg->vmstats->events[i]); in memcg_events()
885 return READ_ONCE(memcg->vmstats->events_local[i]); in memcg_events_local()
892 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
908 return current->active_memcg; in active_memcg()
915 * Obtain a reference on mm->memcg and returns it if successful. If mm
918 * 2) current->mm->memcg, if available
942 css_get(&memcg->css); in get_mem_cgroup_from_mm()
945 mm = current->mm; in get_mem_cgroup_from_mm()
952 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
955 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
962 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
974 if (!css_tryget(&memcg->css)) { in get_mem_cgroup_from_current()
983 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
994 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) in get_mem_cgroup_from_folio()
1001 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1007 * @root itself, or %NULL after a full round-trip.
1011 * to cancel a hierarchy walk before the round-trip is complete.
1038 int nid = reclaim->pgdat->node_id; in mem_cgroup_iter()
1040 iter = &root->nodeinfo[nid]->iter; in mem_cgroup_iter()
1041 gen = atomic_read(&iter->generation); in mem_cgroup_iter()
1048 reclaim->generation = gen; in mem_cgroup_iter()
1049 else if (reclaim->generation != gen) in mem_cgroup_iter()
1052 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1056 css = pos ? &pos->css : NULL; in mem_cgroup_iter()
1058 while ((css = css_next_descendant_pre(css, &root->css))) { in mem_cgroup_iter()
1064 if (css == &root->css || css_tryget(css)) in mem_cgroup_iter()
1076 if (cmpxchg(&iter->position, pos, next) != pos) { in mem_cgroup_iter()
1077 if (css && css != &root->css) in mem_cgroup_iter()
1083 atomic_inc(&iter->generation); in mem_cgroup_iter()
1088 * the hierarchy - make sure they see at least in mem_cgroup_iter()
1099 css_put(&prev->css); in mem_cgroup_iter()
1105 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1115 css_put(&prev->css); in mem_cgroup_iter_break()
1126 mz = from->nodeinfo[nid]; in __invalidate_reclaim_iterators()
1127 iter = &mz->iter; in __invalidate_reclaim_iterators()
1128 cmpxchg(&iter->position, dead_memcg, NULL); in __invalidate_reclaim_iterators()
1143 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1154 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1160 * descendants and calls @fn for each task. If @fn returns a non-zero
1178 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); in mem_cgroup_scan_tasks()
1210 * folio_lruvec_lock - Lock the lruvec for a folio.
1214 * - folio locked
1215 * - folio_test_lru false
1216 * - folio frozen (refcount of 0)
1224 spin_lock(&lruvec->lru_lock); in folio_lruvec_lock()
1231 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1235 * - folio locked
1236 * - folio_test_lru false
1237 * - folio frozen (refcount of 0)
1246 spin_lock_irq(&lruvec->lru_lock); in folio_lruvec_lock_irq()
1253 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1258 * - folio locked
1259 * - folio_test_lru false
1260 * - folio frozen (refcount of 0)
1270 spin_lock_irqsave(&lruvec->lru_lock, *flags); in folio_lruvec_lock_irqsave()
1277 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1297 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1315 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1327 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1328 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1330 margin = limit - count; in mem_cgroup_margin()
1333 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1334 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1336 margin = min(margin, limit - count); in mem_cgroup_margin()
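The mem_cgroup_margin() fragments (lines 1327-1336) compute how much more can be charged: the distance from current usage to memory.max, further clamped by the memsw counter when memory+swap accounting is enabled. A standalone restatement with plain integers in place of the page counters:

#include <stdio.h>

/* chargeable headroom: min over the active counters of (limit - usage), 0 if over */
static unsigned long margin(unsigned long mem_usage, unsigned long mem_max,
                            unsigned long memsw_usage, unsigned long memsw_max,
                            int do_memsw_account)
{
        unsigned long m = mem_usage < mem_max ? mem_max - mem_usage : 0;

        if (m && do_memsw_account) {
                unsigned long msw = memsw_usage < memsw_max ? memsw_max - memsw_usage : 0;

                if (msw < m)
                        m = msw;
        }
        return m;
}

int main(void)
{
        /* 100 pages below memory.max but only 10 below memsw.max -> margin is 10 */
        printf("%lu\n", margin(900, 1000, 1990, 2000, 1));
        return 0;
}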
1486 * 1) generic big picture -> specifics and details in memcg_stat_format()
1487 * 2) reflecting userspace activity -> reflecting kernel heuristics in memcg_stat_format()
1560 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1585 memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]); in mem_cgroup_print_oom_meminfo()
1587 memory_failcnt = memcg->memory.failcnt; in mem_cgroup_print_oom_meminfo()
1590 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1591 K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt); in mem_cgroup_print_oom_meminfo()
1594 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1595 K((u64)READ_ONCE(memcg->swap.max)), in mem_cgroup_print_oom_meminfo()
1596 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in mem_cgroup_print_oom_meminfo()
1600 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1601 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1603 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1604 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1609 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1621 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1626 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1632 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
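mem_cgroup_get_max() (lines 1621-1632) estimates the largest footprint the group can reach: memory.max plus whatever swap it may still use, bounded by the memsw limit on cgroup1 or by swap.max on cgroup2, and in either case by the swap actually present. A rough userspace restatement under those assumptions; the parameter names are invented.

#include <stdio.h>

static unsigned long group_max(unsigned long memory_max, unsigned long memsw_max,
                               unsigned long swap_max, unsigned long total_swap_pages,
                               int legacy_memsw, int swappiness)
{
        unsigned long max = memory_max;

        if (!swappiness)
                return max;             /* swap effectively disabled for this group */

        if (legacy_memsw) {
                /* cgroup1: memsw caps memory+swap, so extra swap is memsw.max - memory.max */
                unsigned long swap = memsw_max - memory_max;

                max += swap < total_swap_pages ? swap : total_swap_pages;
        } else {
                /* cgroup2: swap has its own limit */
                max += swap_max < total_swap_pages ? swap_max : total_swap_pages;
        }
        return max;
}

int main(void)
{
        printf("%lu\n", group_max(1000, 1500, ~0UL, 200, 1, 60));       /* 1000 + min(500, 200) */
        return 0;
}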
1640 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1646 struct oom_control oc = { in mem_cgroup_out_of_memory() local
1665 ret = task_is_dying() || out_of_memory(&oc); in mem_cgroup_out_of_memory()
1696 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1698 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1701 * by killing all OOM-killable tasks belonging to it.
1703 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1734 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
1737 if (READ_ONCE(memcg->oom_group)) in mem_cgroup_get_oom_group()
1745 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
1755 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
1812 stock_pages = READ_ONCE(stock->nr_pages); in consume_stock()
1813 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) { in consume_stock()
1814 WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages); in consume_stock()
1828 unsigned int stock_pages = READ_ONCE(stock->nr_pages); in drain_stock()
1829 struct mem_cgroup *old = READ_ONCE(stock->cached); in drain_stock()
1835 page_counter_uncharge(&old->memory, stock_pages); in drain_stock()
1837 page_counter_uncharge(&old->memsw, stock_pages); in drain_stock()
1839 WRITE_ONCE(stock->nr_pages, 0); in drain_stock()
1842 css_put(&old->css); in drain_stock()
1843 WRITE_ONCE(stock->cached, NULL); in drain_stock()
1862 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_stock()
1878 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ in __refill_stock()
1880 css_get(&memcg->css); in __refill_stock()
1881 WRITE_ONCE(stock->cached, memcg); in __refill_stock()
1883 stock_pages = READ_ONCE(stock->nr_pages) + nr_pages; in __refill_stock()
1884 WRITE_ONCE(stock->nr_pages, stock_pages); in __refill_stock()
1901 page_counter_uncharge(&memcg->memory, nr_pages); in refill_stock()
1903 page_counter_uncharge(&memcg->memsw, nr_pages); in refill_stock()
1911 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1922 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
1925 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
1935 memcg = READ_ONCE(stock->cached); in drain_all_stock()
1936 if (memcg && READ_ONCE(stock->nr_pages) && in drain_all_stock()
1944 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { in drain_all_stock()
1946 drain_local_stock(&stock->work); in drain_all_stock()
1948 schedule_work_on(cpu, &stock->work); in drain_all_stock()
1983 if (page_counter_read(&memcg->memory) <= in reclaim_high()
1984 READ_ONCE(memcg->memory.high)) in reclaim_high()
2017 * When calculating the delay, we use these either side of the exponentiation to
2021 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2022 * overage ratio to a delay.
2023 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2025 * to produce a reasonable delay curve.
2028 * reasonable delay curve compared to precision-adjusted overage, not
2033 * +-------+------------------------+
2035 * +-------+------------------------+
2057 * +-------+------------------------+
2075 overage = usage - high; in calculate_overage()
2085 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2086 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2099 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2100 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2128 * its crazy behaviour, so we exponentially increase the delay based on in calculate_high_delay()
2137 * N-sized allocations are throttled approximately the same as one in calculate_high_delay()
2138 * 4N-sized allocation. in calculate_high_delay()
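The calculate_overage()/calculate_high_delay() fragments (lines 2075-2138) turn the worst overage of memory.high or swap.high into a sleep: the overage is expressed as a fixed-point fraction of the limit, squared so the penalty steepens quickly, scaled down to jiffies, and weighted by the size of the current charge batch. The sketch below reproduces that curve in userspace; the shift values 20 and 14 and HZ=1000 are assumptions about the named constants, and the per-return clamp mentioned at line 2203 is omitted.

#include <stdio.h>
#include <stdint.h>

#define HZ                              1000
#define MEMCG_DELAY_PRECISION_SHIFT     20      /* assumed value of the named constant */
#define MEMCG_DELAY_SCALING_SHIFT       14      /* assumed value of the named constant */
#define MEMCG_CHARGE_BATCH              64

/* overage as a fixed-point fraction of the limit */
static uint64_t overage_frac(unsigned long usage, unsigned long high)
{
        if (usage <= high)
                return 0;
        if (!high)
                high = 1;
        return ((uint64_t)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT) / high;
}

/* quadratic penalty, weighted by the size of this charge */
static uint64_t high_delay(uint64_t overage, unsigned int nr_pages)
{
        uint64_t penalty = overage * overage * HZ;

        penalty >>= MEMCG_DELAY_PRECISION_SHIFT;
        penalty >>= MEMCG_DELAY_SCALING_SHIFT;
        return penalty * nr_pages / MEMCG_CHARGE_BATCH;
}

int main(void)
{
        unsigned long high = 100 << 8;  /* 100M worth of 4K pages */

        for (int pct = 1; pct <= 100; pct *= 2)
                printf("%3d%% over high -> %llu jiffies\n", pct,
                       (unsigned long long)high_delay(
                                overage_frac(high + high * pct / 100, high),
                                MEMCG_CHARGE_BATCH));
        return 0;
}

With these assumptions a 1% overage costs only a few jiffies while the largest overages in the loop already reach tens of seconds, which is the growth the clamp referenced at line 2203 is there to bound.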
2156 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high()
2164 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2165 current->memcg_nr_pages_over_high = 0; in mem_cgroup_handle_over_high()
2203 * Clamp the max delay per usermode return so as to still keep the in mem_cgroup_handle_over_high()
2210 * Don't sleep if the amount of jiffies this memcg owes us is so low in mem_cgroup_handle_over_high()
2223 if (nr_reclaimed || nr_retries--) { in mem_cgroup_handle_over_high()
2234 * need to account for any ill-begotten jiffies to pay them off later. in mem_cgroup_handle_over_high()
2241 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2267 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2268 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2271 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2289 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge_memcg()
2329 if (nr_retries--) in try_charge_memcg()
2358 return -ENOMEM; in try_charge_memcg()
2372 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2374 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2380 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2394 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2395 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2396 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2397 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2402 schedule_work(&memcg->high_work); in try_charge_memcg()
2414 * Target some best-effort fairness between the tasks, in try_charge_memcg()
2415 * and distribute reclaim work and delay penalties in try_charge_memcg()
2418 current->memcg_nr_pages_over_high += batch; in try_charge_memcg()
2431 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && in try_charge_memcg()
2432 !(current->flags & PF_MEMALLOC) && in try_charge_memcg()
2453 * - the page lock in commit_charge()
2454 * - LRU isolation in commit_charge()
2455 * - exclusive reference in commit_charge()
2457 folio->memcg_data = (unsigned long)memcg; in commit_charge()
2478 * Slab objects are accounted individually, not per-page. in mem_cgroup_from_obj_folio()
2480 * slab->obj_exts. in mem_cgroup_from_obj_folio()
2492 off = obj_to_index(slab->slab_cache, slab, p); in mem_cgroup_from_obj_folio()
2502 * slab->obj_exts has not been freed yet in mem_cgroup_from_obj_folio()
2531 objcg = rcu_dereference(memcg->objcg); in __get_obj_cgroup_from_memcg()
2546 old = xchg(&current->objcg, NULL); in current_objcg_update()
2556 if (!current->mm || (current->flags & PF_KTHREAD)) in current_objcg_update()
2585 } while (!try_cmpxchg(&current->objcg, &old, objcg)); in current_objcg_update()
2596 memcg = current->active_memcg; in current_obj_cgroup()
2600 objcg = READ_ONCE(current->objcg); in current_obj_cgroup()
2625 objcg = rcu_dereference_check(memcg->objcg, 1); in current_obj_cgroup()
2669 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in obj_cgroup_uncharge_pages()
2670 memcg1_account_kmem(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
2674 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
2700 css_put(&memcg->css); in obj_cgroup_charge_pages()
2707 unsigned long memcg_data = page->memcg_data; in page_objcg()
2714 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM); in page_objcg()
2719 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM; in page_set_objcg()
2761 page->memcg_data = 0; in __memcg_kmem_uncharge_page()
2773 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in replace_stock_objcg()
2774 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in replace_stock_objcg()
2775 WRITE_ONCE(stock->cached_objcg, objcg); in replace_stock_objcg()
2795 if (READ_ONCE(stock->cached_objcg) != objcg) { in mod_objcg_state()
2797 stock->cached_pgdat = pgdat; in mod_objcg_state()
2798 } else if (stock->cached_pgdat != pgdat) { in mod_objcg_state()
2800 struct pglist_data *oldpg = stock->cached_pgdat; in mod_objcg_state()
2802 if (stock->nr_slab_reclaimable_b) { in mod_objcg_state()
2804 stock->nr_slab_reclaimable_b); in mod_objcg_state()
2805 stock->nr_slab_reclaimable_b = 0; in mod_objcg_state()
2807 if (stock->nr_slab_unreclaimable_b) { in mod_objcg_state()
2809 stock->nr_slab_unreclaimable_b); in mod_objcg_state()
2810 stock->nr_slab_unreclaimable_b = 0; in mod_objcg_state()
2812 stock->cached_pgdat = pgdat; in mod_objcg_state()
2815 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b in mod_objcg_state()
2816 : &stock->nr_slab_unreclaimable_b; in mod_objcg_state()
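mod_objcg_state() (lines 2795-2816) batches NR_SLAB_RECLAIMABLE_B / NR_SLAB_UNRECLAIMABLE_B byte deltas in the per-CPU stock and only writes them back when the cached objcg or the cached pgdat changes (or when the stock is drained). Below is a minimal model of that flush-on-key-change pattern with invented names; the objcg half of the key and the extra overflow flush the kernel performs are left out.

#include <stdio.h>

/* a "flush" here just prints what the kernel would fold into the vmstats */
static void flush(int node, const char *counter, long bytes)
{
        if (bytes)
                printf("flush node %d %s %+ld bytes\n", node, counter, bytes);
}

static int cached_node = -1;
static long reclaimable_b, unreclaimable_b;

static void mod_state(int node, int reclaimable, long bytes)
{
        if (node != cached_node) {
                /* key changed: write back everything cached for the old node */
                flush(cached_node, "reclaimable", reclaimable_b);
                flush(cached_node, "unreclaimable", unreclaimable_b);
                reclaimable_b = unreclaimable_b = 0;
                cached_node = node;
        }
        if (reclaimable)
                reclaimable_b += bytes;
        else
                unreclaimable_b += bytes;
}

int main(void)
{
        mod_state(0, 1, 64);
        mod_state(0, 1, 128);   /* same node: just accumulates */
        mod_state(1, 0, 32);    /* node changed: the node-0 deltas are flushed */
        flush(cached_node, "unreclaimable", unreclaimable_b);   /* final drain */
        return 0;
}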
2849 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { in consume_obj_stock()
2850 stock->nr_bytes -= nr_bytes; in consume_obj_stock()
2861 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); in drain_obj_stock()
2866 if (stock->nr_bytes) { in drain_obj_stock()
2867 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock()
2868 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); in drain_obj_stock()
2875 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in drain_obj_stock()
2876 memcg1_account_kmem(memcg, -nr_pages); in drain_obj_stock()
2879 css_put(&memcg->css); in drain_obj_stock()
2883 * The leftover is flushed to the centralized per-memcg value. in drain_obj_stock()
2885 * to a per-cpu stock (probably, on another CPU), see in drain_obj_stock()
2888 * How often it's flushed is a trade-off between the memory in drain_obj_stock()
2892 atomic_add(nr_bytes, &old->nr_charged_bytes); in drain_obj_stock()
2893 stock->nr_bytes = 0; in drain_obj_stock()
2899 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
2900 if (stock->nr_slab_reclaimable_b) { in drain_obj_stock()
2901 __mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
2903 stock->nr_slab_reclaimable_b); in drain_obj_stock()
2904 stock->nr_slab_reclaimable_b = 0; in drain_obj_stock()
2906 if (stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
2907 __mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
2909 stock->nr_slab_unreclaimable_b); in drain_obj_stock()
2910 stock->nr_slab_unreclaimable_b = 0; in drain_obj_stock()
2912 stock->cached_pgdat = NULL; in drain_obj_stock()
2915 WRITE_ONCE(stock->cached_objcg, NULL); in drain_obj_stock()
2926 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); in obj_stock_flush_required()
2949 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ in refill_obj_stock()
2953 stock->nr_bytes += nr_bytes; in refill_obj_stock()
2955 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { in refill_obj_stock()
2956 nr_pages = stock->nr_bytes >> PAGE_SHIFT; in refill_obj_stock()
2957 stock->nr_bytes &= (PAGE_SIZE - 1); in refill_obj_stock()
2976 * In theory, objcg->nr_charged_bytes can have enough in obj_cgroup_charge()
2977 * pre-charged bytes to satisfy the allocation. However, in obj_cgroup_charge()
2978 * flushing objcg->nr_charged_bytes requires two atomic in obj_cgroup_charge()
2979 * operations, and objcg->nr_charged_bytes can't be big. in obj_cgroup_charge()
2980 * The shared objcg->nr_charged_bytes can also become a in obj_cgroup_charge()
2984 * objcg->nr_charged_bytes later on when objcg changes. in obj_cgroup_charge()
2986 * The stock's nr_bytes may contain enough pre-charged bytes in obj_cgroup_charge()
2988 * on the pre-charged bytes not being changed outside of in obj_cgroup_charge()
2990 * pre-charged bytes as well when charging pages. To avoid a in obj_cgroup_charge()
2993 * to temporarily allow the pre-charged bytes to exceed the page in obj_cgroup_charge()
2994 * size limit. The maximum reachable value of the pre-charged in obj_cgroup_charge()
2995 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data in obj_cgroup_charge()
2999 nr_bytes = size & (PAGE_SIZE - 1); in obj_cgroup_charge()
3006 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); in obj_cgroup_charge()
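obj_cgroup_charge() (lines 2976-3006) charges sub-page objects through a per-CPU byte stock: cached bytes are consumed first, otherwise whole pages are charged to the memcg and the unused tail of the last page is put back into the stock. A compact single-CPU model of that flow; the helper names are invented and the nr_charged_bytes spill discussed in the comment above is not modelled.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL

static unsigned long stock_bytes;       /* per-CPU cache of pre-charged bytes */
static unsigned long charged_pages;     /* what was actually charged to the memcg */

static bool consume_stock(unsigned long nr_bytes)
{
        if (stock_bytes < nr_bytes)
                return false;
        stock_bytes -= nr_bytes;
        return true;
}

static void charge_object(unsigned long size)
{
        unsigned long nr_pages, nr_bytes;

        if (consume_stock(size))
                return;                         /* fully covered by cached bytes */

        nr_pages = size / PAGE_SIZE;
        nr_bytes = size % PAGE_SIZE;
        if (nr_bytes) {
                nr_pages++;                     /* round the tail up to a page ... */
                stock_bytes += PAGE_SIZE - nr_bytes;    /* ... and cache the remainder */
        }
        charged_pages += nr_pages;
}

int main(void)
{
        charge_object(700);     /* charges 1 page, caches 3396 bytes */
        charge_object(700);     /* served from the cache, no new charge */
        charge_object(8192);    /* exactly 2 pages, nothing cached */
        printf("pages charged: %lu, bytes cached: %lu\n", charged_pages, stock_bytes);
        return 0;
}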
3022 return s->size + sizeof(struct obj_cgroup *); in obj_full_size()
3059 css_put(&memcg->css); in __memcg_slab_post_alloc_hook()
3102 -obj_full_size(s)); in __memcg_slab_free_hook()
3122 obj_cgroup_get_many(objcg, nr - 1); in split_page_memcg()
3133 new_refs = (1 << (old_order - new_order)) - 1; in folio_split_memcg_refs()
3134 css_get_many(&__folio_memcg(folio)->css, new_refs); in folio_split_memcg_refs()
3149 val += total_swap_pages - get_nr_swap_pages(); in mem_cgroup_usage()
3152 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3154 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3171 return -ENOMEM; in memcg_online_kmem()
3173 objcg->memcg = memcg; in memcg_online_kmem()
3174 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3176 memcg->orig_objcg = objcg; in memcg_online_kmem()
3180 memcg->kmemcg_id = memcg->id.id; in memcg_online_kmem()
3214 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
3219 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
3224 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
3229 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
3231 if (!memcg->css.parent) in mem_cgroup_wb_domain()
3234 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
3238 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3246 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3249 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3259 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
3271 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
3272 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
3273 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
3275 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
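mem_cgroup_wb_stats() (lines 3259-3275) computes the dirty-throttling headroom as the tightest constraint along the ancestry: at each level ceiling = min(memory.max, memory.high) and the headroom is ceiling - used clamped at zero, with the final value being the minimum over all levels. A short restatement of that walk with a plain array standing in for the cgroup hierarchy:

#include <stdio.h>

struct level { unsigned long max, high, used; };

static unsigned long wb_headroom(const struct level *lv, int depth)
{
        unsigned long headroom = ~0UL;          /* PAGE_COUNTER_MAX stand-in */

        for (int i = 0; i < depth; i++) {       /* walk from the memcg up to the root */
                unsigned long ceiling = lv[i].max < lv[i].high ? lv[i].max : lv[i].high;
                unsigned long room = ceiling > lv[i].used ? ceiling - lv[i].used : 0;

                if (room < headroom)
                        headroom = room;
        }
        return headroom;
}

int main(void)
{
        struct level path[] = {
                { .max = 1000, .high = 800,  .used = 700  },    /* leaf: 100 free under high */
                { .max = 2000, .high = ~0UL, .used = 1980 },    /* parent: only 20 free */
        };

        printf("headroom = %lu pages\n", wb_headroom(path, 2)); /* 20 */
        return 0;
}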
3284 * tracks ownership per-page while the latter per-inode. This was a
3285 * deliberate design decision because honoring per-page ownership in the
3287 * and deemed unnecessary given that write-sharing an inode across
3288 * different cgroups isn't a common use-case.
3290 * Combined with inode majority-writer ownership switching, this works well
3311 * page - a page whose memcg and writeback ownerships don't match - is
3317 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3331 int oldest = -1; in mem_cgroup_track_foreign_dirty_slowpath()
3342 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
3343 if (frn->bdi_id == wb->bdi->id && in mem_cgroup_track_foreign_dirty_slowpath()
3344 frn->memcg_id == wb->memcg_css->id) in mem_cgroup_track_foreign_dirty_slowpath()
3346 if (time_before64(frn->at, oldest_at) && in mem_cgroup_track_foreign_dirty_slowpath()
3347 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_track_foreign_dirty_slowpath()
3349 oldest_at = frn->at; in mem_cgroup_track_foreign_dirty_slowpath()
3355 * Re-using an existing one. Update timestamp lazily to in mem_cgroup_track_foreign_dirty_slowpath()
3357 * reasonably up-to-date and significantly shorter than in mem_cgroup_track_foreign_dirty_slowpath()
3365 if (time_before64(frn->at, now - update_intv)) in mem_cgroup_track_foreign_dirty_slowpath()
3366 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
3369 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
3370 frn->bdi_id = wb->bdi->id; in mem_cgroup_track_foreign_dirty_slowpath()
3371 frn->memcg_id = wb->memcg_css->id; in mem_cgroup_track_foreign_dirty_slowpath()
3372 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
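mem_cgroup_track_foreign_dirty_slowpath() (lines 3331-3372) keeps a small fixed array of foreign-writeback records per memcg: an entry matching the dirtying wb's (bdi_id, memcg_id) is refreshed (lazily in the kernel, lines 3355-3366), otherwise the oldest entry whose previous flush has completed is recycled. A sketch of that selection policy with the completion tracking reduced to a flag and the kernel's array size replaced by a made-up FRN_CNT:

#include <stdio.h>

#define FRN_CNT 4

struct frn { int bdi_id, memcg_id; unsigned long long at; int flush_done; };

static struct frn frns[FRN_CNT];

static void track_foreign(int bdi_id, int memcg_id, unsigned long long now)
{
        int oldest = -1;
        unsigned long long oldest_at = ~0ULL;

        for (int i = 0; i < FRN_CNT; i++) {
                if (frns[i].bdi_id == bdi_id && frns[i].memcg_id == memcg_id) {
                        frns[i].at = now;       /* existing record: refresh */
                        return;
                }
                /* remember the stalest slot that isn't mid-flush */
                if (frns[i].at < oldest_at && frns[i].flush_done) {
                        oldest = i;
                        oldest_at = frns[i].at;
                }
        }
        if (oldest >= 0)
                frns[oldest] = (struct frn){ bdi_id, memcg_id, now, 1 };
        /* else: every slot is busy flushing, drop the record */
}

int main(void)
{
        for (int i = 0; i < FRN_CNT; i++)
                frns[i].flush_done = 1;
        track_foreign(10, 1, 100);
        track_foreign(10, 2, 101);
        track_foreign(10, 1, 102);      /* refreshes the first slot */
        printf("slot0 at=%llu\n", frns[0].at);  /* 102 */
        return 0;
}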
3379 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign()
3385 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
3393 if (time_after64(frn->at, now - intv) && in mem_cgroup_flush_foreign()
3394 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_flush_foreign()
3395 frn->at = 0; in mem_cgroup_flush_foreign()
3396 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); in mem_cgroup_flush_foreign()
3397 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, in mem_cgroup_flush_foreign()
3399 &frn->done); in mem_cgroup_flush_foreign()
3424 * Swap-out records and page cache shadow entries need to store memcg
3427 * memory-controlled cgroups to 64k.
3434 * even when there are much fewer than 64k cgroups - possibly none.
3436 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3445 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
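The comment block above (lines 3424-3445) explains why memcg IDs live in a private 16-bit space: swap-out records and shadow entries can only spare a few bits, so IDs come from [1, MEM_CGROUP_ID_MAX] and are recycled when the css goes offline rather than when the last reference drops. A toy allocator illustrating the bounded allocate/erase cycle; a flat array stands in for the kernel's xarray.

#include <stdio.h>

#define MEM_CGROUP_ID_SHIFT     16
#define MEM_CGROUP_ID_MAX       ((1UL << MEM_CGROUP_ID_SHIFT) - 1)

static void *slots[MEM_CGROUP_ID_MAX + 1];      /* index 0 stays unused */

static long id_alloc(void *memcg)
{
        for (unsigned long id = 1; id <= MEM_CGROUP_ID_MAX; id++) {
                if (!slots[id]) {
                        slots[id] = memcg;
                        return id;
                }
        }
        return -1;      /* ID space exhausted: at most 64k-1 memcgs can exist */
}

static void id_remove(long id)
{
        if (id > 0)
                slots[id] = NULL;       /* the ID becomes reusable immediately */
}

int main(void)
{
        int a, b;
        long id_a = id_alloc(&a), id_b = id_alloc(&b);

        id_remove(id_a);
        printf("a=%ld b=%ld reuse=%ld\n", id_a, id_b, id_alloc(&a));    /* 1 2 1 */
        return 0;
}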
3450 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
3451 xa_erase(&mem_cgroup_ids, memcg->id.id); in mem_cgroup_id_remove()
3452 memcg->id.id = 0; in mem_cgroup_id_remove()
3459 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
3464 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
3468 css_put(&memcg->css); in mem_cgroup_id_put_many()
3479 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
3496 * mem_cgroup_from_id - look up a memcg from a memcg id
3522 memcg = ERR_PTR(-ENOENT); in mem_cgroup_get_from_ino()
3535 free_percpu(pn->lruvec_stats_percpu); in free_mem_cgroup_per_node_info()
3536 kfree(pn->lruvec_stats); in free_mem_cgroup_per_node_info()
3548 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats), in alloc_mem_cgroup_per_node_info()
3550 if (!pn->lruvec_stats) in alloc_mem_cgroup_per_node_info()
3553 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, in alloc_mem_cgroup_per_node_info()
3555 if (!pn->lruvec_stats_percpu) in alloc_mem_cgroup_per_node_info()
3558 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
3559 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
3561 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
3572 obj_cgroup_put(memcg->orig_objcg); in __mem_cgroup_free()
3575 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]); in __mem_cgroup_free()
3577 kfree(memcg->vmstats); in __mem_cgroup_free()
3578 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
3599 return ERR_PTR(-ENOMEM); in mem_cgroup_alloc()
3601 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL, in mem_cgroup_alloc()
3605 error = -ENOMEM; in mem_cgroup_alloc()
3607 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), in mem_cgroup_alloc()
3609 if (!memcg->vmstats) in mem_cgroup_alloc()
3612 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
3614 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
3622 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu); in mem_cgroup_alloc()
3623 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_alloc()
3624 statc->parent = parent ? pstatc : NULL; in mem_cgroup_alloc()
3625 statc->vmstats = memcg->vmstats; in mem_cgroup_alloc()
3635 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
3636 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
3637 INIT_LIST_HEAD(&memcg->memory_peaks); in mem_cgroup_alloc()
3638 INIT_LIST_HEAD(&memcg->swap_peaks); in mem_cgroup_alloc()
3639 spin_lock_init(&memcg->peaks_lock); in mem_cgroup_alloc()
3640 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
3642 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
3643 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
3645 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
3647 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
3651 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
3652 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
3653 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
3676 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
3679 memcg->zswap_max = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
3680 WRITE_ONCE(memcg->zswap_writeback, true); in mem_cgroup_css_alloc()
3682 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
3684 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); in mem_cgroup_css_alloc()
3686 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl); in mem_cgroup_css_alloc()
3687 page_counter_init(&memcg->swap, &parent->swap, false); in mem_cgroup_css_alloc()
3689 memcg->memory.track_failcnt = !memcg_on_dfl; in mem_cgroup_css_alloc()
3690 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); in mem_cgroup_css_alloc()
3691 page_counter_init(&memcg->kmem, &parent->kmem, false); in mem_cgroup_css_alloc()
3692 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); in mem_cgroup_css_alloc()
3697 page_counter_init(&memcg->memory, NULL, true); in mem_cgroup_css_alloc()
3698 page_counter_init(&memcg->swap, NULL, false); in mem_cgroup_css_alloc()
3700 page_counter_init(&memcg->kmem, NULL, false); in mem_cgroup_css_alloc()
3701 page_counter_init(&memcg->tcpmem, NULL, false); in mem_cgroup_css_alloc()
3704 return &memcg->css; in mem_cgroup_css_alloc()
3713 return &memcg->css; in mem_cgroup_css_alloc()
3737 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
3750 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL); in mem_cgroup_css_online()
3757 return -ENOMEM; in mem_cgroup_css_online()
3766 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
3767 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
3796 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
3807 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
3808 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
3815 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3831 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3832 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3834 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3835 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3837 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
3838 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
3839 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3841 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3848 /* pointer to the non-hierarchical (CPU aggregated) counters */
3867 for (i = 0; i < ac->size; i++) { in mem_cgroup_stat_aggregate()
3870 * below us. We're in a per-cpu loop here and this is in mem_cgroup_stat_aggregate()
3873 delta = ac->pending[i]; in mem_cgroup_stat_aggregate()
3875 ac->pending[i] = 0; in mem_cgroup_stat_aggregate()
3879 v = READ_ONCE(ac->cstat[i]); in mem_cgroup_stat_aggregate()
3880 if (v != ac->cstat_prev[i]) { in mem_cgroup_stat_aggregate()
3881 delta_cpu = v - ac->cstat_prev[i]; in mem_cgroup_stat_aggregate()
3883 ac->cstat_prev[i] = v; in mem_cgroup_stat_aggregate()
3888 ac->local[i] += delta_cpu; in mem_cgroup_stat_aggregate()
3891 ac->aggregate[i] += delta; in mem_cgroup_stat_aggregate()
3892 if (ac->ppending) in mem_cgroup_stat_aggregate()
3893 ac->ppending[i] += delta; in mem_cgroup_stat_aggregate()
3906 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
3909 .aggregate = memcg->vmstats->state, in mem_cgroup_css_rstat_flush()
3910 .local = memcg->vmstats->state_local, in mem_cgroup_css_rstat_flush()
3911 .pending = memcg->vmstats->state_pending, in mem_cgroup_css_rstat_flush()
3912 .ppending = parent ? parent->vmstats->state_pending : NULL, in mem_cgroup_css_rstat_flush()
3913 .cstat = statc->state, in mem_cgroup_css_rstat_flush()
3914 .cstat_prev = statc->state_prev, in mem_cgroup_css_rstat_flush()
3920 .aggregate = memcg->vmstats->events, in mem_cgroup_css_rstat_flush()
3921 .local = memcg->vmstats->events_local, in mem_cgroup_css_rstat_flush()
3922 .pending = memcg->vmstats->events_pending, in mem_cgroup_css_rstat_flush()
3923 .ppending = parent ? parent->vmstats->events_pending : NULL, in mem_cgroup_css_rstat_flush()
3924 .cstat = statc->events, in mem_cgroup_css_rstat_flush()
3925 .cstat_prev = statc->events_prev, in mem_cgroup_css_rstat_flush()
3931 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
3932 struct lruvec_stats *lstats = pn->lruvec_stats; in mem_cgroup_css_rstat_flush()
3937 plstats = parent->nodeinfo[nid]->lruvec_stats; in mem_cgroup_css_rstat_flush()
3939 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); in mem_cgroup_css_rstat_flush()
3942 .aggregate = lstats->state, in mem_cgroup_css_rstat_flush()
3943 .local = lstats->state_local, in mem_cgroup_css_rstat_flush()
3944 .pending = lstats->state_pending, in mem_cgroup_css_rstat_flush()
3945 .ppending = plstats ? plstats->state_pending : NULL, in mem_cgroup_css_rstat_flush()
3946 .cstat = lstatc->state, in mem_cgroup_css_rstat_flush()
3947 .cstat_prev = lstatc->state_prev, in mem_cgroup_css_rstat_flush()
3953 WRITE_ONCE(statc->stats_updates, 0); in mem_cgroup_css_rstat_flush()
3954 /* We are in a per-cpu loop here, only do the atomic write once */ in mem_cgroup_css_rstat_flush()
3955 if (atomic64_read(&memcg->vmstats->stats_updates)) in mem_cgroup_css_rstat_flush()
3956 atomic64_set(&memcg->vmstats->stats_updates, 0); in mem_cgroup_css_rstat_flush()
3962 * Set the update flag to cause task->objcg to be initialized lazily in mem_cgroup_fork()
3967 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG; in mem_cgroup_fork()
3972 struct obj_cgroup *objcg = task->objcg; in mem_cgroup_exit()
3984 task->objcg = NULL; in mem_cgroup_exit()
4001 if (task->mm && READ_ONCE(task->mm->owner) == task) in mem_cgroup_lru_gen_attach()
4002 lru_gen_migrate_mm(task->mm); in mem_cgroup_lru_gen_attach()
4016 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg); in mem_cgroup_kmem_attach()
4041 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
4044 #define OFP_PEAK_UNSET (((-1UL)))
4048 struct cgroup_of_peak *ofp = of_peak(sf->private); in peak_show()
4049 u64 fd_peak = READ_ONCE(ofp->value), peak; in peak_show()
4053 peak = pc->watermark; in peak_show()
4055 peak = max(fd_peak, READ_ONCE(pc->local_watermark)); in peak_show()
4065 return peak_show(sf, v, &memcg->memory); in memory_peak_show()
4072 ofp->value = OFP_PEAK_UNSET; in peak_open()
4081 if (ofp->value == OFP_PEAK_UNSET) { in peak_release()
4085 spin_lock(&memcg->peaks_lock); in peak_release()
4086 list_del(&ofp->list); in peak_release()
4087 spin_unlock(&memcg->peaks_lock); in peak_release()
4099 spin_lock(&memcg->peaks_lock); in peak_write()
4102 WRITE_ONCE(pc->local_watermark, usage); in peak_write()
4105 if (usage > peer_ctx->value) in peak_write()
4106 WRITE_ONCE(peer_ctx->value, usage); in peak_write()
4109 if (ofp->value == OFP_PEAK_UNSET) in peak_write()
4110 list_add(&ofp->list, watchers); in peak_write()
4112 WRITE_ONCE(ofp->value, usage); in peak_write()
4113 spin_unlock(&memcg->peaks_lock); in peak_write()
4123 return peak_write(of, buf, nbytes, off, &memcg->memory, in memory_peak_write()
4124 &memcg->memory_peaks); in memory_peak_write()
4132 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); in memory_min_show()
4147 page_counter_set_min(&memcg->memory, min); in memory_min_write()
4155 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); in memory_low_show()
4170 page_counter_set_low(&memcg->memory, low); in memory_low_write()
4178 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); in memory_high_show()
4195 page_counter_set_high(&memcg->memory, high); in memory_high_write()
4198 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
4213 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
4216 if (!reclaimed && !nr_retries--) in memory_high_write()
4227 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); in memory_max_show()
4244 xchg(&memcg->memory.max, max); in memory_max_write()
4247 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
4262 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
4264 nr_reclaims--; in memory_max_write()
4298 __memory_events_show(m, memcg->memory_events); in memory_events_show()
4306 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
4317 return -ENOMEM; in memory_stat_show()
4367 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); in memory_oom_group_show()
4380 return -EINVAL; in memory_oom_group_write()
4387 return -EINVAL; in memory_oom_group_write()
4389 WRITE_ONCE(memcg->oom_group, oom_group); in memory_oom_group_write()
4410 int swappiness = -1; in memory_reclaim()
4420 return -EINVAL; in memory_reclaim()
4430 return -EINVAL; in memory_reclaim()
4432 return -EINVAL; in memory_reclaim()
4435 return -EINVAL; in memory_reclaim()
4442 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4; in memory_reclaim()
4446 return -EINTR; in memory_reclaim()
4459 swappiness == -1 ? NULL : &swappiness); in memory_reclaim()
4461 if (!reclaimed && !nr_retries--) in memory_reclaim()
4462 return -EAGAIN; in memory_reclaim()
4563 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4564 * @root: the top ancestor of the sub-tree being checked
4568 * of a top-down tree iteration, not for isolated queries.
4582 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); in mem_cgroup_calculate_protection()
4594 css_get(&memcg->css); in charge_memcg()
4608 css_put(&memcg->css); in __mem_cgroup_charge()
4614 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4632 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip in mem_cgroup_charge_hugetlb()
4640 ret = -ENOMEM; in mem_cgroup_charge_hugetlb()
4648 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4672 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_folio()
4678 css_put(&memcg->css); in mem_cgroup_swapin_charge_folio()
4697 if (ug->nr_memory) { in uncharge_batch()
4698 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); in uncharge_batch()
4700 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); in uncharge_batch()
4701 if (ug->nr_kmem) { in uncharge_batch()
4702 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); in uncharge_batch()
4703 memcg1_account_kmem(ug->memcg, -ug->nr_kmem); in uncharge_batch()
4705 memcg1_oom_recover(ug->memcg); in uncharge_batch()
4708 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid); in uncharge_batch()
4711 css_put(&ug->memcg->css); in uncharge_batch()
4741 if (ug->memcg != memcg) { in uncharge_folio()
4742 if (ug->memcg) { in uncharge_folio()
4746 ug->memcg = memcg; in uncharge_folio()
4747 ug->nid = folio_nid(folio); in uncharge_folio()
4750 css_get(&memcg->css); in uncharge_folio()
4756 ug->nr_memory += nr_pages; in uncharge_folio()
4757 ug->nr_kmem += nr_pages; in uncharge_folio()
4759 folio->memcg_data = 0; in uncharge_folio()
4764 ug->nr_memory += nr_pages; in uncharge_folio()
4765 ug->pgpgout++; in uncharge_folio()
4768 folio->memcg_data = 0; in uncharge_folio()
4771 css_put(&memcg->css); in uncharge_folio()
4778 /* Don't touch folio->lru of any random page, pre-check: */ in __mem_cgroup_uncharge()
4793 for (i = 0; i < folios->nr; i++) in __mem_cgroup_uncharge_folios()
4794 uncharge_folio(folios->folios[i], &ug); in __mem_cgroup_uncharge_folios()
4800 * mem_cgroup_replace_folio - Charge a folio's replacement.
4807 * Both folios must be locked, @new->mapping must be set up.
4831 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_replace_folio()
4833 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_replace_folio()
4835 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_replace_folio()
4838 css_get(&memcg->css); in mem_cgroup_replace_folio()
4844 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4852 * Both folios must be locked, @new->mapping must be set up.
4880 /* Warning should never happen, so don't worry about refcount non-0 */ in mem_cgroup_migrate()
4882 old->memcg_data = 0; in mem_cgroup_migrate()
4905 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
4906 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
4913 if (sk->sk_memcg) in mem_cgroup_sk_free()
4914 css_put(&sk->sk_memcg->css); in mem_cgroup_sk_free()
4918 * mem_cgroup_charge_skmem - charge socket memory
4941 * mem_cgroup_uncharge_skmem - uncharge socket memory
4952 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
4979 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4989 * used for per-memcg-per-cpu caching of per-node statistics. In order in mem_cgroup_init()
4999 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
5008 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5014 * Returns 0 on success, -ENOMEM on failure.
5039 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
5043 return -ENOMEM; in __mem_cgroup_try_charge_swap()
5048 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
5057 * __mem_cgroup_uncharge_swap - uncharge swap space
5072 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
5074 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
5076 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
5090 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
5091 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
5111 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
5113 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
5114 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
5128 "Please report your usecase to linux-mm@kvack.org if you " in setup_swap_account()
5139 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
5146 return peak_show(sf, v, &memcg->swap); in swap_peak_show()
5154 return peak_write(of, buf, nbytes, off, &memcg->swap, in swap_peak_write()
5155 &memcg->swap_peaks); in swap_peak_write()
5161 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); in swap_high_show()
5176 page_counter_set_high(&memcg->swap, high); in swap_high_write()
5184 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); in swap_max_show()
5199 xchg(&memcg->swap.max, max); in swap_max_write()
5209 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
5211 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
5213 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
5255 * obj_cgroup_may_zswap - check if this cgroup can zswap
5262 * once compression has occurred, and this optimistic pre-check avoids
5277 unsigned long max = READ_ONCE(memcg->zswap_max); in obj_cgroup_may_zswap()
5300 * obj_cgroup_charge_zswap - charge compression backend memory
5314 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); in obj_cgroup_charge_zswap()
5328 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5345 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); in obj_cgroup_uncharge_zswap()
5346 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); in obj_cgroup_uncharge_zswap()
5357 if (!READ_ONCE(memcg->zswap_writeback)) in mem_cgroup_zswap_writeback_enabled()
5375 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); in zswap_max_show()
5390 xchg(&memcg->zswap_max, max); in zswap_max_write()
5399 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); in zswap_writeback_show()
5414 return -EINVAL; in zswap_writeback_write()
5416 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); in zswap_writeback_write()