Excerpted lines from memcontrol.c (Memory Controller); the leading numbers are line numbers in the source file.

1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
110 * Cgroups above their limits are maintained in a RB-Tree, independent of
207 /* for encoding cft->private value on file */
237 (current->flags & PF_EXITING); in task_is_dying()
245 return &memcg->vmpressure; in memcg_to_vmpressure()
276 * objcg->nr_charged_bytes can't have an arbitrary byte value. in obj_cgroup_release()
280 * 1) CPU0: objcg == stock->cached_objcg in obj_cgroup_release()
285 * objcg->nr_charged_bytes = PAGE_SIZE - 92 in obj_cgroup_release()
287 * 92 bytes are added to stock->nr_bytes in obj_cgroup_release()
289 * 92 bytes are added to objcg->nr_charged_bytes in obj_cgroup_release()
294 nr_bytes = atomic_read(&objcg->nr_charged_bytes); in obj_cgroup_release()
295 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); in obj_cgroup_release()
302 list_del(&objcg->list); in obj_cgroup_release()
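
The comment above explains why, once every object has been freed, the leftover byte count on an objcg is page-aligned: the bytes temporarily parked in one CPU's stock eventually flow back into nr_charged_bytes. A minimal userspace sketch of that accounting, reusing the 92-byte example from the comment; the types and names below are simplifications of my own, not the kernel's.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Simplified model: one shared objcg byte counter and one per-CPU stock. */
struct objcg { unsigned int nr_charged_bytes; };
struct stock { unsigned int nr_bytes; };

int main(void)
{
	struct objcg objcg = { 0 };
	struct stock cpu0 = { 0 };

	/* CPU1 charges a whole page for a 92-byte object ... */
	unsigned int obj = 92;
	/* ... and the unused remainder ends up on the shared counter
	 * once CPU1's stock is flushed. */
	objcg.nr_charged_bytes = PAGE_SIZE - obj;

	/* CPU0 frees the object: 92 bytes land in its local stock first. */
	cpu0.nr_bytes += obj;

	/* CPU0's stock is flushed back to the shared counter. */
	objcg.nr_charged_bytes += cpu0.nr_bytes;
	cpu0.nr_bytes = 0;

	/* The leftover is page-aligned, so it can be uncharged as pages. */
	assert((objcg.nr_charged_bytes & (PAGE_SIZE - 1)) == 0);
	printf("leftover: %u bytes = %u page(s)\n",
	       objcg.nr_charged_bytes, objcg.nr_charged_bytes / PAGE_SIZE);
	return 0;
}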
318 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, in obj_cgroup_alloc()
324 INIT_LIST_HEAD(&objcg->list); in obj_cgroup_alloc()
333 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
338 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
340 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
341 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
343 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
347 percpu_ref_kill(&objcg->refcnt); in memcg_reparent_objcgs()
364 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
381 return &memcg->css; in mem_cgroup_css_from_folio()
385 * page_cgroup_ino - return inode number of the memcg a page is charged to
406 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
409 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
418 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
423 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
426 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
427 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
433 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
434 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
437 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
442 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
444 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
445 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
446 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
452 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
455 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
456 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
458 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
459 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
467 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
469 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
474 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
475 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
479 excess = nr_pages - soft_limit; in soft_limit_excess()
504 mz = memcg->nodeinfo[nid]; in mem_cgroup_update_tree()
507 * We have to update the tree if mz is on RB-tree or in mem_cgroup_update_tree()
510 if (excess || mz->on_tree) { in mem_cgroup_update_tree()
513 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_update_tree()
514 /* if on-tree, remove it */ in mem_cgroup_update_tree()
515 if (mz->on_tree) in mem_cgroup_update_tree()
518 * Insert again. mz->usage_in_excess will be updated. in mem_cgroup_update_tree()
522 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_update_tree()
534 mz = memcg->nodeinfo[nid]; in mem_cgroup_remove_from_trees()
548 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
551 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
559 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
560 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
571 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
573 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
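
The functions above maintain, per NUMA node, an rbtree of memcgs keyed by how far they exceed their soft limit, with rb_rightmost caching the worst offender so soft-limit reclaim can pick it first. Below is a minimal userspace sketch of just that selection policy; it uses a plain array and invented names instead of the kernel's rbtree, so it models the policy, not the data structure.

#include <stdio.h>

/* Toy stand-in for a memcg's counters (names are mine, not the kernel's). */
struct mcg {
	const char   *name;
	unsigned long usage;      /* pages currently charged */
	unsigned long soft_limit; /* pages allowed before soft reclaim */
};

/* Mirrors soft_limit_excess(): how far usage is above the soft limit. */
static unsigned long excess(const struct mcg *m)
{
	return m->usage > m->soft_limit ? m->usage - m->soft_limit : 0;
}

/* The rbtree's rb_rightmost node is simply the largest excess; a linear
 * scan gives the same answer for this small model. */
static const struct mcg *largest_offender(const struct mcg *v, int n)
{
	const struct mcg *best = NULL;

	for (int i = 0; i < n; i++)
		if (excess(&v[i]) && (!best || excess(&v[i]) > excess(best)))
			best = &v[i];
	return best;
}

int main(void)
{
	struct mcg groups[] = {
		{ "a", 1000,  800 },   /* 200 pages over  */
		{ "b",  500, 1000 },   /* under its limit */
		{ "c", 3000, 1500 },   /* 1500 pages over */
	};
	const struct mcg *m = largest_offender(groups, 3);

	printf("reclaim from %s (excess %lu pages)\n", m->name, excess(m));
	return 0;
}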
620 return mem_cgroup_events_index[idx] - 1; in memcg_events_index()
651 /* Non-hierarchical (CPU aggregated) page state & events */
709 return atomic64_read(&vmstats->stats_updates) > in memcg_vmstats_needs_flush()
721 cgroup_rstat_updated(memcg->css.cgroup, cpu); in memcg_rstat_updated()
722 statc = this_cpu_ptr(memcg->vmstats_percpu); in memcg_rstat_updated()
723 for (; statc; statc = statc->parent) { in memcg_rstat_updated()
724 statc->stats_updates += abs(val); in memcg_rstat_updated()
725 if (statc->stats_updates < MEMCG_CHARGE_BATCH) in memcg_rstat_updated()
729 * If @memcg is already flush-able, increasing stats_updates is in memcg_rstat_updated()
732 if (!memcg_vmstats_needs_flush(statc->vmstats)) in memcg_rstat_updated()
733 atomic64_add(statc->stats_updates, in memcg_rstat_updated()
734 &statc->vmstats->stats_updates); in memcg_rstat_updated()
735 statc->stats_updates = 0; in memcg_rstat_updated()
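
memcg_rstat_updated() keeps statistics updates cheap by accumulating them in a per-CPU counter at every level of the hierarchy and only touching the shared atomic once a whole batch has built up. A minimal userspace model of that batching follows; the threshold values and types are assumptions for the sketch, not the kernel's.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed thresholds for this model; the kernel uses MEMCG_CHARGE_BATCH
 * and a flush threshold scaled by the number of online CPUs. */
#define BATCH           64
#define FLUSH_THRESHOLD 1024

struct level {
	long          pending;  /* per-CPU, lock-free accumulator          */
	atomic_long   shared;   /* cgroup-wide total of batched updates    */
	struct level *parent;
};

static int needs_flush(struct level *l)
{
	return atomic_load(&l->shared) > FLUSH_THRESHOLD;
}

/* Mirrors the shape of memcg_rstat_updated(): cheap per-CPU adds, with an
 * atomic touched only once per BATCH worth of updates per level. */
static void stat_updated(struct level *l, long val)
{
	if (!val)
		return;
	for (; l; l = l->parent) {
		l->pending += labs(val);
		if (l->pending < BATCH)
			continue;
		if (!needs_flush(l))            /* skip redundant atomics */
			atomic_fetch_add(&l->shared, l->pending);
		l->pending = 0;
	}
}

int main(void)
{
	struct level root = { 0, 0, NULL };
	struct level child = { 0, 0, &root };

	for (int i = 0; i < 1000; i++)
		stat_updated(&child, 1);

	printf("child shared=%ld root shared=%ld\n",
	       atomic_load(&child.shared), atomic_load(&root.shared));
	return 0;
}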
744 cgroup_rstat_flush(memcg->css.cgroup); in do_flush_stats()
748 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
764 if (memcg_vmstats_needs_flush(memcg->vmstats)) in mem_cgroup_flush_stats()
779 * in latency-sensitive paths is as cheap as possible. in flush_memcg_stats_dwork()
787 long x = READ_ONCE(memcg->vmstats->state[idx]); in memcg_page_state()
799 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
812 * __mod_memcg_state - update cgroup memory statistics
814 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
822 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_state()
829 long x = READ_ONCE(memcg->vmstats->state_local[idx]); in memcg_page_state_local()
845 memcg = pn->memcg; in __mod_memcg_lruvec_state()
849 * update their counter from in-interrupt context. For these two in __mod_memcg_lruvec_state()
869 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_lruvec_state()
872 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); in __mod_memcg_lruvec_state()
879 * __mod_lruvec_state - update lruvec memory statistics
886 * change of state at this level: per-node, per-cgroup, per-lruvec.
933 * when we free the slab object, we need to update the per-memcg in __mod_lruvec_kmem_state()
946 * __count_memcg_events - account VM events in a cgroup
960 __this_cpu_add(memcg->vmstats_percpu->events[index], count); in __count_memcg_events()
971 return READ_ONCE(memcg->vmstats->events[index]); in memcg_events()
981 return READ_ONCE(memcg->vmstats->events_local[index]); in memcg_events_local()
992 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
995 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
1003 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
1004 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
1006 if ((long)(next - val) < 0) { in mem_cgroup_event_ratelimit()
1017 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
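
The `(long)(next - val) < 0` test above is the time_after() idiom: comparing through a signed difference keeps the check correct even after the unsigned event counter wraps around. A small self-contained demonstration, relying, like the kernel, on the usual two's-complement conversion:

#include <limits.h>
#include <stdio.h>

/* Returns nonzero once the event counter has passed its target, even if
 * the unsigned counter has wrapped since the target was set. */
static int target_passed(unsigned long val, unsigned long next)
{
	return (long)(next - val) < 0;
}

int main(void)
{
	/* Normal case: target 100 not yet reached at 90, passed at 150. */
	printf("%d %d\n", target_passed(90, 100), target_passed(150, 100));

	/* Wrapped case: the target was set just below ULONG_MAX, the
	 * counter has since wrapped to a small value but is still "after". */
	unsigned long next = ULONG_MAX - 5;
	unsigned long val = next + 100;      /* wraps around */
	printf("%d\n", target_passed(val, next));
	return 0;
}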
1048 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
1064 return current->active_memcg; in active_memcg()
1071 * Obtain a reference on mm->memcg and returns it if successful. If mm
1074 * 2) current->mm->memcg, if available
1098 css_get(&memcg->css); in get_mem_cgroup_from_mm()
1101 mm = current->mm; in get_mem_cgroup_from_mm()
1108 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
1111 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
1118 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1130 if (!css_tryget(&memcg->css)) { in get_mem_cgroup_from_current()
1139 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1145 * @root itself, or %NULL after a full round-trip.
1149 * to cancel a hierarchy walk before the round-trip is complete.
1175 mz = root->nodeinfo[reclaim->pgdat->node_id]; in mem_cgroup_iter()
1176 iter = &mz->iter; in mem_cgroup_iter()
1183 reclaim->generation = iter->generation; in mem_cgroup_iter()
1184 else if (reclaim->generation != iter->generation) in mem_cgroup_iter()
1188 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1189 if (!pos || css_tryget(&pos->css)) in mem_cgroup_iter()
1192 * css reference reached zero, so iter->position will in mem_cgroup_iter()
1193 * be cleared by ->css_released. However, we should not in mem_cgroup_iter()
1194 * rely on this happening soon, because ->css_released in mem_cgroup_iter()
1195 * is called from a work queue, and by busy-waiting we in mem_cgroup_iter()
1196 * might block it. So we clear iter->position right in mem_cgroup_iter()
1199 (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter()
1206 css = &pos->css; in mem_cgroup_iter()
1209 css = css_next_descendant_pre(css, &root->css); in mem_cgroup_iter()
1214 * the hierarchy - make sure they see at least in mem_cgroup_iter()
1227 if (css == &root->css || css_tryget(css)) { in mem_cgroup_iter()
1239 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1242 css_put(&pos->css); in mem_cgroup_iter()
1245 iter->generation++; in mem_cgroup_iter()
1251 css_put(&prev->css); in mem_cgroup_iter()
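
mem_cgroup_iter() shares its cursor between concurrent reclaimers: the cached position is only reused after a successful tryget, a failed tryget clears the cursor with cmpxchg so a dying cgroup isn't pinned, and the new position is published with another cmpxchg whose failure is ignored. A minimal single-threaded model of that pattern with invented types; the generation counter and the pre-order descendant walk are left out.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	atomic_int  ref;   /* 0 means the node is being destroyed */
};

/* Take a reference only if the object is still alive (ref > 0). */
static bool tryget(struct node *n)
{
	int old = atomic_load(&n->ref);

	while (old > 0)
		if (atomic_compare_exchange_weak(&n->ref, &old, old + 1))
			return true;
	return false;
}

static void put(struct node *n)
{
	atomic_fetch_sub(&n->ref, 1);
}

/* Shared, lockless iterator cursor: many walkers may race on it, so all
 * updates go through compare-and-swap. */
static _Atomic(struct node *) position;

static struct node *iter_next(struct node **ring, int n)
{
	struct node *pos = atomic_load(&position);

	if (pos && !tryget(pos)) {
		/* The cached node is dying: clear the cursor ourselves
		 * rather than waiting for its release path to do it. */
		struct node *expected = pos;
		atomic_compare_exchange_strong(&position, &expected, NULL);
		pos = NULL;
	}

	/* Advance round-robin from pos (NULL restarts at ring[0]). */
	int start = 0;
	if (pos) {
		for (int i = 0; i < n; i++)
			if (ring[i] == pos)
				start = (i + 1) % n;
		put(pos);
	}
	struct node *next = ring[start];
	tryget(next);

	/* Publish the new position; losing this race is harmless. */
	struct node *expected = pos;
	atomic_compare_exchange_strong(&position, &expected, next);
	return next;
}

int main(void)
{
	struct node a = { "a", 1 }, b = { "b", 1 }, c = { "c", 1 };
	struct node *ring[] = { &a, &b, &c };

	for (int i = 0; i < 5; i++) {
		struct node *n = iter_next(ring, 3);
		printf("%s ", n->name);
		put(n);   /* the caller drops its reference when done */
	}
	printf("\n");
	return 0;
}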
1257 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1267 css_put(&prev->css); in mem_cgroup_iter_break()
1278 mz = from->nodeinfo[nid]; in __invalidate_reclaim_iterators()
1279 iter = &mz->iter; in __invalidate_reclaim_iterators()
1280 cmpxchg(&iter->position, dead_memcg, NULL); in __invalidate_reclaim_iterators()
1295 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1306 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1312 * descendants and calls @fn for each task. If @fn returns a non-zero
1330 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); in mem_cgroup_scan_tasks()
1359 * folio_lruvec_lock - Lock the lruvec for a folio.
1363 * - folio locked
1364 * - folio_test_lru false
1365 * - folio_memcg_lock()
1366 * - folio frozen (refcount of 0)
1374 spin_lock(&lruvec->lru_lock); in folio_lruvec_lock()
1381 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1385 * - folio locked
1386 * - folio_test_lru false
1387 * - folio_memcg_lock()
1388 * - folio frozen (refcount of 0)
1397 spin_lock_irq(&lruvec->lru_lock); in folio_lruvec_lock_irq()
1404 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1409 * - folio locked
1410 * - folio_test_lru false
1411 * - folio_memcg_lock()
1412 * - folio frozen (refcount of 0)
1422 spin_lock_irqsave(&lruvec->lru_lock, *flags); in folio_lruvec_lock_irqsave()
1429 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1449 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1467 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1479 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1480 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1482 margin = limit - count; in mem_cgroup_margin()
1485 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1486 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1488 margin = min(margin, limit - count); in mem_cgroup_margin()
1500 * moving cgroups. This is for waiting at high-memory pressure
1650 * 1) generic big picture -> specifics and details in memcg_stat_format()
1651 * 2) reflecting userspace activity -> reflecting kernel heuristics in memcg_stat_format()
1720 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1744 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1745 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1748 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1749 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1752 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1753 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1755 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1756 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1760 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1772 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1777 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1783 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1791 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1797 struct oom_control oc = { in mem_cgroup_out_of_memory() local
1816 ret = task_is_dying() || out_of_memory(&oc); in mem_cgroup_out_of_memory()
1882 * Check OOM-Killer is already running under our hierarchy.
1892 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1901 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1914 iter->oom_lock = false; in mem_cgroup_oom_trylock()
1931 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1941 iter->under_oom++; in mem_cgroup_mark_under_oom()
1955 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1956 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1975 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1986 * For the following lockless ->under_oom test, the only required in memcg_oom_recover()
1993 if (memcg && memcg->under_oom) in memcg_oom_recover()
2020 * On the other hand, in-kernel OOM killer allows for an async victim in mem_cgroup_oom()
2028 if (READ_ONCE(memcg->oom_kill_disable)) { in mem_cgroup_oom()
2029 if (current->in_user_fault) { in mem_cgroup_oom()
2030 css_get(&memcg->css); in mem_cgroup_oom()
2031 current->memcg_in_oom = memcg; in mem_cgroup_oom()
2032 current->memcg_oom_gfp_mask = mask; in mem_cgroup_oom()
2033 current->memcg_oom_order = order; in mem_cgroup_oom()
2055 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2073 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
2105 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
2106 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
2111 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2113 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2116 * by killing all belonging OOM-killable tasks.
2118 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2149 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
2152 if (READ_ONCE(memcg->oom_group)) in mem_cgroup_get_oom_group()
2160 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
2170 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
2175 * folio_memcg_lock - Bind a folio to its memcg.
2191 * path can get away without acquiring the memcg->move_lock in folio_memcg_lock()
2205 might_lock(&memcg->move_lock); in folio_memcg_lock()
2209 if (atomic_read(&memcg->moving_account) <= 0) in folio_memcg_lock()
2212 spin_lock_irqsave(&memcg->move_lock, flags); in folio_memcg_lock()
2214 spin_unlock_irqrestore(&memcg->move_lock, flags); in folio_memcg_lock()
2220 * critical sections holding the fast-path RCU lock and one in folio_memcg_lock()
2224 memcg->move_lock_task = current; in folio_memcg_lock()
2225 memcg->move_lock_flags = flags; in folio_memcg_lock()
2230 if (memcg && memcg->move_lock_task == current) { in __folio_memcg_unlock()
2231 unsigned long flags = memcg->move_lock_flags; in __folio_memcg_unlock()
2233 memcg->move_lock_task = NULL; in __folio_memcg_unlock()
2234 memcg->move_lock_flags = 0; in __folio_memcg_unlock()
2236 spin_unlock_irqrestore(&memcg->move_lock, flags); in __folio_memcg_unlock()
2243 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2321 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) { in consume_stock()
2322 stock->nr_pages -= nr_pages; in consume_stock()
2336 struct mem_cgroup *old = READ_ONCE(stock->cached); in drain_stock()
2341 if (stock->nr_pages) { in drain_stock()
2342 page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
2344 page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
2345 stock->nr_pages = 0; in drain_stock()
2348 css_put(&old->css); in drain_stock()
2349 WRITE_ONCE(stock->cached, NULL); in drain_stock()
2368 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_stock()
2384 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ in __refill_stock()
2386 css_get(&memcg->css); in __refill_stock()
2387 WRITE_ONCE(stock->cached, memcg); in __refill_stock()
2389 stock->nr_pages += nr_pages; in __refill_stock()
2391 if (stock->nr_pages > MEMCG_CHARGE_BATCH) in __refill_stock()
2405 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2416 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
2419 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
2429 memcg = READ_ONCE(stock->cached); in drain_all_stock()
2430 if (memcg && stock->nr_pages && in drain_all_stock()
2438 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { in drain_all_stock()
2440 drain_local_stock(&stock->work); in drain_all_stock()
2442 schedule_work_on(cpu, &stock->work); in drain_all_stock()
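
consume_stock()/refill_stock() implement a per-CPU cache of pre-charged pages so that most charges never touch the shared page counters at all. A minimal userspace model of that caching; there are no atomics, limits, or IRQ concerns here, and the batch size is an assumption for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 64   /* assumed batch size for the model */

/* Toy page counter and per-CPU stock; simplified stand-ins, not the
 * kernel's types. */
struct counter { unsigned long usage; };

struct pcpu_stock {
	struct counter *cached;     /* which counter the pages belong to */
	unsigned long   nr_pages;   /* pre-charged pages held locally    */
};

/* Fast path: take pages from the local stock without touching the
 * shared counter, if the stock caches the right counter. */
static bool consume_stock(struct pcpu_stock *s, struct counter *c,
			  unsigned long nr_pages)
{
	if (s->cached == c && s->nr_pages >= nr_pages) {
		s->nr_pages -= nr_pages;
		return true;
	}
	return false;
}

/* Return any cached pages to their counter. */
static void drain_stock(struct pcpu_stock *s)
{
	if (s->cached && s->nr_pages) {
		s->cached->usage -= s->nr_pages;
		s->nr_pages = 0;
	}
	s->cached = NULL;
}

/* Slow path: charge a whole batch to the shared counter and park the
 * surplus in the local stock for later fast-path hits. */
static void charge(struct pcpu_stock *s, struct counter *c,
		   unsigned long nr_pages)
{
	if (consume_stock(s, c, nr_pages))
		return;
	c->usage += CHARGE_BATCH;
	if (s->cached != c) {
		drain_stock(s);
		s->cached = c;
	}
	s->nr_pages += CHARGE_BATCH - nr_pages;
}

int main(void)
{
	struct counter memcg = { 0 };
	struct pcpu_stock stock = { 0 };

	charge(&stock, &memcg, 1);   /* slow path: usage jumps by a batch */
	charge(&stock, &memcg, 1);   /* fast path: served from the stock  */
	printf("usage=%lu stocked=%lu\n", memcg.usage, stock.nr_pages);
	return 0;
}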
2468 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2469 READ_ONCE(memcg->memory.high)) in reclaim_high()
2501 * When calculating the delay, we use these either side of the exponentiation to
2505 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2506 * overage ratio to a delay.
2507 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2509 * to produce a reasonable delay curve.
2512 * reasonable delay curve compared to precision-adjusted overage, not
2517 * +-------+------------------------+
2519 * +-------+------------------------+
2541 * +-------+------------------------+
2559 overage = usage - high; in calculate_overage()
2569 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2570 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2583 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2584 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2612 * its crazy behaviour, so we exponentially increase the delay based on in calculate_high_delay()
2621 * N-sized allocations are throttled approximately the same as one in calculate_high_delay()
2622 * 4N-sized allocation. in calculate_high_delay()
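
The comments above describe how the over-high penalty is computed: the overage is turned into a fixed-point ratio, squared so sustained overage hurts much more than a brief excursion, scaled down into jiffies, and weighted by the size of the current charge. The sketch below reproduces that arithmetic with assumed shift, HZ, batch, and clamp values (the kernel defines its own constants); with these numbers a 10% overage comes out to a delay on the order of hundreds of milliseconds at HZ=100, while large overages hit the clamp.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for this model; the kernel defines its own constants. */
#define DELAY_PRECISION_SHIFT 20         /* fixed-point bits in the ratio     */
#define DELAY_SCALING_SHIFT   14         /* flattens the curve after squaring */
#define HZ                    100
#define CHARGE_BATCH          64
#define MAX_DELAY             (2UL * HZ) /* cap per return to userspace       */

/* (usage - high) / high as a fixed-point ratio, as in calculate_overage(). */
static uint64_t overage_ratio(unsigned long usage, unsigned long high)
{
	if (usage <= high)
		return 0;
	if (high < 1)
		high = 1;                        /* avoid dividing by zero */
	return ((uint64_t)(usage - high) << DELAY_PRECISION_SHIFT) / high;
}

/* Square the overage, convert to jiffies, weight by the size of this
 * charge so four N-page charges cost about as much as one 4N-page
 * charge, then clamp the result. */
static unsigned long high_delay(uint64_t overage, unsigned int nr_pages)
{
	uint64_t penalty = overage * overage * HZ;

	penalty >>= DELAY_PRECISION_SHIFT;
	penalty >>= DELAY_SCALING_SHIFT;
	penalty = penalty * nr_pages / CHARGE_BATCH;
	return penalty > MAX_DELAY ? MAX_DELAY : (unsigned long)penalty;
}

int main(void)
{
	/* memory.high of 100000 pages, charging one batch at a time */
	printf("10%% over  -> %lu jiffies\n",
	       high_delay(overage_ratio(110000, 100000), CHARGE_BATCH));
	printf("100%% over -> %lu jiffies (clamped)\n",
	       high_delay(overage_ratio(200000, 100000), CHARGE_BATCH));
	return 0;
}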
2640 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high()
2648 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2649 current->memcg_nr_pages_over_high = 0; in mem_cgroup_handle_over_high()
2687 * Clamp the max delay per usermode return so as to still keep the in mem_cgroup_handle_over_high()
2694 * Don't sleep if the amount of jiffies this memcg owes us is so low in mem_cgroup_handle_over_high()
2707 if (nr_reclaimed || nr_retries--) { in mem_cgroup_handle_over_high()
2718 * need to account for any ill-begotten jiffies to pay them off later. in mem_cgroup_handle_over_high()
2725 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2747 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2748 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2751 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2769 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge_memcg()
2815 if (nr_retries--) in try_charge_memcg()
2844 return -ENOMEM; in try_charge_memcg()
2858 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2860 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2866 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2880 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2881 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2882 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2883 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2888 schedule_work(&memcg->high_work); in try_charge_memcg()
2900 * Target some best-effort fairness between the tasks, in try_charge_memcg()
2901 * and distribute reclaim work and delay penalties in try_charge_memcg()
2904 current->memcg_nr_pages_over_high += batch; in try_charge_memcg()
2917 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && in try_charge_memcg()
2918 !(current->flags & PF_MEMALLOC) && in try_charge_memcg()
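
try_charge_memcg() charges the memory+swap counter first and only then the plain memory counter, rolling the memsw charge back if the second step fails so the two counters never drift apart. A minimal sketch of that ordering with toy counters; batching, the per-CPU stock, reclaim retries, and the high-limit bookkeeping are all omitted.

#include <stdbool.h>
#include <stdio.h>

/* Toy limited counter; a stand-in for the kernel's page_counter. */
struct counter {
	unsigned long usage;
	unsigned long max;
};

static bool try_charge_counter(struct counter *c, unsigned long n)
{
	if (c->usage + n > c->max)
		return false;
	c->usage += n;
	return true;
}

static void uncharge_counter(struct counter *c, unsigned long n)
{
	c->usage -= n;
}

/* Memory+swap is charged first, then plain memory; if the memory charge
 * fails, the memsw charge is rolled back so neither counter leaks. */
static bool try_charge(struct counter *memory, struct counter *memsw,
		       unsigned long nr_pages)
{
	if (!try_charge_counter(memsw, nr_pages))
		return false;                 /* over the memory+swap limit */
	if (!try_charge_counter(memory, nr_pages)) {
		uncharge_counter(memsw, nr_pages);
		return false;                 /* over the memory limit      */
	}
	return true;
}

int main(void)
{
	struct counter memory = { .usage = 0, .max = 100 };
	struct counter memsw  = { .usage = 0, .max = 150 };

	printf("charge 80: %s\n", try_charge(&memory, &memsw, 80) ? "ok" : "fail");
	printf("charge 40: %s\n", try_charge(&memory, &memsw, 40) ? "ok" : "fail");
	printf("memory=%lu memsw=%lu\n", memory.usage, memsw.usage);
	return 0;
}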
2934 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2943 page_counter_uncharge(&memcg->memory, nr_pages); in mem_cgroup_cancel_charge()
2945 page_counter_uncharge(&memcg->memsw, nr_pages); in mem_cgroup_cancel_charge()
2954 * - the page lock in commit_charge()
2955 * - LRU isolation in commit_charge()
2956 * - folio_memcg_lock() in commit_charge()
2957 * - exclusive reference in commit_charge()
2958 * - mem_cgroup_trylock_pages() in commit_charge()
2960 folio->memcg_data = (unsigned long)memcg; in commit_charge()
2964 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2970 css_get(&memcg->css); in mem_cgroup_commit_charge()
3017 return -ENOMEM; in memcg_alloc_slab_cgroups()
3026 slab->memcg_data = memcg_data; in memcg_alloc_slab_cgroups()
3027 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) { in memcg_alloc_slab_cgroups()
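
memcg_alloc_slab_cgroups() publishes the lazily allocated per-object vector with a single cmpxchg and frees its own copy if another CPU won the race. A small userspace model of that publish-or-discard pattern using C11 atomics; the types and names are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy slab with a lazily allocated per-object metadata vector. */
struct slab {
	_Atomic(void **) memcg_data;
	unsigned int nr_objects;
};

static void **slab_get_obj_cgroups(struct slab *s)
{
	void **vec = atomic_load(&s->memcg_data);
	void **expected = NULL;

	if (vec)
		return vec;                     /* someone already set it up */

	vec = calloc(s->nr_objects, sizeof(*vec));
	if (!vec)
		return NULL;

	/* Publish our vector only if nobody beat us to it; if the CAS
	 * fails, another thread won the race and we discard our copy. */
	if (!atomic_compare_exchange_strong(&s->memcg_data, &expected, vec)) {
		free(vec);
		vec = expected;                 /* the winner's vector */
	}
	return vec;
}

int main(void)
{
	struct slab s = { .memcg_data = NULL, .nr_objects = 16 };
	void **a = slab_get_obj_cgroups(&s);
	void **b = slab_get_obj_cgroups(&s);

	printf("same vector: %s\n", a == b ? "yes" : "no");
	free(a);
	return 0;
}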
3045 * Slab objects are accounted individually, not per-page. in mem_cgroup_from_obj_folio()
3047 * slab->memcg_data. in mem_cgroup_from_obj_folio()
3059 off = obj_to_index(slab->slab_cache, slab, p); in mem_cgroup_from_obj_folio()
3069 * slab->memcg_data has not been freed yet in mem_cgroup_from_obj_folio()
3128 objcg = rcu_dereference(memcg->objcg); in __get_obj_cgroup_from_memcg()
3143 old = xchg(&current->objcg, NULL); in current_objcg_update()
3154 if (!current->mm || (current->flags & PF_KTHREAD)) in current_objcg_update()
3183 } while (!try_cmpxchg(&current->objcg, &old, objcg)); in current_objcg_update()
3194 memcg = current->active_memcg; in current_obj_cgroup()
3198 objcg = READ_ONCE(current->objcg); in current_obj_cgroup()
3223 objcg = rcu_dereference_check(memcg->objcg, 1); in current_obj_cgroup()
3260 page_counter_charge(&memcg->kmem, nr_pages); in memcg_account_kmem()
3262 page_counter_uncharge(&memcg->kmem, -nr_pages); in memcg_account_kmem()
3279 memcg_account_kmem(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
3282 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
3307 css_put(&memcg->css); in obj_cgroup_charge_pages()
3330 page->memcg_data = (unsigned long)objcg | in __memcg_kmem_charge_page()
3354 folio->memcg_data = 0; in __memcg_kmem_uncharge_page()
3374 if (READ_ONCE(stock->cached_objcg) != objcg) { in mod_objcg_state()
3377 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in mod_objcg_state()
3378 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in mod_objcg_state()
3379 WRITE_ONCE(stock->cached_objcg, objcg); in mod_objcg_state()
3380 stock->cached_pgdat = pgdat; in mod_objcg_state()
3381 } else if (stock->cached_pgdat != pgdat) { in mod_objcg_state()
3383 struct pglist_data *oldpg = stock->cached_pgdat; in mod_objcg_state()
3385 if (stock->nr_slab_reclaimable_b) { in mod_objcg_state()
3387 stock->nr_slab_reclaimable_b); in mod_objcg_state()
3388 stock->nr_slab_reclaimable_b = 0; in mod_objcg_state()
3390 if (stock->nr_slab_unreclaimable_b) { in mod_objcg_state()
3392 stock->nr_slab_unreclaimable_b); in mod_objcg_state()
3393 stock->nr_slab_unreclaimable_b = 0; in mod_objcg_state()
3395 stock->cached_pgdat = pgdat; in mod_objcg_state()
3398 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b in mod_objcg_state()
3399 : &stock->nr_slab_unreclaimable_b; in mod_objcg_state()
3433 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { in consume_obj_stock()
3434 stock->nr_bytes -= nr_bytes; in consume_obj_stock()
3445 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); in drain_obj_stock()
3450 if (stock->nr_bytes) { in drain_obj_stock()
3451 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock()
3452 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); in drain_obj_stock()
3459 memcg_account_kmem(memcg, -nr_pages); in drain_obj_stock()
3462 css_put(&memcg->css); in drain_obj_stock()
3466 * The leftover is flushed to the centralized per-memcg value. in drain_obj_stock()
3468 * to a per-cpu stock (probably, on an other CPU), see in drain_obj_stock()
3471 * How often it's flushed is a trade-off between the memory in drain_obj_stock()
3475 atomic_add(nr_bytes, &old->nr_charged_bytes); in drain_obj_stock()
3476 stock->nr_bytes = 0; in drain_obj_stock()
3482 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3483 if (stock->nr_slab_reclaimable_b) { in drain_obj_stock()
3484 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3486 stock->nr_slab_reclaimable_b); in drain_obj_stock()
3487 stock->nr_slab_reclaimable_b = 0; in drain_obj_stock()
3489 if (stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3490 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3492 stock->nr_slab_unreclaimable_b); in drain_obj_stock()
3493 stock->nr_slab_unreclaimable_b = 0; in drain_obj_stock()
3495 stock->cached_pgdat = NULL; in drain_obj_stock()
3498 WRITE_ONCE(stock->cached_objcg, NULL); in drain_obj_stock()
3509 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); in obj_stock_flush_required()
3532 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ in refill_obj_stock()
3535 WRITE_ONCE(stock->cached_objcg, objcg); in refill_obj_stock()
3536 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in refill_obj_stock()
3537 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in refill_obj_stock()
3540 stock->nr_bytes += nr_bytes; in refill_obj_stock()
3542 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { in refill_obj_stock()
3543 nr_pages = stock->nr_bytes >> PAGE_SHIFT; in refill_obj_stock()
3544 stock->nr_bytes &= (PAGE_SIZE - 1); in refill_obj_stock()
3564 * In theory, objcg->nr_charged_bytes can have enough in obj_cgroup_charge()
3565 * pre-charged bytes to satisfy the allocation. However, in obj_cgroup_charge()
3566 * flushing objcg->nr_charged_bytes requires two atomic in obj_cgroup_charge()
3567 * operations, and objcg->nr_charged_bytes can't be big. in obj_cgroup_charge()
3568 * The shared objcg->nr_charged_bytes can also become a in obj_cgroup_charge()
3572 * objcg->nr_charged_bytes later on when objcg changes. in obj_cgroup_charge()
3574 * The stock's nr_bytes may contain enough pre-charged bytes in obj_cgroup_charge()
3576 * on the pre-charged bytes not being changed outside of in obj_cgroup_charge()
3578 * pre-charged bytes as well when charging pages. To avoid a in obj_cgroup_charge()
3581 * to temporarily allow the pre-charged bytes to exceed the page in obj_cgroup_charge()
3582 * size limit. The maximum reachable value of the pre-charged in obj_cgroup_charge()
3583 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data in obj_cgroup_charge()
3587 nr_bytes = size & (PAGE_SIZE - 1); in obj_cgroup_charge()
3594 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); in obj_cgroup_charge()
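
obj_cgroup_charge() rounds a byte-sized charge up to whole pages for the page counters and parks the unused tail of the last page in the per-CPU object stock, so later small charges can be served without touching the counters at all. A minimal model of that split, reusing the 92-byte example; the struct and field names are mine.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Toy byte-level charging on top of a page-granular counter. */
struct objcg_model {
	unsigned long charged_pages;   /* what the page counters see       */
	unsigned int  stock_bytes;     /* locally cached pre-charged bytes */
};

static void charge_bytes(struct objcg_model *o, size_t size)
{
	if (o->stock_bytes >= size) {          /* fast path: stock hit */
		o->stock_bytes -= size;
		return;
	}

	unsigned long nr_pages = size / PAGE_SIZE;
	unsigned int nr_bytes = size % PAGE_SIZE;

	if (nr_bytes)
		nr_pages++;                    /* round up to whole pages */

	o->charged_pages += nr_pages;
	if (nr_bytes)                          /* keep the tail for later */
		o->stock_bytes += PAGE_SIZE - nr_bytes;
}

int main(void)
{
	struct objcg_model o = { 0, 0 };

	charge_bytes(&o, 92);     /* 1 page charged, 4004 bytes stocked */
	charge_bytes(&o, 1000);   /* served entirely from the stock     */
	printf("pages=%lu stock=%u bytes\n", o.charged_pages, o.stock_bytes);
	return 0;
}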
3619 folio_page(folio, i)->memcg_data = folio->memcg_data; in split_page_memcg()
3622 obj_cgroup_get_many(__folio_objcg(folio), nr - 1); in split_page_memcg()
3624 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
3629 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3637 * Returns 0 on success, -EINVAL on failure.
3651 mod_memcg_state(from, MEMCG_SWAP, -1); in mem_cgroup_move_swap_account()
3655 return -EINVAL; in mem_cgroup_move_swap_account()
3661 return -EINVAL; in mem_cgroup_move_swap_account()
3674 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3678 ret = -EINTR; in mem_cgroup_resize_max()
3687 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
3688 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3691 ret = -EINVAL; in mem_cgroup_resize_max()
3694 if (max > counter->max) in mem_cgroup_resize_max()
3710 ret = -EBUSY; in mem_cgroup_resize_max()
3738 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id]; in mem_cgroup_soft_limit_reclaim()
3745 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in mem_cgroup_soft_limit_reclaim()
3761 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3764 spin_lock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3774 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3785 spin_unlock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3786 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3799 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3812 /* we call try-to-free pages for make this cgroup empty */ in mem_cgroup_force_empty()
3818 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3820 return -EINTR; in mem_cgroup_force_empty()
3824 nr_retries--; in mem_cgroup_force_empty()
3837 return -EINVAL; in mem_cgroup_force_empty_write()
3853 pr_warn_once("Non-hierarchical mode is deprecated. " in mem_cgroup_hierarchy_write()
3854 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_hierarchy_write()
3857 return -EINVAL; in mem_cgroup_hierarchy_write()
3872 val += total_swap_pages - get_nr_swap_pages(); in mem_cgroup_usage()
3875 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3877 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3896 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
3898 counter = &memcg->memory; in mem_cgroup_read_u64()
3901 counter = &memcg->memsw; in mem_cgroup_read_u64()
3904 counter = &memcg->kmem; in mem_cgroup_read_u64()
3907 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3913 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
3915 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3917 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3921 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
3923 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
3925 return counter->failcnt; in mem_cgroup_read_u64()
3927 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; in mem_cgroup_read_u64()
3940 return -EINVAL; in mem_cgroup_dummy_seq_show()
3956 return -ENOMEM; in memcg_online_kmem()
3958 objcg->memcg = memcg; in memcg_online_kmem()
3959 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3961 memcg->orig_objcg = objcg; in memcg_online_kmem()
3965 memcg->kmemcg_id = memcg->id.id; in memcg_online_kmem()
3989 * The ordering is imposed by list_lru_node->lock taken by in memcg_offline_kmem()
4010 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
4014 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
4032 memcg->tcpmem_active = true; in memcg_update_tcp_max()
4051 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
4055 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
4058 ret = -EINVAL; in mem_cgroup_write()
4061 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
4071 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
4082 ret = -EOPNOTSUPP; in mem_cgroup_write()
4084 WRITE_ONCE(memcg->soft_limit, nr_pages); in mem_cgroup_write()
4098 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
4100 counter = &memcg->memory; in mem_cgroup_reset()
4103 counter = &memcg->memsw; in mem_cgroup_reset()
4106 counter = &memcg->kmem; in mem_cgroup_reset()
4109 counter = &memcg->tcpmem; in mem_cgroup_reset()
4115 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
4120 counter->failcnt = 0; in mem_cgroup_reset()
4132 return mem_cgroup_from_css(css)->move_charge_at_immigrate; in mem_cgroup_move_charge_read()
4142 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_move_charge_write()
4146 return -EINVAL; in mem_cgroup_move_charge_write()
4149 * No kind of locking is needed in here, because ->can_attach() will in mem_cgroup_move_charge_write()
4154 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
4161 return -ENOSYS; in mem_cgroup_move_charge_write()
4169 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4229 seq_printf(m, "%s=%lu", stat->name, in memcg_numa_stat_show()
4230 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4235 stat->lru_mask, false)); in memcg_numa_stat_show()
4241 seq_printf(m, "hierarchical_%s=%lu", stat->name, in memcg_numa_stat_show()
4242 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4247 stat->lru_mask, true)); in memcg_numa_stat_show()
4328 memory = min(memory, READ_ONCE(mi->memory.max)); in memcg1_stat_format()
4329 memsw = min(memsw, READ_ONCE(mi->memsw.max)); in memcg1_stat_format()
4362 mz = memcg->nodeinfo[pgdat->node_id]; in memcg1_stat_format()
4364 anon_cost += mz->lruvec.anon_cost; in memcg1_stat_format()
4365 file_cost += mz->lruvec.file_cost; in memcg1_stat_format()
4387 return -EINVAL; in mem_cgroup_swappiness_write()
4390 WRITE_ONCE(memcg->swappiness, val); in mem_cgroup_swappiness_write()
4405 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
4407 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
4419 i = t->current_threshold; in __mem_cgroup_threshold()
4427 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
4428 eventfd_signal(t->entries[i].eventfd); in __mem_cgroup_threshold()
4439 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
4440 eventfd_signal(t->entries[i].eventfd); in __mem_cgroup_threshold()
4443 t->current_threshold = i - 1; in __mem_cgroup_threshold()
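
__mem_cgroup_threshold() keeps the registered thresholds sorted and remembers the index of the last one at or below the previous usage; on each check it sweeps downward to signal thresholds the usage fell back under, then upward to signal the ones it climbed past. A compact userspace model, with a print standing in for the eventfd signal and invented names throughout.

#include <stdio.h>

struct threshold {
	unsigned long value;
	const char   *name;     /* stands in for the eventfd to signal */
};

struct thresholds {
	struct threshold *entries;           /* sorted ascending          */
	int size;
	int current_threshold;  /* index of last entry <= previous usage */
};

static void notify(const struct threshold *t)
{
	printf("signal %s (%lu)\n", t->name, t->value);
}

static void check_thresholds(struct thresholds *t, unsigned long usage)
{
	int i = t->current_threshold;

	/* Usage fell: notify every threshold we dropped back below ... */
	for (; i >= 0 && t->entries[i].value > usage; i--)
		notify(&t->entries[i]);
	i++;
	/* ... usage rose: notify every threshold we climbed past. */
	for (; i < t->size && t->entries[i].value <= usage; i++)
		notify(&t->entries[i]);

	t->current_threshold = i - 1;
}

int main(void)
{
	struct threshold e[] = {
		{ 100, "low" }, { 500, "mid" }, { 1000, "high" },
	};
	struct thresholds t = { e, 3, -1 };

	check_thresholds(&t, 600);   /* signals low, mid */
	check_thresholds(&t, 50);    /* signals mid, low */
	return 0;
}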
4464 if (_a->threshold > _b->threshold) in compare_thresholds()
4467 if (_a->threshold < _b->threshold) in compare_thresholds()
4468 return -1; in compare_thresholds()
4479 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4480 eventfd_signal(ev->eventfd); in mem_cgroup_oom_notify_cb()
4503 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
4507 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4510 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4513 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4519 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4522 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
4527 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
4530 new->size = size; in __mem_cgroup_usage_register_event()
4533 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4534 memcpy(new->entries, thresholds->primary->entries, in __mem_cgroup_usage_register_event()
4535 flex_array_size(new, entries, size - 1)); in __mem_cgroup_usage_register_event()
4538 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
4539 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
4541 /* Sort thresholds. Registering of new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
4542 sort(new->entries, size, sizeof(*new->entries), in __mem_cgroup_usage_register_event()
4546 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
4548 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
4550 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
4554 ++new->current_threshold; in __mem_cgroup_usage_register_event()
4560 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
4561 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
4563 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
4569 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
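
Registering a new threshold rebuilds the array: copy the old entries, append the new one, sort, and recompute current_threshold for the present usage; the kernel then publishes the result with rcu_assign_pointer() and keeps the previous array as a spare. A sketch of the rebuild step with simplified types of my own.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { unsigned long threshold; int id; };

static int cmp_entry(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	if (ea->threshold > eb->threshold)
		return 1;
	if (ea->threshold < eb->threshold)
		return -1;
	return 0;
}

/* Build a sorted copy of the old array plus one new entry and work out
 * which entry the current usage sits at. */
static struct entry *register_threshold(const struct entry *old, int old_size,
					struct entry add, unsigned long usage,
					int *new_size, int *current_threshold)
{
	int size = old_size + 1;
	struct entry *new = malloc(size * sizeof(*new));

	if (!new)
		return NULL;
	if (old_size)
		memcpy(new, old, old_size * sizeof(*new));
	new[size - 1] = add;
	qsort(new, size, sizeof(*new), cmp_entry);

	*current_threshold = -1;
	for (int i = 0; i < size; i++) {
		if (new[i].threshold > usage)
			break;
		(*current_threshold)++;
	}
	*new_size = size;
	return new;
}

int main(void)
{
	struct entry old[] = { { 100, 1 }, { 1000, 2 } };
	int size, cur;
	struct entry *new = register_threshold(old, 2, (struct entry){ 400, 3 },
					       600, &size, &cur);

	for (int i = 0; i < size; i++)
		printf("%lu ", new[i].threshold);
	printf("\ncurrent_threshold=%d\n", cur);
	free(new);
	return 0;
}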
4594 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4597 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4600 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4605 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
4613 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4614 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
4620 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
4633 new->size = size; in __mem_cgroup_usage_unregister_event()
4636 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
4637 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4638 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
4641 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
4642 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
4644 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
4648 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
4655 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
4657 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
4664 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
4665 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
4668 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4690 return -ENOMEM; in mem_cgroup_oom_register_event()
4694 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
4695 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4698 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4712 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4713 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
4714 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
4726 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable)); in mem_cgroup_oom_control_read()
4727 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4729 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4740 return -EINVAL; in mem_cgroup_oom_control_write()
4742 WRITE_ONCE(memcg->oom_kill_disable, val); in mem_cgroup_oom_control_write()
4755 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4760 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4765 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4770 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
4772 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4775 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4779 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4787 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4790 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4800 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
4812 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
4813 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
4814 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4816 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
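
The headroom reported to the writeback code is min(max, high) - used at each level, reduced to the tightest value seen along the path to the root, since an ancestor sitting at its ceiling constrains every descendant. A small model of that walk with invented field names; the real function also returns file, dirty, and writeback page counts.

#include <stdio.h>

struct level {
	unsigned long max, high, used;
	struct level *parent;
};

/* Tightest (min(max, high) - used) anywhere on the path to the root. */
static unsigned long wb_headroom(const struct level *l)
{
	unsigned long headroom = (unsigned long)-1;   /* "unlimited" */

	for (; l; l = l->parent) {
		unsigned long ceiling = l->max < l->high ? l->max : l->high;
		unsigned long used = l->used < ceiling ? l->used : ceiling;
		unsigned long room = ceiling - used;

		if (room < headroom)
			headroom = room;
	}
	return headroom;
}

int main(void)
{
	struct level root  = { 10000, 8000, 7500, NULL };   /* 500 pages free  */
	struct level child = { 10000, 10000, 1000, &root }; /* 9000 pages free */

	printf("headroom = %lu pages\n", wb_headroom(&child));
	return 0;
}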
4825 * tracks ownership per-page while the latter per-inode. This was a
4826 * deliberate design decision because honoring per-page ownership in the
4828 * and deemed unnecessary given that write-sharing an inode across
4829 * different cgroups isn't a common use-case.
4831 * Combined with inode majority-writer ownership switching, this works well
4852 * page - a page whose memcg and writeback ownerships don't match - is
4858 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4872 int oldest = -1; in mem_cgroup_track_foreign_dirty_slowpath()
4883 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4884 if (frn->bdi_id == wb->bdi->id && in mem_cgroup_track_foreign_dirty_slowpath()
4885 frn->memcg_id == wb->memcg_css->id) in mem_cgroup_track_foreign_dirty_slowpath()
4887 if (time_before64(frn->at, oldest_at) && in mem_cgroup_track_foreign_dirty_slowpath()
4888 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_track_foreign_dirty_slowpath()
4890 oldest_at = frn->at; in mem_cgroup_track_foreign_dirty_slowpath()
4896 * Re-using an existing one. Update timestamp lazily to in mem_cgroup_track_foreign_dirty_slowpath()
4898 * reasonably up-to-date and significantly shorter than in mem_cgroup_track_foreign_dirty_slowpath()
4906 if (time_before64(frn->at, now - update_intv)) in mem_cgroup_track_foreign_dirty_slowpath()
4907 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4910 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4911 frn->bdi_id = wb->bdi->id; in mem_cgroup_track_foreign_dirty_slowpath()
4912 frn->memcg_id = wb->memcg_css->id; in mem_cgroup_track_foreign_dirty_slowpath()
4913 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4920 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign()
4926 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4934 if (time_after64(frn->at, now - intv) && in mem_cgroup_flush_foreign()
4935 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_flush_foreign()
4936 frn->at = 0; in mem_cgroup_flush_foreign()
4937 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); in mem_cgroup_flush_foreign()
4938 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, in mem_cgroup_flush_foreign()
4940 &frn->done); in mem_cgroup_flush_foreign()
4967 * This is way over-engineered. It tries to support fully configurable
4984 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
4986 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
4988 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4991 eventfd_signal(event->eventfd); in memcg_event_remove()
4993 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
4995 css_put(&memcg->css); in memcg_event_remove()
5001 * Called with wqh->lock held and interrupts disabled.
5008 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
5015 * for us. in memcg_event_wake()
5018 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
5021 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
5022 if (!list_empty(&event->list)) { in memcg_event_wake()
5023 list_del_init(&event->list); in memcg_event_wake()
5028 schedule_work(&event->remove); in memcg_event_wake()
5030 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
5042 event->wqh = wqh; in memcg_event_ptable_queue_proc()
5043 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
5070 return -EOPNOTSUPP; in memcg_write_event_control()
5076 return -EINVAL; in memcg_write_event_control()
5081 return -EINVAL; in memcg_write_event_control()
5086 return -ENOMEM; in memcg_write_event_control()
5088 event->memcg = memcg; in memcg_write_event_control()
5089 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
5090 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
5091 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
5092 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
5096 ret = -EBADF; in memcg_write_event_control()
5100 event->eventfd = eventfd_ctx_fileget(efile.file); in memcg_write_event_control()
5101 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
5102 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
5108 ret = -EBADF; in memcg_write_event_control()
5122 cdentry = cfile.file->f_path.dentry; in memcg_write_event_control()
5123 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { in memcg_write_event_control()
5124 ret = -EINVAL; in memcg_write_event_control()
5136 name = cdentry->d_name.name; in memcg_write_event_control()
5139 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
5140 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
5142 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
5143 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
5145 event->register_event = vmpressure_register_event; in memcg_write_event_control()
5146 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
5148 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
5149 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
5151 ret = -EINVAL; in memcg_write_event_control()
5160 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, in memcg_write_event_control()
5162 ret = -EINVAL; in memcg_write_event_control()
5170 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
5174 vfs_poll(efile.file, &event->pt); in memcg_write_event_control()
5176 spin_lock_irq(&memcg->event_list_lock); in memcg_write_event_control()
5177 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
5178 spin_unlock_irq(&memcg->event_list_lock); in memcg_write_event_control()
5190 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
5343 * Swap-out records and page cache shadow entries need to store memcg
5346 * memory-controlled cgroups to 64k.
5353 * even when there are much fewer than 64k cgroups - possibly none.
5355 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5364 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5369 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
5370 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
5371 memcg->id.id = 0; in mem_cgroup_id_remove()
5378 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
5383 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
5387 css_put(&memcg->css); in mem_cgroup_id_put_many()
5397 * mem_cgroup_from_id - look up a memcg from a memcg id
5423 memcg = ERR_PTR(-ENOENT); in mem_cgroup_get_from_ino()
5439 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, in alloc_mem_cgroup_per_node_info()
5441 if (!pn->lruvec_stats_percpu) { in alloc_mem_cgroup_per_node_info()
5446 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
5447 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
5449 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5455 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5460 free_percpu(pn->lruvec_stats_percpu); in free_mem_cgroup_per_node_info()
5468 if (memcg->orig_objcg) in __mem_cgroup_free()
5469 obj_cgroup_put(memcg->orig_objcg); in __mem_cgroup_free()
5473 kfree(memcg->vmstats); in __mem_cgroup_free()
5474 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5491 long error = -ENOMEM; in mem_cgroup_alloc()
5497 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, in mem_cgroup_alloc()
5499 if (memcg->id.id < 0) { in mem_cgroup_alloc()
5500 error = memcg->id.id; in mem_cgroup_alloc()
5504 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL); in mem_cgroup_alloc()
5505 if (!memcg->vmstats) in mem_cgroup_alloc()
5508 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5510 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5515 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu); in mem_cgroup_alloc()
5516 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_alloc()
5517 statc->parent = parent ? pstatc : NULL; in mem_cgroup_alloc()
5518 statc->vmstats = memcg->vmstats; in mem_cgroup_alloc()
5528 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5529 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5530 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5531 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5532 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5533 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5534 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5535 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5537 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5538 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
5541 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5543 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5547 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5548 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5549 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5571 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5572 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5574 memcg->zswap_max = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5575 WRITE_ONCE(memcg->zswap_writeback, in mem_cgroup_css_alloc()
5576 !parent || READ_ONCE(parent->zswap_writeback)); in mem_cgroup_css_alloc()
5578 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5580 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); in mem_cgroup_css_alloc()
5581 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); in mem_cgroup_css_alloc()
5583 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5584 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5585 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5586 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5589 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5590 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5591 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5592 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5595 return &memcg->css; in mem_cgroup_css_alloc()
5606 return &memcg->css; in mem_cgroup_css_alloc()
5630 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5643 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_css_online()
5650 return -ENOMEM; in mem_cgroup_css_online()
5663 spin_lock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5664 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5665 list_del_init(&event->list); in mem_cgroup_css_offline()
5666 schedule_work(&event->remove); in mem_cgroup_css_offline()
5668 spin_unlock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5670 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5671 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5700 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5705 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5713 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5714 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5721 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5737 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5738 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5739 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5740 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5741 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5742 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5743 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5744 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5745 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5757 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5762 * below us. We're in a per-cpu loop here and this is in mem_cgroup_css_rstat_flush()
5765 delta = memcg->vmstats->state_pending[i]; in mem_cgroup_css_rstat_flush()
5767 memcg->vmstats->state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5771 v = READ_ONCE(statc->state[i]); in mem_cgroup_css_rstat_flush()
5772 if (v != statc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5773 delta_cpu = v - statc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5775 statc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5780 memcg->vmstats->state_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5783 memcg->vmstats->state[i] += delta; in mem_cgroup_css_rstat_flush()
5785 parent->vmstats->state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5790 delta = memcg->vmstats->events_pending[i]; in mem_cgroup_css_rstat_flush()
5792 memcg->vmstats->events_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5795 v = READ_ONCE(statc->events[i]); in mem_cgroup_css_rstat_flush()
5796 if (v != statc->events_prev[i]) { in mem_cgroup_css_rstat_flush()
5797 delta_cpu = v - statc->events_prev[i]; in mem_cgroup_css_rstat_flush()
5799 statc->events_prev[i] = v; in mem_cgroup_css_rstat_flush()
5803 memcg->vmstats->events_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5806 memcg->vmstats->events[i] += delta; in mem_cgroup_css_rstat_flush()
5808 parent->vmstats->events_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5813 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5818 ppn = parent->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5820 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5823 delta = pn->lruvec_stats.state_pending[i]; in mem_cgroup_css_rstat_flush()
5825 pn->lruvec_stats.state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5828 v = READ_ONCE(lstatc->state[i]); in mem_cgroup_css_rstat_flush()
5829 if (v != lstatc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5830 delta_cpu = v - lstatc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5832 lstatc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5836 pn->lruvec_stats.state_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
5839 pn->lruvec_stats.state[i] += delta; in mem_cgroup_css_rstat_flush()
5841 ppn->lruvec_stats.state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5845 statc->stats_updates = 0; in mem_cgroup_css_rstat_flush()
5846 /* We are in a per-cpu loop here, only do the atomic write once */ in mem_cgroup_css_rstat_flush()
5847 if (atomic64_read(&memcg->vmstats->stats_updates)) in mem_cgroup_css_rstat_flush()
5848 atomic64_set(&memcg->vmstats->stats_updates, 0); in mem_cgroup_css_rstat_flush()
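
mem_cgroup_css_rstat_flush() turns per-CPU counters into hierarchical totals by flushing children before parents: each flush takes the deltas queued by already-flushed children, adds this CPU's change since the last snapshot, and queues the sum at its own parent. A minimal userspace model of that propagation with simplified arrays and names.

#include <stdio.h>

#define NR_CPUS  2
#define NR_STATS 1

/* One cgroup level in a simplified rstat-style hierarchy. */
struct group {
	long per_cpu[NR_CPUS][NR_STATS];      /* written by hot paths       */
	long per_cpu_prev[NR_CPUS][NR_STATS]; /* snapshot at the last flush */
	long local[NR_STATS];                 /* this group only            */
	long total[NR_STATS];                 /* this group + descendants   */
	long pending[NR_STATS];               /* deltas queued by children  */
	struct group *parent;
};

/* Flush one CPU's counters for one group. */
static void flush_cpu(struct group *g, int cpu)
{
	for (int i = 0; i < NR_STATS; i++) {
		long delta = g->pending[i];       /* from flushed children */
		g->pending[i] = 0;

		long v = g->per_cpu[cpu][i];
		long delta_cpu = v - g->per_cpu_prev[cpu][i];
		g->per_cpu_prev[cpu][i] = v;

		g->local[i] += delta_cpu;
		delta += delta_cpu;

		g->total[i] += delta;
		if (g->parent)
			g->parent->pending[i] += delta;
	}
}

int main(void)
{
	struct group root = { .parent = NULL };
	struct group child = { .parent = &root };

	child.per_cpu[0][0] = 5;      /* hot-path updates on two CPUs */
	child.per_cpu[1][0] = 3;
	root.per_cpu[0][0] = 1;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		flush_cpu(&child, cpu);   /* children flush before parents */
		flush_cpu(&root, cpu);
	}
	printf("child local=%ld total=%ld, root local=%ld total=%ld\n",
	       child.local[0], child.total[0], root.local[0], root.total[0]);
	return 0;
}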
5865 while (count--) { in mem_cgroup_do_precharge()
5935 entry->val = ent.val; in mc_handle_swap_pte()
5953 if (!vma->vm_file) /* anonymous vma */ in mc_handle_file_pte()
5958 /* folio is moved even if it's not RSS of this task(page-faulted). */ in mc_handle_file_pte()
5961 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index); in mc_handle_file_pte()
5968 * mem_cgroup_move_account - move account of the page
5995 ret = -EINVAL; in mem_cgroup_move_account()
6007 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); in mem_cgroup_move_account()
6011 -nr_pages); in mem_cgroup_move_account()
6017 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); in mem_cgroup_move_account()
6021 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); in mem_cgroup_move_account()
6026 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); in mem_cgroup_move_account()
6035 -nr_pages); in mem_cgroup_move_account()
6044 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages); in mem_cgroup_move_account()
6049 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); in mem_cgroup_move_account()
6068 css_get(&to->css); in mem_cgroup_move_account()
6069 css_put(&from->css); in mem_cgroup_move_account()
6071 folio->memcg_data = (unsigned long)to; in mem_cgroup_move_account()
6081 mem_cgroup_charge_statistics(from, -nr_pages); in mem_cgroup_move_account()
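mem_cgroup_move_account() keeps the hierarchy's statistics consistent by applying every counter update twice with opposite signs: each state the folio contributes to is decremented on the source lruvec and incremented by the same amount on the destination, before folio->memcg_data is retargeted and the css references are swapped. A reduced userspace sketch of that paired-update idiom follows; the stat names and types are made up for illustration, not the kernel's node_stat_item values.

#include <stdio.h>

enum stat { ANON, FILE_PAGES, FILE_MAPPED, NR_STATS };

struct vec { long state[NR_STATS]; };

static void mod_state(struct vec *v, enum stat s, long delta)
{
	v->state[s] += delta;
}

/* move nr_pages worth of file-page accounting from one group to another */
static void move_account(struct vec *from, struct vec *to,
			 long nr_pages, int mapped)
{
	mod_state(from, FILE_PAGES, -nr_pages);
	mod_state(to, FILE_PAGES, nr_pages);
	if (mapped) {
		mod_state(from, FILE_MAPPED, -nr_pages);
		mod_state(to, FILE_MAPPED, nr_pages);
	}
}

int main(void)
{
	struct vec from = { { 0, 8, 8 } }, to = { { 0 } };

	move_account(&from, &to, 4, 1);
	printf("from: %ld/%ld  to: %ld/%ld\n",
	       from.state[FILE_PAGES], from.state[FILE_MAPPED],
	       to.state[FILE_PAGES], to.state[FILE_MAPPED]);
	return 0;
}

Either both halves of each pair run or neither does, so the totals summed over all groups are unchanged by a move.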
6089 * get_mctgt_type - get target type of moving charge
6097 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6098 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6099 * move charge. If @target is not NULL, the page is stored in target->page
6101 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6103 * stored in target->ent.
6104 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6164 target->page = page; in get_mctgt_type()
6174 * But we cannot move a tail-page in a THP. in get_mctgt_type()
6180 target->ent = ent; in get_mctgt_type()
6214 target->page = page; in get_mctgt_type_thp()
6231 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
6248 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
6254 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
6311 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); in __mem_cgroup_clear_mc()
6316 * we charged both to->memory and to->memsw, so we in __mem_cgroup_clear_mc()
6317 * should uncharge to->memory. in __mem_cgroup_clear_mc()
6320 page_counter_uncharge(&mc.to->memory, mc.moved_swap); in __mem_cgroup_clear_mc()
6363 * Multi-process migrations only happen on the default hierarchy in mem_cgroup_can_attach()
6382 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
6394 if (mm->owner == p) { in mem_cgroup_can_attach()
6429 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
6448 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6459 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6470 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
6499 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6512 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6522 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_move_charge_pte_range()
6551 * for already started RCU-only updates to finish. in mem_cgroup_move_charge()
6553 atomic_inc(&mc.from->moving_account); in mem_cgroup_move_charge()
6561 * to move enough charges, but moving charge is a best-effort in mem_cgroup_move_charge()
6574 atomic_dec(&mc.from->moving_account); in mem_cgroup_move_charge()
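The move-charge walkers above are built around a two-pass precharge scheme: the counting walk first works out how many pages would move and that many charges are taken on the destination up front, and the moving walk then consumes mc.precharge one page at a time instead of charging under the page-table lock. A small userspace model of the idea is below, with a toy "movable" array standing in for the page-table walk; none of the names are kernel symbols.

#include <stdio.h>

static long precharge;		/* models mc.precharge */

/* first pass: count how many entries would be moved */
static long count_pass(const int *movable, int n)
{
	long count = 0;

	for (int i = 0; i < n; i++)
		if (movable[i])
			count++;
	return count;
}

/* second pass: move entries, consuming the precharged budget */
static void move_pass(const int *movable, int n)
{
	for (int i = 0; i < n; i++) {
		if (!movable[i])
			continue;
		if (!precharge)
			break;	/* out of budget; the kernel tops up and retries */
		/* ... move one page's charge here ... */
		precharge--;
	}
}

int main(void)
{
	int movable[] = { 1, 0, 1, 1, 0 };
	int n = sizeof(movable) / sizeof(movable[0]);

	precharge = count_pass(movable, n);	/* charged to the target up front */
	printf("precharged %ld pages\n", precharge);
	move_pass(movable, n);
	printf("leftover precharge %ld\n", precharge);
	return 0;
}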
6602 * Set the update flag to cause task->objcg to be initialized lazily in mem_cgroup_fork()
6607 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG; in mem_cgroup_fork()
6612 struct obj_cgroup *objcg = task->objcg; in mem_cgroup_exit()
6625 task->objcg = NULL; in mem_cgroup_exit()
6643 if (task->mm && READ_ONCE(task->mm->owner) == task) in mem_cgroup_lru_gen_attach()
6644 lru_gen_migrate_mm(task->mm); in mem_cgroup_lru_gen_attach()
6659 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg); in mem_cgroup_kmem_attach()
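The fork/exit/attach hooks above manage current's objcg lazily through a flag kept in the low bit of the task->objcg pointer: fork and attach only set the bit, and the pointer is resolved the next time the task actually charges memory. The sketch below shows the tagged-pointer idiom in isolation; the flag value and helper names here are assumptions for illustration, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define UPDATE_FLAG 0x1UL	/* assumed flag bit, stored in the pointer */

struct objcg { int id; };

static struct objcg *ptr_of(uintptr_t tagged)
{
	return (struct objcg *)(tagged & ~UPDATE_FLAG);
}

static int needs_update(uintptr_t tagged)
{
	return tagged & UPDATE_FLAG;
}

int main(void)
{
	static struct objcg current_group = { .id = 42 };
	uintptr_t task_objcg = UPDATE_FLAG;	/* as set at fork/attach time */

	if (needs_update(task_objcg)) {
		/* slow path: resolve the real group and clear the flag */
		task_objcg = (uintptr_t)&current_group;
	}
	printf("objcg id = %d\n", ptr_of(task_objcg)->id);
	return 0;
}

Keeping the flag inside the pointer itself means the common charge path can test and dereference a single word instead of maintaining a separate per-task flag.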
6689 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6697 return (u64)memcg->memory.watermark * PAGE_SIZE; in memory_peak_read()
6703 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); in memory_min_show()
6718 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6726 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); in memory_low_show()
6741 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6749 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); in memory_high_show()
6766 page_counter_set_high(&memcg->memory, high); in memory_high_write()
6769 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6784 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6787 if (!reclaimed && !nr_retries--) in memory_high_write()
6798 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); in memory_max_show()
6815 xchg(&memcg->memory.max, max); in memory_max_write()
6818 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6833 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6835 nr_reclaims--; in memory_max_write()
6868 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6876 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6887 return -ENOMEM; in memory_stat_show()
6937 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); in memory_oom_group_show()
6950 return -EINVAL; in memory_oom_group_write()
6957 return -EINVAL; in memory_oom_group_write()
6959 WRITE_ONCE(memcg->oom_group, oom_group); in memory_oom_group_write()
6983 return -EINTR; in memory_reclaim()
6994 min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX), in memory_reclaim()
6997 if (!reclaimed && !nr_retries--) in memory_reclaim()
6998 return -EAGAIN; in memory_reclaim()
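memory_high_write(), memory_max_write() and memory_reclaim() above share the same bounded retry shape: work out how far usage is over the target, ask the reclaimer for that many pages (capped per attempt), and give up after a fixed number of fruitless rounds, at which point memory.max falls back to the OOM killer and memory.reclaim returns -EAGAIN. The condensed userspace model below captures that loop; try_reclaim() is a stand-in for try_to_free_mem_cgroup_pages(), not a real call, and unlike the kernel (which only burns a retry when a round reclaims nothing) the sketch spends one retry per attempt.

#include <stdio.h>

static unsigned long usage = 1000;

/* stand-in for the reclaimer: frees at most 64 of the requested pages */
static unsigned long try_reclaim(unsigned long want)
{
	unsigned long freed = want < 64 ? want : 64;

	usage -= freed;
	return freed;
}

static int shrink_to(unsigned long target)
{
	int nr_retries = 5;

	while (usage > target) {
		if (!nr_retries--)
			return -1;	/* memory.max would OOM-kill here;
					 * memory.reclaim returns -EAGAIN */
		try_reclaim(usage - target);
	}
	return 0;
}

int main(void)
{
	printf("shrink_to(800) -> %d, usage=%lu\n", shrink_to(800), usage);
	printf("shrink_to(100) -> %d, usage=%lu\n", shrink_to(100), usage);
	return 0;
}

The first call succeeds once usage fits under the new target; the second exhausts its retry budget with usage still above the target and reports failure.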
7121 * This makes distribution proportional, but also work-conserving:
7132 * of the ancestor's claim to protection, any unutilized -
7133 * "floating" - protection from up the tree is distributed in
7159 * claimed protection in order to be work-conserving: claimed in effective_protection()
7197 * aren't read atomically - make sure the division is sane. in effective_protection()
7206 unclaimed = parent_effective - siblings_protected; in effective_protection()
7207 unclaimed *= usage - protected; in effective_protection()
7208 unclaimed /= parent_usage - siblings_protected; in effective_protection()
7217 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7218 * @root: the top ancestor of the sub-tree being checked
7222 * of a top-down tree iteration, not for isolated queries.
7246 usage = page_counter_read(&memcg->memory); in mem_cgroup_calculate_protection()
7253 memcg->memory.emin = READ_ONCE(memcg->memory.min); in mem_cgroup_calculate_protection()
7254 memcg->memory.elow = READ_ONCE(memcg->memory.low); in mem_cgroup_calculate_protection()
7258 parent_usage = page_counter_read(&parent->memory); in mem_cgroup_calculate_protection()
7260 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
7261 READ_ONCE(memcg->memory.min), in mem_cgroup_calculate_protection()
7262 READ_ONCE(parent->memory.emin), in mem_cgroup_calculate_protection()
7263 atomic_long_read(&parent->memory.children_min_usage))); in mem_cgroup_calculate_protection()
7265 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
7266 READ_ONCE(memcg->memory.low), in mem_cgroup_calculate_protection()
7267 READ_ONCE(parent->memory.elow), in mem_cgroup_calculate_protection()
7268 atomic_long_read(&parent->memory.children_low_usage))); in mem_cgroup_calculate_protection()
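The effective_protection() fragments above encode two rules: when the children of a level collectively claim more protection than the parent can pass down, each child receives a share proportional to its own claim; when the level is under-committed, a child with unprotected usage additionally absorbs part of the parent's unclaimed, "floating" protection in proportion to that usage, which is the unclaimed calculation quoted above. The userspace helper below mirrors that shape so the arithmetic can be checked in isolation; it is a sketch and omits the kernel's recursive-protection handling.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long effective_protection(unsigned long usage,
					  unsigned long parent_usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected)
{
	unsigned long protected = min_ul(usage, setting);
	unsigned long ep = protected;

	/* over-committed level: hand out proportional shares */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;

	/* under-committed level: distribute the floating remainder */
	if (parent_effective > siblings_protected &&
	    parent_usage > siblings_protected &&
	    usage > protected) {
		unsigned long unclaimed = parent_effective - siblings_protected;

		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;
		ep += unclaimed;
	}
	return ep;
}

int main(void)
{
	/* parent passes down 512 pages of protection, siblings claim 256 */
	printf("%lu\n", effective_protection(300, 600, 128, 512, 256));
	return 0;
}

With the parent passing down 512 pages, siblings claiming 256 of them, and a child using 300 pages while claiming 128, the child ends up with 128 claimed plus 128 floating, i.e. 256 pages of effective protection.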
7292 css_put(&memcg->css); in __mem_cgroup_charge()
7298 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7324 return -EOPNOTSUPP; in mem_cgroup_hugetlb_try_charge()
7327 return -ENOMEM; in mem_cgroup_hugetlb_try_charge()
7333 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7357 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_folio()
7363 css_put(&memcg->css); in mem_cgroup_swapin_charge_folio()
7368 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7386 * so this is a non-issue here. Memory and swap charge lifetimes in mem_cgroup_swapin_uncharge_swap()
7417 if (ug->nr_memory) { in uncharge_batch()
7418 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); in uncharge_batch()
7420 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); in uncharge_batch()
7421 if (ug->nr_kmem) in uncharge_batch()
7422 memcg_account_kmem(ug->memcg, -ug->nr_kmem); in uncharge_batch()
7423 memcg_oom_recover(ug->memcg); in uncharge_batch()
7427 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
7428 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); in uncharge_batch()
7429 memcg_check_events(ug->memcg, ug->nid); in uncharge_batch()
7433 css_put(&ug->memcg->css); in uncharge_batch()
7463 if (ug->memcg != memcg) { in uncharge_folio()
7464 if (ug->memcg) { in uncharge_folio()
7468 ug->memcg = memcg; in uncharge_folio()
7469 ug->nid = folio_nid(folio); in uncharge_folio()
7472 css_get(&memcg->css); in uncharge_folio()
7478 ug->nr_memory += nr_pages; in uncharge_folio()
7479 ug->nr_kmem += nr_pages; in uncharge_folio()
7481 folio->memcg_data = 0; in uncharge_folio()
7486 ug->nr_memory += nr_pages; in uncharge_folio()
7487 ug->pgpgout++; in uncharge_folio()
7489 folio->memcg_data = 0; in uncharge_folio()
7492 css_put(&memcg->css); in uncharge_folio()
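uncharge_folio() and uncharge_batch() above implement a simple gather pattern: consecutive folios charged to the same memcg are accumulated in a struct uncharge_gather, and the expensive page_counter and event updates are issued once per batch, when the owning memcg changes or the caller is done. A stripped-down userspace version of the idiom follows, with illustrative types rather than kernel structures.

#include <stdio.h>

struct group { const char *name; long charged; };

struct uncharge_gather {
	struct group *grp;
	long nr_pages;
};

static void uncharge_batch(const struct uncharge_gather *ug)
{
	if (!ug->grp)
		return;
	ug->grp->charged -= ug->nr_pages;	/* one counter update per batch */
	printf("flush %s: -%ld\n", ug->grp->name, ug->nr_pages);
}

static void uncharge_one(struct uncharge_gather *ug, struct group *grp,
			 long nr_pages)
{
	if (ug->grp != grp) {		/* owner changed: flush and restart */
		uncharge_batch(ug);
		ug->grp = grp;
		ug->nr_pages = 0;
	}
	ug->nr_pages += nr_pages;
}

int main(void)
{
	struct group a = { "A", 10 }, b = { "B", 10 };
	struct uncharge_gather ug = { 0 };

	uncharge_one(&ug, &a, 1);
	uncharge_one(&ug, &a, 4);	/* same owner: just accumulate */
	uncharge_one(&ug, &b, 2);	/* owner changed: A is flushed here */
	uncharge_batch(&ug);		/* final flush for B */
	printf("A=%ld B=%ld\n", a.charged, b.charged);
	return 0;
}

Callers that free long lists of folios belonging to the same cgroup therefore touch the shared counters a handful of times rather than once per folio.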
7499 /* Don't touch folio->lru of any random page, pre-check: */ in __mem_cgroup_uncharge()
7509 * __mem_cgroup_uncharge_list - uncharge a list of pages
7528 * mem_cgroup_replace_folio - Charge a folio's replacement.
7536 * Both folios must be locked, @new->mapping must be set up.
7561 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_replace_folio()
7563 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_replace_folio()
7565 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_replace_folio()
7568 css_get(&memcg->css); in mem_cgroup_replace_folio()
7578 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7586 * Both folios must be locked, @new->mapping must be set up.
7623 old->memcg_data = 0; in mem_cgroup_migrate()
7644 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
7646 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
7647 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
7654 if (sk->sk_memcg) in mem_cgroup_sk_free()
7655 css_put(&sk->sk_memcg->css); in mem_cgroup_sk_free()
7659 * mem_cgroup_charge_skmem - charge socket memory
7673 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
7674 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
7677 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
7679 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
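For the legacy tcpmem counter, mem_cgroup_charge_skmem() above tries the charge first and, on failure, still force-charges past the limit so the socket keeps functioning, while flagging the cgroup as under socket-memory pressure and reporting failure to the caller. The sketch below models that "try, then force under pressure" behaviour in userspace; the kernel applies additional conditions (for example on the allocation context) before forcing the charge.

#include <stdbool.h>
#include <stdio.h>

struct counter { long usage, max; };

static bool counter_try_charge(struct counter *c, long n)
{
	if (c->usage + n > c->max)
		return false;
	c->usage += n;
	return true;
}

static int tcpmem_pressure;

static bool charge_skmem(struct counter *tcpmem, long nr_pages)
{
	if (counter_try_charge(tcpmem, nr_pages)) {
		tcpmem_pressure = 0;
		return true;
	}
	tcpmem_pressure = 1;
	tcpmem->usage += nr_pages;	/* force-charge past the limit */
	return false;
}

int main(void)
{
	struct counter tcpmem = { .usage = 90, .max = 100 };

	printf("%d usage=%ld\n", charge_skmem(&tcpmem, 8), tcpmem.usage);
	printf("%d usage=%ld\n", charge_skmem(&tcpmem, 8), tcpmem.usage);
	return 0;
}

The second call exceeds the limit: the usage still grows, but the caller is told to back off and the pressure flag is left set.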
7694 * mem_cgroup_uncharge_skmem - uncharge socket memory
7701 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
7705 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
7732 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7742 * used for per-memcg-per-cpu caching of per-node statistics. In order in mem_cgroup_init()
7752 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
7760 rtpn->rb_root = RB_ROOT; in mem_cgroup_init()
7761 rtpn->rb_rightmost = NULL; in mem_cgroup_init()
7762 spin_lock_init(&rtpn->lock); in mem_cgroup_init()
7773 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
7790 * mem_cgroup_swapout - transfer a memsw charge to swap
7826 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); in mem_cgroup_swapout()
7832 folio->memcg_data = 0; in mem_cgroup_swapout()
7835 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7839 page_counter_charge(&swap_memcg->memsw, nr_entries); in mem_cgroup_swapout()
7840 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7845 * i_pages lock which is taken with interrupts-off. It is in mem_cgroup_swapout()
7847 * only synchronisation we have for updating the per-CPU variables. in mem_cgroup_swapout()
7850 mem_cgroup_charge_statistics(memcg, -nr_entries); in mem_cgroup_swapout()
7854 css_put(&memcg->css); in mem_cgroup_swapout()
7858 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7864 * Returns 0 on success, -ENOMEM on failure.
7890 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
7894 return -ENOMEM; in __mem_cgroup_try_charge_swap()
7899 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
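__mem_cgroup_try_charge_swap() above takes one reference on the memcg ID per swap entry (the folio already holds one, hence the nr_pages - 1 extra), so the ID cannot be recycled while any swap entry still records it; __mem_cgroup_uncharge_swap() later drops one reference per freed entry. A toy model of that per-entry reference counting, using non-kernel types:

#include <stdio.h>

struct group { int id_refs; long swap_pages; };

static void charge_swap(struct group *g, long nr_pages)
{
	g->swap_pages += nr_pages;
	g->id_refs += nr_pages;		/* one reference per swapped-out page */
}

static void uncharge_swap(struct group *g, long nr_pages)
{
	g->swap_pages -= nr_pages;
	g->id_refs -= nr_pages;
	if (!g->id_refs)
		printf("group id can be recycled now\n");
}

int main(void)
{
	struct group g = { 0 };

	charge_swap(&g, 512);		/* e.g. a THP worth of swap entries */
	uncharge_swap(&g, 511);
	printf("refs left: %d, swap pages: %ld\n", g.id_refs, g.swap_pages);
	uncharge_swap(&g, 1);		/* last entry gone, ID may be reused */
	return 0;
}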
7908 * __mem_cgroup_uncharge_swap - uncharge swap space
7923 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
7925 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
7927 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
7941 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7942 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
7962 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
7964 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
7965 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
7979 "Please report your usecase to linux-mm@kvack.org if you " in setup_swap_account()
7990 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7998 return (u64)memcg->swap.watermark * PAGE_SIZE; in swap_peak_read()
8004 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); in swap_high_show()
8019 page_counter_set_high(&memcg->swap, high); in swap_high_write()
8027 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); in swap_max_show()
8042 xchg(&memcg->swap.max, max); in swap_max_write()
8052 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
8054 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
8056 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
8122 * obj_cgroup_may_zswap - check if this cgroup can zswap
8129 * once compression has occurred, and this optimistic pre-check avoids
8144 unsigned long max = READ_ONCE(memcg->zswap_max); in obj_cgroup_may_zswap()
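obj_cgroup_may_zswap() above is a hierarchical admission check: the owning memcg and each of its ancestors are walked upward, and the charge is refused as soon as any level's zswap usage has reached its zswap_max. The sketch below captures that walk in userspace; the real function additionally flushes stats before comparing and skips levels whose limit is unlimited, and the node layout here is illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long zswap_bytes;	/* current compressed usage */
	unsigned long zswap_max;	/* per-group limit */
};

static bool may_zswap(struct node *n)
{
	for (; n; n = n->parent)
		if (n->zswap_bytes >= n->zswap_max)
			return false;	/* some ancestor is already full */
	return true;
}

int main(void)
{
	struct node root = { NULL, 0, ~0UL };
	struct node mid  = { &root, 4096, 8192 };
	struct node leaf = { &mid, 512, ~0UL };

	printf("%d\n", may_zswap(&leaf));	/* 1: everyone under its limit */
	mid.zswap_bytes = 8192;
	printf("%d\n", may_zswap(&leaf));	/* 0: the parent is at its limit */
	return 0;
}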
8170 * obj_cgroup_charge_zswap - charge compression backend memory
8184 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); in obj_cgroup_charge_zswap()
8198 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8215 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); in obj_cgroup_uncharge_zswap()
8216 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); in obj_cgroup_uncharge_zswap()
8223 return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback); in mem_cgroup_zswap_writeback_enabled()
8238 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); in zswap_max_show()
8253 xchg(&memcg->zswap_max, max); in zswap_max_write()
8262 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); in zswap_writeback_show()
8277 return -EINVAL; in zswap_writeback_write()
8279 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); in zswap_writeback_write()