// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/cgroup-defs.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "memcontrol-v1.h"

#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/memcg.h>
#undef CREATE_TRACE_POINTS

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define SEQ_BUF_SIZE SZ_4K
#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, so
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned.  The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
	PGPROMOTE_SUCCESS,
#endif
	PGDEMOTE_KSWAPD,
	PGDEMOTE_DIRECT,
	PGDEMOTE_KHUGEPAGED,
	PGDEMOTE_PROACTIVE,
#ifdef CONFIG_HUGETLB_PAGE
	NR_HUGETLB,
#endif
};

static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	u8 i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);

	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
}

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx];
}
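
/*
 * Illustrative sketch (not part of the upstream sources): the sparse enum
 * space is remapped onto a dense array, so a lookup either yields a small
 * dense slot or U8_MAX for an item memcg does not track. A caller is
 * expected to validate the index before using it:
 *
 *	int i = memcg_stats_index(NR_FILE_DIRTY);
 *
 *	if (!BAD_STAT_IDX(i))
 *		... index state[i] in the dense per-memcg arrays ...
 */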

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};

unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
#ifdef CONFIG_MEMCG_V1
	PGPGIN,
	PGPGOUT,
#endif
	PSWPIN,
	PSWPOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSCAN_PROACTIVE,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGSTEAL_PROACTIVE,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#ifdef CONFIG_SWAP
	SWPIN_ZERO,
	SWPOUT_ZERO,
#endif
#ifdef CONFIG_ZSWAP
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_NUMA_BALANCING
	NUMA_PAGE_MIGRATE,
	NUMA_PTE_UPDATES,
	NUMA_HINT_FAULTS,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	u8 i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);

	memset(mem_cgroup_events_index, U8_MAX,
	       sizeof(mem_cgroup_events_index));

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx];
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int			stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu	*parent;
	struct memcg_vmstats		*vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_VMSTAT_SIZE];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long		events_prev[NR_MEMCG_EVENTS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_VMSTAT_SIZE];
	unsigned long		events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long			state_local[MEMCG_VMSTAT_SIZE];
	unsigned long		events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long		events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t		stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize
 * flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds so the
 *    rstat update tree does not grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization lets
 *    stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but only
 *    for 2 seconds due to (1).
 */
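/*
 * Worked example (illustrative arithmetic, assuming MEMCG_CHARGE_BATCH is
 * 64): on a 16-CPU machine a reader triggers a synchronous flush only once
 * more than 64 * 16 = 1024 update events have accumulated since the last
 * flush; anything below that is left to the 2-second periodic worker.
 */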
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT, because an
 * acquired spinlock_t lock cannot be relied on for that there. These functions
 * are never used in hardirq context on PREEMPT_RT and therefore disabling
 * preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
}

static void __memcg_stats_lock(void)
{
	preempt_disable_nested();
}

static void memcg_stats_unlock(void)
{
	preempt_enable_nested();
}

static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic64_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
	struct memcg_vmstats_percpu *statc;
	int cpu = smp_processor_id();
	unsigned int stats_updates;

	if (!val)
		return;

	cgroup_rstat_updated(memcg->css.cgroup, cpu);
	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent) {
		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
		WRITE_ONCE(statc->stats_updates, stats_updates);
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		/*
		 * If @memcg is already flush-able, increasing stats_updates is
		 * redundant. Avoid the overhead of the atomic update.
		 */
		if (!memcg_vmstats_needs_flush(statc->vmstats))
			atomic64_add(stats_updates,
				     &statc->vmstats->stats_updates);
		WRITE_ONCE(statc->stats_updates, 0);
	}
}

static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
{
	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);

	trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
		force, needs_flush);

	if (!force && !needs_flush)
		return;

	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	cgroup_rstat_flush(memcg->css.cgroup);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	__mem_cgroup_flush_stats(memcg, false);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	__mem_cgroup_flush_stats(root_mem_cgroup, true);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 */
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}
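
/*
 * For example (illustrative numbers): NR_SLAB_RECLAIMABLE_B is tracked in
 * bytes (unit == 1), so with a 4K PAGE_SIZE a +512 byte update normalizes
 * to max(512 / 4096, 1) == 1 page, a zero update stays zero, and an item
 * already tracked in PAGE_SIZE units is passed through unchanged.
 */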

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		       int val)
{
	int i = memcg_stats_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_state(memcg, idx, val);
}

#ifdef CONFIG_MEMCG_V1
/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
#endif

static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
				     enum node_stat_item idx,
				     int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * The callers from rmap rely on disabled preemption because they never
	 * update their counter from in-interrupt context. For those counters
	 * we check that the update is never performed from an interrupt
	 * context, while the other callers need to have interrupts disabled.
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	val = memcg_state_val_in_pages(idx, val);
	memcg_rstat_updated(memcg, val);
	trace_mod_memcg_lruvec_state(memcg, idx, val);
	memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a change of
 * state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);
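
/*
 * Example (hypothetical caller): account a folio that just became dirty,
 * using its size as the positive delta:
 *
 *	__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
 */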

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	int i = memcg_events_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
		return;

	memcg_stats_lock();
	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count);
	trace_count_memcg_events(memcg, idx, count);
	memcg_stats_unlock();
}

unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events[i]);
}

#ifdef CONFIG_MEMCG_V1
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[i]);
}
#endif

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on a given mm_struct's memcg.
 * @mm: mm from which the memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available.
 * 3) The root memcg.
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);
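
/*
 * Example (hypothetical caller): look up the memcg to charge against and
 * drop the reference when done. css_put() on the root memcg is a no-op
 * because of CSS_NO_REF:
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *	if (memcg) {
 *		... charge against memcg ...
 *		css_put(&memcg->css);
 *	}
 */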

/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}

/**
 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
 * @folio: folio from which memcg should be extracted.
 */
struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css;
	struct mem_cgroup *pos;
	struct mem_cgroup *next;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();
restart:
	next = NULL;

	if (reclaim) {
		int gen;
		int nid = reclaim->pgdat->node_id;

		iter = &root->nodeinfo[nid]->iter;
		gen = atomic_read(&iter->generation);

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = gen;
		else if (reclaim->generation != gen)
			goto out_unlock;

		pos = READ_ONCE(iter->position);
	} else
		pos = prev;

	css = pos ? &pos->css : NULL;

	while ((css = css_next_descendant_pre(css, &root->css))) {
		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css))
			break;
	}

	next = mem_cgroup_from_css(css);

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		if (cmpxchg(&iter->position, pos, next) != pos) {
			if (css && css != &root->css)
				css_put(css);
			goto restart;
		}

		if (!next) {
			atomic_inc(&iter->generation);

			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				goto restart;
		}
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return next;
}
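
/*
 * Example (hypothetical caller): the canonical full walk. The previous
 * return value is passed back in so references stay balanced, and
 * mem_cgroup_iter_break() cancels the walk early; stop_condition() is a
 * placeholder for caller-specific logic:
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (iter) {
 *		if (stop_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 */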

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from the cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it))) {
			ret = fn(task, arg);
			/* Avoid potential softlockup warning */
			cond_resched();
		}
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}
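
/*
 * Example (hypothetical callback): count every task in the subtree by
 * always returning 0 so the iteration never breaks early:
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */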

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}
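
/*
 * Example (hypothetical caller): the irqsave variant is expected to pair
 * with unlock_page_lruvec_irqrestore() from <linux/memcontrol.h>:
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	... move the folio between LRU lists ...
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 */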

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
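
/*
 * Worked example (illustrative numbers): with memory.max == 100 pages and
 * 80 pages charged, the memory margin is 20; if memsw accounting is on
 * with memsw.max == 120 and 115 pages charged, the smaller memsw margin of
 * 5 wins, so at most 5 more pages can be charged.
 */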

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon",			NR_ANON_MAPPED			},
	{ "file",			NR_FILE_PAGES			},
	{ "kernel",			MEMCG_KMEM			},
	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
	{ "pagetables",			NR_PAGETABLE			},
	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
	{ "percpu",			MEMCG_PERCPU_B			},
	{ "sock",			MEMCG_SOCK			},
	{ "vmalloc",			MEMCG_VMALLOC			},
	{ "shmem",			NR_SHMEM			},
#ifdef CONFIG_ZSWAP
	{ "zswap",			MEMCG_ZSWAP_B			},
	{ "zswapped",			MEMCG_ZSWAPPED			},
#endif
	{ "file_mapped",		NR_FILE_MAPPED			},
	{ "file_dirty",			NR_FILE_DIRTY			},
	{ "file_writeback",		NR_WRITEBACK			},
#ifdef CONFIG_SWAP
	{ "swapcached",			NR_SWAPCACHE			},
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp",			NR_ANON_THPS			},
	{ "file_thp",			NR_FILE_THPS			},
	{ "shmem_thp",			NR_SHMEM_THPS			},
#endif
	{ "inactive_anon",		NR_INACTIVE_ANON		},
	{ "active_anon",		NR_ACTIVE_ANON			},
	{ "inactive_file",		NR_INACTIVE_FILE		},
	{ "active_file",		NR_ACTIVE_FILE			},
	{ "unevictable",		NR_UNEVICTABLE			},
	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
#ifdef CONFIG_HUGETLB_PAGE
	{ "hugetlb",			NR_HUGETLB			},
#endif

	/* The memory events */
	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},

	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD		},
	{ "pgdemote_direct",		PGDEMOTE_DIRECT		},
	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED	},
	{ "pgdemote_proactive",		PGDEMOTE_PROACTIVE	},
#ifdef CONFIG_NUMA_BALANCING
	{ "pgpromote_success",		PGPROMOTE_SUCCESS	},
#endif
};

/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_output_unit(int item)
{
	/*
	 * Workingset state is actually in pages, but we export it to userspace
	 * as a scalar count of events, so special case it here.
	 *
	 * Demotion and promotion activities are exported in pages, consistent
	 * with their global counterparts.
	 */
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
	case PGDEMOTE_KSWAPD:
	case PGDEMOTE_DIRECT:
	case PGDEMOTE_KHUGEPAGED:
	case PGDEMOTE_PROACTIVE:
#ifdef CONFIG_NUMA_BALANCING
	case PGPROMOTE_SUCCESS:
#endif
		return 1;
	default:
		return memcg_page_state_unit(item);
	}
}

unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}
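
/*
 * For example (illustrative numbers): NR_KERNEL_STACK_KB is stored in
 * SZ_1K units, so a stored value of 16 is reported as 16 * 1024 == 16384
 * bytes in memory.stat, while WORKINGSET_* items use an output unit of 1
 * and are reported as raw event counts.
 */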

#ifdef CONFIG_MEMCG_V1
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static bool memcg_accounts_hugetlb(void)
{
	return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
}
#else /* CONFIG_HUGETLB_PAGE */
static bool memcg_accounts_hugetlb(void)
{
	return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

#ifdef CONFIG_HUGETLB_PAGE
		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
			!memcg_accounts_hugetlb())
			continue;
#endif
		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_PROACTIVE) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_PROACTIVE) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
#ifdef CONFIG_MEMCG_V1
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;
#endif
		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use a static buffer, as the caller holds oom_lock. */
	static char buf[SEQ_BUF_SIZE];
	struct seq_buf s;
	unsigned long memory_failcnt;

	lockdep_assert_held(&oom_lock);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
	else
		memory_failcnt = memcg->memory.failcnt;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)),
			atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
#ifdef CONFIG_MEMCG_V1
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}
#endif

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}
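
/*
 * Worked example (illustrative numbers): with memory.max == 100 pages,
 * memsw.max == 150 pages and plenty of swap available, do_memsw_account()
 * yields 100 + min(150 - 100, total_swap_pages) == 150 pages; with
 * swappiness 0 the swap term is dropped and the result is just 100.
 */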

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads that were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after taking oom_lock.
	 */
1665 	ret = task_is_dying() || out_of_memory(&oc);
1666 
1667 unlock:
1668 	mutex_unlock(&oom_lock);
1669 	return ret;
1670 }
1671 
1672 /*
1673  * Returns true if successfully killed one or more processes. Though in some
1674  * corner cases it can return true even without killing any process.
1675  */
mem_cgroup_oom(struct mem_cgroup * memcg,gfp_t mask,int order)1676 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1677 {
1678 	bool locked, ret;
1679 
1680 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1681 		return false;
1682 
1683 	memcg_memory_event(memcg, MEMCG_OOM);
1684 
1685 	if (!memcg1_oom_prepare(memcg, &locked))
1686 		return false;
1687 
1688 	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1689 
1690 	memcg1_oom_finish(memcg, locked);
1691 
1692 	return ret;
1693 }
1694 
/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all of its OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (mem_cgroup_is_root(memcg))
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (READ_ONCE(memcg->oom_group))
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}
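
/*
 * Illustrative example (assumed hierarchy, not from the source): with
 * cgroups A/B/C where the OOM is triggered in A (oom_domain == A), the
 * victim runs in C, and memory.oom.group is set on both B and C, the walk
 * above starts at C and stops at A, leaving B as the returned group: the
 * highest-level memcg with oom.group set on the victim's path.
 */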

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

struct memcg_stock_pcp {
	local_trylock_t stock_lock;
	struct mem_cgroup *cached; /* must never be the root cgroup */
	unsigned int nr_pages;

	struct obj_cgroup *cached_objcg;
	struct pglist_data *cached_pgdat;
	unsigned int nr_bytes;
	int nr_slab_reclaimable_b;
	int nr_slab_unreclaimable_b;

	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
	.stock_lock = INIT_LOCAL_TRYLOCK(stock_lock),
};
static DEFINE_MUTEX(percpu_charge_mutex);

static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 * @gfp_mask: allocation mask.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  If the stock
 * cannot service the allocation, the caller is expected to refill it.
 *
 * Returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
			  gfp_t gfp_mask)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	if (gfpflags_allow_spinning(gfp_mask))
		local_lock_irqsave(&memcg_stock.stock_lock, flags);
	else if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags))
		return ret;

	stock = this_cpu_ptr(&memcg_stock);
	stock_pages = READ_ONCE(stock->nr_pages);
	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
		ret = true;
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

	return ret;
}

/*
 * Uncharge the stock cached on this CPU and reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
	struct mem_cgroup *old = READ_ONCE(stock->cached);

	if (!old)
		return;

	if (stock_pages) {
		page_counter_uncharge(&old->memory, stock_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock_pages);

		WRITE_ONCE(stock->nr_pages, 0);
	}

	css_put(&old->css);
	WRITE_ONCE(stock->cached, NULL);
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;

	/*
	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
	 * drain_stock races is that we always operate on the local CPU stock
	 * here with IRQs disabled.
	 */
	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	old = drain_obj_stock(stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	obj_cgroup_put(old);
}

/*
 * Cache charges (nr_pages) in the local per-cpu area.
 * They will be consumed by consume_stock() later.
 */
static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;

	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
		drain_stock(stock);
		css_get(&memcg->css);
		WRITE_ONCE(stock->cached, memcg);
	}
	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
	WRITE_ONCE(stock->nr_pages, stock_pages);

	if (stock_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);
}

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	unsigned long flags;

	if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
		/*
		 * In the unlikely case that taking the percpu stock_lock
		 * fails, uncharge the memcg directly.
		 */
		if (mem_cgroup_is_root(memcg))
			return;
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, nr_pages);
		return;
	}
	__refill_stock(memcg, nr_pages);
	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
}

/*
 * Drains all per-CPU charge caches for the given root_memcg, i.e. the
 * subtree of the hierarchy under it.
 */
void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that a system-wide "drain" is running.
	 * We do not care about races with cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	migrate_disable();
	curcpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
		bool flush = false;

		rcu_read_lock();
		memcg = READ_ONCE(stock->cached);
		if (memcg && READ_ONCE(stock->nr_pages) &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		else if (obj_stock_flush_required(stock, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else if (!cpu_is_isolated(cpu))
				schedule_work_on(cpu, &stock->work);
		}
	}
	migrate_enable();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old;
	unsigned long flags;

	stock = &per_cpu(memcg_stock, cpu);

	/* drain_obj_stock requires stock_lock */
	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	old = drain_obj_stock(stock);
	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

	drain_stock(stock);
	obj_cgroup_put(old);

	return 0;
}

static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long pflags;

		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
			continue;

		memcg_memory_event(memcg, MEMCG_HIGH);

		psi_memstall_enter(&pflags);
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							gfp_mask,
							MEMCG_RECLAIM_MAY_SWAP,
							NULL);
		psi_memstall_leave(&pflags);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return nr_reclaimed;
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these on either side of the
 * exponentiation to maintain precision and scale to a reasonable number of
 * jiffies (see the table below).
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
 *   to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaving cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14

static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page.
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies that we should penalise a mischievous cgroup which
 * is exceeding its memory.high by checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
	 */
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}
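
/*
 * Worked example (illustrative, assuming HZ == 1000, 4 KiB pages and
 * nr_pages == MEMCG_CHARGE_BATCH): with memory.high = 100M (25600 pages)
 * and usage = 101M (25856 pages), calculate_overage() yields
 * (256 << 20) / 25600 = 10485, and the penalty is
 * 10485 * 10485 * 1000 >> (20 + 14) ~= 6 jiffies, i.e. the 6 ms shown in
 * the 101M row of the table above.
 */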

/*
 * Reclaims memory over the high limit. Called directly from
 * try_charge() (context permitting), as well as from the userland
 * return path where reclaim is always able to block.
 */
void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
	unsigned long penalty_jiffies;
	unsigned long pflags;
	unsigned long nr_reclaimed;
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *memcg;
	bool in_retry = false;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	current->memcg_nr_pages_over_high = 0;

retry_reclaim:
	/*
	 * Bail if the task is already exiting. Unlike memory.max,
	 * memory.high enforcement isn't as strict, and there is no
	 * OOM killer involved, which means the excess could already
	 * be much bigger (and still growing) than it could for
	 * memory.max; the dying task could get stuck in fruitless
	 * reclaim for a long time, which isn't desirable.
	 */
	if (task_is_dying())
		goto out;

	/*
	 * The allocating task should reclaim at least the batch size, but for
	 * subsequent retries we only want to do what's necessary to prevent oom
	 * or breaching resource isolation.
	 *
	 * This is distinct from memory.max or page allocator behaviour because
	 * memory.high is currently batched, whereas memory.max and the page
	 * allocator run every time an allocation is made.
	 */
	nr_reclaimed = reclaim_high(memcg,
				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
				    gfp_mask);

	/*
	 * memory.high is breached and reclaim is unable to keep up. Throttle
	 * allocators proactively to slow down excessive growth.
	 */
	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
					       mem_find_max_overage(memcg));

	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
						swap_find_max_overage(memcg));

	/*
	 * Clamp the max delay per usermode return so as to still keep the
	 * application moving forwards and also permit diagnostics, albeit
	 * extremely slowly.
	 */
	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);

	/*
	 * Don't sleep if the amount of jiffies this memcg owes us is so low
	 * that it's not even worth doing, in an attempt to be nice to those who
	 * go only a small amount over their memory.high value and maybe haven't
	 * been aggressively reclaimed enough yet.
	 */
	if (penalty_jiffies <= HZ / 100)
		goto out;

	/*
	 * If reclaim is making forward progress but we're still over
	 * memory.high, we want to encourage that rather than doing allocator
	 * throttling.
	 */
	if (nr_reclaimed || nr_retries--) {
		in_retry = true;
		goto retry_reclaim;
	}

	/*
	 * Reclaim didn't manage to push usage below the limit, slow
	 * this allocating task down.
	 *
	 * If we exit early, we're guaranteed to die (since
	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
	 * need to account for any ill-begotten jiffies to pay them off later.
	 */
	psi_memstall_enter(&pflags);
	schedule_timeout_killable(penalty_jiffies);
	psi_memstall_leave(&pflags);

out:
	css_put(&memcg->css);
}

static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
			    unsigned int nr_pages)
{
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool passed_oom = false;
	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
	bool drained = false;
	bool raised_max_event = false;
	unsigned long pflags;

retry:
	if (consume_stock(memcg, nr_pages, gfp_mask))
		return 0;

	if (!gfpflags_allow_spinning(gfp_mask))
		/* Avoid the refill and flush of the older stock */
		batch = nr_pages;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	memcg_memory_event(mem_over_limit, MEMCG_MAX);
	raised_max_event = true;

	psi_memstall_enter(&pflags);
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, reclaim_options, NULL);
	psi_memstall_leave(&pflags);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_RETRY_MAYFAIL)
		goto nomem;

	/* Avoid endless loop for tasks bypassed by the oom killer */
	if (passed_oom && task_is_dying())
		goto nomem;

	/*
	 * Keep retrying as long as the memcg oom killer is able to make
	 * forward progress, or bypass the charge if the oom killer
	 * couldn't make any progress.
	 */
	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
			   get_order(nr_pages * PAGE_SIZE))) {
		passed_oom = true;
		nr_retries = MAX_RECLAIM_RETRIES;
		goto retry;
	}
nomem:
	/*
	 * Memcg doesn't have a dedicated reserve for atomic
	 * allocations. But like the global atomic pool, we need to
	 * put the burden of reclaim on regular allocation requests
	 * and let these go through as privileged allocations.
	 */
	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
		return -ENOMEM;
force:
	/*
	 * If the allocation has to be enforced, don't forget to raise
	 * a MEMCG_MAX event.
	 */
	if (!raised_max_event)
		memcg_memory_event(mem_over_limit, MEMCG_MAX);

	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);

	return 0;

done_restock:
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		bool mem_high, swap_high;

		mem_high = page_counter_read(&memcg->memory) >
			READ_ONCE(memcg->memory.high);
		swap_high = page_counter_read(&memcg->swap) >
			READ_ONCE(memcg->swap.high);

		/* Don't bother a random interrupted task */
		if (!in_task()) {
			if (mem_high) {
				schedule_work(&memcg->high_work);
				break;
			}
			continue;
		}

		if (mem_high || swap_high) {
			/*
			 * The allocating tasks in this cgroup will need to do
			 * reclaim or be throttled to prevent further growth
			 * of the memory or swap footprints.
			 *
			 * Target some best-effort fairness between the tasks,
			 * and distribute reclaim work and delay penalties
			 * based on how much each task is actually allocating.
			 */
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * Reclaim is set up above to be called from the userland
	 * return path. But also attempt synchronous reclaim to avoid
	 * excessive overrun while the task is still inside the
	 * kernel. If this is successful, the return path will see it
	 * when it rechecks the overage and simply bail out.
	 */
	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
	    !(current->flags & PF_MEMALLOC) &&
	    gfpflags_allow_blocking(gfp_mask))
		mem_cgroup_handle_over_high(gfp_mask);
	return 0;
}
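
/*
 * Batching example (illustrative, assuming MEMCG_CHARGE_BATCH == 64 and a
 * charge that succeeds directly): a single-page charge above charges 64
 * pages to the page counters and stocks the 63 surplus pages on the local
 * CPU via refill_stock(), so the next 63 single-page charges on this CPU
 * can be served from consume_stock() without touching the counters.
 */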

static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
			     unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return 0;

	return try_charge_memcg(memcg, gfp_mask, nr_pages);
}

static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
	/*
	 * Any of the following ensures page's memcg stability:
	 *
	 * - the page lock
	 * - LRU isolation
	 * - exclusive reference
	 */
	folio->memcg_data = (unsigned long)memcg;
}

static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
				       struct pglist_data *pgdat,
				       enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static __always_inline
struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
{
	/*
	 * Slab objects are accounted individually, not per-page.
	 * Memcg membership data for each individual object is saved in
	 * slab->obj_exts.
	 */
	if (folio_test_slab(folio)) {
		struct slabobj_ext *obj_exts;
		struct slab *slab;
		unsigned int off;

		slab = folio_slab(folio);
		obj_exts = slab_obj_exts(slab);
		if (!obj_exts)
			return NULL;

		off = obj_to_index(slab->slab_cache, slab, p);
		if (obj_exts[off].objcg)
			return obj_cgroup_memcg(obj_exts[off].objcg);

		return NULL;
	}

	/*
	 * folio_memcg_check() is used here, because in theory we can encounter
	 * a folio where the slab flag has been cleared already, but
	 * slab->obj_exts has not been freed yet.  folio_memcg_check() will
	 * guarantee that a proper memory cgroup pointer or NULL will be
	 * returned.
	 */
	return folio_memcg_check(folio);
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 * It is not suitable for objects allocated using vmalloc().
 *
 * A passed kernel object must be a slab object or a generic kernel page.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	if (mem_cgroup_disabled())
		return NULL;

	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
}
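
/*
 * A minimal usage sketch (illustrative only; "obj" is a hypothetical
 * slab-allocated pointer), showing the lifetime protection the comment
 * above asks for:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_slab_obj(obj);
 *	if (memcg)
 *		... use memcg; it is only pinned for the RCU section ...
 *	rcu_read_unlock();
 */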

static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg = NULL;

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference(memcg->objcg);
		if (likely(objcg && obj_cgroup_tryget(objcg)))
			break;
		objcg = NULL;
	}
	return objcg;
}

static struct obj_cgroup *current_objcg_update(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *old, *objcg = NULL;

	do {
		/* Atomically drop the update bit. */
		old = xchg(&current->objcg, NULL);
		if (old) {
			old = (struct obj_cgroup *)
				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
			obj_cgroup_put(old);

			old = NULL;
		}

		/* If the new objcg is NULL, no reason for the second atomic update. */
		if (!current->mm || (current->flags & PF_KTHREAD))
			return NULL;

		/*
		 * Release the objcg pointer from the previous iteration,
		 * if try_cmpxchg() below fails.
		 */
		if (unlikely(objcg)) {
			obj_cgroup_put(objcg);
			objcg = NULL;
		}

		/*
		 * Obtain the new objcg pointer. The current task can be
		 * asynchronously moved to another memcg and the previous
		 * memcg can be offlined. So let's get the memcg pointer
		 * and try to get a reference to the objcg under an rcu
		 * read lock.
		 */

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		objcg = __get_obj_cgroup_from_memcg(memcg);
		rcu_read_unlock();

		/*
		 * Try to set up a new objcg pointer atomically. If it
		 * fails, it means the update flag was set concurrently, so
		 * the whole procedure should be repeated.
		 */
	} while (!try_cmpxchg(&current->objcg, &old, objcg));

	return objcg;
}

__always_inline struct obj_cgroup *current_obj_cgroup(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *objcg;

	if (in_task()) {
		memcg = current->active_memcg;
		if (unlikely(memcg))
			goto from_memcg;

		objcg = READ_ONCE(current->objcg);
		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
			objcg = current_objcg_update();
		/*
		 * The objcg reference is kept by the task, so it's safe
		 * for the current task to use the objcg.
		 */
		return objcg;
	}

	memcg = this_cpu_read(int_active_memcg);
	if (unlikely(memcg))
		goto from_memcg;

	return NULL;

from_memcg:
	objcg = NULL;
	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		/*
		 * The memcg pointer is protected by scope (see
		 * set_active_memcg()) and is pinning the corresponding objcg,
		 * so the objcg can't go away and can be used within the scope
		 * without any additional protection.
		 */
		objcg = rcu_dereference_check(memcg->objcg, 1);
		if (likely(objcg))
			break;
	}

	return objcg;
}

struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return NULL;

	if (folio_memcg_kmem(folio)) {
		objcg = __folio_objcg(folio);
		obj_cgroup_get(objcg);
	} else {
		struct mem_cgroup *memcg;

		rcu_read_lock();
		memcg = __folio_memcg(folio);
		if (memcg)
			objcg = __get_obj_cgroup_from_memcg(memcg);
		else
			objcg = NULL;
		rcu_read_unlock();
	}
	return objcg;
}

/*
 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
 * @objcg: object cgroup to uncharge
 * @nr_pages: number of pages to uncharge
 */
static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages)
{
	struct mem_cgroup *memcg;

	memcg = get_mem_cgroup_from_objcg(objcg);

	mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
	memcg1_account_kmem(memcg, -nr_pages);
	if (!mem_cgroup_is_root(memcg))
		refill_stock(memcg, nr_pages);

	css_put(&memcg->css);
}

/*
 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
 * @objcg: object cgroup to charge
 * @gfp: reclaim mode
 * @nr_pages: number of pages to charge
 *
 * Returns 0 on success, an error code on failure.
 */
static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
				   unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = get_mem_cgroup_from_objcg(objcg);

	ret = try_charge_memcg(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
	memcg1_account_kmem(memcg, nr_pages);
out:
	css_put(&memcg->css);

	return ret;
}

static struct obj_cgroup *page_objcg(const struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	if (mem_cgroup_disabled() || !memcg_data)
		return NULL;

	VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
			page);
	return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
}

static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
{
	page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
}

/**
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
{
	struct obj_cgroup *objcg;
	int ret = 0;

	objcg = current_obj_cgroup();
	if (objcg) {
		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
		if (!ret) {
			obj_cgroup_get(objcg);
			page_set_objcg(page, objcg);
			return 0;
		}
	}
	return ret;
}

/**
 * __memcg_kmem_uncharge_page: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
	struct obj_cgroup *objcg = page_objcg(page);
	unsigned int nr_pages = 1 << order;

	if (!objcg)
		return;

	obj_cgroup_uncharge_pages(objcg, nr_pages);
	page->memcg_data = 0;
	obj_cgroup_put(objcg);
}

/* Replace the stock objcg with objcg, return the old objcg */
static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock,
					     struct obj_cgroup *objcg)
{
	struct obj_cgroup *old = NULL;

	old = drain_obj_stock(stock);
	obj_cgroup_get(objcg);
	stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
			? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
	WRITE_ONCE(stock->cached_objcg, objcg);
	return old;
}

static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;
	int *bytes;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	stock = this_cpu_ptr(&memcg_stock);

	/*
	 * Save vmstat data in stock and skip vmstat array update unless
	 * accumulating over a page of vmstat data or when pgdat or idx
	 * changes.
	 */
	if (READ_ONCE(stock->cached_objcg) != objcg) {
		old = replace_stock_objcg(stock, objcg);
		stock->cached_pgdat = pgdat;
	} else if (stock->cached_pgdat != pgdat) {
		/* Flush the existing cached vmstat data */
		struct pglist_data *oldpg = stock->cached_pgdat;

		if (stock->nr_slab_reclaimable_b) {
			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
					  stock->nr_slab_reclaimable_b);
			stock->nr_slab_reclaimable_b = 0;
		}
		if (stock->nr_slab_unreclaimable_b) {
			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
					  stock->nr_slab_unreclaimable_b);
			stock->nr_slab_unreclaimable_b = 0;
		}
		stock->cached_pgdat = pgdat;
	}

	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
					       : &stock->nr_slab_unreclaimable_b;
	/*
	 * Even for a large object of size >= PAGE_SIZE, the vmstat data
	 * will still be cached locally at least once before pushing it out.
	 */
	if (!*bytes) {
		*bytes = nr;
		nr = 0;
	} else {
		*bytes += nr;
		if (abs(*bytes) > PAGE_SIZE) {
			nr = *bytes;
			*bytes = 0;
		} else {
			nr = 0;
		}
	}
	if (nr)
		__mod_objcg_mlstate(objcg, pgdat, idx, nr);

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	obj_cgroup_put(old);
}

static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
		stock->nr_bytes -= nr_bytes;
		ret = true;
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

	return ret;
}

static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
{
	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);

	if (!old)
		return NULL;

	if (stock->nr_bytes) {
		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

		if (nr_pages) {
			struct mem_cgroup *memcg;

			memcg = get_mem_cgroup_from_objcg(old);

			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
			memcg1_account_kmem(memcg, -nr_pages);
			__refill_stock(memcg, nr_pages);

			css_put(&memcg->css);
		}

		/*
		 * The leftover is flushed to the centralized per-memcg value.
		 * On the next attempt to refill obj stock it will be moved
		 * to a per-cpu stock (probably, on another CPU), see
		 * refill_obj_stock().
		 *
		 * How often it's flushed is a trade-off between the memory
		 * limit enforcement accuracy and potential CPU contention,
		 * so it might be changed in the future.
		 */
		atomic_add(nr_bytes, &old->nr_charged_bytes);
		stock->nr_bytes = 0;
	}

	/*
	 * Flush the vmstat data in the current stock.
	 */
	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
		if (stock->nr_slab_reclaimable_b) {
			__mod_objcg_mlstate(old, stock->cached_pgdat,
					  NR_SLAB_RECLAIMABLE_B,
					  stock->nr_slab_reclaimable_b);
			stock->nr_slab_reclaimable_b = 0;
		}
		if (stock->nr_slab_unreclaimable_b) {
			__mod_objcg_mlstate(old, stock->cached_pgdat,
					  NR_SLAB_UNRECLAIMABLE_B,
					  stock->nr_slab_unreclaimable_b);
			stock->nr_slab_unreclaimable_b = 0;
		}
		stock->cached_pgdat = NULL;
	}

	WRITE_ONCE(stock->cached_objcg, NULL);
	/*
	 * The old objcg needs to be released by the caller via
	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
	 */
	return old;
}

static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
	struct mem_cgroup *memcg;

	if (objcg) {
		memcg = obj_cgroup_memcg(objcg);
		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
			return true;
	}

	return false;
}

static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
			     bool allow_uncharge)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;
	unsigned int nr_pages = 0;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
		old = replace_stock_objcg(stock, objcg);
		allow_uncharge = true;	/* Allow uncharge when objcg changes */
	}
	stock->nr_bytes += nr_bytes;

	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		stock->nr_bytes &= (PAGE_SIZE - 1);
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	obj_cgroup_put(old);

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
	unsigned int nr_pages, nr_bytes;
	int ret;

	if (consume_obj_stock(objcg, size))
		return 0;

	/*
	 * In theory, objcg->nr_charged_bytes can have enough
	 * pre-charged bytes to satisfy the allocation. However,
	 * flushing objcg->nr_charged_bytes requires two atomic
	 * operations, and objcg->nr_charged_bytes can't be big.
	 * The shared objcg->nr_charged_bytes can also become a
	 * performance bottleneck if all tasks of the same memcg are
	 * trying to update it. So it's better to ignore it and try to
	 * grab some new pages. The stock's nr_bytes will be flushed to
	 * objcg->nr_charged_bytes later on when the objcg changes.
	 *
	 * The stock's nr_bytes may contain enough pre-charged bytes
	 * to save one page from being charged, but we can't rely
	 * on the pre-charged bytes not being changed outside of
	 * consume_obj_stock() or refill_obj_stock(). So ignore those
	 * pre-charged bytes as well when charging pages. To avoid a
	 * page uncharge right after a page charge, we set the
	 * allow_uncharge flag to false when calling refill_obj_stock()
	 * to temporarily allow the pre-charged bytes to exceed the page
	 * size limit. The maximum reachable value of the pre-charged
	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
	 * race.
	 */
	nr_pages = size >> PAGE_SHIFT;
	nr_bytes = size & (PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages += 1;

	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
	if (!ret && nr_bytes)
		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);

	return ret;
}
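
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for an object
 * of size 700 that misses the per-cpu byte stock, nr_pages = 0 and
 * nr_bytes = 700 above, so one full page is charged and the surplus
 * 4096 - 700 = 3396 bytes are stocked via refill_obj_stock() to serve
 * subsequent small charges from consume_obj_stock().
 */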

void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
	refill_obj_stock(objcg, size, true);
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p)
{
	struct obj_cgroup *objcg;
	struct slab *slab;
	unsigned long off;
	size_t i;

	/*
	 * The obtained objcg pointer is safe to use within the current scope,
	 * defined by the current task or a set_active_memcg() pair.
	 * obj_cgroup_get() is used to get a permanent reference.
	 */
	objcg = current_obj_cgroup();
	if (!objcg)
		return true;

	/*
	 * slab_alloc_node() avoids the NULL check, so we might be called with a
	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
	 * the whole requested size.
	 * Return success as there's nothing to free back.
	 */
	if (unlikely(*p == NULL))
		return true;

	flags &= gfp_allowed_mask;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			return false;
	}

	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
		return false;

	for (i = 0; i < size; i++) {
		slab = virt_to_slab(p[i]);

		if (!slab_obj_exts(slab) &&
		    alloc_slab_obj_exts(slab, s, flags, false)) {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
			continue;
		}

		off = obj_to_index(s, slab, p[i]);
		obj_cgroup_get(objcg);
		slab_obj_exts(slab)[off].objcg = objcg;
		mod_objcg_state(objcg, slab_pgdat(slab),
				cache_vmstat_idx(s), obj_full_size(s));
	}

	return true;
}

void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, struct slabobj_ext *obj_exts)
{
	for (int i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = obj_exts[off].objcg;
		if (!objcg)
			continue;

		obj_exts[off].objcg = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

/*
 * The objcg is only set on the first page, so transfer it to all the
 * other pages.
 */
void split_page_memcg(struct page *page, unsigned order)
{
	struct obj_cgroup *objcg = page_objcg(page);
	unsigned int i, nr = 1 << order;

	if (!objcg)
		return;

	for (i = 1; i < nr; i++)
		page_set_objcg(&page[i], objcg);

	obj_cgroup_get_many(objcg, nr - 1);
}
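
/*
 * Refcount example (illustrative only): splitting an order-2 kmem page
 * above propagates the objcg to pages 1..3 and takes 3 extra objcg
 * references, one per tail page, so each of the 4 resulting pages can be
 * uncharged independently.
 */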

void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
		unsigned new_order)
{
	unsigned new_refs;

	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
		return;

	new_refs = (1 << (old_order - new_order)) - 1;
	css_get_many(&__folio_memcg(folio)->css, new_refs);
}

unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val;

	if (mem_cgroup_is_root(memcg)) {
		/*
		 * Approximate root's usage from global state. This isn't
		 * perfect, but the root usage was always an approximation.
		 */
		val = global_node_page_state(NR_FILE_PAGES) +
			global_node_page_state(NR_ANON_MAPPED);
		if (swap)
			val += total_swap_pages - get_nr_swap_pages();
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}

static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;

	if (mem_cgroup_kmem_disabled())
		return 0;

	if (unlikely(mem_cgroup_is_root(memcg)))
		return 0;

	objcg = obj_cgroup_alloc();
	if (!objcg)
		return -ENOMEM;

	objcg->memcg = memcg;
	rcu_assign_pointer(memcg->objcg, objcg);
	obj_cgroup_get(objcg);
	memcg->orig_objcg = objcg;

	static_branch_enable(&memcg_kmem_online_key);

	memcg->kmemcg_id = memcg->id.id;

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent;

	if (mem_cgroup_kmem_disabled())
		return;

	if (unlikely(mem_cgroup_is_root(memcg)))
		return;

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	memcg_reparent_list_lrus(memcg, parent);

	/*
	 * Objcg's reparenting must come after list_lru's, to make sure that
	 * list_lru helpers won't use the parent's list_lru until the child
	 * is drained.
	 */
	memcg_reparent_objcgs(memcg, parent);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <trace/events/writeback.h>

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	mem_cgroup_flush_stats_ratelimited(memcg);

	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
			memcg_page_state(memcg, NR_ACTIVE_FILE);

	*pheadroom = PAGE_COUNTER_MAX;
	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
					    READ_ONCE(memcg->memory.high));
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}
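
/*
 * Headroom example (illustrative numbers): if the wb's memcg has
 * high = 1 GiB with 768 MiB used (256 MiB headroom) and its non-root
 * parent has high = 2 GiB with 1.9 GiB used (~102 MiB headroom), the
 * loop above reports the smaller of the two, ~102 MiB, in *@pheadroom.
 */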
3279 
3280 /*
3281  * Foreign dirty flushing
3282  *
3283  * There's an inherent mismatch between memcg and writeback.  The former
3284  * tracks ownership per-page while the latter per-inode.  This was a
3285  * deliberate design decision because honoring per-page ownership in the
3286  * writeback path is complicated, may lead to higher CPU and IO overheads
3287  * and deemed unnecessary given that write-sharing an inode across
3288  * different cgroups isn't a common use-case.
3289  *
3290  * Combined with inode majority-writer ownership switching, this works well
3291  * enough in most cases but there are some pathological cases.  For
3292  * example, let's say there are two cgroups A and B which keep writing to
3293  * different but confined parts of the same inode.  B owns the inode and
3294  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
3295  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3296  * triggering background writeback.  A will be slowed down without a way to
3297  * make writeback of the dirty pages happen.
3298  *
3299  * Conditions like the above can lead to a cgroup getting repeatedly and
3300  * severely throttled after making some progress after each
3301  * dirty_expire_interval while the underlying IO device is almost
3302  * completely idle.
3303  *
3304  * Solving this problem completely requires matching the ownership tracking
3305  * granularities between memcg and writeback in either direction.  However,
3306  * the more egregious behaviors can be avoided by simply remembering the
3307  * most recent foreign dirtying events and initiating remote flushes on
3308  * them when local writeback isn't enough to keep the memory clean enough.
3309  *
3310  * The following two functions implement such mechanism.  When a foreign
3311  * page - a page whose memcg and writeback ownerships don't match - is
3312  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3313  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
3314  * decides that the memcg needs to sleep due to high dirty ratio, it calls
3315  * mem_cgroup_flush_foreign() which queues writeback on the recorded
3316  * foreign bdi_writebacks which haven't expired.  Both the numbers of
3317  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3318  * limited to MEMCG_CGWB_FRN_CNT.
3319  *
3320  * The mechanism only remembers IDs and doesn't hold any object references.
3321  * As being wrong occasionally doesn't matter, updates and accesses to the
3322  * records are lockless and racy.
3323  */
mem_cgroup_track_foreign_dirty_slowpath(struct folio * folio,struct bdi_writeback * wb)3324 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3325 					     struct bdi_writeback *wb)
3326 {
3327 	struct mem_cgroup *memcg = folio_memcg(folio);
3328 	struct memcg_cgwb_frn *frn;
3329 	u64 now = get_jiffies_64();
3330 	u64 oldest_at = now;
3331 	int oldest = -1;
3332 	int i;
3333 
3334 	trace_track_foreign_dirty(folio, wb);
3335 
3336 	/*
3337 	 * Pick the slot to use.  If there is already a slot for @wb, keep
3338 	 * using it.  If not replace the oldest one which isn't being
3339 	 * written out.
3340 	 */
3341 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3342 		frn = &memcg->cgwb_frn[i];
3343 		if (frn->bdi_id == wb->bdi->id &&
3344 		    frn->memcg_id == wb->memcg_css->id)
3345 			break;
3346 		if (time_before64(frn->at, oldest_at) &&
3347 		    atomic_read(&frn->done.cnt) == 1) {
3348 			oldest = i;
3349 			oldest_at = frn->at;
3350 		}
3351 	}
3352 
3353 	if (i < MEMCG_CGWB_FRN_CNT) {
3354 		/*
3355 		 * Re-using an existing one.  Update timestamp lazily to
3356 		 * avoid making the cacheline hot.  We want them to be
3357 		 * reasonably up-to-date and significantly shorter than
3358 		 * dirty_expire_interval as that's what expires the record.
3359 		 * Use the shorter of 1s and dirty_expire_interval / 8 (centisecs; hence the * 10 to msecs below).
3360 		 */
3361 		unsigned long update_intv =
3362 			min_t(unsigned long, HZ,
3363 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3364 
3365 		if (time_before64(frn->at, now - update_intv))
3366 			frn->at = now;
3367 	} else if (oldest >= 0) {
3368 		/* replace the oldest free one */
3369 		frn = &memcg->cgwb_frn[oldest];
3370 		frn->bdi_id = wb->bdi->id;
3371 		frn->memcg_id = wb->memcg_css->id;
3372 		frn->at = now;
3373 	}
3374 }
3375 
3376 /* issue foreign writeback flushes for recorded foreign dirtying events */
3377 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3378 {
3379 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3380 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3381 	u64 now = jiffies_64;
3382 	int i;
3383 
3384 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3385 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3386 
3387 		/*
3388 		 * If the record is older than dirty_expire_interval,
3389 		 * writeback on it has already started.  No need to kick it
3390 		 * off again.  Also, don't start a new one if there's
3391 		 * already one in flight.
3392 		 */
3393 		if (time_after64(frn->at, now - intv) &&
3394 		    atomic_read(&frn->done.cnt) == 1) {
3395 			frn->at = 0;
3396 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3397 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3398 					       WB_REASON_FOREIGN_FLUSH,
3399 					       &frn->done);
3400 		}
3401 	}
3402 }
3403 
3404 #else	/* CONFIG_CGROUP_WRITEBACK */
3405 
3406 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3407 {
3408 	return 0;
3409 }
3410 
3411 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3412 {
3413 }
3414 
3415 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3416 {
3417 }
3418 
3419 #endif	/* CONFIG_CGROUP_WRITEBACK */
3420 
3421 /*
3422  * Private memory cgroup IDR
3423  *
3424  * Swap-out records and page cache shadow entries need to store memcg
3425  * references in constrained space, so we maintain an ID space that is
3426  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3427  * memory-controlled cgroups to 64k.
3428  *
3429  * However, there usually are many references to the offline CSS after
3430  * the cgroup has been destroyed, such as page cache or reclaimable
3431  * slab objects, that don't need to hang on to the ID. We want to keep
3432  * those dead CSS from occupying IDs, or we might quickly exhaust the
3433  * relatively small ID space and prevent the creation of new cgroups
3434  * even when there are much fewer than 64k cgroups - possibly none.
3435  *
3436  * Maintain a private 16-bit ID space for memcg, and allow the ID to
3437  * be freed and recycled when it's no longer needed, which is usually
3438  * when the CSS is offlined.
3439  *
3440  * The only exception to that are records of swapped out tmpfs/shmem
3441  * pages that need to be attributed to live ancestors on swapin. But
3442  * those references are manageable from userspace.
3443  */
3444 
3445 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3446 static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
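/*
 * DEFINE_XARRAY_ALLOC1 starts handing out IDs at 1; together with the
 * XA_LIMIT(1, MEM_CGROUP_ID_MAX) in mem_cgroup_alloc() below this keeps
 * ID 0 reserved as the "no ID" value that mem_cgroup_id_remove() resets
 * to.  With MEM_CGROUP_ID_SHIFT == 16 that allows up to 65535 live IDs.
 */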
3447 
3448 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3449 {
3450 	if (memcg->id.id > 0) {
3451 		xa_erase(&mem_cgroup_ids, memcg->id.id);
3452 		memcg->id.id = 0;
3453 	}
3454 }
3455 
3456 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3457 					   unsigned int n)
3458 {
3459 	refcount_add(n, &memcg->id.ref);
3460 }
3461 
3462 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3463 {
3464 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
3465 		mem_cgroup_id_remove(memcg);
3466 
3467 		/* Memcg ID pins CSS */
3468 		css_put(&memcg->css);
3469 	}
3470 }
3471 
3472 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3473 {
3474 	mem_cgroup_id_put_many(memcg, 1);
3475 }
3476 
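/*
 * Take an ID reference on @memcg if it is still online, falling back to
 * the nearest online ancestor otherwise (the ID ref is dropped when a
 * memcg goes offline).  Used by e.g. __mem_cgroup_try_charge_swap()
 * below so that swap usage is attributed to a live group.
 */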
3477 struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
3478 {
3479 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
3480 		/*
3481 		 * The root cgroup cannot be destroyed, so its refcount must
3482 		 * always be >= 1.
3483 		 */
3484 		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3485 			VM_BUG_ON(1);
3486 			break;
3487 		}
3488 		memcg = parent_mem_cgroup(memcg);
3489 		if (!memcg)
3490 			memcg = root_mem_cgroup;
3491 	}
3492 	return memcg;
3493 }
3494 
3495 /**
3496  * mem_cgroup_from_id - look up a memcg from a memcg id
3497  * @id: the memcg id to look up
3498  *
3499  * Caller must hold rcu_read_lock().
3500  */
3501 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
3502 {
3503 	WARN_ON_ONCE(!rcu_read_lock_held());
3504 	return xa_load(&mem_cgroup_ids, id);
3505 }
3506 
3507 #ifdef CONFIG_SHRINKER_DEBUG
3508 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3509 {
3510 	struct cgroup *cgrp;
3511 	struct cgroup_subsys_state *css;
3512 	struct mem_cgroup *memcg;
3513 
3514 	cgrp = cgroup_get_from_id(ino);
3515 	if (IS_ERR(cgrp))
3516 		return ERR_CAST(cgrp);
3517 
3518 	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3519 	if (css)
3520 		memcg = container_of(css, struct mem_cgroup, css);
3521 	else
3522 		memcg = ERR_PTR(-ENOENT);
3523 
3524 	cgroup_put(cgrp);
3525 
3526 	return memcg;
3527 }
3528 #endif
3529 
3530 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3531 {
3532 	if (!pn)
3533 		return;
3534 
3535 	free_percpu(pn->lruvec_stats_percpu);
3536 	kfree(pn->lruvec_stats);
3537 	kfree(pn);
3538 }
3539 
3540 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3541 {
3542 	struct mem_cgroup_per_node *pn;
3543 
3544 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3545 	if (!pn)
3546 		return false;
3547 
3548 	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3549 					GFP_KERNEL_ACCOUNT, node);
3550 	if (!pn->lruvec_stats)
3551 		goto fail;
3552 
3553 	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3554 						   GFP_KERNEL_ACCOUNT);
3555 	if (!pn->lruvec_stats_percpu)
3556 		goto fail;
3557 
3558 	lruvec_init(&pn->lruvec);
3559 	pn->memcg = memcg;
3560 
3561 	memcg->nodeinfo[node] = pn;
3562 	return true;
3563 fail:
3564 	free_mem_cgroup_per_node_info(pn);
3565 	return false;
3566 }
3567 
3568 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3569 {
3570 	int node;
3571 
3572 	obj_cgroup_put(memcg->orig_objcg);
3573 
3574 	for_each_node(node)
3575 		free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
3576 	memcg1_free_events(memcg);
3577 	kfree(memcg->vmstats);
3578 	free_percpu(memcg->vmstats_percpu);
3579 	kfree(memcg);
3580 }
3581 
3582 static void mem_cgroup_free(struct mem_cgroup *memcg)
3583 {
3584 	lru_gen_exit_memcg(memcg);
3585 	memcg_wb_domain_exit(memcg);
3586 	__mem_cgroup_free(memcg);
3587 }
3588 
3589 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3590 {
3591 	struct memcg_vmstats_percpu *statc, *pstatc;
3592 	struct mem_cgroup *memcg;
3593 	int node, cpu;
3594 	int __maybe_unused i;
3595 	long error;
3596 
3597 	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3598 	if (!memcg)
3599 		return ERR_PTR(-ENOMEM);
3600 
3601 	error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3602 			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3603 	if (error)
3604 		goto fail;
3605 	error = -ENOMEM;
3606 
3607 	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3608 				 GFP_KERNEL_ACCOUNT);
3609 	if (!memcg->vmstats)
3610 		goto fail;
3611 
3612 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3613 						 GFP_KERNEL_ACCOUNT);
3614 	if (!memcg->vmstats_percpu)
3615 		goto fail;
3616 
3617 	if (!memcg1_alloc_events(memcg))
3618 		goto fail;
3619 
3620 	for_each_possible_cpu(cpu) {
3621 		if (parent)
3622 			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3623 		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3624 		statc->parent = parent ? pstatc : NULL;
3625 		statc->vmstats = memcg->vmstats;
3626 	}
3627 
3628 	for_each_node(node)
3629 		if (!alloc_mem_cgroup_per_node_info(memcg, node))
3630 			goto fail;
3631 
3632 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3633 		goto fail;
3634 
3635 	INIT_WORK(&memcg->high_work, high_work_func);
3636 	vmpressure_init(&memcg->vmpressure);
3637 	INIT_LIST_HEAD(&memcg->memory_peaks);
3638 	INIT_LIST_HEAD(&memcg->swap_peaks);
3639 	spin_lock_init(&memcg->peaks_lock);
3640 	memcg->socket_pressure = jiffies;
3641 	memcg1_memcg_init(memcg);
3642 	memcg->kmemcg_id = -1;
3643 	INIT_LIST_HEAD(&memcg->objcg_list);
3644 #ifdef CONFIG_CGROUP_WRITEBACK
3645 	INIT_LIST_HEAD(&memcg->cgwb_list);
3646 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3647 		memcg->cgwb_frn[i].done =
3648 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3649 #endif
3650 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3651 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3652 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3653 	memcg->deferred_split_queue.split_queue_len = 0;
3654 #endif
3655 	lru_gen_init_memcg(memcg);
3656 	return memcg;
3657 fail:
3658 	mem_cgroup_id_remove(memcg);
3659 	__mem_cgroup_free(memcg);
3660 	return ERR_PTR(error);
3661 }
3662 
3663 static struct cgroup_subsys_state * __ref
3664 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3665 {
3666 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3667 	struct mem_cgroup *memcg, *old_memcg;
3668 	bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
3669 
3670 	old_memcg = set_active_memcg(parent);
3671 	memcg = mem_cgroup_alloc(parent);
3672 	set_active_memcg(old_memcg);
3673 	if (IS_ERR(memcg))
3674 		return ERR_CAST(memcg);
3675 
3676 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3677 	memcg1_soft_limit_reset(memcg);
3678 #ifdef CONFIG_ZSWAP
3679 	memcg->zswap_max = PAGE_COUNTER_MAX;
3680 	WRITE_ONCE(memcg->zswap_writeback, true);
3681 #endif
3682 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3683 	if (parent) {
3684 		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3685 
3686 		page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
3687 		page_counter_init(&memcg->swap, &parent->swap, false);
3688 #ifdef CONFIG_MEMCG_V1
3689 		memcg->memory.track_failcnt = !memcg_on_dfl;
3690 		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3691 		page_counter_init(&memcg->kmem, &parent->kmem, false);
3692 		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3693 #endif
3694 	} else {
3695 		init_memcg_stats();
3696 		init_memcg_events();
3697 		page_counter_init(&memcg->memory, NULL, true);
3698 		page_counter_init(&memcg->swap, NULL, false);
3699 #ifdef CONFIG_MEMCG_V1
3700 		page_counter_init(&memcg->kmem, NULL, false);
3701 		page_counter_init(&memcg->tcpmem, NULL, false);
3702 #endif
3703 		root_mem_cgroup = memcg;
3704 		return &memcg->css;
3705 	}
3706 
3707 	if (memcg_on_dfl && !cgroup_memory_nosocket)
3708 		static_branch_inc(&memcg_sockets_enabled_key);
3709 
3710 	if (!cgroup_memory_nobpf)
3711 		static_branch_inc(&memcg_bpf_enabled_key);
3712 
3713 	return &memcg->css;
3714 }
3715 
3716 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3717 {
3718 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3719 
3720 	if (memcg_online_kmem(memcg))
3721 		goto remove_id;
3722 
3723 	/*
3724 	 * A memcg must be visible for expand_shrinker_info()
3725 	 * by the time the maps are allocated. So, we allocate maps
3726 	 * here, when for_each_mem_cgroup() can't skip it.
3727 	 */
3728 	if (alloc_shrinker_info(memcg))
3729 		goto offline_kmem;
3730 
3731 	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3732 		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3733 				   FLUSH_TIME);
3734 	lru_gen_online_memcg(memcg);
3735 
3736 	/* Online state pins memcg ID, memcg ID pins CSS */
3737 	refcount_set(&memcg->id.ref, 1);
3738 	css_get(css);
3739 
3740 	/*
3741 	 * Ensure mem_cgroup_from_id() works once we're fully online.
3742 	 *
3743 	 * We could do this earlier and require callers to filter with
3744 	 * css_tryget_online(). But right now there are no users that
3745 	 * need earlier access, and the workingset code relies on the
3746 	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3747 	 * publish it here at the end of onlining. This matches the
3748 	 * regular ID destruction during offlining.
3749 	 */
3750 	xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3751 
3752 	return 0;
3753 offline_kmem:
3754 	memcg_offline_kmem(memcg);
3755 remove_id:
3756 	mem_cgroup_id_remove(memcg);
3757 	return -ENOMEM;
3758 }
3759 
3760 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3761 {
3762 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3763 
3764 	memcg1_css_offline(memcg);
3765 
3766 	page_counter_set_min(&memcg->memory, 0);
3767 	page_counter_set_low(&memcg->memory, 0);
3768 
3769 	zswap_memcg_offline_cleanup(memcg);
3770 
3771 	memcg_offline_kmem(memcg);
3772 	reparent_shrinker_deferred(memcg);
3773 	wb_memcg_offline(memcg);
3774 	lru_gen_offline_memcg(memcg);
3775 
3776 	drain_all_stock(memcg);
3777 
3778 	mem_cgroup_id_put(memcg);
3779 }
3780 
3781 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
3782 {
3783 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3784 
3785 	invalidate_reclaim_iterators(memcg);
3786 	lru_gen_release_memcg(memcg);
3787 }
3788 
3789 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
3790 {
3791 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3792 	int __maybe_unused i;
3793 
3794 #ifdef CONFIG_CGROUP_WRITEBACK
3795 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3796 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3797 #endif
3798 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3799 		static_branch_dec(&memcg_sockets_enabled_key);
3800 
3801 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3802 		static_branch_dec(&memcg_sockets_enabled_key);
3803 
3804 	if (!cgroup_memory_nobpf)
3805 		static_branch_dec(&memcg_bpf_enabled_key);
3806 
3807 	vmpressure_cleanup(&memcg->vmpressure);
3808 	cancel_work_sync(&memcg->high_work);
3809 	memcg1_remove_from_trees(memcg);
3810 	free_shrinker_info(memcg);
3811 	mem_cgroup_free(memcg);
3812 }
3813 
3814 /**
3815  * mem_cgroup_css_reset - reset the states of a mem_cgroup
3816  * @css: the target css
3817  *
3818  * Reset the states of the mem_cgroup associated with @css.  This is
3819  * invoked when the userland requests disabling on the default hierarchy
3820  * but the memcg is pinned through dependency.  The memcg should stop
3821  * applying policies and should revert to the vanilla state as it may be
3822  * made visible again.
3823  *
3824  * The current implementation only resets the essential configurations.
3825  * This needs to be expanded to cover all the visible parts.
3826  */
3827 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3828 {
3829 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3830 
3831 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3832 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3833 #ifdef CONFIG_MEMCG_V1
3834 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3835 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3836 #endif
3837 	page_counter_set_min(&memcg->memory, 0);
3838 	page_counter_set_low(&memcg->memory, 0);
3839 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3840 	memcg1_soft_limit_reset(memcg);
3841 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3842 	memcg_wb_domain_size_changed(memcg);
3843 }
3844 
3845 struct aggregate_control {
3846 	/* pointer to the aggregated (CPU and subtree aggregated) counters */
3847 	long *aggregate;
3848 	/* pointer to the non-hierarchical (CPU aggregated) counters */
3849 	long *local;
3850 	/* pointer to the pending child counters during tree propagation */
3851 	long *pending;
3852 	/* pointer to the parent's pending counters, could be NULL */
3853 	long *ppending;
3854 	/* pointer to the percpu counters to be aggregated */
3855 	long *cstat;
3856 	/* pointer to the percpu counters of the last aggregation */
3857 	long *cstat_prev;
3858 	/* size of the above counters */
3859 	int size;
3860 };
3861 
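/*
 * Worked example for one counter i on one CPU: if the previous flush
 * recorded cstat_prev[i] == 7 and the percpu counter has since grown to
 * 10, then delta_cpu == 3 is added to the local (non-hierarchical)
 * counter, while delta == 3 plus whatever the children left in
 * pending[i] goes into the hierarchical aggregate and is forwarded to
 * the parent's ppending[i].
 */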
3862 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
3863 {
3864 	int i;
3865 	long delta, delta_cpu, v;
3866 
3867 	for (i = 0; i < ac->size; i++) {
3868 		/*
3869 		 * Collect the aggregated propagation counts of groups
3870 		 * below us. We're in a per-cpu loop here and this is
3871 		 * a global counter, so the first cycle will get them.
3872 		 */
3873 		delta = ac->pending[i];
3874 		if (delta)
3875 			ac->pending[i] = 0;
3876 
3877 		/* Add CPU changes on this level since the last flush */
3878 		delta_cpu = 0;
3879 		v = READ_ONCE(ac->cstat[i]);
3880 		if (v != ac->cstat_prev[i]) {
3881 			delta_cpu = v - ac->cstat_prev[i];
3882 			delta += delta_cpu;
3883 			ac->cstat_prev[i] = v;
3884 		}
3885 
3886 		/* Aggregate counts on this level and propagate upwards */
3887 		if (delta_cpu)
3888 			ac->local[i] += delta_cpu;
3889 
3890 		if (delta) {
3891 			ac->aggregate[i] += delta;
3892 			if (ac->ppending)
3893 				ac->ppending[i] += delta;
3894 		}
3895 	}
3896 }
3897 
3898 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3899 {
3900 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3901 	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3902 	struct memcg_vmstats_percpu *statc;
3903 	struct aggregate_control ac;
3904 	int nid;
3905 
3906 	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3907 
3908 	ac = (struct aggregate_control) {
3909 		.aggregate = memcg->vmstats->state,
3910 		.local = memcg->vmstats->state_local,
3911 		.pending = memcg->vmstats->state_pending,
3912 		.ppending = parent ? parent->vmstats->state_pending : NULL,
3913 		.cstat = statc->state,
3914 		.cstat_prev = statc->state_prev,
3915 		.size = MEMCG_VMSTAT_SIZE,
3916 	};
3917 	mem_cgroup_stat_aggregate(&ac);
3918 
3919 	ac = (struct aggregate_control) {
3920 		.aggregate = memcg->vmstats->events,
3921 		.local = memcg->vmstats->events_local,
3922 		.pending = memcg->vmstats->events_pending,
3923 		.ppending = parent ? parent->vmstats->events_pending : NULL,
3924 		.cstat = statc->events,
3925 		.cstat_prev = statc->events_prev,
3926 		.size = NR_MEMCG_EVENTS,
3927 	};
3928 	mem_cgroup_stat_aggregate(&ac);
3929 
3930 	for_each_node_state(nid, N_MEMORY) {
3931 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3932 		struct lruvec_stats *lstats = pn->lruvec_stats;
3933 		struct lruvec_stats *plstats = NULL;
3934 		struct lruvec_stats_percpu *lstatc;
3935 
3936 		if (parent)
3937 			plstats = parent->nodeinfo[nid]->lruvec_stats;
3938 
3939 		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3940 
3941 		ac = (struct aggregate_control) {
3942 			.aggregate = lstats->state,
3943 			.local = lstats->state_local,
3944 			.pending = lstats->state_pending,
3945 			.ppending = plstats ? plstats->state_pending : NULL,
3946 			.cstat = lstatc->state,
3947 			.cstat_prev = lstatc->state_prev,
3948 			.size = NR_MEMCG_NODE_STAT_ITEMS,
3949 		};
3950 		mem_cgroup_stat_aggregate(&ac);
3951 
3952 	}
3953 	WRITE_ONCE(statc->stats_updates, 0);
3954 	/* We are in a per-cpu loop here, only do the atomic write once */
3955 	if (atomic64_read(&memcg->vmstats->stats_updates))
3956 		atomic64_set(&memcg->vmstats->stats_updates, 0);
3957 }
3958 
3959 static void mem_cgroup_fork(struct task_struct *task)
3960 {
3961 	/*
3962 	 * Set the update flag to cause task->objcg to be initialized lazily
3963 	 * on the first allocation. It can be done without any synchronization
3964 	 * because it's always performed on the current task, as is
3965 	 * current_objcg_update().
3966 	 */
3967 	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3968 }
3969 
3970 static void mem_cgroup_exit(struct task_struct *task)
3971 {
3972 	struct obj_cgroup *objcg = task->objcg;
3973 
3974 	objcg = (struct obj_cgroup *)
3975 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3976 	obj_cgroup_put(objcg);
3977 
3978 	/*
3979 	 * Some kernel allocations can happen after this point,
3980 	 * but let's ignore them. It can be done without any synchronization
3981 	 * because it's always performed on the current task, as is
3982 	 * current_objcg_update().
3983 	 */
3984 	task->objcg = NULL;
3985 }
3986 
3987 #ifdef CONFIG_LRU_GEN
3988 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3989 {
3990 	struct task_struct *task;
3991 	struct cgroup_subsys_state *css;
3992 
3993 	/* find the first leader if there is any */
3994 	cgroup_taskset_for_each_leader(task, css, tset)
3995 		break;
3996 
3997 	if (!task)
3998 		return;
3999 
4000 	task_lock(task);
4001 	if (task->mm && READ_ONCE(task->mm->owner) == task)
4002 		lru_gen_migrate_mm(task->mm);
4003 	task_unlock(task);
4004 }
4005 #else
4006 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4007 #endif /* CONFIG_LRU_GEN */
4008 
4009 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4010 {
4011 	struct task_struct *task;
4012 	struct cgroup_subsys_state *css;
4013 
4014 	cgroup_taskset_for_each(task, css, tset) {
4015 		/* atomically set the update bit */
4016 		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4017 	}
4018 }
4019 
4020 static void mem_cgroup_attach(struct cgroup_taskset *tset)
4021 {
4022 	mem_cgroup_lru_gen_attach(tset);
4023 	mem_cgroup_kmem_attach(tset);
4024 }
4025 
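/*
 * Print a page-denominated tunable in bytes, or "max" for the sentinel:
 * e.g. a value of 262144 pages prints "1073741824\n" with 4 KiB pages,
 * while PAGE_COUNTER_MAX prints "max\n".
 */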
4026 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4027 {
4028 	if (value == PAGE_COUNTER_MAX)
4029 		seq_puts(m, "max\n");
4030 	else
4031 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4032 
4033 	return 0;
4034 }
4035 
4036 static u64 memory_current_read(struct cgroup_subsys_state *css,
4037 			       struct cftype *cft)
4038 {
4039 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4040 
4041 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4042 }
4043 
4044 #define OFP_PEAK_UNSET (-1UL)
4045 
4046 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4047 {
4048 	struct cgroup_of_peak *ofp = of_peak(sf->private);
4049 	u64 fd_peak = READ_ONCE(ofp->value), peak;
4050 
4051 	/* User wants global or local peak? */
4052 	if (fd_peak == OFP_PEAK_UNSET)
4053 		peak = pc->watermark;
4054 	else
4055 		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4056 
4057 	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4058 	return 0;
4059 }
4060 
4061 static int memory_peak_show(struct seq_file *sf, void *v)
4062 {
4063 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4064 
4065 	return peak_show(sf, v, &memcg->memory);
4066 }
4067 
4068 static int peak_open(struct kernfs_open_file *of)
4069 {
4070 	struct cgroup_of_peak *ofp = of_peak(of);
4071 
4072 	ofp->value = OFP_PEAK_UNSET;
4073 	return 0;
4074 }
4075 
4076 static void peak_release(struct kernfs_open_file *of)
4077 {
4078 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4079 	struct cgroup_of_peak *ofp = of_peak(of);
4080 
4081 	if (ofp->value == OFP_PEAK_UNSET) {
4082 		/* fast path (no writes on this fd) */
4083 		return;
4084 	}
4085 	spin_lock(&memcg->peaks_lock);
4086 	list_del(&ofp->list);
4087 	spin_unlock(&memcg->peaks_lock);
4088 }
4089 
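/*
 * A write of any non-empty string to memory.peak (or memory.swap.peak)
 * resets the watermark tracked by that file descriptor, e.g.:
 *
 *	fd = open("memory.peak", O_RDWR);	read(fd) -> lifetime peak
 *	write(fd, "reset", 5);			read(fd) -> peak since write
 *
 * (Illustrative sketch of the cgroup2 interface; error handling omitted.)
 */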
4090 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4091 			  loff_t off, struct page_counter *pc,
4092 			  struct list_head *watchers)
4093 {
4094 	unsigned long usage;
4095 	struct cgroup_of_peak *peer_ctx;
4096 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4097 	struct cgroup_of_peak *ofp = of_peak(of);
4098 
4099 	spin_lock(&memcg->peaks_lock);
4100 
4101 	usage = page_counter_read(pc);
4102 	WRITE_ONCE(pc->local_watermark, usage);
4103 
4104 	list_for_each_entry(peer_ctx, watchers, list)
4105 		if (usage > peer_ctx->value)
4106 			WRITE_ONCE(peer_ctx->value, usage);
4107 
4108 	/* initial write, register watcher */
4109 	if (ofp->value == OFP_PEAK_UNSET)
4110 		list_add(&ofp->list, watchers);
4111 
4112 	WRITE_ONCE(ofp->value, usage);
4113 	spin_unlock(&memcg->peaks_lock);
4114 
4115 	return nbytes;
4116 }
4117 
4118 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4119 				 size_t nbytes, loff_t off)
4120 {
4121 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4122 
4123 	return peak_write(of, buf, nbytes, off, &memcg->memory,
4124 			  &memcg->memory_peaks);
4125 }
4126 
4127 #undef OFP_PEAK_UNSET
4128 
4129 static int memory_min_show(struct seq_file *m, void *v)
4130 {
4131 	return seq_puts_memcg_tunable(m,
4132 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4133 }
4134 
4135 static ssize_t memory_min_write(struct kernfs_open_file *of,
4136 				char *buf, size_t nbytes, loff_t off)
4137 {
4138 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4139 	unsigned long min;
4140 	int err;
4141 
4142 	buf = strstrip(buf);
4143 	err = page_counter_memparse(buf, "max", &min);
4144 	if (err)
4145 		return err;
4146 
4147 	page_counter_set_min(&memcg->memory, min);
4148 
4149 	return nbytes;
4150 }
4151 
4152 static int memory_low_show(struct seq_file *m, void *v)
4153 {
4154 	return seq_puts_memcg_tunable(m,
4155 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4156 }
4157 
4158 static ssize_t memory_low_write(struct kernfs_open_file *of,
4159 				char *buf, size_t nbytes, loff_t off)
4160 {
4161 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4162 	unsigned long low;
4163 	int err;
4164 
4165 	buf = strstrip(buf);
4166 	err = page_counter_memparse(buf, "max", &low);
4167 	if (err)
4168 		return err;
4169 
4170 	page_counter_set_low(&memcg->memory, low);
4171 
4172 	return nbytes;
4173 }
4174 
4175 static int memory_high_show(struct seq_file *m, void *v)
4176 {
4177 	return seq_puts_memcg_tunable(m,
4178 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4179 }
4180 
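/*
 * Writing memory.high (e.g. "echo 512M > memory.high") immediately
 * reclaims the group down to the new limit, retrying up to
 * MAX_RECLAIM_RETRIES times and bailing out on a pending signal.
 */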
4181 static ssize_t memory_high_write(struct kernfs_open_file *of,
4182 				 char *buf, size_t nbytes, loff_t off)
4183 {
4184 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4185 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4186 	bool drained = false;
4187 	unsigned long high;
4188 	int err;
4189 
4190 	buf = strstrip(buf);
4191 	err = page_counter_memparse(buf, "max", &high);
4192 	if (err)
4193 		return err;
4194 
4195 	page_counter_set_high(&memcg->memory, high);
4196 
4197 	for (;;) {
4198 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4199 		unsigned long reclaimed;
4200 
4201 		if (nr_pages <= high)
4202 			break;
4203 
4204 		if (signal_pending(current))
4205 			break;
4206 
4207 		if (!drained) {
4208 			drain_all_stock(memcg);
4209 			drained = true;
4210 			continue;
4211 		}
4212 
4213 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4214 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4215 
4216 		if (!reclaimed && !nr_retries--)
4217 			break;
4218 	}
4219 
4220 	memcg_wb_domain_size_changed(memcg);
4221 	return nbytes;
4222 }
4223 
4224 static int memory_max_show(struct seq_file *m, void *v)
4225 {
4226 	return seq_puts_memcg_tunable(m,
4227 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4228 }
4229 
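/*
 * Unlike memory.high above, memory.max is a hard limit: once reclaim
 * retries are exhausted the in-group OOM killer is invoked, e.g. after
 * "echo 2G > memory.max" shrinks the limit below current usage.
 */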
4230 static ssize_t memory_max_write(struct kernfs_open_file *of,
4231 				char *buf, size_t nbytes, loff_t off)
4232 {
4233 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4234 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4235 	bool drained = false;
4236 	unsigned long max;
4237 	int err;
4238 
4239 	buf = strstrip(buf);
4240 	err = page_counter_memparse(buf, "max", &max);
4241 	if (err)
4242 		return err;
4243 
4244 	xchg(&memcg->memory.max, max);
4245 
4246 	for (;;) {
4247 		unsigned long nr_pages = page_counter_read(&memcg->memory);
4248 
4249 		if (nr_pages <= max)
4250 			break;
4251 
4252 		if (signal_pending(current))
4253 			break;
4254 
4255 		if (!drained) {
4256 			drain_all_stock(memcg);
4257 			drained = true;
4258 			continue;
4259 		}
4260 
4261 		if (nr_reclaims) {
4262 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4263 					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4264 				nr_reclaims--;
4265 			continue;
4266 		}
4267 
4268 		memcg_memory_event(memcg, MEMCG_OOM);
4269 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4270 			break;
4271 		cond_resched();
4272 	}
4273 
4274 	memcg_wb_domain_size_changed(memcg);
4275 	return nbytes;
4276 }
4277 
4278 /*
4279  * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4280  * if any new events become available.
4281  */
4282 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4283 {
4284 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4285 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4286 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4287 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4288 	seq_printf(m, "oom_kill %lu\n",
4289 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
4290 	seq_printf(m, "oom_group_kill %lu\n",
4291 		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4292 }
4293 
4294 static int memory_events_show(struct seq_file *m, void *v)
4295 {
4296 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4297 
4298 	__memory_events_show(m, memcg->memory_events);
4299 	return 0;
4300 }
4301 
4302 static int memory_events_local_show(struct seq_file *m, void *v)
4303 {
4304 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4305 
4306 	__memory_events_show(m, memcg->memory_events_local);
4307 	return 0;
4308 }
4309 
4310 int memory_stat_show(struct seq_file *m, void *v)
4311 {
4312 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4313 	char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4314 	struct seq_buf s;
4315 
4316 	if (!buf)
4317 		return -ENOMEM;
4318 	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4319 	memory_stat_format(memcg, &s);
4320 	seq_puts(m, buf);
4321 	kfree(buf);
4322 	return 0;
4323 }
4324 
4325 #ifdef CONFIG_NUMA
4326 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4327 						     int item)
4328 {
4329 	return lruvec_page_state(lruvec, item) *
4330 		memcg_page_state_output_unit(item);
4331 }
4332 
4333 static int memory_numa_stat_show(struct seq_file *m, void *v)
4334 {
4335 	int i;
4336 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4337 
4338 	mem_cgroup_flush_stats(memcg);
4339 
4340 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4341 		int nid;
4342 
4343 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4344 			continue;
4345 
4346 		seq_printf(m, "%s", memory_stats[i].name);
4347 		for_each_node_state(nid, N_MEMORY) {
4348 			u64 size;
4349 			struct lruvec *lruvec;
4350 
4351 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4352 			size = lruvec_page_state_output(lruvec,
4353 							memory_stats[i].idx);
4354 			seq_printf(m, " N%d=%llu", nid, size);
4355 		}
4356 		seq_putc(m, '\n');
4357 	}
4358 
4359 	return 0;
4360 }
4361 #endif
4362 
4363 static int memory_oom_group_show(struct seq_file *m, void *v)
4364 {
4365 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4366 
4367 	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4368 
4369 	return 0;
4370 }
4371 
4372 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4373 				      char *buf, size_t nbytes, loff_t off)
4374 {
4375 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4376 	int ret, oom_group;
4377 
4378 	buf = strstrip(buf);
4379 	if (!buf)
4380 		return -EINVAL;
4381 
4382 	ret = kstrtoint(buf, 0, &oom_group);
4383 	if (ret)
4384 		return ret;
4385 
4386 	if (oom_group != 0 && oom_group != 1)
4387 		return -EINVAL;
4388 
4389 	WRITE_ONCE(memcg->oom_group, oom_group);
4390 
4391 	return nbytes;
4392 }
4393 
4394 enum {
4395 	MEMORY_RECLAIM_SWAPPINESS = 0,
4396 	MEMORY_RECLAIM_NULL,
4397 };
4398 
4399 static const match_table_t tokens = {
4400 	{ MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4401 	{ MEMORY_RECLAIM_NULL, NULL },
4402 };
4403 
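/*
 * Example writes to memory.reclaim (the size is parsed by memparse(),
 * the optional key=value arguments by the match_table above):
 *
 *	echo "1G" > memory.reclaim
 *	echo "512M swappiness=0" > memory.reclaim
 */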
4404 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4405 			      size_t nbytes, loff_t off)
4406 {
4407 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4408 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4409 	unsigned long nr_to_reclaim, nr_reclaimed = 0;
4410 	int swappiness = -1;
4411 	unsigned int reclaim_options;
4412 	char *old_buf, *start;
4413 	substring_t args[MAX_OPT_ARGS];
4414 
4415 	buf = strstrip(buf);
4416 
4417 	old_buf = buf;
4418 	nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4419 	if (buf == old_buf)
4420 		return -EINVAL;
4421 
4422 	buf = strstrip(buf);
4423 
4424 	while ((start = strsep(&buf, " ")) != NULL) {
4425 		if (!strlen(start))
4426 			continue;
4427 		switch (match_token(start, tokens, args)) {
4428 		case MEMORY_RECLAIM_SWAPPINESS:
4429 			if (match_int(&args[0], &swappiness))
4430 				return -EINVAL;
4431 			if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4432 				return -EINVAL;
4433 			break;
4434 		default:
4435 			return -EINVAL;
4436 		}
4437 	}
4438 
4439 	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4440 	while (nr_reclaimed < nr_to_reclaim) {
4441 		/* Will converge on zero, but reclaim enforces a minimum */
4442 		unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4443 		unsigned long reclaimed;
4444 
4445 		if (signal_pending(current))
4446 			return -EINTR;
4447 
4448 		/*
4449 		 * This is the final attempt, drain percpu lru caches in the
4450 		 * hope of introducing more evictable pages for
4451 		 * try_to_free_mem_cgroup_pages().
4452 		 */
4453 		if (!nr_retries)
4454 			lru_add_drain_all();
4455 
4456 		reclaimed = try_to_free_mem_cgroup_pages(memcg,
4457 					batch_size, GFP_KERNEL,
4458 					reclaim_options,
4459 					swappiness == -1 ? NULL : &swappiness);
4460 
4461 		if (!reclaimed && !nr_retries--)
4462 			return -EAGAIN;
4463 
4464 		nr_reclaimed += reclaimed;
4465 	}
4466 
4467 	return nbytes;
4468 }
4469 
4470 static struct cftype memory_files[] = {
4471 	{
4472 		.name = "current",
4473 		.flags = CFTYPE_NOT_ON_ROOT,
4474 		.read_u64 = memory_current_read,
4475 	},
4476 	{
4477 		.name = "peak",
4478 		.flags = CFTYPE_NOT_ON_ROOT,
4479 		.open = peak_open,
4480 		.release = peak_release,
4481 		.seq_show = memory_peak_show,
4482 		.write = memory_peak_write,
4483 	},
4484 	{
4485 		.name = "min",
4486 		.flags = CFTYPE_NOT_ON_ROOT,
4487 		.seq_show = memory_min_show,
4488 		.write = memory_min_write,
4489 	},
4490 	{
4491 		.name = "low",
4492 		.flags = CFTYPE_NOT_ON_ROOT,
4493 		.seq_show = memory_low_show,
4494 		.write = memory_low_write,
4495 	},
4496 	{
4497 		.name = "high",
4498 		.flags = CFTYPE_NOT_ON_ROOT,
4499 		.seq_show = memory_high_show,
4500 		.write = memory_high_write,
4501 	},
4502 	{
4503 		.name = "max",
4504 		.flags = CFTYPE_NOT_ON_ROOT,
4505 		.seq_show = memory_max_show,
4506 		.write = memory_max_write,
4507 	},
4508 	{
4509 		.name = "events",
4510 		.flags = CFTYPE_NOT_ON_ROOT,
4511 		.file_offset = offsetof(struct mem_cgroup, events_file),
4512 		.seq_show = memory_events_show,
4513 	},
4514 	{
4515 		.name = "events.local",
4516 		.flags = CFTYPE_NOT_ON_ROOT,
4517 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
4518 		.seq_show = memory_events_local_show,
4519 	},
4520 	{
4521 		.name = "stat",
4522 		.seq_show = memory_stat_show,
4523 	},
4524 #ifdef CONFIG_NUMA
4525 	{
4526 		.name = "numa_stat",
4527 		.seq_show = memory_numa_stat_show,
4528 	},
4529 #endif
4530 	{
4531 		.name = "oom.group",
4532 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4533 		.seq_show = memory_oom_group_show,
4534 		.write = memory_oom_group_write,
4535 	},
4536 	{
4537 		.name = "reclaim",
4538 		.flags = CFTYPE_NS_DELEGATABLE,
4539 		.write = memory_reclaim,
4540 	},
4541 	{ }	/* terminate */
4542 };
4543 
4544 struct cgroup_subsys memory_cgrp_subsys = {
4545 	.css_alloc = mem_cgroup_css_alloc,
4546 	.css_online = mem_cgroup_css_online,
4547 	.css_offline = mem_cgroup_css_offline,
4548 	.css_released = mem_cgroup_css_released,
4549 	.css_free = mem_cgroup_css_free,
4550 	.css_reset = mem_cgroup_css_reset,
4551 	.css_rstat_flush = mem_cgroup_css_rstat_flush,
4552 	.attach = mem_cgroup_attach,
4553 	.fork = mem_cgroup_fork,
4554 	.exit = mem_cgroup_exit,
4555 	.dfl_cftypes = memory_files,
4556 #ifdef CONFIG_MEMCG_V1
4557 	.legacy_cftypes = mem_cgroup_legacy_files,
4558 #endif
4559 	.early_init = 0,
4560 };
4561 
4562 /**
4563  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4564  * @root: the top ancestor of the sub-tree being checked
4565  * @memcg: the memory cgroup to check
4566  *
4567  * WARNING: This function is not stateless! It can only be used as part
4568  *          of a top-down tree iteration, not for isolated queries.
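 *
 * With the cgroup2 "memory_recursiveprot" mount option
 * (CGRP_ROOT_MEMORY_RECURSIVE_PROT below), an ancestor's protection is
 * also distributed to children that leave memory.min/memory.low unset.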
4569  */
4570 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4571 				     struct mem_cgroup *memcg)
4572 {
4573 	bool recursive_protection =
4574 		cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4575 
4576 	if (mem_cgroup_disabled())
4577 		return;
4578 
4579 	if (!root)
4580 		root = root_mem_cgroup;
4581 
4582 	page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4583 }
4584 
4585 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4586 			gfp_t gfp)
4587 {
4588 	int ret;
4589 
4590 	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4591 	if (ret)
4592 		goto out;
4593 
4594 	css_get(&memcg->css);
4595 	commit_charge(folio, memcg);
4596 	memcg1_commit_charge(folio, memcg);
4597 out:
4598 	return ret;
4599 }
4600 
4601 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4602 {
4603 	struct mem_cgroup *memcg;
4604 	int ret;
4605 
4606 	memcg = get_mem_cgroup_from_mm(mm);
4607 	ret = charge_memcg(folio, memcg, gfp);
4608 	css_put(&memcg->css);
4609 
4610 	return ret;
4611 }
4612 
4613 /**
4614  * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4615  * @folio: folio being charged
4616  * @gfp: reclaim mode
4617  *
4618  * This function is called when allocating a huge page folio, after the page has
4619  * already been obtained and charged to the appropriate hugetlb cgroup
4620  * controller (if it is enabled).
4621  *
4622  * Returns -ENOMEM if the memcg is already full.
4623  * Returns 0 if either the charge was successful, or if we skip the charging.
4624  */
4625 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4626 {
4627 	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4628 	int ret = 0;
4629 
4630 	/*
4631 	 * Even if memcg does not account for hugetlb, we still want to update
4632 	 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
4633 	 * charging the memcg.
4634 	 */
4635 	if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4636 		!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4637 		goto out;
4638 
4639 	if (charge_memcg(folio, memcg, gfp))
4640 		ret = -ENOMEM;
4641 
4642 out:
4643 	mem_cgroup_put(memcg);
4644 	return ret;
4645 }
4646 
4647 /**
4648  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4649  * @folio: folio to charge.
4650  * @mm: mm context of the victim
4651  * @gfp: reclaim mode
4652  * @entry: swap entry for which the folio is allocated
4653  *
4654  * This function charges a folio allocated for swapin. Please call this before
4655  * adding the folio to the swapcache.
4656  *
4657  * Returns 0 on success. Otherwise, an error code is returned.
4658  */
4659 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4660 				  gfp_t gfp, swp_entry_t entry)
4661 {
4662 	struct mem_cgroup *memcg;
4663 	unsigned short id;
4664 	int ret;
4665 
4666 	if (mem_cgroup_disabled())
4667 		return 0;
4668 
4669 	id = lookup_swap_cgroup_id(entry);
4670 	rcu_read_lock();
4671 	memcg = mem_cgroup_from_id(id);
4672 	if (!memcg || !css_tryget_online(&memcg->css))
4673 		memcg = get_mem_cgroup_from_mm(mm);
4674 	rcu_read_unlock();
4675 
4676 	ret = charge_memcg(folio, memcg, gfp);
4677 
4678 	css_put(&memcg->css);
4679 	return ret;
4680 }
4681 
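/*
 * Batching context for uncharging a run of folios: counts are gathered
 * per (memcg, nid) run and flushed in a single uncharge_batch() call,
 * e.g. from __mem_cgroup_uncharge_folios() below.
 */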
4682 struct uncharge_gather {
4683 	struct mem_cgroup *memcg;
4684 	unsigned long nr_memory;
4685 	unsigned long pgpgout;
4686 	unsigned long nr_kmem;
4687 	int nid;
4688 };
4689 
4690 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4691 {
4692 	memset(ug, 0, sizeof(*ug));
4693 }
4694 
4695 static void uncharge_batch(const struct uncharge_gather *ug)
4696 {
4697 	if (ug->nr_memory) {
4698 		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4699 		if (do_memsw_account())
4700 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4701 		if (ug->nr_kmem) {
4702 			mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4703 			memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4704 		}
4705 		memcg1_oom_recover(ug->memcg);
4706 	}
4707 
4708 	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4709 
4710 	/* drop reference from uncharge_folio */
4711 	css_put(&ug->memcg->css);
4712 }
4713 
4714 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4715 {
4716 	long nr_pages;
4717 	struct mem_cgroup *memcg;
4718 	struct obj_cgroup *objcg;
4719 
4720 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4721 
4722 	/*
4723 	 * Nobody should be changing or seriously looking at
4724 	 * folio memcg or objcg at this point, we have fully
4725 	 * exclusive access to the folio.
4726 	 */
4727 	if (folio_memcg_kmem(folio)) {
4728 		objcg = __folio_objcg(folio);
4729 		/*
4730 		 * This get matches the put at the end of the function and
4731 		 * kmem pages do not hold memcg references anymore.
4732 		 */
4733 		memcg = get_mem_cgroup_from_objcg(objcg);
4734 	} else {
4735 		memcg = __folio_memcg(folio);
4736 	}
4737 
4738 	if (!memcg)
4739 		return;
4740 
4741 	if (ug->memcg != memcg) {
4742 		if (ug->memcg) {
4743 			uncharge_batch(ug);
4744 			uncharge_gather_clear(ug);
4745 		}
4746 		ug->memcg = memcg;
4747 		ug->nid = folio_nid(folio);
4748 
4749 		/* pairs with css_put in uncharge_batch */
4750 		css_get(&memcg->css);
4751 	}
4752 
4753 	nr_pages = folio_nr_pages(folio);
4754 
4755 	if (folio_memcg_kmem(folio)) {
4756 		ug->nr_memory += nr_pages;
4757 		ug->nr_kmem += nr_pages;
4758 
4759 		folio->memcg_data = 0;
4760 		obj_cgroup_put(objcg);
4761 	} else {
4762 		/* LRU pages aren't accounted at the root level */
4763 		if (!mem_cgroup_is_root(memcg))
4764 			ug->nr_memory += nr_pages;
4765 		ug->pgpgout++;
4766 
4767 		WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4768 		folio->memcg_data = 0;
4769 	}
4770 
4771 	css_put(&memcg->css);
4772 }
4773 
4774 void __mem_cgroup_uncharge(struct folio *folio)
4775 {
4776 	struct uncharge_gather ug;
4777 
4778 	/* Don't touch folio->lru of any random page, pre-check: */
4779 	if (!folio_memcg_charged(folio))
4780 		return;
4781 
4782 	uncharge_gather_clear(&ug);
4783 	uncharge_folio(folio, &ug);
4784 	uncharge_batch(&ug);
4785 }
4786 
4787 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4788 {
4789 	struct uncharge_gather ug;
4790 	unsigned int i;
4791 
4792 	uncharge_gather_clear(&ug);
4793 	for (i = 0; i < folios->nr; i++)
4794 		uncharge_folio(folios->folios[i], &ug);
4795 	if (ug.memcg)
4796 		uncharge_batch(&ug);
4797 }
4798 
4799 /**
4800  * mem_cgroup_replace_folio - Charge a folio's replacement.
4801  * @old: Currently circulating folio.
4802  * @new: Replacement folio.
4803  *
4804  * Charge @new as a replacement folio for @old. @old will
4805  * be uncharged upon free.
4806  *
4807  * Both folios must be locked, @new->mapping must be set up.
4808  */
4809 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4810 {
4811 	struct mem_cgroup *memcg;
4812 	long nr_pages = folio_nr_pages(new);
4813 
4814 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4815 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4816 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4817 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4818 
4819 	if (mem_cgroup_disabled())
4820 		return;
4821 
4822 	/* Page cache replacement: new folio already charged? */
4823 	if (folio_memcg_charged(new))
4824 		return;
4825 
4826 	memcg = folio_memcg(old);
4827 	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4828 	if (!memcg)
4829 		return;
4830 
4831 	/* Force-charge the new page. The old one will be freed soon */
4832 	if (!mem_cgroup_is_root(memcg)) {
4833 		page_counter_charge(&memcg->memory, nr_pages);
4834 		if (do_memsw_account())
4835 			page_counter_charge(&memcg->memsw, nr_pages);
4836 	}
4837 
4838 	css_get(&memcg->css);
4839 	commit_charge(new, memcg);
4840 	memcg1_commit_charge(new, memcg);
4841 }
4842 
4843 /**
4844  * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4845  * @old: Currently circulating folio.
4846  * @new: Replacement folio.
4847  *
4848  * Transfer the memcg data from the old folio to the new folio for migration.
4849  * The old folio's data info will be cleared. Note that the memory counters
4850  * will remain unchanged throughout the process.
4851  *
4852  * Both folios must be locked, @new->mapping must be set up.
4853  */
4854 void mem_cgroup_migrate(struct folio *old, struct folio *new)
4855 {
4856 	struct mem_cgroup *memcg;
4857 
4858 	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4859 	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4860 	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4861 	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4862 	VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4863 
4864 	if (mem_cgroup_disabled())
4865 		return;
4866 
4867 	memcg = folio_memcg(old);
4868 	/*
4869 	 * Note that it is normal to see !memcg for a hugetlb folio.
4870 	 * For example, it could have been allocated when memory_hugetlb_accounting
4871 	 * was not selected.
4872 	 */
4873 	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4874 	if (!memcg)
4875 		return;
4876 
4877 	/* Transfer the charge and the css ref */
4878 	commit_charge(new, memcg);
4879 
4880 	/* Warning should never happen, so don't worry about refcount non-0 */
4881 	WARN_ON_ONCE(folio_unqueue_deferred_split(old));
4882 	old->memcg_data = 0;
4883 }
4884 
4885 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4886 EXPORT_SYMBOL(memcg_sockets_enabled_key);
4887 
4888 void mem_cgroup_sk_alloc(struct sock *sk)
4889 {
4890 	struct mem_cgroup *memcg;
4891 
4892 	if (!mem_cgroup_sockets_enabled)
4893 		return;
4894 
4895 	/* Do not associate the sock with unrelated interrupted task's memcg. */
4896 	if (!in_task())
4897 		return;
4898 
4899 	rcu_read_lock();
4900 	memcg = mem_cgroup_from_task(current);
4901 	if (mem_cgroup_is_root(memcg))
4902 		goto out;
4903 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4904 		goto out;
4905 	if (css_tryget(&memcg->css))
4906 		sk->sk_memcg = memcg;
4907 out:
4908 	rcu_read_unlock();
4909 }
4910 
4911 void mem_cgroup_sk_free(struct sock *sk)
4912 {
4913 	if (sk->sk_memcg)
4914 		css_put(&sk->sk_memcg->css);
4915 }
4916 
4917 /**
4918  * mem_cgroup_charge_skmem - charge socket memory
4919  * @memcg: memcg to charge
4920  * @nr_pages: number of pages to charge
4921  * @gfp_mask: reclaim mode
4922  *
4923  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4924  * @memcg's configured limit, %false if it doesn't.
4925  */
4926 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4927 			     gfp_t gfp_mask)
4928 {
4929 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4930 		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4931 
4932 	if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
4933 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4934 		return true;
4935 	}
4936 
4937 	return false;
4938 }
4939 
4940 /**
4941  * mem_cgroup_uncharge_skmem - uncharge socket memory
4942  * @memcg: memcg to uncharge
4943  * @nr_pages: number of pages to uncharge
4944  */
4945 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4946 {
4947 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4948 		memcg1_uncharge_skmem(memcg, nr_pages);
4949 		return;
4950 	}
4951 
4952 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4953 
4954 	refill_stock(memcg, nr_pages);
4955 }
4956 
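/*
 * Parse the "cgroup.memory=" boot parameter, a comma-separated list of
 * flags: e.g. booting with "cgroup.memory=nosocket,nokmem" disables
 * socket and kernel memory accounting.
 */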
4957 static int __init cgroup_memory(char *s)
4958 {
4959 	char *token;
4960 
4961 	while ((token = strsep(&s, ",")) != NULL) {
4962 		if (!*token)
4963 			continue;
4964 		if (!strcmp(token, "nosocket"))
4965 			cgroup_memory_nosocket = true;
4966 		if (!strcmp(token, "nokmem"))
4967 			cgroup_memory_nokmem = true;
4968 		if (!strcmp(token, "nobpf"))
4969 			cgroup_memory_nobpf = true;
4970 	}
4971 	return 1;
4972 }
4973 __setup("cgroup.memory=", cgroup_memory);
4974 
4975 /*
4976  * subsys_initcall() for memory controller.
4977  *
4978  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4979  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4980  * basically everything that doesn't depend on a specific mem_cgroup structure
4981  * should be initialized from here.
4982  */
4983 static int __init mem_cgroup_init(void)
4984 {
4985 	int cpu;
4986 
4987 	/*
4988 	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
4989 	 * used for per-memcg-per-cpu caching of per-node statistics. In order
4990 	 * to work fine, we should make sure that the overfill threshold can't
4991 	 * exceed S32_MAX / PAGE_SIZE.
4992 	 */
4993 	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4994 
4995 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4996 				  memcg_hotplug_cpu_dead);
4997 
4998 	for_each_possible_cpu(cpu)
4999 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5000 			  drain_local_stock);
5001 
5002 	return 0;
5003 }
5004 subsys_initcall(mem_cgroup_init);
5005 
5006 #ifdef CONFIG_SWAP
5007 /**
5008  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5009  * @folio: folio being added to swap
5010  * @entry: swap entry to charge
5011  *
5012  * Try to charge @folio's memcg for the swap space at @entry.
5013  *
5014  * Returns 0 on success, -ENOMEM on failure.
5015  */
5016 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5017 {
5018 	unsigned int nr_pages = folio_nr_pages(folio);
5019 	struct page_counter *counter;
5020 	struct mem_cgroup *memcg;
5021 
5022 	if (do_memsw_account())
5023 		return 0;
5024 
5025 	memcg = folio_memcg(folio);
5026 
5027 	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5028 	if (!memcg)
5029 		return 0;
5030 
5031 	if (!entry.val) {
5032 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5033 		return 0;
5034 	}
5035 
5036 	memcg = mem_cgroup_id_get_online(memcg);
5037 
5038 	if (!mem_cgroup_is_root(memcg) &&
5039 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5040 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5041 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5042 		mem_cgroup_id_put(memcg);
5043 		return -ENOMEM;
5044 	}
5045 
5046 	/* Get references for the tail pages, too */
5047 	if (nr_pages > 1)
5048 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
5049 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5050 
5051 	swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
5052 
5053 	return 0;
5054 }
5055 
5056 /**
5057  * __mem_cgroup_uncharge_swap - uncharge swap space
5058  * @entry: swap entry to uncharge
5059  * @nr_pages: the amount of swap space to uncharge
5060  */
5061 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5062 {
5063 	struct mem_cgroup *memcg;
5064 	unsigned short id;
5065 
5066 	id = swap_cgroup_clear(entry, nr_pages);
5067 	rcu_read_lock();
5068 	memcg = mem_cgroup_from_id(id);
5069 	if (memcg) {
5070 		if (!mem_cgroup_is_root(memcg)) {
5071 			if (do_memsw_account())
5072 				page_counter_uncharge(&memcg->memsw, nr_pages);
5073 			else
5074 				page_counter_uncharge(&memcg->swap, nr_pages);
5075 		}
5076 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5077 		mem_cgroup_id_put_many(memcg, nr_pages);
5078 	}
5079 	rcu_read_unlock();
5080 }
5081 
5082 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5083 {
5084 	long nr_swap_pages = get_nr_swap_pages();
5085 
5086 	if (mem_cgroup_disabled() || do_memsw_account())
5087 		return nr_swap_pages;
5088 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5089 		nr_swap_pages = min_t(long, nr_swap_pages,
5090 				      READ_ONCE(memcg->swap.max) -
5091 				      page_counter_read(&memcg->swap));
5092 	return nr_swap_pages;
5093 }
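
/*
 * Worked example with assumed numbers: given 1000 free swap pages globally,
 * a parent memcg with swap.max = 100 and 90 pages already charged, and a
 * child with unlimited swap.max, the walk above yields
 * min(1000, 100 - 90) = 10 usable swap pages for the child.
 */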

bool mem_cgroup_swap_full(struct folio *folio)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (vm_swap_full())
		return true;
	if (do_memsw_account())
		return false;

	memcg = folio_memcg(folio);
	if (!memcg)
		return false;

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
			return true;
	}

	return false;
}
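
/*
 * Worked example: the "usage * 2" comparisons treat a memcg as swap-full
 * once usage crosses half of swap.high or swap.max at any level of the
 * hierarchy, e.g. 100 pages charged against swap.max = 200. Reclaim-side
 * callers (such as shrink_folio_list()) use this signal to free the swap
 * entries of swapcache folios more aggressively.
 */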

static int __init setup_swap_account(char *s)
{
	bool res;

	if (!kstrtobool(s, &res) && !res)
		pr_warn_once("The swapaccount=0 commandline option is deprecated "
			     "in favor of configuring swap control via cgroupfs. "
			     "Please report your usecase to linux-mm@kvack.org if you "
			     "depend on this functionality.\n");
	return 1;
}
__setup("swapaccount=", setup_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_peak_show(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));

	return peak_show(sf, v, &memcg->swap);
}

static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
			       size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	return peak_write(of, buf, nbytes, off, &memcg->swap,
			  &memcg->swap_peaks);
}

static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}

static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}
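
/*
 * Interface sketch (cgroupfs path assumed): the pair above backs the
 * memory.swap.high file. page_counter_memparse() accepts "max" or a byte
 * value with an optional K/M/G suffix, stored internally in pages:
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.high
 *
 * sets the high limit to 131072 pages on a 4KiB-page system.
 */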

static int swap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}

static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}
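
/*
 * Example memory.swap.events output as produced by the seq_printf() calls
 * above (sample counts only; each is a running per-memcg total):
 *
 *	high 0
 *	max 14
 *	fail 3
 */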

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.peak",
		.flags = CFTYPE_NOT_ON_ROOT,
		.open = peak_open,
		.release = peak_release,
		.seq_show = swap_peak_show,
		.write = swap_peak_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};
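
/*
 * Once registered by mem_cgroup_swap_init() below, these entries appear in
 * every non-root cgroup as memory.swap.current, memory.swap.high,
 * memory.swap.max, memory.swap.peak and memory.swap.events; the "memory."
 * prefix comes from the subsystem name, added by the cgroup core.
 */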

#ifdef CONFIG_ZSWAP
/**
 * obj_cgroup_may_zswap - check if this cgroup can zswap
 * @objcg: the object cgroup
 *
 * Check if the hierarchical zswap limit has been reached.
 *
 * This doesn't check for specific headroom, and it is not atomic
 * either. But with zswap, the size of the allocation is only known
 * once compression has occurred, and this optimistic pre-check avoids
 * spending cycles on compression when there is already no room left
 * or zswap is disabled altogether somewhere in the hierarchy.
 */
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg, *original_memcg;
	bool ret = true;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;

	original_memcg = get_mem_cgroup_from_objcg(objcg);
	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
	     memcg = parent_mem_cgroup(memcg)) {
		unsigned long max = READ_ONCE(memcg->zswap_max);
		unsigned long pages;

		if (max == PAGE_COUNTER_MAX)
			continue;
		if (max == 0) {
			ret = false;
			break;
		}

		/* Force flush to get accurate stats for charging */
		__mem_cgroup_flush_stats(memcg, true);
		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
		if (pages < max)
			continue;
		ret = false;
		break;
	}
	mem_cgroup_put(original_memcg);
	return ret;
}
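
/*
 * Sketch of the intended call pattern, modelled on zswap_store() in
 * mm/zswap.c (simplified; details vary by kernel version): the optimistic
 * pre-check runs before compression, and the charge follows once the
 * compressed size is known.
 *
 *	if (objcg && !obj_cgroup_may_zswap(objcg))
 *		goto reject;		(over a hierarchical limit)
 *	... compress the folio ...
 *	obj_cgroup_charge_zswap(objcg, compressed_len);
 */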

/**
 * obj_cgroup_charge_zswap - charge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * This forces the charge after obj_cgroup_may_zswap() allowed
 * compression and storage in zswap for this cgroup to go ahead.
 */
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));

	/* PF_MEMALLOC context, charging must succeed */
	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
		VM_WARN_ON_ONCE(1);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
	rcu_read_unlock();
}

/**
 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
 * @objcg: the object cgroup
 * @size: size of compressed object
 *
 * Uncharges zswap memory when a page is swapped back in.
 */
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
{
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	obj_cgroup_uncharge(objcg, size);

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
	rcu_read_unlock();
}

bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* If zswap is disabled, do not block pages going to the swap device */
	if (!zswap_is_enabled())
		return true;

	for (; memcg; memcg = parent_mem_cgroup(memcg))
		if (!READ_ONCE(memcg->zswap_writeback))
			return false;

	return true;
}
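
/*
 * Example: writeback must be enabled at every level of the hierarchy. If a
 * parent sets memory.zswap.writeback to 0, the loop above returns false for
 * all of its descendants, even those that set it back to 1 themselves.
 */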

static u64 zswap_current_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	mem_cgroup_flush_stats(memcg);
	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}

static int zswap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
}

static ssize_t zswap_max_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->zswap_max, max);

	return nbytes;
}

static int zswap_writeback_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
	return 0;
}

static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int zswap_writeback;
	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);

	if (parse_ret)
		return parse_ret;

	if (zswap_writeback != 0 && zswap_writeback != 1)
		return -EINVAL;

	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
	return nbytes;
}
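
/*
 * Interface sketch (cgroupfs path assumed): only "0" and "1" are accepted,
 * so for example
 *
 *	echo 0 > /sys/fs/cgroup/<group>/memory.zswap.writeback
 *
 * prevents zswap from writing this cgroup's entries back to the backing
 * swap device, subject to the hierarchical check above.
 */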

static struct cftype zswap_files[] = {
	{
		.name = "zswap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = zswap_current_read,
	},
	{
		.name = "zswap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = zswap_max_show,
		.write = zswap_max_write,
	},
	{
		.name = "zswap.writeback",
		.seq_show = zswap_writeback_show,
		.write = zswap_writeback_write,
	},
	{ }	/* terminate */
};
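
/*
 * Note that zswap.writeback, unlike the other entries here, carries no
 * CFTYPE_NOT_ON_ROOT flag, so memory.zswap.writeback is exposed on the
 * root cgroup as well.
 */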
#endif /* CONFIG_ZSWAP */

static int __init mem_cgroup_swap_init(void)
{
	if (mem_cgroup_disabled())
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
#ifdef CONFIG_MEMCG_V1
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
#endif
#ifdef CONFIG_ZSWAP
	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
#endif
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_SWAP */