1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26 */
27
28 #include <linux/cgroup-defs.h>
29 #include <linux/page_counter.h>
30 #include <linux/memcontrol.h>
31 #include <linux/cgroup.h>
32 #include <linux/cpuset.h>
33 #include <linux/sched/mm.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/hugetlb.h>
36 #include <linux/pagemap.h>
37 #include <linux/folio_batch.h>
38 #include <linux/vm_event_item.h>
39 #include <linux/smp.h>
40 #include <linux/page-flags.h>
41 #include <linux/backing-dev.h>
42 #include <linux/bit_spinlock.h>
43 #include <linux/rcupdate.h>
44 #include <linux/limits.h>
45 #include <linux/export.h>
46 #include <linux/list.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swapops.h>
51 #include <linux/spinlock.h>
52 #include <linux/fs.h>
53 #include <linux/seq_file.h>
54 #include <linux/vmpressure.h>
55 #include <linux/memremap.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/resume_user_mode.h>
62 #include <linux/psi.h>
63 #include <linux/seq_buf.h>
64 #include <linux/sched/isolation.h>
65 #include <linux/kmemleak.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70 #include "memcontrol-v1.h"
71
72 #include <linux/uaccess.h>
73
74 #define CREATE_TRACE_POINTS
75 #include <trace/events/memcg.h>
76 #undef CREATE_TRACE_POINTS
77
78 #include <trace/events/vmscan.h>
79
80 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
81 EXPORT_SYMBOL(memory_cgrp_subsys);
82
83 struct mem_cgroup *root_mem_cgroup __read_mostly;
84 EXPORT_SYMBOL(root_mem_cgroup);
85
86 /* Active memory cgroup to use from an interrupt context */
87 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
88 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
89
90 /* Socket memory accounting disabled? */
91 static bool cgroup_memory_nosocket __ro_after_init;
92
93 /* Kernel memory accounting disabled? */
94 static bool cgroup_memory_nokmem __ro_after_init;
95
96 /* BPF memory accounting disabled? */
97 static bool cgroup_memory_nobpf __ro_after_init;
98
99 static struct workqueue_struct *memcg_wq __ro_after_init;
100
101 static struct kmem_cache *memcg_cachep;
102 static struct kmem_cache *memcg_pn_cachep;
103
104 #ifdef CONFIG_CGROUP_WRITEBACK
105 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
106 #endif
107
108 static inline bool task_is_dying(void)
109 {
110 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
111 (current->flags & PF_EXITING);
112 }
113
114 /* Some nice accessors for the vmpressure. */
115 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
116 {
117 if (!memcg)
118 memcg = root_mem_cgroup;
119 return &memcg->vmpressure;
120 }
121
122 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
123 {
124 return container_of(vmpr, struct mem_cgroup, vmpressure);
125 }
126
127 #define SEQ_BUF_SIZE SZ_4K
128 #define CURRENT_OBJCG_UPDATE_BIT 0
129 #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
130
131 static DEFINE_SPINLOCK(objcg_lock);
132
133 bool mem_cgroup_kmem_disabled(void)
134 {
135 return cgroup_memory_nokmem;
136 }
137
138 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
139
140 static void obj_cgroup_release(struct percpu_ref *ref)
141 {
142 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
143 unsigned int nr_bytes;
144 unsigned int nr_pages;
145 unsigned long flags;
146
147 /*
148 * At this point all allocated objects are freed, and
149 * objcg->nr_charged_bytes can't have an arbitrary byte value.
150 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
151 *
152 * The following sequence can lead to it:
153 * 1) CPU0: objcg == stock->cached_objcg
154 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
155 * PAGE_SIZE bytes are charged
156 * 3) CPU1: a process from another memcg is allocating something,
157 * the stock is flushed,
158 * objcg->nr_charged_bytes = PAGE_SIZE - 92
159 * 4) CPU0: we release this object,
160 * 92 bytes are added to stock->nr_bytes
161 * 5) CPU0: stock is flushed,
162 * 92 bytes are added to objcg->nr_charged_bytes
163 *
164 * As a result, nr_charged_bytes == PAGE_SIZE.
165 * This page will be uncharged in obj_cgroup_release().
166 */
167 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
168 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
169 nr_pages = nr_bytes >> PAGE_SHIFT;
170
171 if (nr_pages) {
172 struct mem_cgroup *memcg;
173
174 memcg = get_mem_cgroup_from_objcg(objcg);
175 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
176 memcg1_account_kmem(memcg, -nr_pages);
177 if (!mem_cgroup_is_root(memcg))
178 memcg_uncharge(memcg, nr_pages);
179 mem_cgroup_put(memcg);
180 }
181
182 spin_lock_irqsave(&objcg_lock, flags);
183 list_del(&objcg->list);
184 spin_unlock_irqrestore(&objcg_lock, flags);
185
186 percpu_ref_exit(ref);
187 kfree_rcu(objcg, rcu);
188 }
189
190 static struct obj_cgroup *obj_cgroup_alloc(void)
191 {
192 struct obj_cgroup *objcg;
193 int ret;
194
195 objcg = kzalloc_obj(struct obj_cgroup);
196 if (!objcg)
197 return NULL;
198
199 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
200 GFP_KERNEL);
201 if (ret) {
202 kfree(objcg);
203 return NULL;
204 }
205 INIT_LIST_HEAD(&objcg->list);
206 return objcg;
207 }
208
209 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
210 struct mem_cgroup *parent)
211 {
212 struct obj_cgroup *objcg, *iter;
213
214 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
215
216 spin_lock_irq(&objcg_lock);
217
218 /* 1) Ready to reparent active objcg. */
219 list_add(&objcg->list, &memcg->objcg_list);
220 /* 2) Reparent active objcg and already reparented objcgs to parent. */
221 list_for_each_entry(iter, &memcg->objcg_list, list)
222 WRITE_ONCE(iter->memcg, parent);
223 /* 3) Move already reparented objcgs to the parent's list */
224 list_splice(&memcg->objcg_list, &parent->objcg_list);
225
226 spin_unlock_irq(&objcg_lock);
227
228 percpu_ref_kill(&objcg->refcnt);
229 }
230
231 /*
232 * A lot of the calls to the cache allocation functions are expected to be
233 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
234 * conditional to this static branch, we'll have to allow modules that do
235 * kmem_cache_alloc and the like to see this symbol as well.
236 */
237 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
238 EXPORT_SYMBOL(memcg_kmem_online_key);
239
240 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
241 EXPORT_SYMBOL(memcg_bpf_enabled_key);
242
243 /**
244 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
245 * @folio: folio of interest
246 *
247 * If memcg is bound to the default hierarchy, css of the memcg associated
248 * with @folio is returned. The returned css remains associated with @folio
249 * until it is released.
250 *
251 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
252 * is returned.
253 */
254 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
255 {
256 struct mem_cgroup *memcg = folio_memcg(folio);
257
258 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
259 memcg = root_mem_cgroup;
260
261 return &memcg->css;
262 }
263
264 /**
265 * page_cgroup_ino - return inode number of the memcg a page is charged to
266 * @page: the page
267 *
268 * Look up the closest online ancestor of the memory cgroup @page is charged to
269 * and return its inode number or 0 if @page is not charged to any cgroup. It
270 * is safe to call this function without holding a reference to @page.
271 *
272 * Note, this function is inherently racy, because there is nothing to prevent
273 * the cgroup inode from getting torn down and potentially reallocated a moment
274 * after page_cgroup_ino() returns, so it only should be used by callers that
275 * do not care (such as procfs interfaces).
276 */
277 ino_t page_cgroup_ino(struct page *page)
278 {
279 struct mem_cgroup *memcg;
280 unsigned long ino = 0;
281
282 rcu_read_lock();
283 /* page_folio() is racy here, but the entire function is racy anyway */
284 memcg = folio_memcg_check(page_folio(page));
285
286 while (memcg && !css_is_online(&memcg->css))
287 memcg = parent_mem_cgroup(memcg);
288 if (memcg)
289 ino = cgroup_ino(memcg->css.cgroup);
290 rcu_read_unlock();
291 return ino;
292 }
293 EXPORT_SYMBOL_GPL(page_cgroup_ino);
294
295 /* Subset of node_stat_item for memcg stats */
296 static const unsigned int memcg_node_stat_items[] = {
297 NR_INACTIVE_ANON,
298 NR_ACTIVE_ANON,
299 NR_INACTIVE_FILE,
300 NR_ACTIVE_FILE,
301 NR_UNEVICTABLE,
302 NR_SLAB_RECLAIMABLE_B,
303 NR_SLAB_UNRECLAIMABLE_B,
304 WORKINGSET_REFAULT_ANON,
305 WORKINGSET_REFAULT_FILE,
306 WORKINGSET_ACTIVATE_ANON,
307 WORKINGSET_ACTIVATE_FILE,
308 WORKINGSET_RESTORE_ANON,
309 WORKINGSET_RESTORE_FILE,
310 WORKINGSET_NODERECLAIM,
311 NR_ANON_MAPPED,
312 NR_FILE_MAPPED,
313 NR_FILE_PAGES,
314 NR_FILE_DIRTY,
315 NR_WRITEBACK,
316 NR_SHMEM,
317 NR_SHMEM_THPS,
318 NR_FILE_THPS,
319 NR_ANON_THPS,
320 NR_VMALLOC,
321 NR_KERNEL_STACK_KB,
322 NR_PAGETABLE,
323 NR_SECONDARY_PAGETABLE,
324 #ifdef CONFIG_SWAP
325 NR_SWAPCACHE,
326 #endif
327 #ifdef CONFIG_NUMA_BALANCING
328 PGPROMOTE_SUCCESS,
329 #endif
330 PGDEMOTE_KSWAPD,
331 PGDEMOTE_DIRECT,
332 PGDEMOTE_KHUGEPAGED,
333 PGDEMOTE_PROACTIVE,
334 PGSTEAL_KSWAPD,
335 PGSTEAL_DIRECT,
336 PGSTEAL_KHUGEPAGED,
337 PGSTEAL_PROACTIVE,
338 PGSTEAL_ANON,
339 PGSTEAL_FILE,
340 PGSCAN_KSWAPD,
341 PGSCAN_DIRECT,
342 PGSCAN_KHUGEPAGED,
343 PGSCAN_PROACTIVE,
344 PGSCAN_ANON,
345 PGSCAN_FILE,
346 PGREFILL,
347 #ifdef CONFIG_HUGETLB_PAGE
348 NR_HUGETLB,
349 #endif
350 };
351
352 static const unsigned int memcg_stat_items[] = {
353 MEMCG_SWAP,
354 MEMCG_SOCK,
355 MEMCG_PERCPU_B,
356 MEMCG_KMEM,
357 MEMCG_ZSWAP_B,
358 MEMCG_ZSWAPPED,
359 MEMCG_ZSWAP_INCOMP,
360 };
361
362 #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
363 #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
364 ARRAY_SIZE(memcg_stat_items))
365 #define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
366 static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
367
368 static void init_memcg_stats(void)
369 {
370 u8 i, j = 0;
371
372 BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
373
374 memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
375
376 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
377 mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
378
379 for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
380 mem_cgroup_stats_index[memcg_stat_items[i]] = j;
381 }
382
383 static inline int memcg_stats_index(int idx)
384 {
385 return mem_cgroup_stats_index[idx];
386 }
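/*
 * Illustrative sketch (not part of the original source): the compact index
 * maps the sparse MEMCG_NR_STAT item space onto the much smaller
 * MEMCG_VMSTAT_SIZE per-memcg arrays. An item that is not listed in
 * memcg_node_stat_items[] or memcg_stat_items[] keeps the U8_MAX filler
 * and is rejected by BAD_STAT_IDX():
 *
 *	int i = memcg_stats_index(NR_FILE_PAGES);
 *	if (!BAD_STAT_IDX(i))
 *		x = READ_ONCE(memcg->vmstats->state[i]);
 */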
387
388 struct lruvec_stats_percpu {
389 /* Local (CPU and cgroup) state */
390 long state[NR_MEMCG_NODE_STAT_ITEMS];
391
392 /* Delta calculation for lockless upward propagation */
393 long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
394 };
395
396 struct lruvec_stats {
397 /* Aggregated (CPU and subtree) state */
398 long state[NR_MEMCG_NODE_STAT_ITEMS];
399
400 /* Non-hierarchical (CPU aggregated) state */
401 long state_local[NR_MEMCG_NODE_STAT_ITEMS];
402
403 /* Pending child counts during tree propagation */
404 long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
405 };
406
407 unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
408 {
409 struct mem_cgroup_per_node *pn;
410 long x;
411 int i;
412
413 if (mem_cgroup_disabled())
414 return node_page_state(lruvec_pgdat(lruvec), idx);
415
416 i = memcg_stats_index(idx);
417 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
418 return 0;
419
420 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
421 x = READ_ONCE(pn->lruvec_stats->state[i]);
422 #ifdef CONFIG_SMP
423 if (x < 0)
424 x = 0;
425 #endif
426 return x;
427 }
428
429 unsigned long lruvec_page_state_local(struct lruvec *lruvec,
430 enum node_stat_item idx)
431 {
432 struct mem_cgroup_per_node *pn;
433 long x;
434 int i;
435
436 if (mem_cgroup_disabled())
437 return node_page_state(lruvec_pgdat(lruvec), idx);
438
439 i = memcg_stats_index(idx);
440 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
441 return 0;
442
443 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
444 x = READ_ONCE(pn->lruvec_stats->state_local[i]);
445 #ifdef CONFIG_SMP
446 if (x < 0)
447 x = 0;
448 #endif
449 return x;
450 }
451
452 /* Subset of vm_event_item to report for memcg event stats */
453 static const unsigned int memcg_vm_event_stat[] = {
454 #ifdef CONFIG_MEMCG_V1
455 PGPGIN,
456 PGPGOUT,
457 #endif
458 PSWPIN,
459 PSWPOUT,
460 PGFAULT,
461 PGMAJFAULT,
462 PGACTIVATE,
463 PGDEACTIVATE,
464 PGLAZYFREE,
465 PGLAZYFREED,
466 #ifdef CONFIG_SWAP
467 SWPIN_ZERO,
468 SWPOUT_ZERO,
469 #endif
470 #ifdef CONFIG_ZSWAP
471 ZSWPIN,
472 ZSWPOUT,
473 ZSWPWB,
474 #endif
475 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
476 THP_FAULT_ALLOC,
477 THP_COLLAPSE_ALLOC,
478 THP_SWPOUT,
479 THP_SWPOUT_FALLBACK,
480 #endif
481 #ifdef CONFIG_NUMA_BALANCING
482 NUMA_PAGE_MIGRATE,
483 NUMA_PTE_UPDATES,
484 NUMA_HINT_FAULTS,
485 #endif
486 };
487
488 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
489 static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
490
491 static void init_memcg_events(void)
492 {
493 u8 i;
494
495 BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
496
497 memset(mem_cgroup_events_index, U8_MAX,
498 sizeof(mem_cgroup_events_index));
499
500 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
501 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
502 }
503
504 static inline int memcg_events_index(enum vm_event_item idx)
505 {
506 return mem_cgroup_events_index[idx];
507 }
508
509 struct memcg_vmstats_percpu {
510 /* Stats updates since the last flush */
511 unsigned int stats_updates;
512
513 /* Cached pointers for fast iteration in memcg_rstat_updated() */
514 struct memcg_vmstats_percpu __percpu *parent_pcpu;
515 struct memcg_vmstats *vmstats;
516
517 /* The above should fit a single cacheline for memcg_rstat_updated() */
518
519 /* Local (CPU and cgroup) page state & events */
520 long state[MEMCG_VMSTAT_SIZE];
521 unsigned long events[NR_MEMCG_EVENTS];
522
523 /* Delta calculation for lockless upward propagation */
524 long state_prev[MEMCG_VMSTAT_SIZE];
525 unsigned long events_prev[NR_MEMCG_EVENTS];
526 } ____cacheline_aligned;
527
528 struct memcg_vmstats {
529 /* Aggregated (CPU and subtree) page state & events */
530 long state[MEMCG_VMSTAT_SIZE];
531 unsigned long events[NR_MEMCG_EVENTS];
532
533 /* Non-hierarchical (CPU aggregated) page state & events */
534 long state_local[MEMCG_VMSTAT_SIZE];
535 unsigned long events_local[NR_MEMCG_EVENTS];
536
537 /* Pending child counts during tree propagation */
538 long state_pending[MEMCG_VMSTAT_SIZE];
539 unsigned long events_pending[NR_MEMCG_EVENTS];
540
541 /* Stats updates since the last flush */
542 atomic_t stats_updates;
543 };
544
545 /*
546 * memcg and lruvec stats flushing
547 *
548 * Many codepaths leading to stats updates or reads are performance sensitive,
549 * and adding stats flushing in such codepaths is not desirable. So, to optimize
550 * flushing, the kernel does the following:
551 *
552 * 1) Periodically and asynchronously flush the stats every 2 seconds so the
553 * rstat update tree does not grow unbounded.
554 *
555 * 2) Flush the stats synchronously on the reader side only when there are more
556 * than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
557 * leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
558 * events, but only for up to 2 seconds due to (1).
559 */
560 static void flush_memcg_stats_dwork(struct work_struct *w);
561 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
562 static u64 flush_last_time;
563
564 #define FLUSH_TIME (2UL*HZ)
565
566 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
567 {
568 return atomic_read(&vmstats->stats_updates) >
569 MEMCG_CHARGE_BATCH * num_online_cpus();
570 }
571
572 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
573 int cpu)
574 {
575 struct memcg_vmstats_percpu __percpu *statc_pcpu;
576 struct memcg_vmstats_percpu *statc;
577 unsigned int stats_updates;
578
579 if (!val)
580 return;
581
582 css_rstat_updated(&memcg->css, cpu);
583 statc_pcpu = memcg->vmstats_percpu;
584 for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
585 statc = this_cpu_ptr(statc_pcpu);
586 /*
587 * If @memcg is already flushable then all its ancestors are
588 * flushable as well and also there is no need to increase
589 * stats_updates.
590 */
591 if (memcg_vmstats_needs_flush(statc->vmstats))
592 break;
593
594 stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
595 abs(val));
596 if (stats_updates < MEMCG_CHARGE_BATCH)
597 continue;
598
599 stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
600 atomic_add(stats_updates, &statc->vmstats->stats_updates);
601 }
602 }
603
604 static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
605 {
606 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
607
608 trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
609 force, needs_flush);
610
611 if (!force && !needs_flush)
612 return;
613
614 if (mem_cgroup_is_root(memcg))
615 WRITE_ONCE(flush_last_time, jiffies_64);
616
617 css_rstat_flush(&memcg->css);
618 }
619
620 /*
621 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
622 * @memcg: root of the subtree to flush
623 *
624 * Flushing is serialized by the underlying global rstat lock. There is also a
625 * minimum amount of work to be done even if there are no stat updates to flush.
626 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
627 * avoids unnecessary work and contention on the underlying lock.
628 */
629 void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
630 {
631 if (mem_cgroup_disabled())
632 return;
633
634 if (!memcg)
635 memcg = root_mem_cgroup;
636
637 __mem_cgroup_flush_stats(memcg, false);
638 }
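/*
 * Illustrative reader-side pattern (a sketch, not a new API): flush the
 * subtree first so that the subsequent memcg_page_state() read is at most
 * one flush threshold out of date:
 *
 *	mem_cgroup_flush_stats(memcg);
 *	nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
 */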
639
640 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
641 {
642 /* Only flush if the periodic flusher is one full cycle late */
643 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
644 mem_cgroup_flush_stats(memcg);
645 }
646
647 static void flush_memcg_stats_dwork(struct work_struct *w)
648 {
649 /*
650 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
651 * in latency-sensitive paths is as cheap as possible.
652 */
653 __mem_cgroup_flush_stats(root_mem_cgroup, true);
654 queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
655 }
656
657 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
658 {
659 long x;
660 int i = memcg_stats_index(idx);
661
662 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
663 return 0;
664
665 x = READ_ONCE(memcg->vmstats->state[i]);
666 #ifdef CONFIG_SMP
667 if (x < 0)
668 x = 0;
669 #endif
670 return x;
671 }
672
673 bool memcg_stat_item_valid(int idx)
674 {
675 if ((u32)idx >= MEMCG_NR_STAT)
676 return false;
677
678 return !BAD_STAT_IDX(memcg_stats_index(idx));
679 }
680
681 static int memcg_page_state_unit(int item);
682
683 /*
684 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
685 * up non-zero sub-page updates to 1 page, as zero-sized updates are ignored.
686 */
687 static int memcg_state_val_in_pages(int idx, int val)
688 {
689 int unit = memcg_page_state_unit(idx);
690
691 if (!val || unit == PAGE_SIZE)
692 return val;
693 else
694 return max(val * unit / PAGE_SIZE, 1UL);
695 }
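/*
 * Worked example (assuming 4 KiB pages): a 512-byte update to
 * NR_SLAB_RECLAIMABLE_B (unit 1) rounds up to 1 page, while a 16 KiB update
 * to NR_KERNEL_STACK_KB (val = 16, unit SZ_1K) is normalized to
 * 16 * 1024 / 4096 = 4 pages. Page-sized items pass through unchanged.
 */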
696
697 /**
698 * mod_memcg_state - update cgroup memory statistics
699 * @memcg: the memory cgroup
700 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
701 * @val: delta to add to the counter, can be negative
702 */
703 void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
704 int val)
705 {
706 int i = memcg_stats_index(idx);
707 int cpu;
708
709 if (mem_cgroup_disabled())
710 return;
711
712 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
713 return;
714
715 cpu = get_cpu();
716
717 this_cpu_add(memcg->vmstats_percpu->state[i], val);
718 val = memcg_state_val_in_pages(idx, val);
719 memcg_rstat_updated(memcg, val, cpu);
720 trace_mod_memcg_state(memcg, idx, val);
721
722 put_cpu();
723 }
724
725 #ifdef CONFIG_MEMCG_V1
726 /* idx can be of type enum memcg_stat_item or node_stat_item. */
727 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
728 {
729 long x;
730 int i = memcg_stats_index(idx);
731
732 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
733 return 0;
734
735 x = READ_ONCE(memcg->vmstats->state_local[i]);
736 #ifdef CONFIG_SMP
737 if (x < 0)
738 x = 0;
739 #endif
740 return x;
741 }
742 #endif
743
744 static void mod_memcg_lruvec_state(struct lruvec *lruvec,
745 enum node_stat_item idx,
746 int val)
747 {
748 struct mem_cgroup_per_node *pn;
749 struct mem_cgroup *memcg;
750 int i = memcg_stats_index(idx);
751 int cpu;
752
753 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
754 return;
755
756 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
757 memcg = pn->memcg;
758
759 cpu = get_cpu();
760
761 /* Update memcg */
762 this_cpu_add(memcg->vmstats_percpu->state[i], val);
763
764 /* Update lruvec */
765 this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
766
767 val = memcg_state_val_in_pages(idx, val);
768 memcg_rstat_updated(memcg, val, cpu);
769 trace_mod_memcg_lruvec_state(memcg, idx, val);
770
771 put_cpu();
772 }
773
774 /**
775 * mod_lruvec_state - update lruvec memory statistics
776 * @lruvec: the lruvec
777 * @idx: the stat item
778 * @val: delta to add to the counter, can be negative
779 *
780 * The lruvec is the intersection of the NUMA node and a cgroup. This
781 * function updates all three counters that are affected by a
782 * change of state at this level: per-node, per-cgroup, per-lruvec.
783 */
784 void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
785 int val)
786 {
787 /* Update node */
788 mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
789
790 /* Update memcg and lruvec */
791 if (!mem_cgroup_disabled())
792 mod_memcg_lruvec_state(lruvec, idx, val);
793 }
794
795 void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
796 int val)
797 {
798 struct mem_cgroup *memcg;
799 pg_data_t *pgdat = folio_pgdat(folio);
800 struct lruvec *lruvec;
801
802 rcu_read_lock();
803 memcg = folio_memcg(folio);
804 /* Untracked pages have no memcg, no lruvec. Update only the node */
805 if (!memcg) {
806 rcu_read_unlock();
807 mod_node_page_state(pgdat, idx, val);
808 return;
809 }
810
811 lruvec = mem_cgroup_lruvec(memcg, pgdat);
812 mod_lruvec_state(lruvec, idx, val);
813 rcu_read_unlock();
814 }
815 EXPORT_SYMBOL(lruvec_stat_mod_folio);
816
817 void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
818 {
819 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
820 struct mem_cgroup *memcg;
821 struct lruvec *lruvec;
822
823 rcu_read_lock();
824 memcg = mem_cgroup_from_virt(p);
825
826 /*
827 * Untracked pages have no memcg, no lruvec. Update only the
828 * node. If we reparent the slab objects to the root memcg,
829 * when we free the slab object, we need to update the per-memcg
830 * vmstats to keep it correct for the root memcg.
831 */
832 if (!memcg) {
833 mod_node_page_state(pgdat, idx, val);
834 } else {
835 lruvec = mem_cgroup_lruvec(memcg, pgdat);
836 mod_lruvec_state(lruvec, idx, val);
837 }
838 rcu_read_unlock();
839 }
840
841 /**
842 * count_memcg_events - account VM events in a cgroup
843 * @memcg: the memory cgroup
844 * @idx: the event item
845 * @count: the number of events that occurred
846 */
847 void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
848 unsigned long count)
849 {
850 int i = memcg_events_index(idx);
851 int cpu;
852
853 if (mem_cgroup_disabled())
854 return;
855
856 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
857 return;
858
859 cpu = get_cpu();
860
861 this_cpu_add(memcg->vmstats_percpu->events[i], count);
862 memcg_rstat_updated(memcg, count, cpu);
863 trace_count_memcg_events(memcg, idx, count);
864
865 put_cpu();
866 }
867
868 unsigned long memcg_events(struct mem_cgroup *memcg, int event)
869 {
870 int i = memcg_events_index(event);
871
872 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
873 return 0;
874
875 return READ_ONCE(memcg->vmstats->events[i]);
876 }
877
878 bool memcg_vm_event_item_valid(enum vm_event_item idx)
879 {
880 if (idx >= NR_VM_EVENT_ITEMS)
881 return false;
882
883 return !BAD_STAT_IDX(memcg_events_index(idx));
884 }
885
886 #ifdef CONFIG_MEMCG_V1
887 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
888 {
889 int i = memcg_events_index(event);
890
891 if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
892 return 0;
893
894 return READ_ONCE(memcg->vmstats->events_local[i]);
895 }
896 #endif
897
898 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
899 {
900 /*
901 * mm_update_next_owner() may clear mm->owner to NULL
902 * if it races with swapoff, page migration, etc.
903 * So this can be called with p == NULL.
904 */
905 if (unlikely(!p))
906 return NULL;
907
908 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
909 }
910 EXPORT_SYMBOL(mem_cgroup_from_task);
911
912 static __always_inline struct mem_cgroup *active_memcg(void)
913 {
914 if (!in_task())
915 return this_cpu_read(int_active_memcg);
916 else
917 return current->active_memcg;
918 }
919
920 /**
921 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
922 * @mm: mm from which memcg should be extracted. It can be NULL.
923 *
924 * Obtain a reference on mm->memcg and return it if successful. If mm
925 * is NULL, then the memcg is chosen as follows:
926 * 1) The active memcg, if set.
927 * 2) current->mm->memcg, if available
928 * 3) root memcg
929 * If mem_cgroup is disabled, NULL is returned.
930 */
931 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
932 {
933 struct mem_cgroup *memcg;
934
935 if (mem_cgroup_disabled())
936 return NULL;
937
938 /*
939 * Page cache insertions can happen without an
940 * actual mm context, e.g. during disk probing
941 * on boot, loopback IO, acct() writes etc.
942 *
943 * No need to css_get on root memcg as the reference
944 * counting is disabled on the root level in the
945 * cgroup core. See CSS_NO_REF.
946 */
947 if (unlikely(!mm)) {
948 memcg = active_memcg();
949 if (unlikely(memcg)) {
950 /* remote memcg must hold a ref */
951 css_get(&memcg->css);
952 return memcg;
953 }
954 mm = current->mm;
955 if (unlikely(!mm))
956 return root_mem_cgroup;
957 }
958
959 rcu_read_lock();
960 do {
961 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
962 if (unlikely(!memcg))
963 memcg = root_mem_cgroup;
964 } while (!css_tryget(&memcg->css));
965 rcu_read_unlock();
966 return memcg;
967 }
968 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
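/*
 * Typical caller pattern (illustrative sketch only): the returned css
 * reference must be dropped once the memcg is no longer needed.
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	if (memcg) {
 *		... charge or inspect memcg ...
 *		css_put(&memcg->css);
 *	}
 */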
969
970 /**
971 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
972 */
973 struct mem_cgroup *get_mem_cgroup_from_current(void)
974 {
975 struct mem_cgroup *memcg;
976
977 if (mem_cgroup_disabled())
978 return NULL;
979
980 again:
981 rcu_read_lock();
982 memcg = mem_cgroup_from_task(current);
983 if (!css_tryget(&memcg->css)) {
984 rcu_read_unlock();
985 goto again;
986 }
987 rcu_read_unlock();
988 return memcg;
989 }
990
991 /**
992 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
993 * @folio: folio from which memcg should be extracted.
994 */
995 struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
996 {
997 struct mem_cgroup *memcg = folio_memcg(folio);
998
999 if (mem_cgroup_disabled())
1000 return NULL;
1001
1002 rcu_read_lock();
1003 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
1004 memcg = root_mem_cgroup;
1005 rcu_read_unlock();
1006 return memcg;
1007 }
1008
1009 /**
1010 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1011 * @root: hierarchy root
1012 * @prev: previously returned memcg, NULL on first invocation
1013 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1014 *
1015 * Returns references to children of the hierarchy below @root, or
1016 * @root itself, or %NULL after a full round-trip.
1017 *
1018 * Caller must pass the return value in @prev on subsequent
1019 * invocations for reference counting, or use mem_cgroup_iter_break()
1020 * to cancel a hierarchy walk before the round-trip is complete.
1021 *
1022 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1023 * in the hierarchy among all concurrent reclaimers operating on the
1024 * same node.
1025 */
1026 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1027 struct mem_cgroup *prev,
1028 struct mem_cgroup_reclaim_cookie *reclaim)
1029 {
1030 struct mem_cgroup_reclaim_iter *iter;
1031 struct cgroup_subsys_state *css;
1032 struct mem_cgroup *pos;
1033 struct mem_cgroup *next;
1034
1035 if (mem_cgroup_disabled())
1036 return NULL;
1037
1038 if (!root)
1039 root = root_mem_cgroup;
1040
1041 rcu_read_lock();
1042 restart:
1043 next = NULL;
1044
1045 if (reclaim) {
1046 int gen;
1047 int nid = reclaim->pgdat->node_id;
1048
1049 iter = &root->nodeinfo[nid]->iter;
1050 gen = atomic_read(&iter->generation);
1051
1052 /*
1053 * On start, join the current reclaim iteration cycle.
1054 * Exit when a concurrent walker completes it.
1055 */
1056 if (!prev)
1057 reclaim->generation = gen;
1058 else if (reclaim->generation != gen)
1059 goto out_unlock;
1060
1061 pos = READ_ONCE(iter->position);
1062 } else
1063 pos = prev;
1064
1065 css = pos ? &pos->css : NULL;
1066
1067 while ((css = css_next_descendant_pre(css, &root->css))) {
1068 /*
1069 * Verify the css and acquire a reference. The root
1070 * is provided by the caller, so we know it's alive
1071 * and kicking, and don't take an extra reference.
1072 */
1073 if (css == &root->css || css_tryget(css))
1074 break;
1075 }
1076
1077 next = mem_cgroup_from_css(css);
1078
1079 if (reclaim) {
1080 /*
1081 * The position could have already been updated by a competing
1082 * thread, so check that the value hasn't changed since we read
1083 * it to avoid reclaiming from the same cgroup twice.
1084 */
1085 if (cmpxchg(&iter->position, pos, next) != pos) {
1086 if (css && css != &root->css)
1087 css_put(css);
1088 goto restart;
1089 }
1090
1091 if (!next) {
1092 atomic_inc(&iter->generation);
1093
1094 /*
1095 * Reclaimers share the hierarchy walk, and a
1096 * new one might jump in right at the end of
1097 * the hierarchy - make sure they see at least
1098 * one group and restart from the beginning.
1099 */
1100 if (!prev)
1101 goto restart;
1102 }
1103 }
1104
1105 out_unlock:
1106 rcu_read_unlock();
1107 if (prev && prev != root)
1108 css_put(&prev->css);
1109
1110 return next;
1111 }
1112
1113 /**
1114 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1115 * @root: hierarchy root
1116 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1117 */
1118 void mem_cgroup_iter_break(struct mem_cgroup *root,
1119 struct mem_cgroup *prev)
1120 {
1121 if (!root)
1122 root = root_mem_cgroup;
1123 if (prev && prev != root)
1124 css_put(&prev->css);
1125 }
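/*
 * Canonical walk (sketch): visit @root and every descendant once, passing
 * the previous return value back in, and cancel early with
 * mem_cgroup_iter_break() if needed. some_condition() is a stand-in for
 * caller logic, not a real helper.
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (iter) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 */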
1126
1127 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1128 struct mem_cgroup *dead_memcg)
1129 {
1130 struct mem_cgroup_reclaim_iter *iter;
1131 struct mem_cgroup_per_node *mz;
1132 int nid;
1133
1134 for_each_node(nid) {
1135 mz = from->nodeinfo[nid];
1136 iter = &mz->iter;
1137 cmpxchg(&iter->position, dead_memcg, NULL);
1138 }
1139 }
1140
1141 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1142 {
1143 struct mem_cgroup *memcg = dead_memcg;
1144 struct mem_cgroup *last;
1145
1146 do {
1147 __invalidate_reclaim_iterators(memcg, dead_memcg);
1148 last = memcg;
1149 } while ((memcg = parent_mem_cgroup(memcg)));
1150
1151 /*
1152 * When cgroup1 non-hierarchy mode is used,
1153 * parent_mem_cgroup() does not walk all the way up to the
1154 * cgroup root (root_mem_cgroup). So we have to handle
1155 * dead_memcg from cgroup root separately.
1156 */
1157 if (!mem_cgroup_is_root(last))
1158 __invalidate_reclaim_iterators(root_mem_cgroup,
1159 dead_memcg);
1160 }
1161
1162 /**
1163 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1164 * @memcg: hierarchy root
1165 * @fn: function to call for each task
1166 * @arg: argument passed to @fn
1167 *
1168 * This function iterates over tasks attached to @memcg or to any of its
1169 * descendants and calls @fn for each task. If @fn returns a non-zero
1170 * value, the function breaks the iteration loop. Otherwise, it will iterate
1171 * over all tasks and return 0.
1172 *
1173 * This function must not be called for the root memory cgroup.
1174 */
1175 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1176 int (*fn)(struct task_struct *, void *), void *arg)
1177 {
1178 struct mem_cgroup *iter;
1179 int ret = 0;
1180
1181 BUG_ON(mem_cgroup_is_root(memcg));
1182
1183 for_each_mem_cgroup_tree(iter, memcg) {
1184 struct css_task_iter it;
1185 struct task_struct *task;
1186
1187 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1188 while (!ret && (task = css_task_iter_next(&it))) {
1189 ret = fn(task, arg);
1190 /* Avoid potential softlockup warning */
1191 cond_resched();
1192 }
1193 css_task_iter_end(&it);
1194 if (ret) {
1195 mem_cgroup_iter_break(memcg, iter);
1196 break;
1197 }
1198 }
1199 }
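/*
 * Example callback shape (hypothetical, for illustration only): return
 * non-zero to stop the walk early. Remember that @memcg must not be the
 * root memory cgroup.
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr_tasks);
 */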
1200
1201 #ifdef CONFIG_DEBUG_VM
1202 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1203 {
1204 struct mem_cgroup *memcg;
1205
1206 if (mem_cgroup_disabled())
1207 return;
1208
1209 memcg = folio_memcg(folio);
1210
1211 if (!memcg)
1212 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1213 else
1214 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1215 }
1216 #endif
1217
1218 /**
1219 * folio_lruvec_lock - Lock the lruvec for a folio.
1220 * @folio: Pointer to the folio.
1221 *
1222 * These functions are safe to use under any of the following conditions:
1223 * - folio locked
1224 * - folio_test_lru false
1225 * - folio frozen (refcount of 0)
1226 *
1227 * Return: The lruvec this folio is on with its lock held.
1228 */
1229 struct lruvec *folio_lruvec_lock(struct folio *folio)
1230 {
1231 struct lruvec *lruvec = folio_lruvec(folio);
1232
1233 spin_lock(&lruvec->lru_lock);
1234 lruvec_memcg_debug(lruvec, folio);
1235
1236 return lruvec;
1237 }
1238
1239 /**
1240 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1241 * @folio: Pointer to the folio.
1242 *
1243 * These functions are safe to use under any of the following conditions:
1244 * - folio locked
1245 * - folio_test_lru false
1246 * - folio frozen (refcount of 0)
1247 *
1248 * Return: The lruvec this folio is on with its lock held and interrupts
1249 * disabled.
1250 */
1251 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1252 {
1253 struct lruvec *lruvec = folio_lruvec(folio);
1254
1255 spin_lock_irq(&lruvec->lru_lock);
1256 lruvec_memcg_debug(lruvec, folio);
1257
1258 return lruvec;
1259 }
1260
1261 /**
1262 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1263 * @folio: Pointer to the folio.
1264 * @flags: Pointer to irqsave flags.
1265 *
1266 * These functions are safe to use under any of the following conditions:
1267 * - folio locked
1268 * - folio_test_lru false
1269 * - folio frozen (refcount of 0)
1270 *
1271 * Return: The lruvec this folio is on with its lock held and interrupts
1272 * disabled.
1273 */
1274 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1275 unsigned long *flags)
1276 {
1277 struct lruvec *lruvec = folio_lruvec(folio);
1278
1279 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1280 lruvec_memcg_debug(lruvec, folio);
1281
1282 return lruvec;
1283 }
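/*
 * Usage sketch for the irqsave variant (illustrative only): the caller is
 * responsible for dropping lru_lock with the saved flags.
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	... manipulate the folio's LRU state ...
 *	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 */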
1284
1285 /**
1286 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1287 * @lruvec: mem_cgroup per zone lru vector
1288 * @lru: index of lru list the page is sitting on
1289 * @zid: zone id of the accounted pages
1290 * @nr_pages: positive when adding or negative when removing
1291 *
1292 * This function must be called under lru_lock, just before a page is added
1293 * to or just after a page is removed from an lru list.
1294 */
1295 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1296 int zid, int nr_pages)
1297 {
1298 struct mem_cgroup_per_node *mz;
1299 unsigned long *lru_size;
1300 long size;
1301
1302 if (mem_cgroup_disabled())
1303 return;
1304
1305 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1306 lru_size = &mz->lru_zone_size[zid][lru];
1307
1308 if (nr_pages < 0)
1309 *lru_size += nr_pages;
1310
1311 size = *lru_size;
1312 if (WARN_ONCE(size < 0,
1313 "%s(%p, %d, %d): lru_size %ld\n",
1314 __func__, lruvec, lru, nr_pages, size)) {
1315 VM_BUG_ON(1);
1316 *lru_size = 0;
1317 }
1318
1319 if (nr_pages > 0)
1320 *lru_size += nr_pages;
1321 }
1322
1323 /**
1324 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1325 * @memcg: the memory cgroup
1326 *
1327 * Returns the maximum amount of memory @memcg can be charged with, in
1328 * pages.
1329 */
1330 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1331 {
1332 unsigned long margin = 0;
1333 unsigned long count;
1334 unsigned long limit;
1335
1336 count = page_counter_read(&memcg->memory);
1337 limit = READ_ONCE(memcg->memory.max);
1338 if (count < limit)
1339 margin = limit - count;
1340
1341 if (do_memsw_account()) {
1342 count = page_counter_read(&memcg->memsw);
1343 limit = READ_ONCE(memcg->memsw.max);
1344 if (count < limit)
1345 margin = min(margin, limit - count);
1346 else
1347 margin = 0;
1348 }
1349
1350 return margin;
1351 }
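/*
 * Worked example: with memory.max = 100 pages and usage = 70 pages the
 * margin is 30 pages; if memsw accounting is active with memsw.max = 80
 * and memsw usage = 75, the margin shrinks to min(30, 5) = 5 pages.
 */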
1352
1353 struct memory_stat {
1354 const char *name;
1355 unsigned int idx;
1356 };
1357
1358 static const struct memory_stat memory_stats[] = {
1359 { "anon", NR_ANON_MAPPED },
1360 { "file", NR_FILE_PAGES },
1361 { "kernel", MEMCG_KMEM },
1362 { "kernel_stack", NR_KERNEL_STACK_KB },
1363 { "pagetables", NR_PAGETABLE },
1364 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1365 { "percpu", MEMCG_PERCPU_B },
1366 { "sock", MEMCG_SOCK },
1367 { "vmalloc", NR_VMALLOC },
1368 { "shmem", NR_SHMEM },
1369 #ifdef CONFIG_ZSWAP
1370 { "zswap", MEMCG_ZSWAP_B },
1371 { "zswapped", MEMCG_ZSWAPPED },
1372 { "zswap_incomp", MEMCG_ZSWAP_INCOMP },
1373 #endif
1374 { "file_mapped", NR_FILE_MAPPED },
1375 { "file_dirty", NR_FILE_DIRTY },
1376 { "file_writeback", NR_WRITEBACK },
1377 #ifdef CONFIG_SWAP
1378 { "swapcached", NR_SWAPCACHE },
1379 #endif
1380 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1381 { "anon_thp", NR_ANON_THPS },
1382 { "file_thp", NR_FILE_THPS },
1383 { "shmem_thp", NR_SHMEM_THPS },
1384 #endif
1385 { "inactive_anon", NR_INACTIVE_ANON },
1386 { "active_anon", NR_ACTIVE_ANON },
1387 { "inactive_file", NR_INACTIVE_FILE },
1388 { "active_file", NR_ACTIVE_FILE },
1389 { "unevictable", NR_UNEVICTABLE },
1390 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1391 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1392 #ifdef CONFIG_HUGETLB_PAGE
1393 { "hugetlb", NR_HUGETLB },
1394 #endif
1395
1396 /* The memory events */
1397 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1398 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1399 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1400 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1401 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1402 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1403 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1404
1405 { "pgdemote_kswapd", PGDEMOTE_KSWAPD },
1406 { "pgdemote_direct", PGDEMOTE_DIRECT },
1407 { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
1408 { "pgdemote_proactive", PGDEMOTE_PROACTIVE },
1409 { "pgsteal_kswapd", PGSTEAL_KSWAPD },
1410 { "pgsteal_direct", PGSTEAL_DIRECT },
1411 { "pgsteal_khugepaged", PGSTEAL_KHUGEPAGED },
1412 { "pgsteal_proactive", PGSTEAL_PROACTIVE },
1413 { "pgscan_kswapd", PGSCAN_KSWAPD },
1414 { "pgscan_direct", PGSCAN_DIRECT },
1415 { "pgscan_khugepaged", PGSCAN_KHUGEPAGED },
1416 { "pgscan_proactive", PGSCAN_PROACTIVE },
1417 { "pgrefill", PGREFILL },
1418 #ifdef CONFIG_NUMA_BALANCING
1419 { "pgpromote_success", PGPROMOTE_SUCCESS },
1420 #endif
1421 };
1422
1423 /* The actual unit of the state item, not the same as the output unit */
1424 static int memcg_page_state_unit(int item)
1425 {
1426 switch (item) {
1427 case MEMCG_PERCPU_B:
1428 case MEMCG_ZSWAP_B:
1429 case NR_SLAB_RECLAIMABLE_B:
1430 case NR_SLAB_UNRECLAIMABLE_B:
1431 return 1;
1432 case NR_KERNEL_STACK_KB:
1433 return SZ_1K;
1434 default:
1435 return PAGE_SIZE;
1436 }
1437 }
1438
1439 /* Translate stat items to the correct unit for memory.stat output */
1440 static int memcg_page_state_output_unit(int item)
1441 {
1442 /*
1443 * Workingset state is actually in pages, but we export it to userspace
1444 * as a scalar count of events, so special case it here.
1445 *
1446 * Demotion and promotion activities are exported in pages, consistent
1447 * with their global counterparts.
1448 */
1449 switch (item) {
1450 case WORKINGSET_REFAULT_ANON:
1451 case WORKINGSET_REFAULT_FILE:
1452 case WORKINGSET_ACTIVATE_ANON:
1453 case WORKINGSET_ACTIVATE_FILE:
1454 case WORKINGSET_RESTORE_ANON:
1455 case WORKINGSET_RESTORE_FILE:
1456 case WORKINGSET_NODERECLAIM:
1457 case PGDEMOTE_KSWAPD:
1458 case PGDEMOTE_DIRECT:
1459 case PGDEMOTE_KHUGEPAGED:
1460 case PGDEMOTE_PROACTIVE:
1461 case PGSTEAL_KSWAPD:
1462 case PGSTEAL_DIRECT:
1463 case PGSTEAL_KHUGEPAGED:
1464 case PGSTEAL_PROACTIVE:
1465 case PGSCAN_KSWAPD:
1466 case PGSCAN_DIRECT:
1467 case PGSCAN_KHUGEPAGED:
1468 case PGSCAN_PROACTIVE:
1469 case PGREFILL:
1470 #ifdef CONFIG_NUMA_BALANCING
1471 case PGPROMOTE_SUCCESS:
1472 #endif
1473 return 1;
1474 default:
1475 return memcg_page_state_unit(item);
1476 }
1477 }
1478
1479 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1480 {
1481 return memcg_page_state(memcg, item) *
1482 memcg_page_state_output_unit(item);
1483 }
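/*
 * Example of the unit translation: NR_FILE_PAGES is tracked in pages and
 * reported in bytes (multiplied by PAGE_SIZE), NR_SLAB_RECLAIMABLE_B is
 * already tracked in bytes (multiplied by 1), and the workingset/pgscan
 * style items are reported as raw event counts.
 */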
1484
1485 #ifdef CONFIG_MEMCG_V1
1486 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1487 {
1488 return memcg_page_state_local(memcg, item) *
1489 memcg_page_state_output_unit(item);
1490 }
1491 #endif
1492
1493 #ifdef CONFIG_HUGETLB_PAGE
1494 static bool memcg_accounts_hugetlb(void)
1495 {
1496 return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING;
1497 }
1498 #else /* CONFIG_HUGETLB_PAGE */
1499 static bool memcg_accounts_hugetlb(void)
1500 {
1501 return false;
1502 }
1503 #endif /* CONFIG_HUGETLB_PAGE */
1504
1505 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1506 {
1507 int i;
1508
1509 /*
1510 * Provide statistics on the state of the memory subsystem as
1511 * well as cumulative event counters that show past behavior.
1512 *
1513 * This list is ordered following a combination of these gradients:
1514 * 1) generic big picture -> specifics and details
1515 * 2) reflecting userspace activity -> reflecting kernel heuristics
1516 *
1517 * Current memory state:
1518 */
1519 mem_cgroup_flush_stats(memcg);
1520
1521 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1522 u64 size;
1523
1524 #ifdef CONFIG_HUGETLB_PAGE
1525 if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1526 !memcg_accounts_hugetlb())
1527 continue;
1528 #endif
1529 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1530 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1531
1532 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1533 size += memcg_page_state_output(memcg,
1534 NR_SLAB_RECLAIMABLE_B);
1535 seq_buf_printf(s, "slab %llu\n", size);
1536 }
1537 }
1538
1539 /* Accumulated memory events */
1540 seq_buf_printf(s, "pgscan %lu\n",
1541 memcg_page_state(memcg, PGSCAN_KSWAPD) +
1542 memcg_page_state(memcg, PGSCAN_DIRECT) +
1543 memcg_page_state(memcg, PGSCAN_PROACTIVE) +
1544 memcg_page_state(memcg, PGSCAN_KHUGEPAGED));
1545 seq_buf_printf(s, "pgsteal %lu\n",
1546 memcg_page_state(memcg, PGSTEAL_KSWAPD) +
1547 memcg_page_state(memcg, PGSTEAL_DIRECT) +
1548 memcg_page_state(memcg, PGSTEAL_PROACTIVE) +
1549 memcg_page_state(memcg, PGSTEAL_KHUGEPAGED));
1550
1551 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1552 #ifdef CONFIG_MEMCG_V1
1553 if (memcg_vm_event_stat[i] == PGPGIN ||
1554 memcg_vm_event_stat[i] == PGPGOUT)
1555 continue;
1556 #endif
1557 seq_buf_printf(s, "%s %lu\n",
1558 vm_event_name(memcg_vm_event_stat[i]),
1559 memcg_events(memcg, memcg_vm_event_stat[i]));
1560 }
1561 }
1562
1563 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1564 {
1565 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1566 memcg_stat_format(memcg, s);
1567 else
1568 memcg1_stat_format(memcg, s);
1569 if (seq_buf_has_overflowed(s))
1570 pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1571 }
1572
1573 /**
1574 * mem_cgroup_print_oom_context: Print OOM information relevant to
1575 * memory controller.
1576 * @memcg: The memory cgroup that went over limit
1577 * @p: Task that is going to be killed
1578 *
1579 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1580 * enabled
1581 */
1582 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1583 {
1584 rcu_read_lock();
1585
1586 if (memcg) {
1587 pr_cont(",oom_memcg=");
1588 pr_cont_cgroup_path(memcg->css.cgroup);
1589 } else
1590 pr_cont(",global_oom");
1591 if (p) {
1592 pr_cont(",task_memcg=");
1593 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1594 }
1595 rcu_read_unlock();
1596 }
1597
1598 /**
1599 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1600 * memory controller.
1601 * @memcg: The memory cgroup that went over limit
1602 */
1603 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1604 {
1605 /* Use a static buffer, as the caller is holding oom_lock. */
1606 static char buf[SEQ_BUF_SIZE];
1607 struct seq_buf s;
1608 unsigned long memory_failcnt;
1609
1610 lockdep_assert_held(&oom_lock);
1611
1612 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1613 memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
1614 else
1615 memory_failcnt = memcg->memory.failcnt;
1616
1617 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1618 K((u64)page_counter_read(&memcg->memory)),
1619 K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
1620 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1621 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1622 K((u64)page_counter_read(&memcg->swap)),
1623 K((u64)READ_ONCE(memcg->swap.max)),
1624 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
1625 #ifdef CONFIG_MEMCG_V1
1626 else {
1627 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1628 K((u64)page_counter_read(&memcg->memsw)),
1629 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1630 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1631 K((u64)page_counter_read(&memcg->kmem)),
1632 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1633 }
1634 #endif
1635
1636 pr_info("Memory cgroup stats for ");
1637 pr_cont_cgroup_path(memcg->css.cgroup);
1638 pr_cont(":");
1639 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1640 memory_stat_format(memcg, &s);
1641 seq_buf_do_printk(&s, KERN_INFO);
1642 }
1643
1644 /*
1645 * Return the memory (and swap, if configured) limit for a memcg.
1646 */
1647 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1648 {
1649 unsigned long max = READ_ONCE(memcg->memory.max);
1650
1651 if (do_memsw_account()) {
1652 if (mem_cgroup_swappiness(memcg)) {
1653 /* Calculate swap excess capacity from memsw limit */
1654 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1655
1656 max += min(swap, (unsigned long)total_swap_pages);
1657 }
1658 } else {
1659 if (mem_cgroup_swappiness(memcg))
1660 max += min(READ_ONCE(memcg->swap.max),
1661 (unsigned long)total_swap_pages);
1662 }
1663 return max;
1664 }
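/*
 * Worked example with memsw accounting: memory.max = 100 pages and
 * memsw.max = 150 pages give a swap excess of 50 pages, so the result is
 * 100 + min(50, total_swap_pages). With swappiness == 0 the swap part is
 * ignored and the result is just memory.max.
 */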
1665
1666 void __memcg_memory_event(struct mem_cgroup *memcg,
1667 enum memcg_memory_event event, bool allow_spinning)
1668 {
1669 bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1670 event == MEMCG_SWAP_FAIL;
1671
1672 /* For now only MEMCG_MAX can happen with !allow_spinning context. */
1673 VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
1674
1675 atomic_long_inc(&memcg->memory_events_local[event]);
1676 if (!swap_event && allow_spinning)
1677 cgroup_file_notify(&memcg->events_local_file);
1678
1679 do {
1680 atomic_long_inc(&memcg->memory_events[event]);
1681 if (allow_spinning) {
1682 if (swap_event)
1683 cgroup_file_notify(&memcg->swap_events_file);
1684 else
1685 cgroup_file_notify(&memcg->events_file);
1686 }
1687
1688 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1689 break;
1690 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1691 break;
1692 } while ((memcg = parent_mem_cgroup(memcg)) &&
1693 !mem_cgroup_is_root(memcg));
1694 }
1695 EXPORT_SYMBOL_GPL(__memcg_memory_event);
1696
1697 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1698 int order)
1699 {
1700 struct oom_control oc = {
1701 .zonelist = NULL,
1702 .nodemask = NULL,
1703 .memcg = memcg,
1704 .gfp_mask = gfp_mask,
1705 .order = order,
1706 };
1707 bool ret = true;
1708
1709 if (mutex_lock_killable(&oom_lock))
1710 return true;
1711
1712 if (mem_cgroup_margin(memcg) >= (1 << order))
1713 goto unlock;
1714
1715 /*
1716 * A few threads which were not waiting at mutex_lock_killable() can
1717 * fail to bail out. Therefore, check again after holding oom_lock.
1718 */
1719 ret = out_of_memory(&oc);
1720
1721 unlock:
1722 mutex_unlock(&oom_lock);
1723 return ret;
1724 }
1725
1726 /*
1727 * Returns true if one or more processes were successfully killed, though in
1728 * some corner cases it can return true even without killing any process.
1729 */
1730 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1731 {
1732 bool locked, ret;
1733
1734 if (order > PAGE_ALLOC_COSTLY_ORDER)
1735 return false;
1736
1737 memcg_memory_event(memcg, MEMCG_OOM);
1738
1739 if (!memcg1_oom_prepare(memcg, &locked))
1740 return false;
1741
1742 ret = mem_cgroup_out_of_memory(memcg, mask, order);
1743
1744 memcg1_oom_finish(memcg, locked);
1745
1746 return ret;
1747 }
1748
1749 /**
1750 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1751 * @victim: task to be killed by the OOM killer
1752 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1753 *
1754 * Returns a pointer to a memory cgroup, which has to be cleaned up
1755 * by killing all belonging OOM-killable tasks.
1756 *
1757 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1758 */
1759 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1760 struct mem_cgroup *oom_domain)
1761 {
1762 struct mem_cgroup *oom_group = NULL;
1763 struct mem_cgroup *memcg;
1764
1765 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1766 return NULL;
1767
1768 if (!oom_domain)
1769 oom_domain = root_mem_cgroup;
1770
1771 rcu_read_lock();
1772
1773 memcg = mem_cgroup_from_task(victim);
1774 if (mem_cgroup_is_root(memcg))
1775 goto out;
1776
1777 /*
1778 * If the victim task has been asynchronously moved to a different
1779 * memory cgroup, we might end up killing tasks outside oom_domain.
1780 * In this case it's better to ignore memory.group.oom.
1781 */
1782 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1783 goto out;
1784
1785 /*
1786 * Traverse the memory cgroup hierarchy from the victim task's
1787 * cgroup up to the OOMing cgroup (or root) to find the
1788 * highest-level memory cgroup with oom.group set.
1789 */
1790 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1791 if (READ_ONCE(memcg->oom_group))
1792 oom_group = memcg;
1793
1794 if (memcg == oom_domain)
1795 break;
1796 }
1797
1798 if (oom_group)
1799 css_get(&oom_group->css);
1800 out:
1801 rcu_read_unlock();
1802
1803 return oom_group;
1804 }
1805
1806 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1807 {
1808 pr_info("Tasks in ");
1809 pr_cont_cgroup_path(memcg->css.cgroup);
1810 pr_cont(" are going to be killed due to memory.oom.group set\n");
1811 }
1812
1813 /*
1814 * The value of NR_MEMCG_STOCK is selected to keep the cached memcgs and their
1815 * nr_pages in a single cacheline. This may change in future.
1816 */
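/*
 * Rough size check, assuming 64-bit pointers and 64-byte cachelines (neither
 * is guaranteed on every config): cached[] is 7 * 8 = 56 bytes and
 * nr_pages[] is 7 * 1 = 7 bytes, 63 bytes in total, so both arrays of the
 * struct below can share a single cacheline.
 */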
1817 #define NR_MEMCG_STOCK 7
1818 #define FLUSHING_CACHED_CHARGE 0
1819 struct memcg_stock_pcp {
1820 local_trylock_t lock;
1821 uint8_t nr_pages[NR_MEMCG_STOCK];
1822 struct mem_cgroup *cached[NR_MEMCG_STOCK];
1823
1824 struct work_struct work;
1825 unsigned long flags;
1826 };
1827
1828 static DEFINE_PER_CPU_ALIGNED(struct memcg_stock_pcp, memcg_stock) = {
1829 .lock = INIT_LOCAL_TRYLOCK(lock),
1830 };
1831
1832 struct obj_stock_pcp {
1833 local_trylock_t lock;
1834 unsigned int nr_bytes;
1835 struct obj_cgroup *cached_objcg;
1836 struct pglist_data *cached_pgdat;
1837 int nr_slab_reclaimable_b;
1838 int nr_slab_unreclaimable_b;
1839
1840 struct work_struct work;
1841 unsigned long flags;
1842 };
1843
1844 static DEFINE_PER_CPU_ALIGNED(struct obj_stock_pcp, obj_stock) = {
1845 .lock = INIT_LOCAL_TRYLOCK(lock),
1846 };
1847
1848 static DEFINE_MUTEX(percpu_charge_mutex);
1849
1850 static void drain_obj_stock(struct obj_stock_pcp *stock);
1851 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
1852 struct mem_cgroup *root_memcg);
1853
1854 /**
1855 * consume_stock: Try to consume stocked charge on this cpu.
1856 * @memcg: memcg to consume from.
1857 * @nr_pages: how many pages to charge.
1858 *
1859 * Consume the cached charge if enough nr_pages are present, otherwise return
1860 * failure. Also return failure for a charge request larger than
1861 * MEMCG_CHARGE_BATCH or if the local lock is already taken.
1862 *
1863 * returns true if successful, false otherwise.
1864 */
1865 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1866 {
1867 struct memcg_stock_pcp *stock;
1868 uint8_t stock_pages;
1869 bool ret = false;
1870 int i;
1871
1872 if (nr_pages > MEMCG_CHARGE_BATCH ||
1873 !local_trylock(&memcg_stock.lock))
1874 return ret;
1875
1876 stock = this_cpu_ptr(&memcg_stock);
1877
1878 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1879 if (memcg != READ_ONCE(stock->cached[i]))
1880 continue;
1881
1882 stock_pages = READ_ONCE(stock->nr_pages[i]);
1883 if (stock_pages >= nr_pages) {
1884 WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages);
1885 ret = true;
1886 }
1887 break;
1888 }
1889
1890 local_unlock(&memcg_stock.lock);
1891
1892 return ret;
1893 }
1894
1895 static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
1896 {
1897 page_counter_uncharge(&memcg->memory, nr_pages);
1898 if (do_memsw_account())
1899 page_counter_uncharge(&memcg->memsw, nr_pages);
1900 }
1901
1902 /*
1903 * Return the stock cached in the percpu slot to the counters and reset the cached information.
1904 */
1905 static void drain_stock(struct memcg_stock_pcp *stock, int i)
1906 {
1907 struct mem_cgroup *old = READ_ONCE(stock->cached[i]);
1908 uint8_t stock_pages;
1909
1910 if (!old)
1911 return;
1912
1913 stock_pages = READ_ONCE(stock->nr_pages[i]);
1914 if (stock_pages) {
1915 memcg_uncharge(old, stock_pages);
1916 WRITE_ONCE(stock->nr_pages[i], 0);
1917 }
1918
1919 css_put(&old->css);
1920 WRITE_ONCE(stock->cached[i], NULL);
1921 }
1922
1923 static void drain_stock_fully(struct memcg_stock_pcp *stock)
1924 {
1925 int i;
1926
1927 for (i = 0; i < NR_MEMCG_STOCK; ++i)
1928 drain_stock(stock, i);
1929 }
1930
1931 static void drain_local_memcg_stock(struct work_struct *dummy)
1932 {
1933 struct memcg_stock_pcp *stock;
1934
1935 if (WARN_ONCE(!in_task(), "drain in non-task context"))
1936 return;
1937
1938 local_lock(&memcg_stock.lock);
1939
1940 stock = this_cpu_ptr(&memcg_stock);
1941 drain_stock_fully(stock);
1942 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1943
1944 local_unlock(&memcg_stock.lock);
1945 }
1946
1947 static void drain_local_obj_stock(struct work_struct *dummy)
1948 {
1949 struct obj_stock_pcp *stock;
1950
1951 if (WARN_ONCE(!in_task(), "drain in non-task context"))
1952 return;
1953
1954 local_lock(&obj_stock.lock);
1955
1956 stock = this_cpu_ptr(&obj_stock);
1957 drain_obj_stock(stock);
1958 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1959
1960 local_unlock(&obj_stock.lock);
1961 }
1962
1963 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1964 {
1965 struct memcg_stock_pcp *stock;
1966 struct mem_cgroup *cached;
1967 uint8_t stock_pages;
1968 bool success = false;
1969 int empty_slot = -1;
1970 int i;
1971
1972 /*
1973 	 * For now, limit MEMCG_CHARGE_BATCH to 127 or less. If we ever decide
1974 	 * to increase it beyond 127, we will need more careful handling of
1975 	 * nr_pages[] in struct memcg_stock_pcp.
1976 */
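	/*
	 * An illustrative consequence of that bound (not enforced beyond the
	 * BUILD_BUG_ON below): a slot can persistently hold at most
	 * MEMCG_CHARGE_BATCH <= 127 pages, so even a worst-case transient sum
	 * of 127 + 127 = 254 pages still fits in a uint8_t before the
	 * oversized slot is drained.
	 */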
1977 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S8_MAX);
1978
1979 VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
1980
1981 if (nr_pages > MEMCG_CHARGE_BATCH ||
1982 !local_trylock(&memcg_stock.lock)) {
1983 /*
1984 * In case of larger than batch refill or unlikely failure to
1985 * lock the percpu memcg_stock.lock, uncharge memcg directly.
1986 */
1987 memcg_uncharge(memcg, nr_pages);
1988 return;
1989 }
1990
1991 stock = this_cpu_ptr(&memcg_stock);
1992 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
1993 cached = READ_ONCE(stock->cached[i]);
1994 if (!cached && empty_slot == -1)
1995 empty_slot = i;
1996 if (memcg == READ_ONCE(stock->cached[i])) {
1997 stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages;
1998 WRITE_ONCE(stock->nr_pages[i], stock_pages);
1999 if (stock_pages > MEMCG_CHARGE_BATCH)
2000 drain_stock(stock, i);
2001 success = true;
2002 break;
2003 }
2004 }
2005
2006 if (!success) {
2007 i = empty_slot;
2008 if (i == -1) {
2009 i = get_random_u32_below(NR_MEMCG_STOCK);
2010 drain_stock(stock, i);
2011 }
2012 css_get(&memcg->css);
2013 WRITE_ONCE(stock->cached[i], memcg);
2014 WRITE_ONCE(stock->nr_pages[i], nr_pages);
2015 }
2016
2017 local_unlock(&memcg_stock.lock);
2018 }
2019
2020 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
2021 struct mem_cgroup *root_memcg)
2022 {
2023 struct mem_cgroup *memcg;
2024 bool flush = false;
2025 int i;
2026
2027 rcu_read_lock();
2028 for (i = 0; i < NR_MEMCG_STOCK; ++i) {
2029 memcg = READ_ONCE(stock->cached[i]);
2030 if (!memcg)
2031 continue;
2032
2033 if (READ_ONCE(stock->nr_pages[i]) &&
2034 mem_cgroup_is_descendant(memcg, root_memcg)) {
2035 flush = true;
2036 break;
2037 }
2038 }
2039 rcu_read_unlock();
2040 return flush;
2041 }
2042
2043 static void schedule_drain_work(int cpu, struct work_struct *work)
2044 {
2045 /*
2046 * Protect housekeeping cpumask read and work enqueue together
2047 * in the same RCU critical section so that later cpuset isolated
2048 	 * partition update only needs to wait for an RCU GP and flush the
2049 * pending work on newly isolated CPUs.
2050 */
2051 guard(rcu)();
2052 if (!cpu_is_isolated(cpu))
2053 queue_work_on(cpu, memcg_wq, work);
2054 }
2055
2056 /*
2057 * Drains all per-CPU charge caches for the given root_memcg, i.e. the subtree
2058 * of the hierarchy under it.
2059 */
2060 void drain_all_stock(struct mem_cgroup *root_memcg)
2061 {
2062 int cpu, curcpu;
2063
2064 	/* If someone's already draining, avoid adding more workers. */
2065 if (!mutex_trylock(&percpu_charge_mutex))
2066 return;
2067 /*
2068 * Notify other cpus that system-wide "drain" is running
2069 * We do not care about races with the cpu hotplug because cpu down
2070 * as well as workers from this path always operate on the local
2071 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2072 */
2073 migrate_disable();
2074 curcpu = smp_processor_id();
2075 for_each_online_cpu(cpu) {
2076 struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
2077 struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);
2078
2079 if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) &&
2080 is_memcg_drain_needed(memcg_st, root_memcg) &&
2081 !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2082 &memcg_st->flags)) {
2083 if (cpu == curcpu)
2084 drain_local_memcg_stock(&memcg_st->work);
2085 else
2086 schedule_drain_work(cpu, &memcg_st->work);
2087 }
2088
2089 if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
2090 obj_stock_flush_required(obj_st, root_memcg) &&
2091 !test_and_set_bit(FLUSHING_CACHED_CHARGE,
2092 &obj_st->flags)) {
2093 if (cpu == curcpu)
2094 drain_local_obj_stock(&obj_st->work);
2095 else
2096 schedule_drain_work(cpu, &obj_st->work);
2097 }
2098 }
2099 migrate_enable();
2100 mutex_unlock(&percpu_charge_mutex);
2101 }
2102
2103 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2104 {
2105 /* no need for the local lock */
2106 drain_obj_stock(&per_cpu(obj_stock, cpu));
2107 drain_stock_fully(&per_cpu(memcg_stock, cpu));
2108
2109 return 0;
2110 }
2111
2112 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2113 unsigned int nr_pages,
2114 gfp_t gfp_mask)
2115 {
2116 unsigned long nr_reclaimed = 0;
2117
2118 do {
2119 unsigned long pflags;
2120
2121 if (page_counter_read(&memcg->memory) <=
2122 READ_ONCE(memcg->memory.high))
2123 continue;
2124
2125 memcg_memory_event(memcg, MEMCG_HIGH);
2126
2127 psi_memstall_enter(&pflags);
2128 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2129 gfp_mask,
2130 MEMCG_RECLAIM_MAY_SWAP,
2131 NULL);
2132 psi_memstall_leave(&pflags);
2133 } while ((memcg = parent_mem_cgroup(memcg)) &&
2134 !mem_cgroup_is_root(memcg));
2135
2136 return nr_reclaimed;
2137 }
2138
2139 static void high_work_func(struct work_struct *work)
2140 {
2141 struct mem_cgroup *memcg;
2142
2143 memcg = container_of(work, struct mem_cgroup, high_work);
2144 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2145 }
2146
2147 /*
2148 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2149 * long enough to cause a significant slowdown in most cases, while still
2150 * allowing diagnostics and tracing to proceed without becoming stuck.
2151 */
2152 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2153
2154 /*
2155 * When calculating the delay, we use these on either side of the exponentiation
2156 * to maintain precision and scale to a reasonable number of jiffies (see the
2157 * table below).
2158 *
2159 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2160 * overage ratio to a delay.
2161 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2162 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2163 * to produce a reasonable delay curve.
2164 *
2165 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2166 * reasonable delay curve compared to precision-adjusted overage, not
2167 * penalising heavily at first, but still making sure that growth beyond the
2168 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2169 * example, with a high of 100 megabytes:
2170 *
2171 * +-------+------------------------+
2172 * | usage | time to allocate in ms |
2173 * +-------+------------------------+
2174 * | 100M | 0 |
2175 * | 101M | 6 |
2176 * | 102M | 25 |
2177 * | 103M | 57 |
2178 * | 104M | 102 |
2179 * | 105M | 159 |
2180 * | 106M | 230 |
2181 * | 107M | 313 |
2182 * | 108M | 409 |
2183 * | 109M | 518 |
2184 * | 110M | 639 |
2185 * | 111M | 774 |
2186 * | 112M | 921 |
2187 * | 113M | 1081 |
2188 * | 114M | 1254 |
2189 * | 115M | 1439 |
2190 * | 116M | 1638 |
2191 * | 117M | 1849 |
2192 * | 118M | 2000 |
2193 * | 119M | 2000 |
2194 * | 120M | 2000 |
2195 * +-------+------------------------+
2196 */
2197 #define MEMCG_DELAY_PRECISION_SHIFT 20
2198 #define MEMCG_DELAY_SCALING_SHIFT 14
2199
2200 static u64 calculate_overage(unsigned long usage, unsigned long high)
2201 {
2202 u64 overage;
2203
2204 if (usage <= high)
2205 return 0;
2206
2207 /*
2208 * Prevent division by 0 in overage calculation by acting as if
2209 * it was a threshold of 1 page
2210 */
2211 high = max(high, 1UL);
2212
2213 overage = usage - high;
2214 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2215 return div64_u64(overage, high);
2216 }
2217
2218 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2219 {
2220 u64 overage, max_overage = 0;
2221
2222 do {
2223 overage = calculate_overage(page_counter_read(&memcg->memory),
2224 READ_ONCE(memcg->memory.high));
2225 max_overage = max(overage, max_overage);
2226 } while ((memcg = parent_mem_cgroup(memcg)) &&
2227 !mem_cgroup_is_root(memcg));
2228
2229 return max_overage;
2230 }
2231
2232 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2233 {
2234 u64 overage, max_overage = 0;
2235
2236 do {
2237 overage = calculate_overage(page_counter_read(&memcg->swap),
2238 READ_ONCE(memcg->swap.high));
2239 if (overage)
2240 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2241 max_overage = max(overage, max_overage);
2242 } while ((memcg = parent_mem_cgroup(memcg)) &&
2243 !mem_cgroup_is_root(memcg));
2244
2245 return max_overage;
2246 }
2247
2248 /*
2249 * Get the number of jiffies that we should penalise a mischievous cgroup which
2250 * is exceeding its memory.high by checking both it and its ancestors.
2251 */
2252 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2253 unsigned int nr_pages,
2254 u64 max_overage)
2255 {
2256 unsigned long penalty_jiffies;
2257
2258 if (!max_overage)
2259 return 0;
2260
2261 /*
2262 * We use overage compared to memory.high to calculate the number of
2263 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2264 * fairly lenient on small overages, and increasingly harsh when the
2265 * memcg in question makes it clear that it has no intention of stopping
2266 * its crazy behaviour, so we exponentially increase the delay based on
2267 * overage amount.
2268 */
2269 penalty_jiffies = max_overage * max_overage * HZ;
2270 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2271 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2272
2273 /*
2274 * Factor in the task's own contribution to the overage, such that four
2275 * N-sized allocations are throttled approximately the same as one
2276 * 4N-sized allocation.
2277 *
2278 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2279 	 * larger the current charge batch is than that.
2280 */
2281 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2282 }
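
/*
 * Worked example for the table above (illustrative only; assumes 4KiB pages
 * and HZ == 1000, neither of which is guaranteed): with memory.high = 100M
 * and usage = 104M, calculate_overage() returns
 * ((26624 - 25600) << 20) / 25600 ~= 41943, and calculate_high_delay()
 * computes 41943^2 * HZ >> (20 + 14) ~= 102 jiffies for a full
 * MEMCG_CHARGE_BATCH worth of pages, i.e. roughly the 102ms row in the
 * table; the caller then clamps the sum to MEMCG_MAX_HIGH_DELAY_JIFFIES.
 */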
2283
2284 /*
2285 * Reclaims memory over the high limit. Called directly from
2286 * try_charge() (context permitting), as well as from the userland
2287 * return path where reclaim is always able to block.
2288 */
2289 void __mem_cgroup_handle_over_high(gfp_t gfp_mask)
2290 {
2291 unsigned long penalty_jiffies;
2292 unsigned long pflags;
2293 unsigned long nr_reclaimed;
2294 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2295 int nr_retries = MAX_RECLAIM_RETRIES;
2296 struct mem_cgroup *memcg;
2297 bool in_retry = false;
2298
2299 memcg = get_mem_cgroup_from_mm(current->mm);
2300 current->memcg_nr_pages_over_high = 0;
2301
2302 retry_reclaim:
2303 /*
2304 * Bail if the task is already exiting. Unlike memory.max,
2305 * memory.high enforcement isn't as strict, and there is no
2306 * OOM killer involved, which means the excess could already
2307 * be much bigger (and still growing) than it could for
2308 * memory.max; the dying task could get stuck in fruitless
2309 * reclaim for a long time, which isn't desirable.
2310 */
2311 if (task_is_dying())
2312 goto out;
2313
2314 /*
2315 * The allocating task should reclaim at least the batch size, but for
2316 * subsequent retries we only want to do what's necessary to prevent oom
2317 * or breaching resource isolation.
2318 *
2319 * This is distinct from memory.max or page allocator behaviour because
2320 * memory.high is currently batched, whereas memory.max and the page
2321 * allocator run every time an allocation is made.
2322 */
2323 nr_reclaimed = reclaim_high(memcg,
2324 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2325 gfp_mask);
2326
2327 /*
2328 * memory.high is breached and reclaim is unable to keep up. Throttle
2329 * allocators proactively to slow down excessive growth.
2330 */
2331 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2332 mem_find_max_overage(memcg));
2333
2334 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2335 swap_find_max_overage(memcg));
2336
2337 /*
2338 * Clamp the max delay per usermode return so as to still keep the
2339 * application moving forwards and also permit diagnostics, albeit
2340 * extremely slowly.
2341 */
2342 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2343
2344 /*
2345 * Don't sleep if the amount of jiffies this memcg owes us is so low
2346 * that it's not even worth doing, in an attempt to be nice to those who
2347 * go only a small amount over their memory.high value and maybe haven't
2348 * been aggressively reclaimed enough yet.
2349 */
2350 if (penalty_jiffies <= HZ / 100)
2351 goto out;
2352
2353 /*
2354 * If reclaim is making forward progress but we're still over
2355 * memory.high, we want to encourage that rather than doing allocator
2356 * throttling.
2357 */
2358 if (nr_reclaimed || nr_retries--) {
2359 in_retry = true;
2360 goto retry_reclaim;
2361 }
2362
2363 /*
2364 * Reclaim didn't manage to push usage below the limit, slow
2365 * this allocating task down.
2366 *
2367 * If we exit early, we're guaranteed to die (since
2368 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2369 * need to account for any ill-begotten jiffies to pay them off later.
2370 */
2371 psi_memstall_enter(&pflags);
2372 schedule_timeout_killable(penalty_jiffies);
2373 psi_memstall_leave(&pflags);
2374
2375 out:
2376 css_put(&memcg->css);
2377 }
2378
2379 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2380 unsigned int nr_pages)
2381 {
2382 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2383 int nr_retries = MAX_RECLAIM_RETRIES;
2384 struct mem_cgroup *mem_over_limit;
2385 struct page_counter *counter;
2386 unsigned long nr_reclaimed;
2387 bool passed_oom = false;
2388 unsigned int reclaim_options;
2389 bool drained = false;
2390 bool raised_max_event = false;
2391 unsigned long pflags;
2392 bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
2393
2394 retry:
2395 if (consume_stock(memcg, nr_pages))
2396 return 0;
2397
2398 if (!allow_spinning)
2399 /* Avoid the refill and flush of the older stock */
2400 batch = nr_pages;
2401
2402 reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2403 if (!do_memsw_account() ||
2404 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2405 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2406 goto done_restock;
2407 if (do_memsw_account())
2408 page_counter_uncharge(&memcg->memsw, batch);
2409 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2410 } else {
2411 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2412 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2413 }
2414
2415 if (batch > nr_pages) {
2416 batch = nr_pages;
2417 goto retry;
2418 }
2419
2420 /*
2421 * Prevent unbounded recursion when reclaim operations need to
2422 * allocate memory. This might exceed the limits temporarily,
2423 * but we prefer facilitating memory reclaim and getting back
2424 * under the limit over triggering OOM kills in these cases.
2425 */
2426 if (unlikely(current->flags & PF_MEMALLOC))
2427 goto force;
2428
2429 if (unlikely(task_in_memcg_oom(current)))
2430 goto nomem;
2431
2432 if (!gfpflags_allow_blocking(gfp_mask))
2433 goto nomem;
2434
2435 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
2436 raised_max_event = true;
2437
2438 psi_memstall_enter(&pflags);
2439 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2440 gfp_mask, reclaim_options, NULL);
2441 psi_memstall_leave(&pflags);
2442
2443 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2444 goto retry;
2445
2446 if (!drained) {
2447 drain_all_stock(mem_over_limit);
2448 drained = true;
2449 goto retry;
2450 }
2451
2452 if (gfp_mask & __GFP_NORETRY)
2453 goto nomem;
2454 /*
2455 * Even though the limit is exceeded at this point, reclaim
2456 * may have been able to free some pages. Retry the charge
2457 * before killing the task.
2458 *
2459 * Only for regular pages, though: huge pages are rather
2460 * unlikely to succeed so close to the limit, and we fall back
2461 * to regular pages anyway in case of failure.
2462 */
2463 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2464 goto retry;
2465
2466 if (nr_retries--)
2467 goto retry;
2468
2469 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2470 goto nomem;
2471
2472 /* Avoid endless loop for tasks bypassed by the oom killer */
2473 if (passed_oom && task_is_dying())
2474 goto nomem;
2475
2476 /*
2477 	 * Keep retrying as long as the memcg oom killer is able to make
2478 	 * forward progress, or bypass the charge if the oom killer
2479 * couldn't make any progress.
2480 */
2481 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2482 get_order(nr_pages * PAGE_SIZE))) {
2483 passed_oom = true;
2484 nr_retries = MAX_RECLAIM_RETRIES;
2485 goto retry;
2486 }
2487 nomem:
2488 /*
2489 * Memcg doesn't have a dedicated reserve for atomic
2490 * allocations. But like the global atomic pool, we need to
2491 * put the burden of reclaim on regular allocation requests
2492 * and let these go through as privileged allocations.
2493 */
2494 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2495 return -ENOMEM;
2496 force:
2497 /*
2498 * If the allocation has to be enforced, don't forget to raise
2499 * a MEMCG_MAX event.
2500 */
2501 if (!raised_max_event)
2502 __memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
2503
2504 /*
2505 * The allocation either can't fail or will lead to more memory
2506 	 * being freed very soon. Allow memory usage to go over the limit
2507 * temporarily by force charging it.
2508 */
2509 page_counter_charge(&memcg->memory, nr_pages);
2510 if (do_memsw_account())
2511 page_counter_charge(&memcg->memsw, nr_pages);
2512
2513 return 0;
2514
2515 done_restock:
2516 if (batch > nr_pages)
2517 refill_stock(memcg, batch - nr_pages);
2518
2519 /*
2520 * If the hierarchy is above the normal consumption range, schedule
2521 * reclaim on returning to userland. We can perform reclaim here
2522 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2523 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2524 * not recorded as it most likely matches current's and won't
2525 * change in the meantime. As high limit is checked again before
2526 * reclaim, the cost of mismatch is negligible.
2527 */
2528 do {
2529 bool mem_high, swap_high;
2530
2531 mem_high = page_counter_read(&memcg->memory) >
2532 READ_ONCE(memcg->memory.high);
2533 swap_high = page_counter_read(&memcg->swap) >
2534 READ_ONCE(memcg->swap.high);
2535
2536 /* Don't bother a random interrupted task */
2537 if (!in_task()) {
2538 if (mem_high) {
2539 schedule_work(&memcg->high_work);
2540 break;
2541 }
2542 continue;
2543 }
2544
2545 if (mem_high || swap_high) {
2546 /*
2547 * The allocating tasks in this cgroup will need to do
2548 * reclaim or be throttled to prevent further growth
2549 * of the memory or swap footprints.
2550 *
2551 * Target some best-effort fairness between the tasks,
2552 * and distribute reclaim work and delay penalties
2553 * based on how much each task is actually allocating.
2554 */
2555 current->memcg_nr_pages_over_high += batch;
2556 set_notify_resume(current);
2557 break;
2558 }
2559 } while ((memcg = parent_mem_cgroup(memcg)));
2560
2561 /*
2562 * Reclaim is set up above to be called from the userland
2563 * return path. But also attempt synchronous reclaim to avoid
2564 * excessive overrun while the task is still inside the
2565 * kernel. If this is successful, the return path will see it
2566 * when it rechecks the overage and simply bail out.
2567 */
2568 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2569 !(current->flags & PF_MEMALLOC) &&
2570 gfpflags_allow_blocking(gfp_mask))
2571 __mem_cgroup_handle_over_high(gfp_mask);
2572 return 0;
2573 }
2574
2575 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2576 unsigned int nr_pages)
2577 {
2578 if (mem_cgroup_is_root(memcg))
2579 return 0;
2580
2581 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2582 }
2583
2584 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2585 {
2586 VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2587 /*
2588 * Any of the following ensures page's memcg stability:
2589 *
2590 * - the page lock
2591 * - LRU isolation
2592 * - exclusive reference
2593 */
2594 folio->memcg_data = (unsigned long)memcg;
2595 }
2596
2597 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
2598 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2599 struct pglist_data *pgdat,
2600 enum node_stat_item idx, int nr)
2601 {
2602 struct lruvec *lruvec;
2603
2604 if (likely(!in_nmi())) {
2605 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2606 mod_memcg_lruvec_state(lruvec, idx, nr);
2607 } else {
2608 struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id];
2609
2610 /* preemption is disabled in_nmi(). */
2611 css_rstat_updated(&memcg->css, smp_processor_id());
2612 if (idx == NR_SLAB_RECLAIMABLE_B)
2613 atomic_add(nr, &pn->slab_reclaimable);
2614 else
2615 atomic_add(nr, &pn->slab_unreclaimable);
2616 }
2617 }
2618 #else
2619 static inline void account_slab_nmi_safe(struct mem_cgroup *memcg,
2620 struct pglist_data *pgdat,
2621 enum node_stat_item idx, int nr)
2622 {
2623 struct lruvec *lruvec;
2624
2625 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2626 mod_memcg_lruvec_state(lruvec, idx, nr);
2627 }
2628 #endif
2629
2630 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2631 struct pglist_data *pgdat,
2632 enum node_stat_item idx, int nr)
2633 {
2634 struct mem_cgroup *memcg;
2635
2636 rcu_read_lock();
2637 memcg = obj_cgroup_memcg(objcg);
2638 account_slab_nmi_safe(memcg, pgdat, idx, nr);
2639 rcu_read_unlock();
2640 }
2641
2642 static __always_inline
2643 struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
2644 {
2645 /*
2646 * Slab objects are accounted individually, not per-page.
2647 * Memcg membership data for each individual object is saved in
2648 * slab->obj_exts.
2649 */
2650 unsigned long obj_exts;
2651 struct slabobj_ext *obj_ext;
2652 unsigned int off;
2653
2654 obj_exts = slab_obj_exts(slab);
2655 if (!obj_exts)
2656 return NULL;
2657
2658 get_slab_obj_exts(obj_exts);
2659 off = obj_to_index(slab->slab_cache, slab, p);
2660 obj_ext = slab_obj_ext(slab, obj_exts, off);
2661 if (obj_ext->objcg) {
2662 struct obj_cgroup *objcg = obj_ext->objcg;
2663
2664 put_slab_obj_exts(obj_exts);
2665 return obj_cgroup_memcg(objcg);
2666 }
2667 put_slab_obj_exts(obj_exts);
2668
2669 return NULL;
2670 }
2671
2672 /*
2673 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2674 * It is not suitable for objects allocated using vmalloc().
2675 *
2676 * A passed kernel object must be a slab object or a generic kernel page.
2677 *
2678 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2679 * cgroup_mutex, etc.
2680 */
2681 struct mem_cgroup *mem_cgroup_from_virt(void *p)
2682 {
2683 struct slab *slab;
2684
2685 if (mem_cgroup_disabled())
2686 return NULL;
2687
2688 slab = virt_to_slab(p);
2689 if (slab)
2690 return mem_cgroup_from_obj_slab(slab, p);
2691 return folio_memcg_check(virt_to_folio(p));
2692 }
2693
2694 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2695 {
2696 struct obj_cgroup *objcg = NULL;
2697
2698 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2699 objcg = rcu_dereference(memcg->objcg);
2700 if (likely(objcg && obj_cgroup_tryget(objcg)))
2701 break;
2702 objcg = NULL;
2703 }
2704 return objcg;
2705 }
2706
2707 static struct obj_cgroup *current_objcg_update(void)
2708 {
2709 struct mem_cgroup *memcg;
2710 struct obj_cgroup *old, *objcg = NULL;
2711
2712 do {
2713 /* Atomically drop the update bit. */
2714 		old = xchg(&current->objcg, NULL);
2715 if (old) {
2716 old = (struct obj_cgroup *)
2717 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2718 obj_cgroup_put(old);
2719
2720 old = NULL;
2721 }
2722
2723 /* If new objcg is NULL, no reason for the second atomic update. */
2724 if (!current->mm || (current->flags & PF_KTHREAD))
2725 return NULL;
2726
2727 /*
2728 * Release the objcg pointer from the previous iteration,
2729 		 * if try_cmpxchg() below fails.
2730 */
2731 if (unlikely(objcg)) {
2732 obj_cgroup_put(objcg);
2733 objcg = NULL;
2734 }
2735
2736 /*
2737 * Obtain the new objcg pointer. The current task can be
2738 * asynchronously moved to another memcg and the previous
2739 * memcg can be offlined. So let's get the memcg pointer
2740 		 * and try to get a reference to the objcg under an rcu read lock.
2741 */
2742
2743 rcu_read_lock();
2744 memcg = mem_cgroup_from_task(current);
2745 objcg = __get_obj_cgroup_from_memcg(memcg);
2746 rcu_read_unlock();
2747
2748 /*
2749 		 * Try to set up a new objcg pointer atomically. If it
2750 * fails, it means the update flag was set concurrently, so
2751 * the whole procedure should be repeated.
2752 */
2753 	} while (!try_cmpxchg(&current->objcg, &old, objcg));
2754
2755 return objcg;
2756 }
2757
2758 __always_inline struct obj_cgroup *current_obj_cgroup(void)
2759 {
2760 struct mem_cgroup *memcg;
2761 struct obj_cgroup *objcg;
2762
2763 if (IS_ENABLED(CONFIG_MEMCG_NMI_UNSAFE) && in_nmi())
2764 return NULL;
2765
2766 if (in_task()) {
2767 memcg = current->active_memcg;
2768 if (unlikely(memcg))
2769 goto from_memcg;
2770
2771 objcg = READ_ONCE(current->objcg);
2772 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2773 objcg = current_objcg_update();
2774 /*
2775 * Objcg reference is kept by the task, so it's safe
2776 		 * for the current task to use the objcg.
2777 */
2778 return objcg;
2779 }
2780
2781 memcg = this_cpu_read(int_active_memcg);
2782 if (unlikely(memcg))
2783 goto from_memcg;
2784
2785 return NULL;
2786
2787 from_memcg:
2788 objcg = NULL;
2789 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2790 /*
2791 * Memcg pointer is protected by scope (see set_active_memcg())
2792 * and is pinning the corresponding objcg, so objcg can't go
2793 * away and can be used within the scope without any additional
2794 * protection.
2795 */
2796 objcg = rcu_dereference_check(memcg->objcg, 1);
2797 if (likely(objcg))
2798 break;
2799 }
2800
2801 return objcg;
2802 }
2803
2804 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
2805 {
2806 struct obj_cgroup *objcg;
2807
2808 if (!memcg_kmem_online())
2809 return NULL;
2810
2811 if (folio_memcg_kmem(folio)) {
2812 objcg = __folio_objcg(folio);
2813 obj_cgroup_get(objcg);
2814 } else {
2815 struct mem_cgroup *memcg;
2816
2817 rcu_read_lock();
2818 memcg = __folio_memcg(folio);
2819 if (memcg)
2820 objcg = __get_obj_cgroup_from_memcg(memcg);
2821 else
2822 objcg = NULL;
2823 rcu_read_unlock();
2824 }
2825 return objcg;
2826 }
2827
2828 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
2829 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2830 {
2831 if (likely(!in_nmi())) {
2832 mod_memcg_state(memcg, MEMCG_KMEM, val);
2833 } else {
2834 /* preemption is disabled in_nmi(). */
2835 css_rstat_updated(&memcg->css, smp_processor_id());
2836 atomic_add(val, &memcg->kmem_stat);
2837 }
2838 }
2839 #else
2840 static inline void account_kmem_nmi_safe(struct mem_cgroup *memcg, int val)
2841 {
2842 mod_memcg_state(memcg, MEMCG_KMEM, val);
2843 }
2844 #endif
2845
2846 /*
2847  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2848 * @objcg: object cgroup to uncharge
2849 * @nr_pages: number of pages to uncharge
2850 */
2851 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2852 unsigned int nr_pages)
2853 {
2854 struct mem_cgroup *memcg;
2855
2856 memcg = get_mem_cgroup_from_objcg(objcg);
2857
2858 account_kmem_nmi_safe(memcg, -nr_pages);
2859 memcg1_account_kmem(memcg, -nr_pages);
2860 if (!mem_cgroup_is_root(memcg))
2861 refill_stock(memcg, nr_pages);
2862
2863 css_put(&memcg->css);
2864 }
2865
2866 /*
2867  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2868 * @objcg: object cgroup to charge
2869 * @gfp: reclaim mode
2870 * @nr_pages: number of pages to charge
2871 *
2872 * Returns 0 on success, an error code on failure.
2873 */
2874 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2875 unsigned int nr_pages)
2876 {
2877 struct mem_cgroup *memcg;
2878 int ret;
2879
2880 memcg = get_mem_cgroup_from_objcg(objcg);
2881
2882 ret = try_charge_memcg(memcg, gfp, nr_pages);
2883 if (ret)
2884 goto out;
2885
2886 account_kmem_nmi_safe(memcg, nr_pages);
2887 memcg1_account_kmem(memcg, nr_pages);
2888 out:
2889 css_put(&memcg->css);
2890
2891 return ret;
2892 }
2893
2894 static struct obj_cgroup *page_objcg(const struct page *page)
2895 {
2896 unsigned long memcg_data = page->memcg_data;
2897
2898 if (mem_cgroup_disabled() || !memcg_data)
2899 return NULL;
2900
2901 VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
2902 page);
2903 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
2904 }
2905
2906 static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
2907 {
2908 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
2909 }
2910
2911 /**
2912 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2913 * @page: page to charge
2914 * @gfp: reclaim mode
2915 * @order: allocation order
2916 *
2917 * Returns 0 on success, an error code on failure.
2918 */
2919 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2920 {
2921 struct obj_cgroup *objcg;
2922 int ret = 0;
2923
2924 objcg = current_obj_cgroup();
2925 if (objcg) {
2926 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2927 if (!ret) {
2928 obj_cgroup_get(objcg);
2929 page_set_objcg(page, objcg);
2930 return 0;
2931 }
2932 }
2933 return ret;
2934 }
2935
2936 /**
2937 * __memcg_kmem_uncharge_page: uncharge a kmem page
2938 * @page: page to uncharge
2939 * @order: allocation order
2940 */
2941 void __memcg_kmem_uncharge_page(struct page *page, int order)
2942 {
2943 struct obj_cgroup *objcg = page_objcg(page);
2944 unsigned int nr_pages = 1 << order;
2945
2946 if (!objcg)
2947 return;
2948
2949 obj_cgroup_uncharge_pages(objcg, nr_pages);
2950 page->memcg_data = 0;
2951 obj_cgroup_put(objcg);
2952 }
2953
2954 static struct obj_stock_pcp *trylock_stock(void)
2955 {
2956 if (local_trylock(&obj_stock.lock))
2957 return this_cpu_ptr(&obj_stock);
2958
2959 return NULL;
2960 }
2961
2962 static void unlock_stock(struct obj_stock_pcp *stock)
2963 {
2964 if (stock)
2965 local_unlock(&obj_stock.lock);
2966 }
2967
2968 /* Call after __refill_obj_stock() to ensure stock->cached_objcg == objcg */
2969 static void __account_obj_stock(struct obj_cgroup *objcg,
2970 struct obj_stock_pcp *stock, int nr,
2971 struct pglist_data *pgdat, enum node_stat_item idx)
2972 {
2973 int *bytes;
2974
2975 if (!stock || READ_ONCE(stock->cached_objcg) != objcg)
2976 goto direct;
2977
2978 /*
2979 * Save vmstat data in stock and skip vmstat array update unless
2980 * accumulating over a page of vmstat data or when pgdat changes.
2981 */
2982 if (stock->cached_pgdat != pgdat) {
2983 /* Flush the existing cached vmstat data */
2984 struct pglist_data *oldpg = stock->cached_pgdat;
2985
2986 if (stock->nr_slab_reclaimable_b) {
2987 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2988 stock->nr_slab_reclaimable_b);
2989 stock->nr_slab_reclaimable_b = 0;
2990 }
2991 if (stock->nr_slab_unreclaimable_b) {
2992 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2993 stock->nr_slab_unreclaimable_b);
2994 stock->nr_slab_unreclaimable_b = 0;
2995 }
2996 stock->cached_pgdat = pgdat;
2997 }
2998
2999 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3000 : &stock->nr_slab_unreclaimable_b;
3001 /*
3002 	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3003 * cached locally at least once before pushing it out.
3004 */
3005 if (!*bytes) {
3006 *bytes = nr;
3007 nr = 0;
3008 } else {
3009 *bytes += nr;
3010 if (abs(*bytes) > PAGE_SIZE) {
3011 nr = *bytes;
3012 *bytes = 0;
3013 } else {
3014 nr = 0;
3015 }
3016 }
3017 direct:
3018 if (nr)
3019 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3020 }
3021
3022 static bool __consume_obj_stock(struct obj_cgroup *objcg,
3023 struct obj_stock_pcp *stock,
3024 unsigned int nr_bytes)
3025 {
3026 if (objcg == READ_ONCE(stock->cached_objcg) &&
3027 stock->nr_bytes >= nr_bytes) {
3028 stock->nr_bytes -= nr_bytes;
3029 return true;
3030 }
3031
3032 return false;
3033 }
3034
3035 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3036 {
3037 struct obj_stock_pcp *stock;
3038 bool ret = false;
3039
3040 stock = trylock_stock();
3041 if (!stock)
3042 return ret;
3043
3044 ret = __consume_obj_stock(objcg, stock, nr_bytes);
3045 unlock_stock(stock);
3046
3047 return ret;
3048 }
3049
3050 static void drain_obj_stock(struct obj_stock_pcp *stock)
3051 {
3052 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3053
3054 if (!old)
3055 return;
3056
3057 if (stock->nr_bytes) {
3058 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3059 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3060
3061 if (nr_pages) {
3062 struct mem_cgroup *memcg;
3063
3064 memcg = get_mem_cgroup_from_objcg(old);
3065
3066 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
3067 memcg1_account_kmem(memcg, -nr_pages);
3068 if (!mem_cgroup_is_root(memcg))
3069 memcg_uncharge(memcg, nr_pages);
3070
3071 css_put(&memcg->css);
3072 }
3073
3074 /*
3075 * The leftover is flushed to the centralized per-memcg value.
3076 * On the next attempt to refill obj stock it will be moved
3077 	 * to a per-cpu stock (probably on another CPU), see
3078 * refill_obj_stock().
3079 *
3080 * How often it's flushed is a trade-off between the memory
3081 * limit enforcement accuracy and potential CPU contention,
3082 * so it might be changed in the future.
3083 */
3084 atomic_add(nr_bytes, &old->nr_charged_bytes);
3085 stock->nr_bytes = 0;
3086 }
3087
3088 /*
3089 * Flush the vmstat data in current stock
3090 */
3091 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3092 if (stock->nr_slab_reclaimable_b) {
3093 mod_objcg_mlstate(old, stock->cached_pgdat,
3094 NR_SLAB_RECLAIMABLE_B,
3095 stock->nr_slab_reclaimable_b);
3096 stock->nr_slab_reclaimable_b = 0;
3097 }
3098 if (stock->nr_slab_unreclaimable_b) {
3099 mod_objcg_mlstate(old, stock->cached_pgdat,
3100 NR_SLAB_UNRECLAIMABLE_B,
3101 stock->nr_slab_unreclaimable_b);
3102 stock->nr_slab_unreclaimable_b = 0;
3103 }
3104 stock->cached_pgdat = NULL;
3105 }
3106
3107 WRITE_ONCE(stock->cached_objcg, NULL);
3108 obj_cgroup_put(old);
3109 }
3110
3111 static bool obj_stock_flush_required(struct obj_stock_pcp *stock,
3112 struct mem_cgroup *root_memcg)
3113 {
3114 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3115 struct mem_cgroup *memcg;
3116 bool flush = false;
3117
3118 rcu_read_lock();
3119 if (objcg) {
3120 memcg = obj_cgroup_memcg(objcg);
3121 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3122 flush = true;
3123 }
3124 rcu_read_unlock();
3125
3126 return flush;
3127 }
3128
3129 static void __refill_obj_stock(struct obj_cgroup *objcg,
3130 struct obj_stock_pcp *stock,
3131 unsigned int nr_bytes,
3132 bool allow_uncharge)
3133 {
3134 unsigned int nr_pages = 0;
3135
3136 if (!stock) {
3137 nr_pages = nr_bytes >> PAGE_SHIFT;
3138 nr_bytes = nr_bytes & (PAGE_SIZE - 1);
3139 atomic_add(nr_bytes, &objcg->nr_charged_bytes);
3140 goto out;
3141 }
3142
3143 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3144 drain_obj_stock(stock);
3145 obj_cgroup_get(objcg);
3146 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3147 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3148 WRITE_ONCE(stock->cached_objcg, objcg);
3149
3150 allow_uncharge = true; /* Allow uncharge when objcg changes */
3151 }
3152 stock->nr_bytes += nr_bytes;
3153
3154 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3155 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3156 stock->nr_bytes &= (PAGE_SIZE - 1);
3157 }
3158
3159 out:
3160 if (nr_pages)
3161 obj_cgroup_uncharge_pages(objcg, nr_pages);
3162 }
3163
3164 static void refill_obj_stock(struct obj_cgroup *objcg,
3165 unsigned int nr_bytes,
3166 bool allow_uncharge)
3167 {
3168 struct obj_stock_pcp *stock = trylock_stock();
3169 __refill_obj_stock(objcg, stock, nr_bytes, allow_uncharge);
3170 unlock_stock(stock);
3171 }
3172
3173 static int __obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp,
3174 size_t size, size_t *remainder)
3175 {
3176 size_t charge_size;
3177 int ret;
3178
3179 charge_size = PAGE_ALIGN(size);
3180 ret = obj_cgroup_charge_pages(objcg, gfp, charge_size >> PAGE_SHIFT);
3181 if (!ret)
3182 *remainder = charge_size - size;
3183
3184 return ret;
3185 }
3186
3187 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3188 {
3189 size_t remainder;
3190 int ret;
3191
3192 if (likely(consume_obj_stock(objcg, size)))
3193 return 0;
3194
3195 /*
3196 * In theory, objcg->nr_charged_bytes can have enough
3197 * pre-charged bytes to satisfy the allocation. However,
3198 * flushing objcg->nr_charged_bytes requires two atomic
3199 * operations, and objcg->nr_charged_bytes can't be big.
3200 * The shared objcg->nr_charged_bytes can also become a
3201 * performance bottleneck if all tasks of the same memcg are
3202 	 * trying to update it. So it's better to ignore it and try to
3203 * grab some new pages. The stock's nr_bytes will be flushed to
3204 * objcg->nr_charged_bytes later on when objcg changes.
3205 *
3206 * The stock's nr_bytes may contain enough pre-charged bytes
3207 	 * to allow one less page to be charged, but we can't rely
3208 * on the pre-charged bytes not being changed outside of
3209 * consume_obj_stock() or refill_obj_stock(). So ignore those
3210 * pre-charged bytes as well when charging pages. To avoid a
3211 * page uncharge right after a page charge, we set the
3212 * allow_uncharge flag to false when calling refill_obj_stock()
3213 * to temporarily allow the pre-charged bytes to exceed the page
3214 * size limit. The maximum reachable value of the pre-charged
3215 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3216 * race.
3217 */
3218 ret = __obj_cgroup_charge(objcg, gfp, size, &remainder);
3219 if (!ret && remainder)
3220 refill_obj_stock(objcg, remainder, false);
3221
3222 return ret;
3223 }
3224
3225 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3226 {
3227 refill_obj_stock(objcg, size, true);
3228 }
3229
3230 static inline size_t obj_full_size(struct kmem_cache *s)
3231 {
3232 /*
3233 * For each accounted object there is an extra space which is used
3234 * to store obj_cgroup membership. Charge it too.
3235 */
3236 return s->size + sizeof(struct obj_cgroup *);
3237 }
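
/*
 * For illustration (assuming 64-bit pointers, so sizeof(struct obj_cgroup *)
 * == 8): an object from a cache with s->size == 64 is charged as
 * obj_full_size() == 72 bytes, the extra 8 bytes paying for the objcg
 * pointer kept in the slab's obj_exts vector.
 */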
3238
3239 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3240 gfp_t flags, size_t size, void **p)
3241 {
3242 size_t obj_size = obj_full_size(s);
3243 struct obj_cgroup *objcg;
3244 struct slab *slab;
3245 unsigned long off;
3246 size_t i;
3247
3248 /*
3249 * The obtained objcg pointer is safe to use within the current scope,
3250 * defined by current task or set_active_memcg() pair.
3251 * obj_cgroup_get() is used to get a permanent reference.
3252 */
3253 objcg = current_obj_cgroup();
3254 if (!objcg)
3255 return true;
3256
3257 /*
3258 * slab_alloc_node() avoids the NULL check, so we might be called with a
3259 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
3260 * the whole requested size.
3261 	 * Return success as there's nothing to free back.
3262 */
3263 if (unlikely(*p == NULL))
3264 return true;
3265
3266 flags &= gfp_allowed_mask;
3267
3268 if (lru) {
3269 int ret;
3270 struct mem_cgroup *memcg;
3271
3272 memcg = get_mem_cgroup_from_objcg(objcg);
3273 ret = memcg_list_lru_alloc(memcg, lru, flags);
3274 css_put(&memcg->css);
3275
3276 if (ret)
3277 return false;
3278 }
3279
3280 for (i = 0; i < size; i++) {
3281 unsigned long obj_exts;
3282 struct slabobj_ext *obj_ext;
3283 struct obj_stock_pcp *stock;
3284
3285 slab = virt_to_slab(p[i]);
3286
3287 if (!slab_obj_exts(slab) &&
3288 alloc_slab_obj_exts(slab, s, flags, false)) {
3289 continue;
3290 }
3291
3292 /*
3293 * if we fail and size is 1, memcg_alloc_abort_single() will
3294 * just free the object, which is ok as we have not assigned
3295 * objcg to its obj_ext yet
3296 *
3297 * for larger sizes, kmem_cache_free_bulk() will uncharge
3298 * any objects that were already charged and obj_ext assigned
3299 *
3300 * TODO: we could batch this until slab_pgdat(slab) changes
3301 * between iterations, with a more complicated undo
3302 */
3303 stock = trylock_stock();
3304 if (!stock || !__consume_obj_stock(objcg, stock, obj_size)) {
3305 size_t remainder;
3306
3307 unlock_stock(stock);
3308 if (__obj_cgroup_charge(objcg, flags, obj_size, &remainder))
3309 return false;
3310 stock = trylock_stock();
3311 if (remainder)
3312 __refill_obj_stock(objcg, stock, remainder, false);
3313 }
3314 __account_obj_stock(objcg, stock, obj_size,
3315 slab_pgdat(slab), cache_vmstat_idx(s));
3316 unlock_stock(stock);
3317
3318 obj_exts = slab_obj_exts(slab);
3319 get_slab_obj_exts(obj_exts);
3320 off = obj_to_index(s, slab, p[i]);
3321 obj_ext = slab_obj_ext(slab, obj_exts, off);
3322 obj_cgroup_get(objcg);
3323 obj_ext->objcg = objcg;
3324 put_slab_obj_exts(obj_exts);
3325 }
3326
3327 return true;
3328 }
3329
3330 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3331 void **p, int objects, unsigned long obj_exts)
3332 {
3333 size_t obj_size = obj_full_size(s);
3334
3335 for (int i = 0; i < objects; i++) {
3336 struct obj_cgroup *objcg;
3337 struct slabobj_ext *obj_ext;
3338 struct obj_stock_pcp *stock;
3339 unsigned int off;
3340
3341 off = obj_to_index(s, slab, p[i]);
3342 obj_ext = slab_obj_ext(slab, obj_exts, off);
3343 objcg = obj_ext->objcg;
3344 if (!objcg)
3345 continue;
3346
3347 obj_ext->objcg = NULL;
3348
3349 stock = trylock_stock();
3350 __refill_obj_stock(objcg, stock, obj_size, true);
3351 __account_obj_stock(objcg, stock, -obj_size,
3352 slab_pgdat(slab), cache_vmstat_idx(s));
3353 unlock_stock(stock);
3354
3355 obj_cgroup_put(objcg);
3356 }
3357 }
3358
3359 /*
3360 * The objcg is only set on the first page, so transfer it to all the
3361 * other pages.
3362 */
3363 void split_page_memcg(struct page *page, unsigned order)
3364 {
3365 struct obj_cgroup *objcg = page_objcg(page);
3366 unsigned int i, nr = 1 << order;
3367
3368 if (!objcg)
3369 return;
3370
3371 for (i = 1; i < nr; i++)
3372 page_set_objcg(&page[i], objcg);
3373
3374 obj_cgroup_get_many(objcg, nr - 1);
3375 }
3376
3377 void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
3378 unsigned new_order)
3379 {
3380 unsigned new_refs;
3381
3382 if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3383 return;
3384
3385 new_refs = (1 << (old_order - new_order)) - 1;
3386 css_get_many(&__folio_memcg(folio)->css, new_refs);
3387 }
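
/*
 * Example of the arithmetic above: splitting an order-9 folio down to
 * order-0 produces 2^(9 - 0) = 512 folios, so 511 additional css references
 * are taken here; the original folio keeps the reference it already held.
 */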
3388
3389 static int memcg_online_kmem(struct mem_cgroup *memcg)
3390 {
3391 struct obj_cgroup *objcg;
3392
3393 if (mem_cgroup_kmem_disabled())
3394 return 0;
3395
3396 if (unlikely(mem_cgroup_is_root(memcg)))
3397 return 0;
3398
3399 objcg = obj_cgroup_alloc();
3400 if (!objcg)
3401 return -ENOMEM;
3402
3403 objcg->memcg = memcg;
3404 rcu_assign_pointer(memcg->objcg, objcg);
3405 obj_cgroup_get(objcg);
3406 memcg->orig_objcg = objcg;
3407
3408 static_branch_enable(&memcg_kmem_online_key);
3409
3410 memcg->kmemcg_id = memcg->id.id;
3411
3412 return 0;
3413 }
3414
3415 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3416 {
3417 struct mem_cgroup *parent;
3418
3419 if (mem_cgroup_kmem_disabled())
3420 return;
3421
3422 if (unlikely(mem_cgroup_is_root(memcg)))
3423 return;
3424
3425 parent = parent_mem_cgroup(memcg);
3426 if (!parent)
3427 parent = root_mem_cgroup;
3428
3429 memcg_reparent_list_lrus(memcg, parent);
3430
3431 /*
3432 	 * Objcg's reparenting must come after list_lru's, to make sure list_lru
3433 	 * helpers won't use the parent's list_lru until the child is drained.
3434 */
3435 memcg_reparent_objcgs(memcg, parent);
3436 }
3437
3438 #ifdef CONFIG_CGROUP_WRITEBACK
3439
3440 #include <trace/events/writeback.h>
3441
3442 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3443 {
3444 return wb_domain_init(&memcg->cgwb_domain, gfp);
3445 }
3446
3447 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3448 {
3449 wb_domain_exit(&memcg->cgwb_domain);
3450 }
3451
3452 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3453 {
3454 wb_domain_size_changed(&memcg->cgwb_domain);
3455 }
3456
3457 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3458 {
3459 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3460
3461 if (!memcg->css.parent)
3462 return NULL;
3463
3464 return &memcg->cgwb_domain;
3465 }
3466
3467 /**
3468 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3469 * @wb: bdi_writeback in question
3470 * @pfilepages: out parameter for number of file pages
3471 * @pheadroom: out parameter for number of allocatable pages according to memcg
3472 * @pdirty: out parameter for number of dirty pages
3473 * @pwriteback: out parameter for number of pages under writeback
3474 *
3475 * Determine the numbers of file, headroom, dirty, and writeback pages in
3476 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3477 * is a bit more involved.
3478 *
3479 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3480 * headroom is calculated as the lowest headroom of itself and the
3481 * ancestors. Note that this doesn't consider the actual amount of
3482 * available memory in the system. The caller should further cap
3483 * *@pheadroom accordingly.
3484 */
3485 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3486 unsigned long *pheadroom, unsigned long *pdirty,
3487 unsigned long *pwriteback)
3488 {
3489 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3490 struct mem_cgroup *parent;
3491
3492 mem_cgroup_flush_stats_ratelimited(memcg);
3493
3494 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3495 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3496 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3497 memcg_page_state(memcg, NR_ACTIVE_FILE);
3498
3499 *pheadroom = PAGE_COUNTER_MAX;
3500 while ((parent = parent_mem_cgroup(memcg))) {
3501 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3502 READ_ONCE(memcg->memory.high));
3503 unsigned long used = page_counter_read(&memcg->memory);
3504
3505 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3506 memcg = parent;
3507 }
3508 }
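
/*
 * Illustrative headroom walk (hypothetical numbers): a memcg with
 * max = 1GiB, high = 512MiB and 300MiB used contributes a ceiling of 512MiB
 * and a headroom of 212MiB; if an ancestor's ceiling is 400MiB with 350MiB
 * used, that level's headroom is only 50MiB, so *pheadroom ends up as
 * 50MiB, since the most constrained level wins.
 */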
3509
3510 /*
3511 * Foreign dirty flushing
3512 *
3513 * There's an inherent mismatch between memcg and writeback. The former
3514 * tracks ownership per-page while the latter per-inode. This was a
3515 * deliberate design decision because honoring per-page ownership in the
3516 * writeback path is complicated, may lead to higher CPU and IO overheads
3517 * and deemed unnecessary given that write-sharing an inode across
3518 * different cgroups isn't a common use-case.
3519 *
3520 * Combined with inode majority-writer ownership switching, this works well
3521 * enough in most cases but there are some pathological cases. For
3522 * example, let's say there are two cgroups A and B which keep writing to
3523 * different but confined parts of the same inode. B owns the inode and
3524 * A's memory is limited far below B's. A's dirty ratio can rise enough to
3525 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3526 * triggering background writeback. A will be slowed down without a way to
3527 * make writeback of the dirty pages happen.
3528 *
3529 * Conditions like the above can lead to a cgroup getting repeatedly and
3530 * severely throttled after making some progress after each
3531 * dirty_expire_interval while the underlying IO device is almost
3532 * completely idle.
3533 *
3534 * Solving this problem completely requires matching the ownership tracking
3535 * granularities between memcg and writeback in either direction. However,
3536 * the more egregious behaviors can be avoided by simply remembering the
3537 * most recent foreign dirtying events and initiating remote flushes on
3538 * them when local writeback isn't enough to keep the memory clean enough.
3539 *
3540 * The following two functions implement such mechanism. When a foreign
3541 * page - a page whose memcg and writeback ownerships don't match - is
3542 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3543 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
3544 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3545 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3546 * foreign bdi_writebacks which haven't expired. Both the numbers of
3547 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3548 * limited to MEMCG_CGWB_FRN_CNT.
3549 *
3550 * The mechanism only remembers IDs and doesn't hold any object references.
3551 * As being wrong occasionally doesn't matter, updates and accesses to the
3552 * records are lockless and racy.
3553 */
3554 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3555 struct bdi_writeback *wb)
3556 {
3557 struct mem_cgroup *memcg = folio_memcg(folio);
3558 struct memcg_cgwb_frn *frn;
3559 u64 now = get_jiffies_64();
3560 u64 oldest_at = now;
3561 int oldest = -1;
3562 int i;
3563
3564 trace_track_foreign_dirty(folio, wb);
3565
3566 /*
3567 * Pick the slot to use. If there is already a slot for @wb, keep
3568 * using it. If not replace the oldest one which isn't being
3569 * written out.
3570 */
3571 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3572 frn = &memcg->cgwb_frn[i];
3573 if (frn->bdi_id == wb->bdi->id &&
3574 frn->memcg_id == wb->memcg_css->id)
3575 break;
3576 if (time_before64(frn->at, oldest_at) &&
3577 atomic_read(&frn->done.cnt) == 1) {
3578 oldest = i;
3579 oldest_at = frn->at;
3580 }
3581 }
3582
3583 if (i < MEMCG_CGWB_FRN_CNT) {
3584 /*
3585 * Re-using an existing one. Update timestamp lazily to
3586 * avoid making the cacheline hot. We want them to be
3587 * reasonably up-to-date and significantly shorter than
3588 * dirty_expire_interval as that's what expires the record.
3589 * Use the shorter of 1s and dirty_expire_interval / 8.
3590 */
3591 unsigned long update_intv =
3592 min_t(unsigned long, HZ,
3593 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3594
3595 if (time_before64(frn->at, now - update_intv))
3596 frn->at = now;
3597 } else if (oldest >= 0) {
3598 /* replace the oldest free one */
3599 frn = &memcg->cgwb_frn[oldest];
3600 frn->bdi_id = wb->bdi->id;
3601 frn->memcg_id = wb->memcg_css->id;
3602 frn->at = now;
3603 }
3604 }
3605
3606 /* issue foreign writeback flushes for recorded foreign dirtying events */
3607 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3608 {
3609 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3610 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3611 u64 now = jiffies_64;
3612 int i;
3613
3614 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3615 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3616
3617 /*
3618 * If the record is older than dirty_expire_interval,
3619 * writeback on it has already started. No need to kick it
3620 * off again. Also, don't start a new one if there's
3621 * already one in flight.
3622 */
3623 if (time_after64(frn->at, now - intv) &&
3624 atomic_read(&frn->done.cnt) == 1) {
3625 frn->at = 0;
3626 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3627 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3628 WB_REASON_FOREIGN_FLUSH,
3629 &frn->done);
3630 }
3631 }
3632 }
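/*
 * Illustrative sketch of how the two entry points above are meant to be
 * paired by the dirtying and throttling paths; the ownership check shown
 * here is a simplified stand-in for the fast-path wrapper in memcontrol.h:
 *
 *	// dirtying path: folio owned by one memcg, inode by a foreign wb
 *	if (folio_memcg(folio) != mem_cgroup_from_css(wb->memcg_css))
 *		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
 *
 *	// throttling path: before sleeping on a high dirty ratio
 *	mem_cgroup_flush_foreign(wb);
 */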
3633
3634 #else /* CONFIG_CGROUP_WRITEBACK */
3635
3636 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3637 {
3638 return 0;
3639 }
3640
3641 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3642 {
3643 }
3644
3645 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3646 {
3647 }
3648
3649 #endif /* CONFIG_CGROUP_WRITEBACK */
3650
3651 /*
3652 * Private memory cgroup ID space
3653 *
3654 * Swap-out records and page cache shadow entries need to store memcg
3655 * references in constrained space, so we maintain an ID space that is
3656 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
3657 * memory-controlled cgroups to 64k.
3658 *
3659 * However, there usually are many references to the offline CSS after
3660 * the cgroup has been destroyed, such as page cache or reclaimable
3661 * slab objects, that don't need to hang on to the ID. We want to keep
3662 * those dead CSS from occupying IDs, or we might quickly exhaust the
3663 * relatively small ID space and prevent the creation of new cgroups
3664 * even when there are much fewer than 64k cgroups - possibly none.
3665 *
3666 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3667 * be freed and recycled when it's no longer needed, which is usually
3668 * when the CSS is offlined.
3669 *
3670 * The only exception to that are records of swapped out tmpfs/shmem
3671 * pages that need to be attributed to live ancestors on swapin. But
3672 * those references are manageable from userspace.
3673 */
3674
3675 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3676 static DEFINE_XARRAY_ALLOC1(mem_cgroup_private_ids);
3677
3678 static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg)
3679 {
3680 if (memcg->id.id > 0) {
3681 xa_erase(&mem_cgroup_private_ids, memcg->id.id);
3682 memcg->id.id = 0;
3683 }
3684 }
3685
3686 static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg, unsigned int n)
3687 {
3688 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3689 mem_cgroup_private_id_remove(memcg);
3690
3691 /* Memcg ID pins CSS */
3692 css_put(&memcg->css);
3693 }
3694 }
3695
3696 struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg, unsigned int n)
3697 {
3698 while (!refcount_add_not_zero(n, &memcg->id.ref)) {
3699 /*
3700 * The root cgroup cannot be destroyed, so its refcount must
3701 * always be >= 1.
3702 */
3703 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
3704 VM_BUG_ON(1);
3705 break;
3706 }
3707 memcg = parent_mem_cgroup(memcg);
3708 if (!memcg)
3709 memcg = root_mem_cgroup;
3710 }
3711 return memcg;
3712 }
3713
3714 /**
3715 * mem_cgroup_from_private_id - look up a memcg from a memcg id
3716 * @id: the memcg id to look up
3717 *
3718 * Caller must hold rcu_read_lock().
3719 */
3720 struct mem_cgroup *mem_cgroup_from_private_id(unsigned short id)
3721 {
3722 WARN_ON_ONCE(!rcu_read_lock_held());
3723 return xa_load(&mem_cgroup_private_ids, id);
3724 }
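/*
 * Illustrative sketch of the intended lookup pattern (this mirrors the
 * swapin charge path later in this file): the bare ID does not pin the
 * memcg, so a user has to hold the RCU read lock across the lookup and then
 * try to acquire a real reference before using the result:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_private_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */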
3725
3726 struct mem_cgroup *mem_cgroup_get_from_id(u64 id)
3727 {
3728 struct cgroup *cgrp;
3729 struct cgroup_subsys_state *css;
3730 struct mem_cgroup *memcg = NULL;
3731
3732 cgrp = cgroup_get_from_id(id);
3733 if (IS_ERR(cgrp))
3734 return NULL;
3735
3736 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3737 if (css)
3738 memcg = container_of(css, struct mem_cgroup, css);
3739
3740 cgroup_put(cgrp);
3741
3742 return memcg;
3743 }
3744
3745 static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
3746 {
3747 if (!pn)
3748 return;
3749
3750 free_percpu(pn->lruvec_stats_percpu);
3751 kfree(pn->lruvec_stats);
3752 kfree(pn);
3753 }
3754
3755 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3756 {
3757 struct mem_cgroup_per_node *pn;
3758
3759 pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
3760 node);
3761 if (!pn)
3762 return false;
3763
3764 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3765 GFP_KERNEL_ACCOUNT, node);
3766 if (!pn->lruvec_stats)
3767 goto fail;
3768
3769 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3770 GFP_KERNEL_ACCOUNT);
3771 if (!pn->lruvec_stats_percpu)
3772 goto fail;
3773
3774 lruvec_init(&pn->lruvec);
3775 pn->memcg = memcg;
3776
3777 memcg->nodeinfo[node] = pn;
3778 return true;
3779 fail:
3780 free_mem_cgroup_per_node_info(pn);
3781 return false;
3782 }
3783
3784 static void __mem_cgroup_free(struct mem_cgroup *memcg)
3785 {
3786 int node;
3787
3788 obj_cgroup_put(memcg->orig_objcg);
3789
3790 for_each_node(node)
3791 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
3792 memcg1_free_events(memcg);
3793 kfree(memcg->vmstats);
3794 free_percpu(memcg->vmstats_percpu);
3795 kfree(memcg);
3796 }
3797
3798 static void mem_cgroup_free(struct mem_cgroup *memcg)
3799 {
3800 lru_gen_exit_memcg(memcg);
3801 memcg_wb_domain_exit(memcg);
3802 __mem_cgroup_free(memcg);
3803 }
3804
3805 static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
3806 {
3807 struct memcg_vmstats_percpu *statc;
3808 struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
3809 struct mem_cgroup *memcg;
3810 int node, cpu;
3811 int __maybe_unused i;
3812 long error;
3813
3814 memcg = kmem_cache_zalloc(memcg_cachep, GFP_KERNEL);
3815 if (!memcg)
3816 return ERR_PTR(-ENOMEM);
3817
3818 error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL,
3819 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3820 if (error)
3821 goto fail;
3822 error = -ENOMEM;
3823
3824 memcg->vmstats = kzalloc_obj(struct memcg_vmstats, GFP_KERNEL_ACCOUNT);
3825 if (!memcg->vmstats)
3826 goto fail;
3827
3828 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3829 GFP_KERNEL_ACCOUNT);
3830 if (!memcg->vmstats_percpu)
3831 goto fail;
3832
3833 if (!memcg1_alloc_events(memcg))
3834 goto fail;
3835
3836 for_each_possible_cpu(cpu) {
3837 if (parent)
3838 pstatc_pcpu = parent->vmstats_percpu;
3839 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3840 statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
3841 statc->vmstats = memcg->vmstats;
3842 }
3843
3844 for_each_node(node)
3845 if (!alloc_mem_cgroup_per_node_info(memcg, node))
3846 goto fail;
3847
3848 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3849 goto fail;
3850
3851 INIT_WORK(&memcg->high_work, high_work_func);
3852 vmpressure_init(&memcg->vmpressure);
3853 INIT_LIST_HEAD(&memcg->memory_peaks);
3854 INIT_LIST_HEAD(&memcg->swap_peaks);
3855 spin_lock_init(&memcg->peaks_lock);
3856 memcg->socket_pressure = get_jiffies_64();
3857 #if BITS_PER_LONG < 64
3858 seqlock_init(&memcg->socket_pressure_seqlock);
3859 #endif
3860 memcg1_memcg_init(memcg);
3861 memcg->kmemcg_id = -1;
3862 INIT_LIST_HEAD(&memcg->objcg_list);
3863 #ifdef CONFIG_CGROUP_WRITEBACK
3864 INIT_LIST_HEAD(&memcg->cgwb_list);
3865 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3866 memcg->cgwb_frn[i].done =
3867 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3868 #endif
3869 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3870 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3871 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3872 memcg->deferred_split_queue.split_queue_len = 0;
3873 #endif
3874 lru_gen_init_memcg(memcg);
3875 return memcg;
3876 fail:
3877 mem_cgroup_private_id_remove(memcg);
3878 __mem_cgroup_free(memcg);
3879 return ERR_PTR(error);
3880 }
3881
3882 static struct cgroup_subsys_state * __ref
3883 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
3884 {
3885 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3886 struct mem_cgroup *memcg, *old_memcg;
3887 bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
3888
3889 old_memcg = set_active_memcg(parent);
3890 memcg = mem_cgroup_alloc(parent);
3891 set_active_memcg(old_memcg);
3892 if (IS_ERR(memcg))
3893 return ERR_CAST(memcg);
3894
3895 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3896 memcg1_soft_limit_reset(memcg);
3897 #ifdef CONFIG_ZSWAP
3898 memcg->zswap_max = PAGE_COUNTER_MAX;
3899 WRITE_ONCE(memcg->zswap_writeback, true);
3900 #endif
3901 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3902 if (parent) {
3903 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3904
3905 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
3906 page_counter_init(&memcg->swap, &parent->swap, false);
3907 #ifdef CONFIG_MEMCG_V1
3908 memcg->memory.track_failcnt = !memcg_on_dfl;
3909 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3910 page_counter_init(&memcg->kmem, &parent->kmem, false);
3911 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3912 #endif
3913 } else {
3914 init_memcg_stats();
3915 init_memcg_events();
3916 page_counter_init(&memcg->memory, NULL, true);
3917 page_counter_init(&memcg->swap, NULL, false);
3918 #ifdef CONFIG_MEMCG_V1
3919 page_counter_init(&memcg->kmem, NULL, false);
3920 page_counter_init(&memcg->tcpmem, NULL, false);
3921 #endif
3922 root_mem_cgroup = memcg;
3923 return &memcg->css;
3924 }
3925
3926 if (memcg_on_dfl && !cgroup_memory_nosocket)
3927 static_branch_inc(&memcg_sockets_enabled_key);
3928
3929 if (!cgroup_memory_nobpf)
3930 static_branch_inc(&memcg_bpf_enabled_key);
3931
3932 return &memcg->css;
3933 }
3934
3935 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3936 {
3937 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3938
3939 if (memcg_online_kmem(memcg))
3940 goto remove_id;
3941
3942 /*
3943 * A memcg must be visible for expand_shrinker_info()
3944 * by the time the maps are allocated. So, we allocate maps
3945 * here, when for_each_mem_cgroup() can't skip it.
3946 */
3947 if (alloc_shrinker_info(memcg))
3948 goto offline_kmem;
3949
3950 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3951 queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
3952 FLUSH_TIME);
3953 lru_gen_online_memcg(memcg);
3954
3955 /* Online state pins memcg ID, memcg ID pins CSS */
3956 refcount_set(&memcg->id.ref, 1);
3957 css_get(css);
3958
3959 /*
3960 * Ensure mem_cgroup_from_private_id() works once we're fully online.
3961 *
3962 * We could do this earlier and require callers to filter with
3963 * css_tryget_online(). But right now there are no users that
3964 * need earlier access, and the workingset code relies on the
3965 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3966 * publish it here at the end of onlining. This matches the
3967 * regular ID destruction during offlining.
3968 */
3969 xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL);
3970
3971 return 0;
3972 offline_kmem:
3973 memcg_offline_kmem(memcg);
3974 remove_id:
3975 mem_cgroup_private_id_remove(memcg);
3976 return -ENOMEM;
3977 }
3978
3979 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3980 {
3981 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3982
3983 memcg1_css_offline(memcg);
3984
3985 page_counter_set_min(&memcg->memory, 0);
3986 page_counter_set_low(&memcg->memory, 0);
3987
3988 zswap_memcg_offline_cleanup(memcg);
3989
3990 memcg_offline_kmem(memcg);
3991 reparent_deferred_split_queue(memcg);
3992 reparent_shrinker_deferred(memcg);
3993 wb_memcg_offline(memcg);
3994 lru_gen_offline_memcg(memcg);
3995
3996 drain_all_stock(memcg);
3997
3998 mem_cgroup_private_id_put(memcg, 1);
3999 }
4000
4001 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4002 {
4003 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4004
4005 invalidate_reclaim_iterators(memcg);
4006 lru_gen_release_memcg(memcg);
4007 }
4008
4009 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4010 {
4011 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4012 int __maybe_unused i;
4013
4014 #ifdef CONFIG_CGROUP_WRITEBACK
4015 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
4016 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
4017 #endif
4018 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4019 static_branch_dec(&memcg_sockets_enabled_key);
4020
4021 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
4022 static_branch_dec(&memcg_sockets_enabled_key);
4023
4024 if (!cgroup_memory_nobpf)
4025 static_branch_dec(&memcg_bpf_enabled_key);
4026
4027 vmpressure_cleanup(&memcg->vmpressure);
4028 cancel_work_sync(&memcg->high_work);
4029 memcg1_remove_from_trees(memcg);
4030 free_shrinker_info(memcg);
4031 mem_cgroup_free(memcg);
4032 }
4033
4034 /**
4035 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4036 * @css: the target css
4037 *
4038 * Reset the states of the mem_cgroup associated with @css. This is
4039 * invoked when the userland requests disabling on the default hierarchy
4040 * but the memcg is pinned through dependency. The memcg should stop
4041 * applying policies and should revert to the vanilla state as it may be
4042 * made visible again.
4043 *
4044 * The current implementation only resets the essential configurations.
4045 * This needs to be expanded to cover all the visible parts.
4046 */
4047 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4048 {
4049 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4050
4051 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
4052 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
4053 #ifdef CONFIG_MEMCG_V1
4054 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
4055 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
4056 #endif
4057 page_counter_set_min(&memcg->memory, 0);
4058 page_counter_set_low(&memcg->memory, 0);
4059 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
4060 memcg1_soft_limit_reset(memcg);
4061 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
4062 memcg_wb_domain_size_changed(memcg);
4063 }
4064
4065 struct aggregate_control {
4066 /* pointer to the aggregated (CPU and subtree aggregated) counters */
4067 long *aggregate;
4068 /* pointer to the non-hierarchical (CPU aggregated) counters */
4069 long *local;
4070 /* pointer to the pending child counters during tree propagation */
4071 long *pending;
4072 /* pointer to the parent's pending counters, could be NULL */
4073 long *ppending;
4074 /* pointer to the percpu counters to be aggregated */
4075 long *cstat;
4076 /* pointer to the percpu counters of the last aggregation */
4077 long *cstat_prev;
4078 /* size of the above counters */
4079 int size;
4080 };
4081
4082 static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
4083 {
4084 int i;
4085 long delta, delta_cpu, v;
4086
4087 for (i = 0; i < ac->size; i++) {
4088 /*
4089 * Collect the aggregated propagation counts of groups
4090 * below us. We're in a per-cpu loop here and this is
4091 * a global counter, so the first cycle will get them.
4092 */
4093 delta = ac->pending[i];
4094 if (delta)
4095 ac->pending[i] = 0;
4096
4097 /* Add CPU changes on this level since the last flush */
4098 delta_cpu = 0;
4099 v = READ_ONCE(ac->cstat[i]);
4100 if (v != ac->cstat_prev[i]) {
4101 delta_cpu = v - ac->cstat_prev[i];
4102 delta += delta_cpu;
4103 ac->cstat_prev[i] = v;
4104 }
4105
4106 /* Aggregate counts on this level and propagate upwards */
4107 if (delta_cpu)
4108 ac->local[i] += delta_cpu;
4109
4110 if (delta) {
4111 ac->aggregate[i] += delta;
4112 if (ac->ppending)
4113 ac->ppending[i] += delta;
4114 }
4115 }
4116 }
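/*
 * Worked example of the propagation above: if a child memcg flushed 10
 * pages' worth of NR_FILE_DIRTY on some CPU, that delta was added to the
 * child's ->aggregate and to this memcg's ->pending. On this memcg's next
 * flush the pending 10 is folded into ->aggregate here (but not into
 * ->local, which stays non-hierarchical) and forwarded to ->ppending, so
 * the update climbs one level of the tree per flush until it reaches the
 * root.
 */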
4117
4118 #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
4119 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4120 int cpu)
4121 {
4122 int nid;
4123
4124 if (atomic_read(&memcg->kmem_stat)) {
4125 int kmem = atomic_xchg(&memcg->kmem_stat, 0);
4126 int index = memcg_stats_index(MEMCG_KMEM);
4127
4128 memcg->vmstats->state[index] += kmem;
4129 if (parent)
4130 parent->vmstats->state_pending[index] += kmem;
4131 }
4132
4133 for_each_node_state(nid, N_MEMORY) {
4134 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4135 struct lruvec_stats *lstats = pn->lruvec_stats;
4136 struct lruvec_stats *plstats = NULL;
4137
4138 if (parent)
4139 plstats = parent->nodeinfo[nid]->lruvec_stats;
4140
4141 if (atomic_read(&pn->slab_reclaimable)) {
4142 int slab = atomic_xchg(&pn->slab_reclaimable, 0);
4143 int index = memcg_stats_index(NR_SLAB_RECLAIMABLE_B);
4144
4145 lstats->state[index] += slab;
4146 if (plstats)
4147 plstats->state_pending[index] += slab;
4148 }
4149 if (atomic_read(&pn->slab_unreclaimable)) {
4150 int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
4151 int index = memcg_stats_index(NR_SLAB_UNRECLAIMABLE_B);
4152
4153 lstats->state[index] += slab;
4154 if (plstats)
4155 plstats->state_pending[index] += slab;
4156 }
4157 }
4158 }
4159 #else
4160 static void flush_nmi_stats(struct mem_cgroup *memcg, struct mem_cgroup *parent,
4161 int cpu)
4162 {}
4163 #endif
4164
4165 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
4166 {
4167 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4168 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4169 struct memcg_vmstats_percpu *statc;
4170 struct aggregate_control ac;
4171 int nid;
4172
4173 flush_nmi_stats(memcg, parent, cpu);
4174
4175 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
4176
4177 ac = (struct aggregate_control) {
4178 .aggregate = memcg->vmstats->state,
4179 .local = memcg->vmstats->state_local,
4180 .pending = memcg->vmstats->state_pending,
4181 .ppending = parent ? parent->vmstats->state_pending : NULL,
4182 .cstat = statc->state,
4183 .cstat_prev = statc->state_prev,
4184 .size = MEMCG_VMSTAT_SIZE,
4185 };
4186 mem_cgroup_stat_aggregate(&ac);
4187
4188 ac = (struct aggregate_control) {
4189 .aggregate = memcg->vmstats->events,
4190 .local = memcg->vmstats->events_local,
4191 .pending = memcg->vmstats->events_pending,
4192 .ppending = parent ? parent->vmstats->events_pending : NULL,
4193 .cstat = statc->events,
4194 .cstat_prev = statc->events_prev,
4195 .size = NR_MEMCG_EVENTS,
4196 };
4197 mem_cgroup_stat_aggregate(&ac);
4198
4199 for_each_node_state(nid, N_MEMORY) {
4200 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
4201 struct lruvec_stats *lstats = pn->lruvec_stats;
4202 struct lruvec_stats *plstats = NULL;
4203 struct lruvec_stats_percpu *lstatc;
4204
4205 if (parent)
4206 plstats = parent->nodeinfo[nid]->lruvec_stats;
4207
4208 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
4209
4210 ac = (struct aggregate_control) {
4211 .aggregate = lstats->state,
4212 .local = lstats->state_local,
4213 .pending = lstats->state_pending,
4214 .ppending = plstats ? plstats->state_pending : NULL,
4215 .cstat = lstatc->state,
4216 .cstat_prev = lstatc->state_prev,
4217 .size = NR_MEMCG_NODE_STAT_ITEMS,
4218 };
4219 mem_cgroup_stat_aggregate(&ac);
4220
4221 }
4222 WRITE_ONCE(statc->stats_updates, 0);
4223 /* We are in a per-cpu loop here, only do the atomic write once */
4224 if (atomic_read(&memcg->vmstats->stats_updates))
4225 atomic_set(&memcg->vmstats->stats_updates, 0);
4226 }
4227
4228 static void mem_cgroup_fork(struct task_struct *task)
4229 {
4230 /*
4231 * Set the update flag to cause task->objcg to be initialized lazily
4232 * on the first allocation. It can be done without any synchronization
4233 * because it's always performed on the current task, as is
4234 * current_objcg_update().
4235 */
4236 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
4237 }
4238
4239 static void mem_cgroup_exit(struct task_struct *task)
4240 {
4241 struct obj_cgroup *objcg = task->objcg;
4242
4243 objcg = (struct obj_cgroup *)
4244 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
4245 obj_cgroup_put(objcg);
4246
4247 /*
4248 * Some kernel allocations can happen after this point,
4249 * but let's ignore them. It can be done without any synchronization
4250 * because it's always performed on the current task, as is
4251 * current_objcg_update().
4252 */
4253 task->objcg = NULL;
4254 }
4255
4256 #ifdef CONFIG_LRU_GEN
4257 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
4258 {
4259 struct task_struct *task;
4260 struct cgroup_subsys_state *css;
4261
4262 /* find the first leader if there is any */
4263 cgroup_taskset_for_each_leader(task, css, tset)
4264 break;
4265
4266 if (!task)
4267 return;
4268
4269 task_lock(task);
4270 if (task->mm && READ_ONCE(task->mm->owner) == task)
4271 lru_gen_migrate_mm(task->mm);
4272 task_unlock(task);
4273 }
4274 #else
4275 static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
4276 #endif /* CONFIG_LRU_GEN */
4277
4278 static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
4279 {
4280 struct task_struct *task;
4281 struct cgroup_subsys_state *css;
4282
4283 cgroup_taskset_for_each(task, css, tset) {
4284 /* atomically set the update bit */
4285 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
4286 }
4287 }
4288
4289 static void mem_cgroup_attach(struct cgroup_taskset *tset)
4290 {
4291 mem_cgroup_lru_gen_attach(tset);
4292 mem_cgroup_kmem_attach(tset);
4293 }
4294
4295 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
4296 {
4297 if (value == PAGE_COUNTER_MAX)
4298 seq_puts(m, "max\n");
4299 else
4300 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
4301
4302 return 0;
4303 }
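/*
 * Illustrative output, assuming PAGE_SIZE == 4096: a limit of
 * PAGE_COUNTER_MAX is shown as the literal string "max", anything else is
 * shown in bytes:
 *
 *	seq_puts_memcg_tunable(m, PAGE_COUNTER_MAX);	// emits "max\n"
 *	seq_puts_memcg_tunable(m, 131072);		// emits "536870912\n"
 */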
4304
4305 static u64 memory_current_read(struct cgroup_subsys_state *css,
4306 struct cftype *cft)
4307 {
4308 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4309
4310 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4311 }
4312
4313 #define OFP_PEAK_UNSET (((-1UL)))
4314
4315 static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
4316 {
4317 struct cgroup_of_peak *ofp = of_peak(sf->private);
4318 u64 fd_peak = READ_ONCE(ofp->value), peak;
4319
4320 /* User wants global or local peak? */
4321 if (fd_peak == OFP_PEAK_UNSET)
4322 peak = pc->watermark;
4323 else
4324 peak = max(fd_peak, READ_ONCE(pc->local_watermark));
4325
4326 seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
4327 return 0;
4328 }
4329
4330 static int memory_peak_show(struct seq_file *sf, void *v)
4331 {
4332 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
4333
4334 return peak_show(sf, v, &memcg->memory);
4335 }
4336
4337 static int peak_open(struct kernfs_open_file *of)
4338 {
4339 struct cgroup_of_peak *ofp = of_peak(of);
4340
4341 ofp->value = OFP_PEAK_UNSET;
4342 return 0;
4343 }
4344
4345 static void peak_release(struct kernfs_open_file *of)
4346 {
4347 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4348 struct cgroup_of_peak *ofp = of_peak(of);
4349
4350 if (ofp->value == OFP_PEAK_UNSET) {
4351 /* fast path (no writes on this fd) */
4352 return;
4353 }
4354 spin_lock(&memcg->peaks_lock);
4355 list_del(&ofp->list);
4356 spin_unlock(&memcg->peaks_lock);
4357 }
4358
4359 static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4360 loff_t off, struct page_counter *pc,
4361 struct list_head *watchers)
4362 {
4363 unsigned long usage;
4364 struct cgroup_of_peak *peer_ctx;
4365 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4366 struct cgroup_of_peak *ofp = of_peak(of);
4367
4368 spin_lock(&memcg->peaks_lock);
4369
4370 usage = page_counter_read(pc);
4371 WRITE_ONCE(pc->local_watermark, usage);
4372
4373 list_for_each_entry(peer_ctx, watchers, list)
4374 if (usage > peer_ctx->value)
4375 WRITE_ONCE(peer_ctx->value, usage);
4376
4377 /* initial write, register watcher */
4378 if (ofp->value == OFP_PEAK_UNSET)
4379 list_add(&ofp->list, watchers);
4380
4381 WRITE_ONCE(ofp->value, usage);
4382 spin_unlock(&memcg->peaks_lock);
4383
4384 return nbytes;
4385 }
4386
4387 static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4388 size_t nbytes, loff_t off)
4389 {
4390 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4391
4392 return peak_write(of, buf, nbytes, off, &memcg->memory,
4393 &memcg->memory_peaks);
4394 }
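/*
 * Semantics implemented by peak_show()/peak_open()/peak_release()/
 * peak_write() above: a read through an fd that has never been written
 * returns the counter's global watermark; any write through an fd resets
 * the local watermark and registers that fd as a watcher, so subsequent
 * reads on it report the highest usage seen since that fd's own last write.
 */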
4395
4396 #undef OFP_PEAK_UNSET
4397
4398 static int memory_min_show(struct seq_file *m, void *v)
4399 {
4400 return seq_puts_memcg_tunable(m,
4401 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4402 }
4403
4404 static ssize_t memory_min_write(struct kernfs_open_file *of,
4405 char *buf, size_t nbytes, loff_t off)
4406 {
4407 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4408 unsigned long min;
4409 int err;
4410
4411 buf = strstrip(buf);
4412 err = page_counter_memparse(buf, "max", &min);
4413 if (err)
4414 return err;
4415
4416 page_counter_set_min(&memcg->memory, min);
4417
4418 return nbytes;
4419 }
4420
4421 static int memory_low_show(struct seq_file *m, void *v)
4422 {
4423 return seq_puts_memcg_tunable(m,
4424 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4425 }
4426
4427 static ssize_t memory_low_write(struct kernfs_open_file *of,
4428 char *buf, size_t nbytes, loff_t off)
4429 {
4430 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4431 unsigned long low;
4432 int err;
4433
4434 buf = strstrip(buf);
4435 err = page_counter_memparse(buf, "max", &low);
4436 if (err)
4437 return err;
4438
4439 page_counter_set_low(&memcg->memory, low);
4440
4441 return nbytes;
4442 }
4443
4444 static int memory_high_show(struct seq_file *m, void *v)
4445 {
4446 return seq_puts_memcg_tunable(m,
4447 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4448 }
4449
4450 static ssize_t memory_high_write(struct kernfs_open_file *of,
4451 char *buf, size_t nbytes, loff_t off)
4452 {
4453 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4454 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4455 bool drained = false;
4456 unsigned long high;
4457 int err;
4458
4459 buf = strstrip(buf);
4460 err = page_counter_memparse(buf, "max", &high);
4461 if (err)
4462 return err;
4463
4464 page_counter_set_high(&memcg->memory, high);
4465
4466 if (of->file->f_flags & O_NONBLOCK)
4467 goto out;
4468
4469 for (;;) {
4470 unsigned long nr_pages = page_counter_read(&memcg->memory);
4471 unsigned long reclaimed;
4472
4473 if (nr_pages <= high)
4474 break;
4475
4476 if (signal_pending(current))
4477 break;
4478
4479 if (!drained) {
4480 drain_all_stock(memcg);
4481 drained = true;
4482 continue;
4483 }
4484
4485 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4486 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4487
4488 if (!reclaimed && !nr_retries--)
4489 break;
4490 }
4491 out:
4492 memcg_wb_domain_size_changed(memcg);
4493 return nbytes;
4494 }
4495
4496 static int memory_max_show(struct seq_file *m, void *v)
4497 {
4498 return seq_puts_memcg_tunable(m,
4499 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4500 }
4501
4502 static ssize_t memory_max_write(struct kernfs_open_file *of,
4503 char *buf, size_t nbytes, loff_t off)
4504 {
4505 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4506 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4507 bool drained = false;
4508 unsigned long max;
4509 int err;
4510
4511 buf = strstrip(buf);
4512 err = page_counter_memparse(buf, "max", &max);
4513 if (err)
4514 return err;
4515
4516 xchg(&memcg->memory.max, max);
4517
4518 if (of->file->f_flags & O_NONBLOCK)
4519 goto out;
4520
4521 for (;;) {
4522 unsigned long nr_pages = page_counter_read(&memcg->memory);
4523
4524 if (nr_pages <= max)
4525 break;
4526
4527 if (signal_pending(current))
4528 break;
4529
4530 if (!drained) {
4531 drain_all_stock(memcg);
4532 drained = true;
4533 continue;
4534 }
4535
4536 if (nr_reclaims) {
4537 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4538 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4539 nr_reclaims--;
4540 continue;
4541 }
4542
4543 memcg_memory_event(memcg, MEMCG_OOM);
4544 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4545 break;
4546 cond_resched();
4547 }
4548 out:
4549 memcg_wb_domain_size_changed(memcg);
4550 return nbytes;
4551 }
4552
4553 /*
4554 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4555 * if any new events become available.
4556 */
4557 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4558 {
4559 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4560 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4561 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4562 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4563 seq_printf(m, "oom_kill %lu\n",
4564 atomic_long_read(&events[MEMCG_OOM_KILL]));
4565 seq_printf(m, "oom_group_kill %lu\n",
4566 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4567 seq_printf(m, "sock_throttled %lu\n",
4568 atomic_long_read(&events[MEMCG_SOCK_THROTTLED]));
4569 }
4570
4571 static int memory_events_show(struct seq_file *m, void *v)
4572 {
4573 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4574
4575 __memory_events_show(m, memcg->memory_events);
4576 return 0;
4577 }
4578
4579 static int memory_events_local_show(struct seq_file *m, void *v)
4580 {
4581 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4582
4583 __memory_events_show(m, memcg->memory_events_local);
4584 return 0;
4585 }
4586
4587 int memory_stat_show(struct seq_file *m, void *v)
4588 {
4589 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4590 char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4591 struct seq_buf s;
4592
4593 if (!buf)
4594 return -ENOMEM;
4595 seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4596 memory_stat_format(memcg, &s);
4597 seq_puts(m, buf);
4598 kfree(buf);
4599 return 0;
4600 }
4601
4602 #ifdef CONFIG_NUMA
4603 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4604 int item)
4605 {
4606 return lruvec_page_state(lruvec, item) *
4607 memcg_page_state_output_unit(item);
4608 }
4609
4610 static int memory_numa_stat_show(struct seq_file *m, void *v)
4611 {
4612 int i;
4613 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4614
4615 mem_cgroup_flush_stats(memcg);
4616
4617 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4618 int nid;
4619
4620 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4621 continue;
4622
4623 seq_printf(m, "%s", memory_stats[i].name);
4624 for_each_node_state(nid, N_MEMORY) {
4625 u64 size;
4626 struct lruvec *lruvec;
4627
4628 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4629 size = lruvec_page_state_output(lruvec,
4630 memory_stats[i].idx);
4631 seq_printf(m, " N%d=%llu", nid, size);
4632 }
4633 seq_putc(m, '\n');
4634 }
4635
4636 return 0;
4637 }
4638 #endif
4639
4640 static int memory_oom_group_show(struct seq_file *m, void *v)
4641 {
4642 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4643
4644 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4645
4646 return 0;
4647 }
4648
4649 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4650 char *buf, size_t nbytes, loff_t off)
4651 {
4652 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4653 int ret, oom_group;
4654
4655 buf = strstrip(buf);
4656 if (!buf)
4657 return -EINVAL;
4658
4659 ret = kstrtoint(buf, 0, &oom_group);
4660 if (ret)
4661 return ret;
4662
4663 if (oom_group != 0 && oom_group != 1)
4664 return -EINVAL;
4665
4666 WRITE_ONCE(memcg->oom_group, oom_group);
4667
4668 return nbytes;
4669 }
4670
4671 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4672 size_t nbytes, loff_t off)
4673 {
4674 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4675 int ret;
4676
4677 ret = user_proactive_reclaim(buf, memcg, NULL);
4678 if (ret)
4679 return ret;
4680
4681 return nbytes;
4682 }
4683
4684 static struct cftype memory_files[] = {
4685 {
4686 .name = "current",
4687 .flags = CFTYPE_NOT_ON_ROOT,
4688 .read_u64 = memory_current_read,
4689 },
4690 {
4691 .name = "peak",
4692 .flags = CFTYPE_NOT_ON_ROOT,
4693 .open = peak_open,
4694 .release = peak_release,
4695 .seq_show = memory_peak_show,
4696 .write = memory_peak_write,
4697 },
4698 {
4699 .name = "min",
4700 .flags = CFTYPE_NOT_ON_ROOT,
4701 .seq_show = memory_min_show,
4702 .write = memory_min_write,
4703 },
4704 {
4705 .name = "low",
4706 .flags = CFTYPE_NOT_ON_ROOT,
4707 .seq_show = memory_low_show,
4708 .write = memory_low_write,
4709 },
4710 {
4711 .name = "high",
4712 .flags = CFTYPE_NOT_ON_ROOT,
4713 .seq_show = memory_high_show,
4714 .write = memory_high_write,
4715 },
4716 {
4717 .name = "max",
4718 .flags = CFTYPE_NOT_ON_ROOT,
4719 .seq_show = memory_max_show,
4720 .write = memory_max_write,
4721 },
4722 {
4723 .name = "events",
4724 .flags = CFTYPE_NOT_ON_ROOT,
4725 .file_offset = offsetof(struct mem_cgroup, events_file),
4726 .seq_show = memory_events_show,
4727 },
4728 {
4729 .name = "events.local",
4730 .flags = CFTYPE_NOT_ON_ROOT,
4731 .file_offset = offsetof(struct mem_cgroup, events_local_file),
4732 .seq_show = memory_events_local_show,
4733 },
4734 {
4735 .name = "stat",
4736 .seq_show = memory_stat_show,
4737 },
4738 #ifdef CONFIG_NUMA
4739 {
4740 .name = "numa_stat",
4741 .seq_show = memory_numa_stat_show,
4742 },
4743 #endif
4744 {
4745 .name = "oom.group",
4746 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4747 .seq_show = memory_oom_group_show,
4748 .write = memory_oom_group_write,
4749 },
4750 {
4751 .name = "reclaim",
4752 .flags = CFTYPE_NS_DELEGATABLE,
4753 .write = memory_reclaim,
4754 },
4755 { } /* terminate */
4756 };
4757
4758 struct cgroup_subsys memory_cgrp_subsys = {
4759 .css_alloc = mem_cgroup_css_alloc,
4760 .css_online = mem_cgroup_css_online,
4761 .css_offline = mem_cgroup_css_offline,
4762 .css_released = mem_cgroup_css_released,
4763 .css_free = mem_cgroup_css_free,
4764 .css_reset = mem_cgroup_css_reset,
4765 .css_rstat_flush = mem_cgroup_css_rstat_flush,
4766 .attach = mem_cgroup_attach,
4767 .fork = mem_cgroup_fork,
4768 .exit = mem_cgroup_exit,
4769 .dfl_cftypes = memory_files,
4770 #ifdef CONFIG_MEMCG_V1
4771 .legacy_cftypes = mem_cgroup_legacy_files,
4772 #endif
4773 .early_init = 0,
4774 };
4775
4776 /**
4777 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4778 * @root: the top ancestor of the sub-tree being checked
4779 * @memcg: the memory cgroup to check
4780 *
4781 * WARNING: This function is not stateless! It can only be used as part
4782 * of a top-down tree iteration, not for isolated queries.
4783 */
4784 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4785 struct mem_cgroup *memcg)
4786 {
4787 bool recursive_protection =
4788 cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4789
4790 if (mem_cgroup_disabled())
4791 return;
4792
4793 if (!root)
4794 root = root_mem_cgroup;
4795
4796 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4797 }
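/*
 * Illustrative sketch of the top-down iteration the WARNING above refers
 * to (assumption: a reclaim loop shaped like shrink_node_memcgs()):
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		if (mem_cgroup_below_min(root, memcg))
 *			continue;	// hard protection, skip this group
 *		// ... reclaim from memcg ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * Calling it for an isolated memcg outside such a walk would rely on stale
 * effective protection values in the ancestors.
 */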
4798
4799 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4800 gfp_t gfp)
4801 {
4802 int ret;
4803
4804 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4805 if (ret)
4806 goto out;
4807
4808 css_get(&memcg->css);
4809 commit_charge(folio, memcg);
4810 memcg1_commit_charge(folio, memcg);
4811 out:
4812 return ret;
4813 }
4814
4815 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4816 {
4817 struct mem_cgroup *memcg;
4818 int ret;
4819
4820 memcg = get_mem_cgroup_from_mm(mm);
4821 ret = charge_memcg(folio, memcg, gfp);
4822 css_put(&memcg->css);
4823
4824 return ret;
4825 }
4826
4827 /**
4828 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4829 * @folio: folio being charged
4830 * @gfp: reclaim mode
4831 *
4832 * This function is called when allocating a huge page folio, after the page has
4833 * already been obtained and charged to the appropriate hugetlb cgroup
4834 * controller (if it is enabled).
4835 *
4836 * Returns -ENOMEM if the memcg is already full.
4837 * Returns 0 if either the charge was successful, or if we skip the charging.
4838 */
4839 int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
4840 {
4841 struct mem_cgroup *memcg = get_mem_cgroup_from_current();
4842 int ret = 0;
4843
4844 /*
4845 * Even if memcg does not account for hugetlb, we still want to update
4846 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
4847 * charging the memcg.
4848 */
4849 if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
4850 !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
4851 goto out;
4852
4853 if (charge_memcg(folio, memcg, gfp))
4854 ret = -ENOMEM;
4855
4856 out:
4857 mem_cgroup_put(memcg);
4858 return ret;
4859 }
4860
4861 /**
4862 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4863 * @folio: folio to charge.
4864 * @mm: mm context of the victim
4865 * @gfp: reclaim mode
4866 * @entry: swap entry for which the folio is allocated
4867 *
4868 * This function charges a folio allocated for swapin. Please call this before
4869 * adding the folio to the swapcache.
4870 *
4871 * Returns 0 on success. Otherwise, an error code is returned.
4872 */
4873 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4874 gfp_t gfp, swp_entry_t entry)
4875 {
4876 struct mem_cgroup *memcg;
4877 unsigned short id;
4878 int ret;
4879
4880 if (mem_cgroup_disabled())
4881 return 0;
4882
4883 id = lookup_swap_cgroup_id(entry);
4884 rcu_read_lock();
4885 memcg = mem_cgroup_from_private_id(id);
4886 if (!memcg || !css_tryget_online(&memcg->css))
4887 memcg = get_mem_cgroup_from_mm(mm);
4888 rcu_read_unlock();
4889
4890 ret = charge_memcg(folio, memcg, gfp);
4891
4892 css_put(&memcg->css);
4893 return ret;
4894 }
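/*
 * Illustrative sketch of the ordering the kernel-doc above asks for
 * (assumption: a swapin path shaped like __read_swap_cache_async()); the
 * charge must land before the folio becomes reachable via the swapcache:
 *
 *	folio = folio_alloc(gfp, 0);
 *	if (!folio)
 *		return NULL;
 *	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp, entry))
 *		goto fail_put;
 *	if (add_to_swap_cache(folio, entry, gfp, &shadow))
 *		goto fail_put;
 */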
4895
4896 struct uncharge_gather {
4897 struct mem_cgroup *memcg;
4898 unsigned long nr_memory;
4899 unsigned long pgpgout;
4900 unsigned long nr_kmem;
4901 int nid;
4902 };
4903
4904 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4905 {
4906 memset(ug, 0, sizeof(*ug));
4907 }
4908
4909 static void uncharge_batch(const struct uncharge_gather *ug)
4910 {
4911 if (ug->nr_memory) {
4912 memcg_uncharge(ug->memcg, ug->nr_memory);
4913 if (ug->nr_kmem) {
4914 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4915 memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4916 }
4917 memcg1_oom_recover(ug->memcg);
4918 }
4919
4920 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4921
4922 /* drop reference from uncharge_folio */
4923 css_put(&ug->memcg->css);
4924 }
4925
4926 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4927 {
4928 long nr_pages;
4929 struct mem_cgroup *memcg;
4930 struct obj_cgroup *objcg;
4931
4932 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4933
4934 /*
4935 * Nobody should be changing or seriously looking at
4936 * folio memcg or objcg at this point, we have fully
4937 * exclusive access to the folio.
4938 */
4939 if (folio_memcg_kmem(folio)) {
4940 objcg = __folio_objcg(folio);
4941 /*
4942 * This get matches the put at the end of the function and
4943 * kmem pages do not hold memcg references anymore.
4944 */
4945 memcg = get_mem_cgroup_from_objcg(objcg);
4946 } else {
4947 memcg = __folio_memcg(folio);
4948 }
4949
4950 if (!memcg)
4951 return;
4952
4953 if (ug->memcg != memcg) {
4954 if (ug->memcg) {
4955 uncharge_batch(ug);
4956 uncharge_gather_clear(ug);
4957 }
4958 ug->memcg = memcg;
4959 ug->nid = folio_nid(folio);
4960
4961 /* pairs with css_put in uncharge_batch */
4962 css_get(&memcg->css);
4963 }
4964
4965 nr_pages = folio_nr_pages(folio);
4966
4967 if (folio_memcg_kmem(folio)) {
4968 ug->nr_memory += nr_pages;
4969 ug->nr_kmem += nr_pages;
4970
4971 folio->memcg_data = 0;
4972 obj_cgroup_put(objcg);
4973 } else {
4974 /* LRU pages aren't accounted at the root level */
4975 if (!mem_cgroup_is_root(memcg))
4976 ug->nr_memory += nr_pages;
4977 ug->pgpgout++;
4978
4979 WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4980 folio->memcg_data = 0;
4981 }
4982
4983 css_put(&memcg->css);
4984 }
4985
4986 void __mem_cgroup_uncharge(struct folio *folio)
4987 {
4988 struct uncharge_gather ug;
4989
4990 /* Don't touch folio->lru of any random page, pre-check: */
4991 if (!folio_memcg_charged(folio))
4992 return;
4993
4994 uncharge_gather_clear(&ug);
4995 uncharge_folio(folio, &ug);
4996 uncharge_batch(&ug);
4997 }
4998
4999 void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
5000 {
5001 struct uncharge_gather ug;
5002 unsigned int i;
5003
5004 uncharge_gather_clear(&ug);
5005 for (i = 0; i < folios->nr; i++)
5006 uncharge_folio(folios->folios[i], &ug);
5007 if (ug.memcg)
5008 uncharge_batch(&ug);
5009 }
5010
5011 /**
5012 * mem_cgroup_replace_folio - Charge a folio's replacement.
5013 * @old: Currently circulating folio.
5014 * @new: Replacement folio.
5015 *
5016 * Charge @new as a replacement folio for @old. @old will
5017 * be uncharged upon free.
5018 *
5019 * Both folios must be locked, @new->mapping must be set up.
5020 */
5021 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
5022 {
5023 struct mem_cgroup *memcg;
5024 long nr_pages = folio_nr_pages(new);
5025
5026 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5027 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5028 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5029 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
5030
5031 if (mem_cgroup_disabled())
5032 return;
5033
5034 /* Page cache replacement: new folio already charged? */
5035 if (folio_memcg_charged(new))
5036 return;
5037
5038 memcg = folio_memcg(old);
5039 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
5040 if (!memcg)
5041 return;
5042
5043 /* Force-charge the new page. The old one will be freed soon */
5044 if (!mem_cgroup_is_root(memcg)) {
5045 page_counter_charge(&memcg->memory, nr_pages);
5046 if (do_memsw_account())
5047 page_counter_charge(&memcg->memsw, nr_pages);
5048 }
5049
5050 css_get(&memcg->css);
5051 commit_charge(new, memcg);
5052 memcg1_commit_charge(new, memcg);
5053 }
5054
5055 /**
5056 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
5057 * @old: Currently circulating folio.
5058 * @new: Replacement folio.
5059 *
5060 * Transfer the memcg data from the old folio to the new folio for migration.
5061 * The old folio's data info will be cleared. Note that the memory counters
5062 * will remain unchanged throughout the process.
5063 *
5064 * Both folios must be locked, @new->mapping must be set up.
5065 */
5066 void mem_cgroup_migrate(struct folio *old, struct folio *new)
5067 {
5068 struct mem_cgroup *memcg;
5069
5070 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
5071 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
5072 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
5073 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
5074 VM_BUG_ON_FOLIO(folio_test_lru(old), old);
5075
5076 if (mem_cgroup_disabled())
5077 return;
5078
5079 memcg = folio_memcg(old);
5080 /*
5081 * Note that it is normal to see !memcg for a hugetlb folio.
5082 * For e.g, it could have been allocated when memory_hugetlb_accounting
5083 * was not selected.
5084 */
5085 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
5086 if (!memcg)
5087 return;
5088
5089 /* Transfer the charge and the css ref */
5090 commit_charge(new, memcg);
5091
5092 /* Warning should never happen, so don't worry about refcount non-0 */
5093 WARN_ON_ONCE(folio_unqueue_deferred_split(old));
5094 old->memcg_data = 0;
5095 }
5096
5097 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5098 EXPORT_SYMBOL(memcg_sockets_enabled_key);
5099
5100 void mem_cgroup_sk_alloc(struct sock *sk)
5101 {
5102 struct mem_cgroup *memcg;
5103
5104 if (!mem_cgroup_sockets_enabled)
5105 return;
5106
5107 /* Do not associate the sock with unrelated interrupted task's memcg. */
5108 if (!in_task())
5109 return;
5110
5111 rcu_read_lock();
5112 memcg = mem_cgroup_from_task(current);
5113 if (mem_cgroup_is_root(memcg))
5114 goto out;
5115 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
5116 goto out;
5117 if (css_tryget(&memcg->css))
5118 sk->sk_memcg = memcg;
5119 out:
5120 rcu_read_unlock();
5121 }
5122
5123 void mem_cgroup_sk_free(struct sock *sk)
5124 {
5125 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5126
5127 if (memcg)
5128 css_put(&memcg->css);
5129 }
5130
5131 void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
5132 {
5133 struct mem_cgroup *memcg;
5134
5135 if (sk->sk_memcg == newsk->sk_memcg)
5136 return;
5137
5138 mem_cgroup_sk_free(newsk);
5139
5140 memcg = mem_cgroup_from_sk(sk);
5141 if (memcg)
5142 css_get(&memcg->css);
5143
5144 newsk->sk_memcg = sk->sk_memcg;
5145 }
5146
5147 /**
5148 * mem_cgroup_sk_charge - charge socket memory
5149 * @sk: socket in memcg to charge
5150 * @nr_pages: number of pages to charge
5151 * @gfp_mask: reclaim mode
5152 *
5153 * Charges @nr_pages to the memcg of @sk. Returns %true if the charge fit
5154 * within that memcg's configured limit, %false if it doesn't.
5155 */
5156 bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
5157 gfp_t gfp_mask)
5158 {
5159 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5160
5161 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5162 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
5163
5164 if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
5165 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5166 return true;
5167 }
5168
5169 return false;
5170 }
5171
5172 /**
5173 * mem_cgroup_sk_uncharge - uncharge socket memory
5174 * @sk: socket in memcg to uncharge
5175 * @nr_pages: number of pages to uncharge
5176 */
5177 void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages)
5178 {
5179 struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
5180
5181 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5182 memcg1_uncharge_skmem(memcg, nr_pages);
5183 return;
5184 }
5185
5186 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5187
5188 refill_stock(memcg, nr_pages);
5189 }
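/*
 * Illustrative sketch of the expected pairing of mem_cgroup_sk_charge() and
 * mem_cgroup_sk_uncharge() above (assumption: a networking caller along the
 * lines of __sk_mem_raise_allocated(), charging and uncharging balanced
 * amounts in page units):
 *
 *	if (sk->sk_memcg &&
 *	    !mem_cgroup_sk_charge(sk, pages, gfp_memcg_charge()))
 *		goto suppress_allocation;
 *	...
 *	mem_cgroup_sk_uncharge(sk, pages);	// when the memory is released
 */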
5190
5191 void mem_cgroup_flush_workqueue(void)
5192 {
5193 flush_workqueue(memcg_wq);
5194 }
5195
5196 static int __init cgroup_memory(char *s)
5197 {
5198 char *token;
5199
5200 while ((token = strsep(&s, ",")) != NULL) {
5201 if (!*token)
5202 continue;
5203 if (!strcmp(token, "nosocket"))
5204 cgroup_memory_nosocket = true;
5205 if (!strcmp(token, "nokmem"))
5206 cgroup_memory_nokmem = true;
5207 if (!strcmp(token, "nobpf"))
5208 cgroup_memory_nobpf = true;
5209 }
5210 return 1;
5211 }
5212 __setup("cgroup.memory=", cgroup_memory);
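/*
 * Boot-time usage (see Documentation/admin-guide/kernel-parameters.txt):
 * any comma-separated subset of the tokens parsed above may be passed on
 * the kernel command line, e.g. "cgroup.memory=nosocket,nokmem,nobpf" to
 * disable socket, kernel and BPF memory accounting respectively.
 */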
5213
5214 /*
5215 * Memory controller init before cgroup_init() initializes root_mem_cgroup.
5216 *
5217 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5218 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5219 * basically everything that doesn't depend on a specific mem_cgroup structure
5220 * should be initialized from here.
5221 */
5222 int __init mem_cgroup_init(void)
5223 {
5224 unsigned int memcg_size;
5225 int cpu;
5226
5227 /*
5228 * Currently s32 type (can refer to struct batched_lruvec_stat) is
5229 * used for per-memcg-per-cpu caching of per-node statistics. In order
5230 * to work fine, we should make sure that the overfill threshold can't
5231 * exceed S32_MAX / PAGE_SIZE.
5232 */
5233 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
5234
5235 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5236 memcg_hotplug_cpu_dead);
5237
5238 memcg_wq = alloc_workqueue("memcg", WQ_PERCPU, 0);
5239 WARN_ON(!memcg_wq);
5240
5241 for_each_possible_cpu(cpu) {
5242 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5243 drain_local_memcg_stock);
5244 INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work,
5245 drain_local_obj_stock);
5246 }
5247
5248 memcg_size = struct_size_t(struct mem_cgroup, nodeinfo, nr_node_ids);
5249 memcg_cachep = kmem_cache_create("mem_cgroup", memcg_size, 0,
5250 SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
5251
5252 memcg_pn_cachep = KMEM_CACHE(mem_cgroup_per_node,
5253 SLAB_PANIC | SLAB_HWCACHE_ALIGN);
5254
5255 return 0;
5256 }
5257
5258 #ifdef CONFIG_SWAP
5259 /**
5260 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5261 * @folio: folio being added to swap
5262 * @entry: swap entry to charge
5263 *
5264 * Try to charge @folio's memcg for the swap space at @entry.
5265 *
5266 * Returns 0 on success, -ENOMEM on failure.
5267 */
5268 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5269 {
5270 unsigned int nr_pages = folio_nr_pages(folio);
5271 struct page_counter *counter;
5272 struct mem_cgroup *memcg;
5273
5274 if (do_memsw_account())
5275 return 0;
5276
5277 memcg = folio_memcg(folio);
5278
5279 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5280 if (!memcg)
5281 return 0;
5282
5283 if (!entry.val) {
5284 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5285 return 0;
5286 }
5287
5288 memcg = mem_cgroup_private_id_get_online(memcg, nr_pages);
5289
5290 if (!mem_cgroup_is_root(memcg) &&
5291 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5292 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5293 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5294 mem_cgroup_private_id_put(memcg, nr_pages);
5295 return -ENOMEM;
5296 }
5297 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5298
5299 swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
5300
5301 return 0;
5302 }
5303
5304 /**
5305 * __mem_cgroup_uncharge_swap - uncharge swap space
5306 * @entry: swap entry to uncharge
5307 * @nr_pages: the amount of swap space to uncharge
5308 */
5309 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5310 {
5311 struct mem_cgroup *memcg;
5312 unsigned short id;
5313
5314 id = swap_cgroup_clear(entry, nr_pages);
5315 rcu_read_lock();
5316 memcg = mem_cgroup_from_private_id(id);
5317 if (memcg) {
5318 if (!mem_cgroup_is_root(memcg)) {
5319 if (do_memsw_account())
5320 page_counter_uncharge(&memcg->memsw, nr_pages);
5321 else
5322 page_counter_uncharge(&memcg->swap, nr_pages);
5323 }
5324 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5325 mem_cgroup_private_id_put(memcg, nr_pages);
5326 }
5327 rcu_read_unlock();
5328 }
5329
5330 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5331 {
5332 long nr_swap_pages = get_nr_swap_pages();
5333
5334 if (mem_cgroup_disabled() || do_memsw_account())
5335 return nr_swap_pages;
5336 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5337 nr_swap_pages = min_t(long, nr_swap_pages,
5338 READ_ONCE(memcg->swap.max) -
5339 page_counter_read(&memcg->swap));
5340 return nr_swap_pages;
5341 }
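/*
 * The value returned above is the global free swap clamped by the tightest
 * remaining allowance on the path to the root. Purely illustrative example:
 * if an ancestor has swap.max equivalent to 1G with 768M already in use, at
 * most 256M worth of pages is reported for this memcg, even when more swap
 * is free globally.
 */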
5342
5343 bool mem_cgroup_swap_full(struct folio *folio)
5344 {
5345 struct mem_cgroup *memcg;
5346
5347 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5348
5349 if (vm_swap_full())
5350 return true;
5351 if (do_memsw_account())
5352 return false;
5353
5354 memcg = folio_memcg(folio);
5355 if (!memcg)
5356 return false;
5357
5358 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5359 unsigned long usage = page_counter_read(&memcg->swap);
5360
5361 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5362 usage * 2 >= READ_ONCE(memcg->swap.max))
5363 return true;
5364 }
5365
5366 return false;
5367 }
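/*
 * Note on the check above: the hierarchy is treated as "swap full" for this
 * folio once any ancestor has used at least half of its swap.high or
 * swap.max budget (or the global swap space itself is nearly full), giving
 * callers an early signal to free swap entries before a hard limit is hit.
 */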
5368
5369 static int __init setup_swap_account(char *s)
5370 {
5371 bool res;
5372
5373 if (!kstrtobool(s, &res) && !res)
5374 pr_warn_once("The swapaccount=0 commandline option is deprecated "
5375 "in favor of configuring swap control via cgroupfs. "
5376 "Please report your usecase to linux-mm@kvack.org if you "
5377 "depend on this functionality.\n");
5378 return 1;
5379 }
5380 __setup("swapaccount=", setup_swap_account);
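/*
 * For illustration: booting with "swapaccount=0" no longer disables swap
 * accounting; the handler above only emits the deprecation warning. Values
 * that parse as true, or fail to parse, are accepted silently.
 */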
5381
5382 static u64 swap_current_read(struct cgroup_subsys_state *css,
5383 struct cftype *cft)
5384 {
5385 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5386
5387 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5388 }
5389
5390 static int swap_peak_show(struct seq_file *sf, void *v)
5391 {
5392 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5393
5394 return peak_show(sf, v, &memcg->swap);
5395 }
5396
5397 static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5398 size_t nbytes, loff_t off)
5399 {
5400 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5401
5402 return peak_write(of, buf, nbytes, off, &memcg->swap,
5403 &memcg->swap_peaks);
5404 }
5405
5406 static int swap_high_show(struct seq_file *m, void *v)
5407 {
5408 return seq_puts_memcg_tunable(m,
5409 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5410 }
5411
5412 static ssize_t swap_high_write(struct kernfs_open_file *of,
5413 char *buf, size_t nbytes, loff_t off)
5414 {
5415 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5416 unsigned long high;
5417 int err;
5418
5419 buf = strstrip(buf);
5420 err = page_counter_memparse(buf, "max", &high);
5421 if (err)
5422 return err;
5423
5424 page_counter_set_high(&memcg->swap, high);
5425
5426 return nbytes;
5427 }
5428
5429 static int swap_max_show(struct seq_file *m, void *v)
5430 {
5431 return seq_puts_memcg_tunable(m,
5432 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5433 }
5434
5435 static ssize_t swap_max_write(struct kernfs_open_file *of,
5436 char *buf, size_t nbytes, loff_t off)
5437 {
5438 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5439 unsigned long max;
5440 int err;
5441
5442 buf = strstrip(buf);
5443 err = page_counter_memparse(buf, "max", &max);
5444 if (err)
5445 return err;
5446
5447 xchg(&memcg->swap.max, max);
5448
5449 return nbytes;
5450 }
5451
5452 static int swap_events_show(struct seq_file *m, void *v)
5453 {
5454 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5455
5456 seq_printf(m, "high %lu\n",
5457 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5458 seq_printf(m, "max %lu\n",
5459 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5460 seq_printf(m, "fail %lu\n",
5461 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5462
5463 return 0;
5464 }
5465
5466 static struct cftype swap_files[] = {
5467 {
5468 .name = "swap.current",
5469 .flags = CFTYPE_NOT_ON_ROOT,
5470 .read_u64 = swap_current_read,
5471 },
5472 {
5473 .name = "swap.high",
5474 .flags = CFTYPE_NOT_ON_ROOT,
5475 .seq_show = swap_high_show,
5476 .write = swap_high_write,
5477 },
5478 {
5479 .name = "swap.max",
5480 .flags = CFTYPE_NOT_ON_ROOT,
5481 .seq_show = swap_max_show,
5482 .write = swap_max_write,
5483 },
5484 {
5485 .name = "swap.peak",
5486 .flags = CFTYPE_NOT_ON_ROOT,
5487 .open = peak_open,
5488 .release = peak_release,
5489 .seq_show = swap_peak_show,
5490 .write = swap_peak_write,
5491 },
5492 {
5493 .name = "swap.events",
5494 .flags = CFTYPE_NOT_ON_ROOT,
5495 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
5496 .seq_show = swap_events_show,
5497 },
5498 { } /* terminate */
5499 };
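/*
 * The entries above surface as memory.swap.* files on the cgroup2
 * hierarchy. A purely illustrative session (paths depend on where the
 * hierarchy is mounted and which cgroup is used):
 *
 *	echo 512M > /sys/fs/cgroup/example/memory.swap.max
 *	echo 256M > /sys/fs/cgroup/example/memory.swap.high
 *	cat /sys/fs/cgroup/example/memory.swap.current
 *	cat /sys/fs/cgroup/example/memory.swap.events
 *
 * Writing "max" to either limit file restores the default of no limit.
 */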
5500
5501 #ifdef CONFIG_ZSWAP
5502 /**
5503 * obj_cgroup_may_zswap - check if this cgroup can zswap
5504 * @objcg: the object cgroup
5505 *
5506 * Check if the hierarchical zswap limit has been reached.
5507 *
5508 * This doesn't check for specific headroom, and it is not atomic
5509 * either. But with zswap, the size of the allocation is only known
5510 * once compression has occurred, and this optimistic pre-check avoids
5511 * spending cycles on compression when there is already no room left
5512 * or zswap is disabled altogether somewhere in the hierarchy.
5513 */
5514 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5515 {
5516 struct mem_cgroup *memcg, *original_memcg;
5517 bool ret = true;
5518
5519 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5520 return true;
5521
5522 original_memcg = get_mem_cgroup_from_objcg(objcg);
5523 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5524 memcg = parent_mem_cgroup(memcg)) {
5525 unsigned long max = READ_ONCE(memcg->zswap_max);
5526 unsigned long pages;
5527
5528 if (max == PAGE_COUNTER_MAX)
5529 continue;
5530 if (max == 0) {
5531 ret = false;
5532 break;
5533 }
5534
5535 /* Force flush to get accurate stats for charging */
5536 __mem_cgroup_flush_stats(memcg, true);
5537 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5538 if (pages < max)
5539 continue;
5540 ret = false;
5541 break;
5542 }
5543 mem_cgroup_put(original_memcg);
5544 return ret;
5545 }
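/*
 * Two special cases fall out of the walk above: an ancestor whose
 * zswap.max is still PAGE_COUNTER_MAX (the default) is skipped without
 * flushing stats, and an ancestor with zswap.max == 0 disables zswap for
 * the whole subtree regardless of current usage.
 */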
5546
5547 /**
5548 * obj_cgroup_charge_zswap - charge compression backend memory
5549 * @objcg: the object cgroup
5550 * @size: size of compressed object
5551 *
5552 * This forces the charge after obj_cgroup_may_zswap() allowed
5553 * compression and storage in zswap for this cgroup to go ahead.
5554 */
5555 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5556 {
5557 struct mem_cgroup *memcg;
5558
5559 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5560 return;
5561
5562 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5563
5564 /* PF_MEMALLOC context, charging must succeed */
5565 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5566 VM_WARN_ON_ONCE(1);
5567
5568 rcu_read_lock();
5569 memcg = obj_cgroup_memcg(objcg);
5570 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5571 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5572 if (size == PAGE_SIZE)
5573 mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, 1);
5574 rcu_read_unlock();
5575 }
5576
5577 /**
5578 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5579 * @objcg: the object cgroup
5580 * @size: size of compressed object
5581 *
5582 * Uncharges zswap memory on page in.
5583 */
5584 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5585 {
5586 struct mem_cgroup *memcg;
5587
5588 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5589 return;
5590
5591 obj_cgroup_uncharge(objcg, size);
5592
5593 rcu_read_lock();
5594 memcg = obj_cgroup_memcg(objcg);
5595 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5596 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5597 if (size == PAGE_SIZE)
5598 mod_memcg_state(memcg, MEMCG_ZSWAP_INCOMP, -1);
5599 rcu_read_unlock();
5600 }
5601
5602 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5603 {
5604 /* if zswap is disabled, do not block pages going to the swapping device */
5605 if (!zswap_is_enabled())
5606 return true;
5607
5608 for (; memcg; memcg = parent_mem_cgroup(memcg))
5609 if (!READ_ONCE(memcg->zswap_writeback))
5610 return false;
5611
5612 return true;
5613 }
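/*
 * In other words, writeback from zswap to the backing swap device is
 * permitted only if zswap.writeback is enabled in this memcg and in every
 * ancestor; when zswap itself is disabled the question is moot and
 * writeback is always allowed.
 */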
5614
5615 static u64 zswap_current_read(struct cgroup_subsys_state *css,
5616 struct cftype *cft)
5617 {
5618 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5619
5620 mem_cgroup_flush_stats(memcg);
5621 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5622 }
5623
5624 static int zswap_max_show(struct seq_file *m, void *v)
5625 {
5626 return seq_puts_memcg_tunable(m,
5627 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5628 }
5629
5630 static ssize_t zswap_max_write(struct kernfs_open_file *of,
5631 char *buf, size_t nbytes, loff_t off)
5632 {
5633 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5634 unsigned long max;
5635 int err;
5636
5637 buf = strstrip(buf);
5638 err = page_counter_memparse(buf, "max", &max);
5639 if (err)
5640 return err;
5641
5642 xchg(&memcg->zswap_max, max);
5643
5644 return nbytes;
5645 }
5646
5647 static int zswap_writeback_show(struct seq_file *m, void *v)
5648 {
5649 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5650
5651 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5652 return 0;
5653 }
5654
5655 static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5656 char *buf, size_t nbytes, loff_t off)
5657 {
5658 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5659 int zswap_writeback;
5660 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5661
5662 if (parse_ret)
5663 return parse_ret;
5664
5665 if (zswap_writeback != 0 && zswap_writeback != 1)
5666 return -EINVAL;
5667
5668 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5669 return nbytes;
5670 }
5671
5672 static struct cftype zswap_files[] = {
5673 {
5674 .name = "zswap.current",
5675 .flags = CFTYPE_NOT_ON_ROOT,
5676 .read_u64 = zswap_current_read,
5677 },
5678 {
5679 .name = "zswap.max",
5680 .flags = CFTYPE_NOT_ON_ROOT,
5681 .seq_show = zswap_max_show,
5682 .write = zswap_max_write,
5683 },
5684 {
5685 .name = "zswap.writeback",
5686 .seq_show = zswap_writeback_show,
5687 .write = zswap_writeback_write,
5688 },
5689 { } /* terminate */
5690 };
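/*
 * Illustrative cgroup2 usage of the files above (paths hypothetical):
 *
 *	echo 128M > /sys/fs/cgroup/example/memory.zswap.max
 *	echo 0 > /sys/fs/cgroup/example/memory.zswap.writeback
 *
 * The first caps compressed zswap memory for the subtree; the second keeps
 * zswap from writing pages back to the backing swap device for this cgroup
 * and its descendants.
 */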
5691 #endif /* CONFIG_ZSWAP */
5692
5693 static int __init mem_cgroup_swap_init(void)
5694 {
5695 if (mem_cgroup_disabled())
5696 return 0;
5697
5698 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5699 #ifdef CONFIG_MEMCG_V1
5700 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5701 #endif
5702 #ifdef CONFIG_ZSWAP
5703 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5704 #endif
5705 return 0;
5706 }
5707 subsys_initcall(mem_cgroup_swap_init);
5708
5709 #endif /* CONFIG_SWAP */
5710
5711 void mem_cgroup_node_filter_allowed(struct mem_cgroup *memcg, nodemask_t *mask)
5712 {
5713 nodemask_t allowed;
5714
5715 if (!memcg)
5716 return;
5717
5718 /*
5719 * Since this interface is intended for use by migration paths, and
5720 * reclaim and migration are subject to race conditions such as changes
5721 	 * in effective_mems and hot-unplugging of nodes, an inaccurate allowed
5722 	 * mask is acceptable.
5723 */
5724 cpuset_nodes_allowed(memcg->css.cgroup, &allowed);
5725 nodes_and(*mask, *mask, allowed);
5726 }
5727
5728 void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
5729 {
5730 if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5731 return;
5732
5733 if (!memcg)
5734 memcg = root_mem_cgroup;
5735
5736 	pr_warn("Memory cgroup min protection %lukB -- low protection %lukB\n",
5737 K(atomic_long_read(&memcg->memory.children_min_usage)),
5738 K(atomic_long_read(&memcg->memory.children_low_usage)));
5739 }
5740