1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 #ifndef __MM_MEMCONTROL_V1_H
4 #define __MM_MEMCONTROL_V1_H
5
6 #include <linux/cgroup-defs.h>
7
8 /* Cgroup v1 and v2 common declarations */
9
10 /*
11 * Iteration constructs for visiting all cgroups (under a tree). If
12 * loops are exited prematurely (break), mem_cgroup_iter_break() must
13 * be used for reference counting.
14 */
15 #define for_each_mem_cgroup_tree(iter, root) \
16 for (iter = mem_cgroup_iter(root, NULL, NULL); \
17 iter != NULL; \
18 iter = mem_cgroup_iter(root, iter, NULL))
19
20 #define for_each_mem_cgroup(iter) \
21 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
22 iter != NULL; \
23 iter = mem_cgroup_iter(NULL, iter, NULL))
24
25 void drain_all_stock(struct mem_cgroup *root_memcg);
26
27 unsigned long memcg_events(struct mem_cgroup *memcg, int event);
28 int memory_stat_show(struct seq_file *m, void *v);
29
30 void mem_cgroup_private_id_get_many(struct mem_cgroup *memcg, unsigned int n);
31 struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg);
32
33 /* Cgroup v1-specific declarations */
34 #ifdef CONFIG_MEMCG_V1
35
36 /* Whether legacy memory+swap accounting is active */
do_memsw_account(void)37 static inline bool do_memsw_account(void)
38 {
39 return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
40 }
41
42 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
43 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
44 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
45 bool memcg1_alloc_events(struct mem_cgroup *memcg);
46 void memcg1_free_events(struct mem_cgroup *memcg);
47
48 void memcg1_memcg_init(struct mem_cgroup *memcg);
49 void memcg1_remove_from_trees(struct mem_cgroup *memcg);
50
/*
 * Reset the v1 soft limit to "unlimited" (PAGE_COUNTER_MAX).
 * NOTE(review): WRITE_ONCE suggests soft_limit is read locklessly
 * elsewhere (paired READ_ONCE readers) — confirm against memcontrol.c.
 */
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
{
	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
}
55
56 struct cgroup_taskset;
57 void memcg1_css_offline(struct mem_cgroup *memcg);
58
/* for encoding cft->private value on file */
enum res_type {
	_MEM,		/* memory resource counter */
	_MEMSWAP,	/* memory+swap resource counter */
	_KMEM,		/* kernel memory resource counter */
	_TCP,		/* tcp socket memory resource counter */
};
66
67 bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
68 void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
69 void memcg1_oom_recover(struct mem_cgroup *memcg);
70
71 void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
72 void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
73 unsigned long nr_memory, int nid);
74
75 void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
76
77 void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages);
/* Whether v1 tcpmem accounting has been enabled for this memcg. */
static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg)
{
	return memcg->tcpmem_active;
}
82 bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
83 gfp_t gfp_mask);
/*
 * Uncharge @nr_pages of socket memory from the v1 tcpmem counter.
 * Counterpart of memcg1_charge_skmem().
 */
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	page_counter_uncharge(&memcg->tcpmem, nr_pages);
}
88
89 extern struct cftype memsw_files[];
90 extern struct cftype mem_cgroup_legacy_files[];
91
92 #else /* CONFIG_MEMCG_V1 */
93
/*
 * CONFIG_MEMCG_V1=n: no-op stubs so common memcg code can call the v1
 * hooks unconditionally, without #ifdefs at every call site.
 */
static inline bool do_memsw_account(void) { return false; }
/* Returns true (success): no v1 event state exists to allocate. */
static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
static inline void memcg1_free_events(struct mem_cgroup *memcg) {}

static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
static inline void memcg1_css_offline(struct mem_cgroup *memcg) {}

/* Returns true so the caller's OOM path proceeds without v1 handling. */
static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) { return true; }
static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}

static inline void memcg1_commit_charge(struct folio *folio,
					struct mem_cgroup *memcg) {}

static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
					 unsigned long pgpgout,
					 unsigned long nr_memory, int nid) {}

static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}

static inline void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages) {}
/* tcpmem is a v1-only mechanism; never active without CONFIG_MEMCG_V1. */
static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg) { return false; }
/* Charge "succeeds" trivially: nothing is accounted, nothing can fail. */
static inline bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
				       gfp_t gfp_mask) { return true; }
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) {}
121
122 #endif /* CONFIG_MEMCG_V1 */
123
124 #endif /* __MM_MEMCONTROL_V1_H */
125