/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef __MM_MEMCONTROL_V1_H
#define __MM_MEMCONTROL_V1_H

#include <linux/cgroup-defs.h>

/* Cgroup v1 and v2 common declarations */

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

/* Same as above, but over every memcg in the system (root = NULL). */
#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
24 
25 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
26 
27 void drain_all_stock(struct mem_cgroup *root_memcg);
28 
29 unsigned long memcg_events(struct mem_cgroup *memcg, int event);
30 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
31 int memory_stat_show(struct seq_file *m, void *v);
32 
33 void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
34 struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg);

/* Cgroup v1-specific declarations */
#ifdef CONFIG_MEMCG_V1

/* Whether legacy memory+swap accounting is active */
do_memsw_account(void)40 static inline bool do_memsw_account(void)
41 {
42 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
43 }
44 
45 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
46 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
47 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
48 bool memcg1_alloc_events(struct mem_cgroup *memcg);
49 void memcg1_free_events(struct mem_cgroup *memcg);
50 
51 void memcg1_memcg_init(struct mem_cgroup *memcg);
52 void memcg1_remove_from_trees(struct mem_cgroup *memcg);
53 
memcg1_soft_limit_reset(struct mem_cgroup * memcg)54 static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
55 {
56 	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
57 }

struct cgroup_taskset;
void memcg1_css_offline(struct mem_cgroup *memcg);

/* for encoding cft->private value on file */
enum res_type {
	_MEM,		/* memory resource counter */
	_MEMSWAP,	/* memory+swap resource counter */
	_KMEM,		/* kernel memory resource counter */
	_TCP,		/* tcp socket memory resource counter */
};
69 
70 bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
71 void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
72 void memcg1_oom_recover(struct mem_cgroup *memcg);
73 
74 void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
75 void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
76 			   unsigned long nr_memory, int nid);
77 
78 void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
79 
80 void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages);
memcg1_tcpmem_active(struct mem_cgroup * memcg)81 static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg)
82 {
83 	return memcg->tcpmem_active;
84 }
85 bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
86 			 gfp_t gfp_mask);
memcg1_uncharge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages)87 static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
88 {
89 	page_counter_uncharge(&memcg->tcpmem, nr_pages);
90 }
91 
92 extern struct cftype memsw_files[];
93 extern struct cftype mem_cgroup_legacy_files[];

#else	/* CONFIG_MEMCG_V1 */
96 
do_memsw_account(void)97 static inline bool do_memsw_account(void) { return false; }
memcg1_alloc_events(struct mem_cgroup * memcg)98 static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
memcg1_free_events(struct mem_cgroup * memcg)99 static inline void memcg1_free_events(struct mem_cgroup *memcg) {}
100 
memcg1_memcg_init(struct mem_cgroup * memcg)101 static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
memcg1_remove_from_trees(struct mem_cgroup * memcg)102 static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
memcg1_soft_limit_reset(struct mem_cgroup * memcg)103 static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
memcg1_css_offline(struct mem_cgroup * memcg)104 static inline void memcg1_css_offline(struct mem_cgroup *memcg) {}
105 
memcg1_oom_prepare(struct mem_cgroup * memcg,bool * locked)106 static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) { return true; }
memcg1_oom_finish(struct mem_cgroup * memcg,bool locked)107 static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
memcg1_oom_recover(struct mem_cgroup * memcg)108 static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
109 
memcg1_commit_charge(struct folio * folio,struct mem_cgroup * memcg)110 static inline void memcg1_commit_charge(struct folio *folio,
111 					struct mem_cgroup *memcg) {}
112 
memcg1_uncharge_batch(struct mem_cgroup * memcg,unsigned long pgpgout,unsigned long nr_memory,int nid)113 static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
114 					 unsigned long pgpgout,
115 					 unsigned long nr_memory, int nid) {}
116 
memcg1_stat_format(struct mem_cgroup * memcg,struct seq_buf * s)117 static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
118 
memcg1_account_kmem(struct mem_cgroup * memcg,int nr_pages)119 static inline void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages) {}
memcg1_tcpmem_active(struct mem_cgroup * memcg)120 static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg) { return false; }
memcg1_charge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages,gfp_t gfp_mask)121 static inline bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
122 				       gfp_t gfp_mask) { return true; }
memcg1_uncharge_skmem(struct mem_cgroup * memcg,unsigned int nr_pages)123 static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) {}

#endif	/* CONFIG_MEMCG_V1 */

#endif	/* __MM_MEMCONTROL_V1_H */