#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE, /* charged as cache */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK, /* For races between move_account and the following bits */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	__NR_PCG_FLAGS,
};

#ifndef __GENERATING_BOUNDS_H
#include <generated/bounds.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>

/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 * All page cgroups are allocated at boot or at memory hotplug events,
 * so the page cgroup for a pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
};

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

struct page_cgroup *lookup_page_cgroup(struct page *page);
struct page *lookup_cgroup_page(struct page_cgroup *pc);
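
/*
 * As the comment above notes, a page_cgroup is allocated for every pfn, so
 * the two lookups above are intended to be inverses of each other. A sketch
 * (hypothetical check, not part of this header):
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *
 *	VM_BUG_ON(lookup_cgroup_page(pc) != page);
 */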

#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags);  }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags);  }

#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags);  }
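
/*
 * For illustration: each invocation below expands to a small static inline
 * helper named after the flag. For example, TESTPCGFLAG(Used, USED) and
 * SETPCGFLAG(Used, USED) generate:
 *
 *	static inline int PageCgroupUsed(struct page_cgroup *pc)
 *	{ return test_bit(PCG_USED, &pc->flags); }
 *
 *	static inline void SetPageCgroupUsed(struct page_cgroup *pc)
 *	{ set_bit(PCG_USED, &pc->flags); }
 */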

/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)

static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
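
/*
 * Usage sketch (hypothetical caller, not part of this header): charge and
 * uncharge paths take the per-page_cgroup lock around tests of the USED bit
 * and reads of pc->mem_cgroup, e.g.:
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *	struct mem_cgroup *memcg = NULL;
 *
 *	lock_page_cgroup(pc);
 *	if (PageCgroupUsed(pc))
 *		memcg = pc->mem_cgroup;
 *	unlock_page_cgroup(pc);
 */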

static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * Updates to the page cache statistics kept in pc->flags can come
	 * from both process context and IRQ context. Disable IRQs to avoid
	 * deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}

static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}
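
/*
 * Usage sketch (hypothetical caller): statistics updates that can race with
 * move_account take the move lock with IRQs disabled, e.g. when accounting
 * a page as file-mapped:
 *
 *	unsigned long flags;
 *
 *	move_lock_page_cgroup(pc, &flags);
 *	if (!PageCgroupFileMapped(pc))
 *		SetPageCgroupFileMapped(pc);
 *	move_unlock_page_cgroup(pc, &flags);
 */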

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
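
/*
 * Usage sketch (hypothetical caller): when a page is swapped out, the owning
 * cgroup's id can be recorded against the swap entry and looked up again at
 * swap-in time. swap_cgroup_record() returns the id that was stored before:
 *
 *	old_id = swap_cgroup_record(ent, id);
 *	...
 *	id = lookup_swap_cgroup_id(ent);
 */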
#else

static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */

#endif /* !__GENERATING_BOUNDS_H */

#endif /* __LINUX_PAGE_CGROUP_H */