1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  */
23 
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/export.h>
37 #include <linux/mutex.h>
38 #include <linux/rbtree.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/swapops.h>
42 #include <linux/spinlock.h>
43 #include <linux/eventfd.h>
44 #include <linux/sort.h>
45 #include <linux/fs.h>
46 #include <linux/seq_file.h>
47 #include <linux/vmalloc.h>
48 #include <linux/mm_inline.h>
49 #include <linux/page_cgroup.h>
50 #include <linux/cpu.h>
51 #include <linux/oom.h>
52 #include "internal.h"
53 #include <net/sock.h>
54 #include <net/tcp_memcontrol.h>
55 
56 #include <asm/uaccess.h>
57 
58 #include <trace/events/vmscan.h>
59 
60 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
61 #define MEM_CGROUP_RECLAIM_RETRIES	5
62 struct mem_cgroup *root_mem_cgroup __read_mostly;
63 
64 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
65 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
66 int do_swap_account __read_mostly;
67 
68 /* for remembering the boot option */
69 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
70 static int really_do_swap_account __initdata = 1;
71 #else
72 static int really_do_swap_account __initdata = 0;
73 #endif
74 
75 #else
76 #define do_swap_account		(0)
77 #endif
78 
79 
80 /*
81  * Statistics for memory cgroup.
82  */
83 enum mem_cgroup_stat_index {
84 	/*
85 	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
86 	 */
87 	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
88 	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
89 	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
90 	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
91 	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
92 	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
93 	MEM_CGROUP_STAT_NSTATS,
94 };
95 
96 enum mem_cgroup_events_index {
97 	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
98 	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
99 	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
100 	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
101 	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
102 	MEM_CGROUP_EVENTS_NSTATS,
103 };
104 /*
105  * The per-memcg event counter is incremented at every pagein/pageout. With THP,
106  * it is incremented by the number of pages. This counter is used to
107  * trigger some periodic events. This is straightforward and better
108  * than using jiffies etc. to handle periodic memcg events.
109  */
110 enum mem_cgroup_events_target {
111 	MEM_CGROUP_TARGET_THRESH,
112 	MEM_CGROUP_TARGET_SOFTLIMIT,
113 	MEM_CGROUP_TARGET_NUMAINFO,
114 	MEM_CGROUP_NTARGETS,
115 };
116 #define THRESHOLDS_EVENTS_TARGET (128)
117 #define SOFTLIMIT_EVENTS_TARGET (1024)
118 #define NUMAINFO_EVENTS_TARGET	(1024)
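/*
 * The event counter (MEM_CGROUP_EVENTS_COUNT) is advanced by the number of
 * pages on every charge/uncharge, and mem_cgroup_event_ratelimit() below
 * compares it against a per-target "next" value built from these constants.
 * In effect, threshold notifications are re-evaluated roughly every 128
 * page events, while the soft limit tree and the NUMA scan-node mask are
 * refreshed roughly every 1024 page events.
 */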
119 
120 struct mem_cgroup_stat_cpu {
121 	long count[MEM_CGROUP_STAT_NSTATS];
122 	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
123 	unsigned long targets[MEM_CGROUP_NTARGETS];
124 };
125 
126 struct mem_cgroup_reclaim_iter {
127 	/* css_id of the last scanned hierarchy member */
128 	int position;
129 	/* scan generation, increased every round-trip */
130 	unsigned int generation;
131 };
132 
133 /*
134  * per-zone information in memory controller.
135  */
136 struct mem_cgroup_per_zone {
137 	struct lruvec		lruvec;
138 	unsigned long		count[NR_LRU_LISTS];
139 
140 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
141 
142 	struct zone_reclaim_stat reclaim_stat;
143 	struct rb_node		tree_node;	/* RB tree node */
144 	unsigned long long	usage_in_excess;/* Set to the value by which */
145 						/* the soft limit is exceeded*/
146 	bool			on_tree;
147 	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
148 						/* use container_of	   */
149 };
150 /* Macro for accessing counter */
151 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
152 
153 struct mem_cgroup_per_node {
154 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
155 };
156 
157 struct mem_cgroup_lru_info {
158 	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
159 };
160 
161 /*
162  * Cgroups above their limits are maintained in a RB-Tree, independent of
163  * their hierarchy representation
164  */
165 
166 struct mem_cgroup_tree_per_zone {
167 	struct rb_root rb_root;
168 	spinlock_t lock;
169 };
170 
171 struct mem_cgroup_tree_per_node {
172 	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
173 };
174 
175 struct mem_cgroup_tree {
176 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
177 };
178 
179 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
180 
181 struct mem_cgroup_threshold {
182 	struct eventfd_ctx *eventfd;
183 	u64 threshold;
184 };
185 
186 /* For threshold */
187 struct mem_cgroup_threshold_ary {
188 	/* An array index points to threshold just below usage. */
189 	int current_threshold;
190 	/* Size of entries[] */
191 	unsigned int size;
192 	/* Array of thresholds */
193 	struct mem_cgroup_threshold entries[0];
194 };
195 
196 struct mem_cgroup_thresholds {
197 	/* Primary thresholds array */
198 	struct mem_cgroup_threshold_ary *primary;
199 	/*
200 	 * Spare threshold array.
201 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
202 	 * It must be able to store at least primary->size - 1 entries.
203 	 */
204 	struct mem_cgroup_threshold_ary *spare;
205 };
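/*
 * For example (hypothetical numbers): with thresholds registered at 4M, 8M
 * and 16M and a current usage of 10M, primary->current_threshold would index
 * the 8M entry - roughly, the last registered threshold not above the usage -
 * so mem_cgroup_threshold() only needs to walk a few entries in either
 * direction when the usage changes.
 */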
206 
207 /* for OOM */
208 struct mem_cgroup_eventfd_list {
209 	struct list_head list;
210 	struct eventfd_ctx *eventfd;
211 };
212 
213 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
214 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
215 
216 /*
217  * The memory controller data structure. The memory controller controls both
218  * page cache and RSS per cgroup. We would eventually like to provide
219  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
220  * to help the administrator determine what knobs to tune.
221  *
222  * TODO: Add a water mark for the memory controller. Reclaim will begin when
223  * we hit the water mark. Maybe even add a low water mark, such that
224  * no reclaim occurs from a cgroup at its low water mark; this is
225  * a feature that will be implemented much later in the future.
226  */
227 struct mem_cgroup {
228 	struct cgroup_subsys_state css;
229 	/*
230 	 * the counter to account for memory usage
231 	 */
232 	struct res_counter res;
233 
234 	union {
235 		/*
236 		 * the counter to account for mem+swap usage.
237 		 */
238 		struct res_counter memsw;
239 
240 		/*
241 		 * rcu_freeing is used only when freeing struct mem_cgroup,
242 		 * so put it into a union to avoid wasting more memory.
243 		 * It must be disjoint from the css field.  It could be
244 		 * in a union with the res field, but res plays a much
245 		 * larger part in mem_cgroup life than memsw, and might
246 		 * be of interest, even at time of free, when debugging.
247 		 * So share rcu_head with the less interesting memsw.
248 		 */
249 		struct rcu_head rcu_freeing;
250 		/*
251 		 * But when using vfree(), that cannot be done at
252 		 * interrupt time, so we must then queue the work.
253 		 */
254 		struct work_struct work_freeing;
255 	};
256 
257 	/*
258 	 * Per cgroup active and inactive list, similar to the
259 	 * per zone LRU lists.
260 	 */
261 	struct mem_cgroup_lru_info info;
262 	int last_scanned_node;
263 #if MAX_NUMNODES > 1
264 	nodemask_t	scan_nodes;
265 	atomic_t	numainfo_events;
266 	atomic_t	numainfo_updating;
267 #endif
268 	/*
269 	 * Should the accounting and control be hierarchical, per subtree?
270 	 */
271 	bool use_hierarchy;
272 
273 	bool		oom_lock;
274 	atomic_t	under_oom;
275 
276 	atomic_t	refcnt;
277 
278 	int	swappiness;
279 	/* OOM-Killer disable */
280 	int		oom_kill_disable;
281 
282 	/* set when res.limit == memsw.limit */
283 	bool		memsw_is_minimum;
284 
285 	/* protect arrays of thresholds */
286 	struct mutex thresholds_lock;
287 
288 	/* thresholds for memory usage. RCU-protected */
289 	struct mem_cgroup_thresholds thresholds;
290 
291 	/* thresholds for mem+swap usage. RCU-protected */
292 	struct mem_cgroup_thresholds memsw_thresholds;
293 
294 	/* For oom notifier event fd */
295 	struct list_head oom_notify;
296 
297 	/*
298 	 * Should we move charges of a task when a task is moved into this
299 	 * mem_cgroup ? And what type of charges should we move ?
300 	 */
301 	unsigned long 	move_charge_at_immigrate;
302 	/*
303 	 * percpu counter.
304 	 */
305 	struct mem_cgroup_stat_cpu *stat;
306 	/*
307 	 * used when a cpu is offlined or other synchronizations
308 	 * See mem_cgroup_read_stat().
309 	 */
310 	struct mem_cgroup_stat_cpu nocpu_base;
311 	spinlock_t pcp_counter_lock;
312 
313 #ifdef CONFIG_INET
314 	struct tcp_memcontrol tcp_mem;
315 #endif
316 };
317 
318 /* Stuff for moving charges at task migration. */
319 /*
320  * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
321  * left-shifted bitmap of these types.
322  */
323 enum move_type {
324 	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
325 	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
326 	NR_MOVE_TYPE,
327 };
328 
329 /* "mc" and its members are protected by cgroup_mutex */
330 static struct move_charge_struct {
331 	spinlock_t	  lock; /* for from, to */
332 	struct mem_cgroup *from;
333 	struct mem_cgroup *to;
334 	unsigned long precharge;
335 	unsigned long moved_charge;
336 	unsigned long moved_swap;
337 	struct task_struct *moving_task;	/* a task moving charges */
338 	wait_queue_head_t waitq;		/* a waitq for other context */
339 } mc = {
340 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
341 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
342 };
343 
344 static bool move_anon(void)
345 {
346 	return test_bit(MOVE_CHARGE_TYPE_ANON,
347 					&mc.to->move_charge_at_immigrate);
348 }
349 
350 static bool move_file(void)
351 {
352 	return test_bit(MOVE_CHARGE_TYPE_FILE,
353 					&mc.to->move_charge_at_immigrate);
354 }
355 
356 /*
357  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
358  * limit reclaim to prevent infinite loops, if they ever occur.
359  */
360 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
361 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
362 
363 enum charge_type {
364 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
365 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
366 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
367 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
368 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
369 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
370 	NR_CHARGE_TYPE,
371 };
372 
373 /* for encoding cft->private value on file */
374 #define _MEM			(0)
375 #define _MEMSWAP		(1)
376 #define _OOM_TYPE		(2)
377 #define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
378 #define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
379 #define MEMFILE_ATTR(val)	((val) & 0xffff)
380 /* Used for the OOM notifier */
381 #define OOM_CONTROL		(0)
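/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type into
 * the upper 16 bits and the res_counter member into the lower 16 bits;
 * MEMFILE_TYPE() recovers _MEMSWAP and MEMFILE_ATTR() recovers RES_LIMIT
 * when the cft->private value is read back.
 */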
382 
383 /*
384  * Reclaim flags for mem_cgroup_hierarchical_reclaim
385  */
386 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
387 #define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
388 #define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
389 #define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
390 
391 static void mem_cgroup_get(struct mem_cgroup *memcg);
392 static void mem_cgroup_put(struct mem_cgroup *memcg);
393 
394 /* Writing them here to avoid exposing memcg's inner layout */
395 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
396 #include <net/sock.h>
397 #include <net/ip.h>
398 
399 static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
400 void sock_update_memcg(struct sock *sk)
401 {
402 	if (mem_cgroup_sockets_enabled) {
403 		struct mem_cgroup *memcg;
404 
405 		BUG_ON(!sk->sk_prot->proto_cgroup);
406 
407 		/* Socket cloning can throw us here with sk_cgrp already
408 		 * filled. It won't however, necessarily happen from
409 		 * process context. So the test for root memcg given
410 		 * the current task's memcg won't help us in this case.
411 		 *
412 		 * Respecting the original socket's memcg is a better
413 		 * decision in this case.
414 		 */
415 		if (sk->sk_cgrp) {
416 			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
417 			mem_cgroup_get(sk->sk_cgrp->memcg);
418 			return;
419 		}
420 
421 		rcu_read_lock();
422 		memcg = mem_cgroup_from_task(current);
423 		if (!mem_cgroup_is_root(memcg)) {
424 			mem_cgroup_get(memcg);
425 			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
426 		}
427 		rcu_read_unlock();
428 	}
429 }
430 EXPORT_SYMBOL(sock_update_memcg);
431 
432 void sock_release_memcg(struct sock *sk)
433 {
434 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
435 		struct mem_cgroup *memcg;
436 		WARN_ON(!sk->sk_cgrp->memcg);
437 		memcg = sk->sk_cgrp->memcg;
438 		mem_cgroup_put(memcg);
439 	}
440 }
441 
442 #ifdef CONFIG_INET
443 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
444 {
445 	if (!memcg || mem_cgroup_is_root(memcg))
446 		return NULL;
447 
448 	return &memcg->tcp_mem.cg_proto;
449 }
450 EXPORT_SYMBOL(tcp_proto_cgroup);
451 #endif /* CONFIG_INET */
452 #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
453 
454 static void drain_all_stock_async(struct mem_cgroup *memcg);
455 
456 static struct mem_cgroup_per_zone *
457 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
458 {
459 	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
460 }
461 
462 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
463 {
464 	return &memcg->css;
465 }
466 
467 static struct mem_cgroup_per_zone *
468 page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
469 {
470 	int nid = page_to_nid(page);
471 	int zid = page_zonenum(page);
472 
473 	return mem_cgroup_zoneinfo(memcg, nid, zid);
474 }
475 
476 static struct mem_cgroup_tree_per_zone *
477 soft_limit_tree_node_zone(int nid, int zid)
478 {
479 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
480 }
481 
482 static struct mem_cgroup_tree_per_zone *
483 soft_limit_tree_from_page(struct page *page)
484 {
485 	int nid = page_to_nid(page);
486 	int zid = page_zonenum(page);
487 
488 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
489 }
490 
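/*
 * Insert a per-zone record into the soft limit tree, ordered by how far the
 * memcg's usage exceeds its soft limit. Entries with an excess of zero are
 * not inserted, and an entry already on the tree is left where it is; callers
 * remove and re-insert it when the excess changes.
 */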
491 static void
492 __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
493 				struct mem_cgroup_per_zone *mz,
494 				struct mem_cgroup_tree_per_zone *mctz,
495 				unsigned long long new_usage_in_excess)
496 {
497 	struct rb_node **p = &mctz->rb_root.rb_node;
498 	struct rb_node *parent = NULL;
499 	struct mem_cgroup_per_zone *mz_node;
500 
501 	if (mz->on_tree)
502 		return;
503 
504 	mz->usage_in_excess = new_usage_in_excess;
505 	if (!mz->usage_in_excess)
506 		return;
507 	while (*p) {
508 		parent = *p;
509 		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
510 					tree_node);
511 		if (mz->usage_in_excess < mz_node->usage_in_excess)
512 			p = &(*p)->rb_left;
513 		/*
514 		 * We can't avoid mem cgroups that are over their soft
515 		 * limit by the same amount
516 		 */
517 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
518 			p = &(*p)->rb_right;
519 	}
520 	rb_link_node(&mz->tree_node, parent, p);
521 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
522 	mz->on_tree = true;
523 }
524 
525 static void
526 __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
527 				struct mem_cgroup_per_zone *mz,
528 				struct mem_cgroup_tree_per_zone *mctz)
529 {
530 	if (!mz->on_tree)
531 		return;
532 	rb_erase(&mz->tree_node, &mctz->rb_root);
533 	mz->on_tree = false;
534 }
535 
536 static void
537 mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
538 				struct mem_cgroup_per_zone *mz,
539 				struct mem_cgroup_tree_per_zone *mctz)
540 {
541 	spin_lock(&mctz->lock);
542 	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
543 	spin_unlock(&mctz->lock);
544 }
545 
546 
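/*
 * Re-balance the soft limit tree position of every memcg on the path from
 * the page's memcg up to the hierarchy root: each level is removed from the
 * tree if present and re-inserted with its current soft limit excess.
 */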
547 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
548 {
549 	unsigned long long excess;
550 	struct mem_cgroup_per_zone *mz;
551 	struct mem_cgroup_tree_per_zone *mctz;
552 	int nid = page_to_nid(page);
553 	int zid = page_zonenum(page);
554 	mctz = soft_limit_tree_from_page(page);
555 
556 	/*
557 	 * Necessary to update all ancestors when hierarchy is used,
558 	 * because their event counter is not touched.
559 	 */
560 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
561 		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
562 		excess = res_counter_soft_limit_excess(&memcg->res);
563 		/*
564 		 * We have to update the tree if mz is on RB-tree or
565 		 * mem is over its softlimit.
566 		 */
567 		if (excess || mz->on_tree) {
568 			spin_lock(&mctz->lock);
569 			/* if on-tree, remove it */
570 			if (mz->on_tree)
571 				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
572 			/*
573 			 * Insert again. mz->usage_in_excess will be updated.
574 			 * If excess is 0, no tree ops.
575 			 */
576 			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
577 			spin_unlock(&mctz->lock);
578 		}
579 	}
580 }
581 
582 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
583 {
584 	int node, zone;
585 	struct mem_cgroup_per_zone *mz;
586 	struct mem_cgroup_tree_per_zone *mctz;
587 
588 	for_each_node(node) {
589 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
590 			mz = mem_cgroup_zoneinfo(memcg, node, zone);
591 			mctz = soft_limit_tree_node_zone(node, zone);
592 			mem_cgroup_remove_exceeded(memcg, mz, mctz);
593 		}
594 	}
595 }
596 
597 static struct mem_cgroup_per_zone *
598 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
599 {
600 	struct rb_node *rightmost = NULL;
601 	struct mem_cgroup_per_zone *mz;
602 
603 retry:
604 	mz = NULL;
605 	rightmost = rb_last(&mctz->rb_root);
606 	if (!rightmost)
607 		goto done;		/* Nothing to reclaim from */
608 
609 	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
610 	/*
611 	 * Remove the node now but someone else can add it back;
612 	 * we will add it back at the end of reclaim to its correct
613 	 * position in the tree.
614 	 */
615 	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
616 	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
617 		!css_tryget(&mz->mem->css))
618 		goto retry;
619 done:
620 	return mz;
621 }
622 
623 static struct mem_cgroup_per_zone *
624 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
625 {
626 	struct mem_cgroup_per_zone *mz;
627 
628 	spin_lock(&mctz->lock);
629 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
630 	spin_unlock(&mctz->lock);
631 	return mz;
632 }
633 
634 /*
635  * Implementation Note: reading percpu statistics for memcg.
636  *
637  * Both vmstat[] and percpu_counter use thresholds and periodic
638  * synchronization to implement a "quick" read. There is a trade-off between
639  * reading cost and precision of the value, so we may have a chance to implement
640  * a similar periodic synchronization of the counters in memcg.
641  *
642  * But this _read() function is used for the user interface now. The user accounts
643  * memory usage by memory cgroup and _always_ requires an exact value because
644  * they account memory. Even if we provided a quick-and-fuzzy read, we would still
645  * have to visit all online cpus and sum them up. So, for now, unnecessary
646  * synchronization is not implemented (it is only implemented for cpu hotplug).
647  *
648  * If there are kernel-internal actions which can make use of a not-exact
649  * value, and reading all cpu values becomes a performance bottleneck in some
650  * common workload, thresholds and synchronization as in vmstat[] should be
651  * implemented.
652  */
653 static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
654 				 enum mem_cgroup_stat_index idx)
655 {
656 	long val = 0;
657 	int cpu;
658 
659 	get_online_cpus();
660 	for_each_online_cpu(cpu)
661 		val += per_cpu(memcg->stat->count[idx], cpu);
662 #ifdef CONFIG_HOTPLUG_CPU
663 	spin_lock(&memcg->pcp_counter_lock);
664 	val += memcg->nocpu_base.count[idx];
665 	spin_unlock(&memcg->pcp_counter_lock);
666 #endif
667 	put_online_cpus();
668 	return val;
669 }
670 
671 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
672 					 bool charge)
673 {
674 	int val = (charge) ? 1 : -1;
675 	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
676 }
677 
678 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
679 					    enum mem_cgroup_events_index idx)
680 {
681 	unsigned long val = 0;
682 	int cpu;
683 
684 	for_each_online_cpu(cpu)
685 		val += per_cpu(memcg->stat->events[idx], cpu);
686 #ifdef CONFIG_HOTPLUG_CPU
687 	spin_lock(&memcg->pcp_counter_lock);
688 	val += memcg->nocpu_base.events[idx];
689 	spin_unlock(&memcg->pcp_counter_lock);
690 #endif
691 	return val;
692 }
693 
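/*
 * Account a (un)charge of @nr_pages against the file or anon counter and
 * record it as a pgpgin (positive @nr_pages) or pgpgout (negative) event;
 * the absolute page count also feeds the periodic event counter used by
 * memcg_check_events().
 */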
694 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
695 					 bool file, int nr_pages)
696 {
697 	preempt_disable();
698 
699 	if (file)
700 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
701 				nr_pages);
702 	else
703 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
704 				nr_pages);
705 
706 	/* pagein of a big page is an event. So, ignore page size */
707 	if (nr_pages > 0)
708 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
709 	else {
710 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
711 		nr_pages = -nr_pages; /* for event */
712 	}
713 
714 	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
715 
716 	preempt_enable();
717 }
718 
719 unsigned long
720 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
721 			unsigned int lru_mask)
722 {
723 	struct mem_cgroup_per_zone *mz;
724 	enum lru_list l;
725 	unsigned long ret = 0;
726 
727 	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
728 
729 	for_each_lru(l) {
730 		if (BIT(l) & lru_mask)
731 			ret += MEM_CGROUP_ZSTAT(mz, l);
732 	}
733 	return ret;
734 }
735 
736 static unsigned long
737 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
738 			int nid, unsigned int lru_mask)
739 {
740 	u64 total = 0;
741 	int zid;
742 
743 	for (zid = 0; zid < MAX_NR_ZONES; zid++)
744 		total += mem_cgroup_zone_nr_lru_pages(memcg,
745 						nid, zid, lru_mask);
746 
747 	return total;
748 }
749 
750 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
751 			unsigned int lru_mask)
752 {
753 	int nid;
754 	u64 total = 0;
755 
756 	for_each_node_state(nid, N_HIGH_MEMORY)
757 		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
758 	return total;
759 }
760 
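/*
 * Returns true when the per-cpu event counter has passed the target for
 * @target and advances the target by the corresponding *_EVENTS_TARGET
 * step, so the caller only performs the (more expensive) check
 * periodically rather than on every charge.
 */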
761 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
762 				       enum mem_cgroup_events_target target)
763 {
764 	unsigned long val, next;
765 
766 	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
767 	next = __this_cpu_read(memcg->stat->targets[target]);
768 	/* from time_after() in jiffies.h */
769 	if ((long)next - (long)val < 0) {
770 		switch (target) {
771 		case MEM_CGROUP_TARGET_THRESH:
772 			next = val + THRESHOLDS_EVENTS_TARGET;
773 			break;
774 		case MEM_CGROUP_TARGET_SOFTLIMIT:
775 			next = val + SOFTLIMIT_EVENTS_TARGET;
776 			break;
777 		case MEM_CGROUP_TARGET_NUMAINFO:
778 			next = val + NUMAINFO_EVENTS_TARGET;
779 			break;
780 		default:
781 			break;
782 		}
783 		__this_cpu_write(memcg->stat->targets[target], next);
784 		return true;
785 	}
786 	return false;
787 }
788 
789 /*
790  * Check events in order.
791  *
792  */
793 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
794 {
795 	preempt_disable();
796 	/* threshold event is triggered in finer grain than soft limit */
797 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
798 						MEM_CGROUP_TARGET_THRESH))) {
799 		bool do_softlimit;
800 		bool do_numainfo __maybe_unused;
801 
802 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
803 						MEM_CGROUP_TARGET_SOFTLIMIT);
804 #if MAX_NUMNODES > 1
805 		do_numainfo = mem_cgroup_event_ratelimit(memcg,
806 						MEM_CGROUP_TARGET_NUMAINFO);
807 #endif
808 		preempt_enable();
809 
810 		mem_cgroup_threshold(memcg);
811 		if (unlikely(do_softlimit))
812 			mem_cgroup_update_tree(memcg, page);
813 #if MAX_NUMNODES > 1
814 		if (unlikely(do_numainfo))
815 			atomic_inc(&memcg->numainfo_events);
816 #endif
817 	} else
818 		preempt_enable();
819 }
820 
821 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
822 {
823 	return container_of(cgroup_subsys_state(cont,
824 				mem_cgroup_subsys_id), struct mem_cgroup,
825 				css);
826 }
827 
828 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
829 {
830 	/*
831 	 * mm_update_next_owner() may clear mm->owner to NULL
832 	 * if it races with swapoff, page migration, etc.
833 	 * So this can be called with p == NULL.
834 	 */
835 	if (unlikely(!p))
836 		return NULL;
837 
838 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
839 				struct mem_cgroup, css);
840 }
841 
842 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
843 {
844 	struct mem_cgroup *memcg = NULL;
845 
846 	if (!mm)
847 		return NULL;
848 	/*
849 	 * Because we have no locks, mm->owner may be being moved to another
850 	 * cgroup. We use css_tryget() here even if this looks
851 	 * pessimistic (rather than adding locks here).
852 	 */
853 	rcu_read_lock();
854 	do {
855 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
856 		if (unlikely(!memcg))
857 			break;
858 	} while (!css_tryget(&memcg->css));
859 	rcu_read_unlock();
860 	return memcg;
861 }
862 
863 /**
864  * mem_cgroup_iter - iterate over memory cgroup hierarchy
865  * @root: hierarchy root
866  * @prev: previously returned memcg, NULL on first invocation
867  * @reclaim: cookie for shared reclaim walks, NULL for full walks
868  *
869  * Returns references to children of the hierarchy below @root, or
870  * @root itself, or %NULL after a full round-trip.
871  *
872  * Caller must pass the return value in @prev on subsequent
873  * invocations for reference counting, or use mem_cgroup_iter_break()
874  * to cancel a hierarchy walk before the round-trip is complete.
875  *
876  * Reclaimers can specify a zone and a priority level in @reclaim to
877  * divide up the memcgs in the hierarchy among all concurrent
878  * reclaimers operating on the same zone and priority.
879  */
880 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
881 				   struct mem_cgroup *prev,
882 				   struct mem_cgroup_reclaim_cookie *reclaim)
883 {
884 	struct mem_cgroup *memcg = NULL;
885 	int id = 0;
886 
887 	if (mem_cgroup_disabled())
888 		return NULL;
889 
890 	if (!root)
891 		root = root_mem_cgroup;
892 
893 	if (prev && !reclaim)
894 		id = css_id(&prev->css);
895 
896 	if (prev && prev != root)
897 		css_put(&prev->css);
898 
899 	if (!root->use_hierarchy && root != root_mem_cgroup) {
900 		if (prev)
901 			return NULL;
902 		return root;
903 	}
904 
905 	while (!memcg) {
906 		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
907 		struct cgroup_subsys_state *css;
908 
909 		if (reclaim) {
910 			int nid = zone_to_nid(reclaim->zone);
911 			int zid = zone_idx(reclaim->zone);
912 			struct mem_cgroup_per_zone *mz;
913 
914 			mz = mem_cgroup_zoneinfo(root, nid, zid);
915 			iter = &mz->reclaim_iter[reclaim->priority];
916 			if (prev && reclaim->generation != iter->generation)
917 				return NULL;
918 			id = iter->position;
919 		}
920 
921 		rcu_read_lock();
922 		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
923 		if (css) {
924 			if (css == &root->css || css_tryget(css))
925 				memcg = container_of(css,
926 						     struct mem_cgroup, css);
927 		} else
928 			id = 0;
929 		rcu_read_unlock();
930 
931 		if (reclaim) {
932 			iter->position = id;
933 			if (!css)
934 				iter->generation++;
935 			else if (!prev && memcg)
936 				reclaim->generation = iter->generation;
937 		}
938 
939 		if (prev && !css)
940 			return NULL;
941 	}
942 	return memcg;
943 }
944 
945 /**
946  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
947  * @root: hierarchy root
948  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
949  */
950 void mem_cgroup_iter_break(struct mem_cgroup *root,
951 			   struct mem_cgroup *prev)
952 {
953 	if (!root)
954 		root = root_mem_cgroup;
955 	if (prev && prev != root)
956 		css_put(&prev->css);
957 }
958 
959 /*
960  * Iteration constructs for visiting all cgroups (under a tree).  If
961  * loops are exited prematurely (break), mem_cgroup_iter_break() must
962  * be used for reference counting.
963  */
964 #define for_each_mem_cgroup_tree(iter, root)		\
965 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
966 	     iter != NULL;				\
967 	     iter = mem_cgroup_iter(root, iter, NULL))
968 
969 #define for_each_mem_cgroup(iter)			\
970 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
971 	     iter != NULL;				\
972 	     iter = mem_cgroup_iter(NULL, iter, NULL))
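/*
 * A reclaim-style walk, by contrast, passes a cookie and must use
 * mem_cgroup_iter_break() when bailing out early, e.g. (illustrative only;
 * "reclaim_cookie" and "enough_reclaimed" are placeholder names):
 *
 *	memcg = mem_cgroup_iter(root, NULL, &reclaim_cookie);
 *	while (memcg) {
 *		if (enough_reclaimed) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, &reclaim_cookie);
 *	}
 */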
973 
974 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
975 {
976 	return (memcg == root_mem_cgroup);
977 }
978 
979 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
980 {
981 	struct mem_cgroup *memcg;
982 
983 	if (!mm)
984 		return;
985 
986 	rcu_read_lock();
987 	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
988 	if (unlikely(!memcg))
989 		goto out;
990 
991 	switch (idx) {
992 	case PGFAULT:
993 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
994 		break;
995 	case PGMAJFAULT:
996 		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
997 		break;
998 	default:
999 		BUG();
1000 	}
1001 out:
1002 	rcu_read_unlock();
1003 }
1004 EXPORT_SYMBOL(mem_cgroup_count_vm_event);
1005 
1006 /**
1007  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1008  * @zone: zone of the wanted lruvec
1009  * @mem: memcg of the wanted lruvec
1010  *
1011  * Returns the lru list vector holding pages for the given @zone and
1012  * @mem.  This can be the global zone lruvec, if the memory controller
1013  * is disabled.
1014  */
1015 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1016 				      struct mem_cgroup *memcg)
1017 {
1018 	struct mem_cgroup_per_zone *mz;
1019 
1020 	if (mem_cgroup_disabled())
1021 		return &zone->lruvec;
1022 
1023 	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1024 	return &mz->lruvec;
1025 }
1026 
1027 /*
1028  * Following LRU functions are allowed to be used without PCG_LOCK.
1029  * Operations are called by routines of the global LRU independently of memcg.
1030  * What we have to take care of here is the validity of pc->mem_cgroup.
1031  *
1032  * Changes to pc->mem_cgroup happen when
1033  * 1. charge
1034  * 2. moving account
1035  * In the typical case, "charge" is done before add-to-lru. The exception is SwapCache,
1036  * which is added to the LRU before charge.
1037  * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
1038  * When moving account, the page is not on the LRU. It's isolated.
1039  */
1040 
1041 /**
1042  * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
1043  * @zone: zone of the page
1044  * @page: the page
1045  * @lru: current lru
1046  *
1047  * This function accounts for @page being added to @lru, and returns
1048  * the lruvec for the given @zone and the memcg @page is charged to.
1049  *
1050  * The callsite is then responsible for physically linking the page to
1051  * the returned lruvec->lists[@lru].
1052  */
1053 struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
1054 				       enum lru_list lru)
1055 {
1056 	struct mem_cgroup_per_zone *mz;
1057 	struct mem_cgroup *memcg;
1058 	struct page_cgroup *pc;
1059 
1060 	if (mem_cgroup_disabled())
1061 		return &zone->lruvec;
1062 
1063 	pc = lookup_page_cgroup(page);
1064 	memcg = pc->mem_cgroup;
1065 
1066 	/*
1067 	 * Surreptitiously switch any uncharged page to root:
1068 	 * an uncharged page off lru does nothing to secure
1069 	 * its former mem_cgroup from sudden removal.
1070 	 *
1071 	 * Our caller holds lru_lock, and PageCgroupUsed is updated
1072 	 * under page_cgroup lock: between them, they make all uses
1073 	 * of pc->mem_cgroup safe.
1074 	 */
1075 	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1076 		pc->mem_cgroup = memcg = root_mem_cgroup;
1077 
1078 	mz = page_cgroup_zoneinfo(memcg, page);
1079 	/* compound_order() is stabilized through lru_lock */
1080 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
1081 	return &mz->lruvec;
1082 }
1083 
1084 /**
1085  * mem_cgroup_lru_del_list - account for removing an lru page
1086  * @page: the page
1087  * @lru: target lru
1088  *
1089  * This function accounts for @page being removed from @lru.
1090  *
1091  * The callsite is then responsible for physically unlinking
1092  * @page->lru.
1093  */
1094 void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
1095 {
1096 	struct mem_cgroup_per_zone *mz;
1097 	struct mem_cgroup *memcg;
1098 	struct page_cgroup *pc;
1099 
1100 	if (mem_cgroup_disabled())
1101 		return;
1102 
1103 	pc = lookup_page_cgroup(page);
1104 	memcg = pc->mem_cgroup;
1105 	VM_BUG_ON(!memcg);
1106 	mz = page_cgroup_zoneinfo(memcg, page);
1107 	/* huge page split is done under lru_lock. so, we have no races. */
1108 	VM_BUG_ON(MEM_CGROUP_ZSTAT(mz, lru) < (1 << compound_order(page)));
1109 	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
1110 }
1111 
1112 void mem_cgroup_lru_del(struct page *page)
1113 {
1114 	mem_cgroup_lru_del_list(page, page_lru(page));
1115 }
1116 
1117 /**
1118  * mem_cgroup_lru_move_lists - account for moving a page between lrus
1119  * @zone: zone of the page
1120  * @page: the page
1121  * @from: current lru
1122  * @to: target lru
1123  *
1124  * This function accounts for @page being moved between the lrus @from
1125  * and @to, and returns the lruvec for the given @zone and the memcg
1126  * @page is charged to.
1127  *
1128  * The callsite is then responsible for physically relinking
1129  * @page->lru to the returned lruvec->lists[@to].
1130  */
1131 struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
1132 					 struct page *page,
1133 					 enum lru_list from,
1134 					 enum lru_list to)
1135 {
1136 	/* XXX: Optimize this, especially for @from == @to */
1137 	mem_cgroup_lru_del_list(page, from);
1138 	return mem_cgroup_lru_add_list(zone, page, to);
1139 }
1140 
1141 /*
1142  * Checks whether the given memcg is the same as root_memcg or within
1143  * root_memcg's hierarchy subtree
1144  */
1145 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1146 		struct mem_cgroup *memcg)
1147 {
1148 	if (root_memcg != memcg) {
1149 		return (root_memcg->use_hierarchy &&
1150 			css_is_ancestor(&memcg->css, &root_memcg->css));
1151 	}
1152 
1153 	return true;
1154 }
1155 
1156 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
1157 {
1158 	int ret;
1159 	struct mem_cgroup *curr = NULL;
1160 	struct task_struct *p;
1161 
1162 	p = find_lock_task_mm(task);
1163 	if (p) {
1164 		curr = try_get_mem_cgroup_from_mm(p->mm);
1165 		task_unlock(p);
1166 	} else {
1167 		/*
1168 		 * All threads may have already detached their mm's, but the oom
1169 		 * killer still needs to detect if they have already been oom
1170 		 * killed to prevent needlessly killing additional tasks.
1171 		 */
1172 		task_lock(task);
1173 		curr = mem_cgroup_from_task(task);
1174 		if (curr)
1175 			css_get(&curr->css);
1176 		task_unlock(task);
1177 	}
1178 	if (!curr)
1179 		return 0;
1180 	/*
1181 	 * We should check use_hierarchy of "memcg" not "curr". Checking
1182 	 * use_hierarchy of "curr" here would make this function return true if hierarchy
1183 	 * is enabled in "curr" and "curr" is a child of "memcg" in the *cgroup*
1184 	 * hierarchy (even if use_hierarchy is disabled in "memcg").
1185 	 */
1186 	ret = mem_cgroup_same_or_subtree(memcg, curr);
1187 	css_put(&curr->css);
1188 	return ret;
1189 }
1190 
1191 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
1192 {
1193 	unsigned long inactive_ratio;
1194 	int nid = zone_to_nid(zone);
1195 	int zid = zone_idx(zone);
1196 	unsigned long inactive;
1197 	unsigned long active;
1198 	unsigned long gb;
1199 
1200 	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1201 						BIT(LRU_INACTIVE_ANON));
1202 	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1203 					      BIT(LRU_ACTIVE_ANON));
1204 
1205 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1206 	if (gb)
1207 		inactive_ratio = int_sqrt(10 * gb);
1208 	else
1209 		inactive_ratio = 1;
1210 
1211 	return inactive * inactive_ratio < active;
1212 }
1213 
1214 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
1215 {
1216 	unsigned long active;
1217 	unsigned long inactive;
1218 	int zid = zone_idx(zone);
1219 	int nid = zone_to_nid(zone);
1220 
1221 	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1222 						BIT(LRU_INACTIVE_FILE));
1223 	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1224 					      BIT(LRU_ACTIVE_FILE));
1225 
1226 	return (active > inactive);
1227 }
1228 
1229 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1230 						      struct zone *zone)
1231 {
1232 	int nid = zone_to_nid(zone);
1233 	int zid = zone_idx(zone);
1234 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1235 
1236 	return &mz->reclaim_stat;
1237 }
1238 
1239 struct zone_reclaim_stat *
1240 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1241 {
1242 	struct page_cgroup *pc;
1243 	struct mem_cgroup_per_zone *mz;
1244 
1245 	if (mem_cgroup_disabled())
1246 		return NULL;
1247 
1248 	pc = lookup_page_cgroup(page);
1249 	if (!PageCgroupUsed(pc))
1250 		return NULL;
1251 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1252 	smp_rmb();
1253 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1254 	return &mz->reclaim_stat;
1255 }
1256 
1257 #define mem_cgroup_from_res_counter(counter, member)	\
1258 	container_of(counter, struct mem_cgroup, member)
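/*
 * e.g. mem_cgroup_from_res_counter(counter, res) maps a struct res_counter
 * embedded as ->res back to its owning struct mem_cgroup; the same works
 * for the ->memsw counter with the "memsw" member name.
 */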
1259 
1260 /**
1261  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1262  * @mem: the memory cgroup
1263  *
1264  * Returns the maximum amount of memory @mem can be charged with, in
1265  * pages.
1266  */
1267 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1268 {
1269 	unsigned long long margin;
1270 
1271 	margin = res_counter_margin(&memcg->res);
1272 	if (do_swap_account)
1273 		margin = min(margin, res_counter_margin(&memcg->memsw));
1274 	return margin >> PAGE_SHIFT;
1275 }
1276 
1277 int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1278 {
1279 	struct cgroup *cgrp = memcg->css.cgroup;
1280 
1281 	/* root ? */
1282 	if (cgrp->parent == NULL)
1283 		return vm_swappiness;
1284 
1285 	return memcg->swappiness;
1286 }
1287 
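/*
 * mem_cgroup_start_move()/mem_cgroup_end_move() bracket a charge-moving
 * operation: they raise/lower the MEM_CGROUP_ON_MOVE per-cpu counters so
 * that mem_cgroup_stealed() can tell updaters that pc->mem_cgroup may be
 * rewritten underneath them; synchronize_rcu() makes the raised count
 * visible before the move actually starts.
 */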
1288 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1289 {
1290 	int cpu;
1291 
1292 	get_online_cpus();
1293 	spin_lock(&memcg->pcp_counter_lock);
1294 	for_each_online_cpu(cpu)
1295 		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1296 	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1297 	spin_unlock(&memcg->pcp_counter_lock);
1298 	put_online_cpus();
1299 
1300 	synchronize_rcu();
1301 }
1302 
1303 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1304 {
1305 	int cpu;
1306 
1307 	if (!memcg)
1308 		return;
1309 	get_online_cpus();
1310 	spin_lock(&memcg->pcp_counter_lock);
1311 	for_each_online_cpu(cpu)
1312 		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1313 	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1314 	spin_unlock(&memcg->pcp_counter_lock);
1315 	put_online_cpus();
1316 }
1317 /*
1318  * Two routines for checking whether "mem" is under move_account() or not.
1319  *
1320  * mem_cgroup_stealed() - checking whether a cgroup is mc.from or not. This is used
1321  *			  for avoiding races in accounting. If true,
1322  *			  pc->mem_cgroup may be overwritten.
1323  *
1324  * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
1325  *			  under the hierarchy of moving cgroups. This is for
1326  *			  waiting at high memory pressure caused by "move".
1327  */
1328 
1329 static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
1330 {
1331 	VM_BUG_ON(!rcu_read_lock_held());
1332 	return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
1333 }
1334 
1335 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1336 {
1337 	struct mem_cgroup *from;
1338 	struct mem_cgroup *to;
1339 	bool ret = false;
1340 	/*
1341 	 * Unlike task_move routines, we access mc.to, mc.from not under
1342 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1343 	 */
1344 	spin_lock(&mc.lock);
1345 	from = mc.from;
1346 	to = mc.to;
1347 	if (!from)
1348 		goto unlock;
1349 
1350 	ret = mem_cgroup_same_or_subtree(memcg, from)
1351 		|| mem_cgroup_same_or_subtree(memcg, to);
1352 unlock:
1353 	spin_unlock(&mc.lock);
1354 	return ret;
1355 }
1356 
1357 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1358 {
1359 	if (mc.moving_task && current != mc.moving_task) {
1360 		if (mem_cgroup_under_move(memcg)) {
1361 			DEFINE_WAIT(wait);
1362 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1363 			/* moving charge context might have finished. */
1364 			if (mc.moving_task)
1365 				schedule();
1366 			finish_wait(&mc.waitq, &wait);
1367 			return true;
1368 		}
1369 	}
1370 	return false;
1371 }
1372 
1373 /**
1374  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1375  * @memcg: The memory cgroup that went over limit
1376  * @p: Task that is going to be killed
1377  *
1378  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1379  * enabled
1380  */
1381 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1382 {
1383 	struct cgroup *task_cgrp;
1384 	struct cgroup *mem_cgrp;
1385 	/*
1386 	 * Need a buffer in BSS, can't rely on allocations. The code relies
1387 	 * on the assumption that OOM is serialized for memory controller.
1388 	 * If this assumption is broken, revisit this code.
1389 	 */
1390 	static char memcg_name[PATH_MAX];
1391 	int ret;
1392 
1393 	if (!memcg || !p)
1394 		return;
1395 
1396 
1397 	rcu_read_lock();
1398 
1399 	mem_cgrp = memcg->css.cgroup;
1400 	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1401 
1402 	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1403 	if (ret < 0) {
1404 		/*
1405 		 * Unfortunately, we are unable to convert to a useful name,
1406 		 * but we'll still print out the usage information.
1407 		 */
1408 		rcu_read_unlock();
1409 		goto done;
1410 	}
1411 	rcu_read_unlock();
1412 
1413 	printk(KERN_INFO "Task in %s killed", memcg_name);
1414 
1415 	rcu_read_lock();
1416 	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1417 	if (ret < 0) {
1418 		rcu_read_unlock();
1419 		goto done;
1420 	}
1421 	rcu_read_unlock();
1422 
1423 	/*
1424 	 * Continues from above, so we don't need a KERN_ level
1425 	 */
1426 	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1427 done:
1428 
1429 	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1430 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1431 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1432 		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1433 	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1434 		"failcnt %llu\n",
1435 		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1436 		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1437 		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1438 }
1439 
1440 /*
1441  * This function returns the number of memcgs under the hierarchy tree. Returns
1442  * 1 (self count) if there are no children.
1443  */
1444 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1445 {
1446 	int num = 0;
1447 	struct mem_cgroup *iter;
1448 
1449 	for_each_mem_cgroup_tree(iter, memcg)
1450 		num++;
1451 	return num;
1452 }
1453 
1454 /*
1455  * Return the memory (and swap, if configured) limit for a memcg.
1456  */
1457 u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1458 {
1459 	u64 limit;
1460 	u64 memsw;
1461 
1462 	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1463 	limit += total_swap_pages << PAGE_SHIFT;
1464 
1465 	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1466 	/*
1467 	 * If memsw is finite and limits the amount of swap space available
1468 	 * to this memcg, return that limit.
1469 	 */
1470 	return min(limit, memsw);
1471 }
1472 
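/*
 * Reclaim pages charged to @memcg (and its hierarchy) in response to hitting
 * a hard limit. Retries up to MEM_CGROUP_MAX_RECLAIM_LOOPS times, draining
 * the per-cpu stock after the first pass, and stops early once some margin
 * is available, once a limit shrinker has made minimal progress, or when
 * repeated passes have reclaimed nothing at all.
 */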
1473 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1474 					gfp_t gfp_mask,
1475 					unsigned long flags)
1476 {
1477 	unsigned long total = 0;
1478 	bool noswap = false;
1479 	int loop;
1480 
1481 	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1482 		noswap = true;
1483 	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1484 		noswap = true;
1485 
1486 	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1487 		if (loop)
1488 			drain_all_stock_async(memcg);
1489 		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1490 		/*
1491 		 * Allow limit shrinkers, which are triggered directly
1492 		 * by userspace, to catch signals and stop reclaim
1493 		 * after minimal progress, regardless of the margin.
1494 		 */
1495 		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1496 			break;
1497 		if (mem_cgroup_margin(memcg))
1498 			break;
1499 		/*
1500 		 * If nothing was reclaimed after two attempts, there
1501 		 * may be no reclaimable pages in this hierarchy.
1502 		 */
1503 		if (loop && !total)
1504 			break;
1505 	}
1506 	return total;
1507 }
1508 
1509 /**
1510  * test_mem_cgroup_node_reclaimable
1511  * @mem: the target memcg
1512  * @nid: the node ID to be checked.
1513  * @noswap : specify true here if the user wants file only information.
1514  *
1515  * This function returns whether the specified memcg contains any
1516  * reclaimable pages on a node. Returns true if there are any reclaimable
1517  * pages in the node.
1518  */
1519 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1520 		int nid, bool noswap)
1521 {
1522 	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1523 		return true;
1524 	if (noswap || !total_swap_pages)
1525 		return false;
1526 	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1527 		return true;
1528 	return false;
1529 
1530 }
1531 #if MAX_NUMNODES > 1
1532 
1533 /*
1534  * Always updating the nodemask is not very good - even if we have an empty
1535  * list or the wrong list here, we can start from some node and traverse all
1536  * nodes based on the zonelist. So update the list loosely once per 10 secs.
1537  *
1538  */
1539 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1540 {
1541 	int nid;
1542 	/*
1543 	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1544 	 * pagein/pageout changes since the last update.
1545 	 */
1546 	if (!atomic_read(&memcg->numainfo_events))
1547 		return;
1548 	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1549 		return;
1550 
1551 	/* make a nodemask where this memcg uses memory from */
1552 	memcg->scan_nodes = node_states[N_HIGH_MEMORY];
1553 
1554 	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1555 
1556 		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1557 			node_clear(nid, memcg->scan_nodes);
1558 	}
1559 
1560 	atomic_set(&memcg->numainfo_events, 0);
1561 	atomic_set(&memcg->numainfo_updating, 0);
1562 }
1563 
1564 /*
1565  * Selecting a node where we start reclaim from. Because what we need is just
1566  * reducing the usage counter, starting from anywhere is OK. Considering
1567  * memory reclaim from the current node, there are pros and cons.
1568  *
1569  * Freeing memory from the current node means freeing memory from a node which
1570  * we'll use or we've used. So, it may make the LRU bad. And if several threads
1571  * hit limits, they will see contention on a node. But freeing from a remote
1572  * node means more costs for memory reclaim because of memory latency.
1573  *
1574  * For now, we use round-robin. A better algorithm is welcome.
1575  */
1576 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1577 {
1578 	int node;
1579 
1580 	mem_cgroup_may_update_nodemask(memcg);
1581 	node = memcg->last_scanned_node;
1582 
1583 	node = next_node(node, memcg->scan_nodes);
1584 	if (node == MAX_NUMNODES)
1585 		node = first_node(memcg->scan_nodes);
1586 	/*
1587 	 * We call this when we hit the limit, not when pages are added to the LRU.
1588 	 * No LRU may hold pages because all pages are UNEVICTABLE or
1589 	 * the memcg is too small and all pages are not on the LRU. In that case,
1590 	 * we use the current node.
1591 	 */
1592 	if (unlikely(node == MAX_NUMNODES))
1593 		node = numa_node_id();
1594 
1595 	memcg->last_scanned_node = node;
1596 	return node;
1597 }
1598 
1599 /*
1600  * Check all nodes whether they contain reclaimable pages or not.
1601  * For a quick scan, we make use of scan_nodes. This allows us to skip
1602  * unused nodes. But scan_nodes is lazily updated and may not contain
1603  * enough new information. We need to double check.
1604  */
1605 bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1606 {
1607 	int nid;
1608 
1609 	/*
1610 	 * quick check...making use of scan_node.
1611 	 * We can skip unused nodes.
1612 	 */
1613 	if (!nodes_empty(memcg->scan_nodes)) {
1614 		for (nid = first_node(memcg->scan_nodes);
1615 		     nid < MAX_NUMNODES;
1616 		     nid = next_node(nid, memcg->scan_nodes)) {
1617 
1618 			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1619 				return true;
1620 		}
1621 	}
1622 	/*
1623 	 * Check rest of nodes.
1624 	 */
1625 	for_each_node_state(nid, N_HIGH_MEMORY) {
1626 		if (node_isset(nid, memcg->scan_nodes))
1627 			continue;
1628 		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1629 			return true;
1630 	}
1631 	return false;
1632 }
1633 
1634 #else
1635 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1636 {
1637 	return 0;
1638 }
1639 
1640 bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1641 {
1642 	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1643 }
1644 #endif
1645 
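/*
 * Soft limit reclaim for @root_memcg's hierarchy on @zone: walk the child
 * memcgs with mem_cgroup_iter() at priority 0, shrinking each reclaimable
 * victim in turn, and stop once the soft limit excess has been erased, a
 * reasonable fraction (excess >> 2) has been reclaimed, or the loop limit
 * is hit.
 */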
1646 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1647 				   struct zone *zone,
1648 				   gfp_t gfp_mask,
1649 				   unsigned long *total_scanned)
1650 {
1651 	struct mem_cgroup *victim = NULL;
1652 	int total = 0;
1653 	int loop = 0;
1654 	unsigned long excess;
1655 	unsigned long nr_scanned;
1656 	struct mem_cgroup_reclaim_cookie reclaim = {
1657 		.zone = zone,
1658 		.priority = 0,
1659 	};
1660 
1661 	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
1662 
1663 	while (1) {
1664 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1665 		if (!victim) {
1666 			loop++;
1667 			if (loop >= 2) {
1668 				/*
1669 				 * If we have not been able to reclaim
1670 				 * anything, it might be because there are
1671 				 * no reclaimable pages under this hierarchy.
1672 				 */
1673 				if (!total)
1674 					break;
1675 				/*
1676 				 * We want to do more targeted reclaim.
1677 				 * excess >> 2 is not too excessive, so as not to
1678 				 * reclaim too much, nor too little, so that we don't
1679 				 * keep coming back to reclaim from this cgroup.
1680 				 */
1681 				if (total >= (excess >> 2) ||
1682 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1683 					break;
1684 			}
1685 			continue;
1686 		}
1687 		if (!mem_cgroup_reclaimable(victim, false))
1688 			continue;
1689 		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1690 						     zone, &nr_scanned);
1691 		*total_scanned += nr_scanned;
1692 		if (!res_counter_soft_limit_excess(&root_memcg->res))
1693 			break;
1694 	}
1695 	mem_cgroup_iter_break(root_memcg, victim);
1696 	return total;
1697 }
1698 
1699 /*
1700  * Check whether the OOM killer is already running under our hierarchy.
1701  * If someone else is running it, return false.
1702  * Has to be called with memcg_oom_lock held.
1703  */
1704 static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
1705 {
1706 	struct mem_cgroup *iter, *failed = NULL;
1707 
1708 	for_each_mem_cgroup_tree(iter, memcg) {
1709 		if (iter->oom_lock) {
1710 			/*
1711 			 * this subtree of our hierarchy is already locked
1712 			 * so we cannot give a lock.
1713 			 */
1714 			failed = iter;
1715 			mem_cgroup_iter_break(memcg, iter);
1716 			break;
1717 		} else
1718 			iter->oom_lock = true;
1719 	}
1720 
1721 	if (!failed)
1722 		return true;
1723 
1724 	/*
1725 	 * OK, we failed to lock the whole subtree so we have to clean up
1726 	 * what we set up to the failing subtree
1727 	 */
1728 	for_each_mem_cgroup_tree(iter, memcg) {
1729 		if (iter == failed) {
1730 			mem_cgroup_iter_break(memcg, iter);
1731 			break;
1732 		}
1733 		iter->oom_lock = false;
1734 	}
1735 	return false;
1736 }
1737 
1738 /*
1739  * Has to be called with memcg_oom_lock
1740  */
1741 static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1742 {
1743 	struct mem_cgroup *iter;
1744 
1745 	for_each_mem_cgroup_tree(iter, memcg)
1746 		iter->oom_lock = false;
1747 	return 0;
1748 }
1749 
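/*
 * Increment the under_oom counter of every memcg in @memcg's subtree so that
 * OOM notification and recovery can see the hierarchy is under OOM.
 */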
1750 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1751 {
1752 	struct mem_cgroup *iter;
1753 
1754 	for_each_mem_cgroup_tree(iter, memcg)
1755 		atomic_inc(&iter->under_oom);
1756 }
1757 
1758 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1759 {
1760 	struct mem_cgroup *iter;
1761 
1762 	/*
1763 	 * When a new child is created while the hierarchy is under oom,
1764 	 * mem_cgroup_oom_lock() may not be called. We have to use
1765 	 * atomic_add_unless() here.
1766 	 */
1767 	for_each_mem_cgroup_tree(iter, memcg)
1768 		atomic_add_unless(&iter->under_oom, -1, 0);
1769 }
1770 
1771 static DEFINE_SPINLOCK(memcg_oom_lock);
1772 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1773 
1774 struct oom_wait_info {
1775 	struct mem_cgroup *mem;
1776 	wait_queue_t	wait;
1777 };
1778 
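/*
 * Wake-up callback for OOM waiters: wake a waiter only if the waking memcg
 * and the memcg it is waiting on belong to the same hierarchy (one is the
 * other's ancestor); otherwise leave it on the waitqueue.
 */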
1779 static int memcg_oom_wake_function(wait_queue_t *wait,
1780 	unsigned mode, int sync, void *arg)
1781 {
1782 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
1783 			  *oom_wait_memcg;
1784 	struct oom_wait_info *oom_wait_info;
1785 
1786 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1787 	oom_wait_memcg = oom_wait_info->mem;
1788 
1789 	/*
1790 	 * Both oom_wait_info->mem and wake_memcg are stable under us.
1791 	 * Then we can use css_is_ancestor() without worrying about RCU.
1792 	 */
1793 	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
1794 		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
1795 		return 0;
1796 	return autoremove_wake_function(wait, mode, sync, arg);
1797 }
1798 
1799 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
1800 {
1801 	/* for filtering, pass "memcg" as argument. */
1802 	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1803 }
1804 
1805 static void memcg_oom_recover(struct mem_cgroup *memcg)
1806 {
1807 	if (memcg && atomic_read(&memcg->under_oom))
1808 		memcg_wakeup_oom(memcg);
1809 }
1810 
1811 /*
1812  * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1813  */
1814 bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
1815 {
1816 	struct oom_wait_info owait;
1817 	bool locked, need_to_kill;
1818 
1819 	owait.mem = memcg;
1820 	owait.wait.flags = 0;
1821 	owait.wait.func = memcg_oom_wake_function;
1822 	owait.wait.private = current;
1823 	INIT_LIST_HEAD(&owait.wait.task_list);
1824 	need_to_kill = true;
1825 	mem_cgroup_mark_under_oom(memcg);
1826 
1827 	/* At first, try to OOM lock hierarchy under memcg.*/
1828 	spin_lock(&memcg_oom_lock);
1829 	locked = mem_cgroup_oom_lock(memcg);
1830 	/*
1831 	 * Even if signal_pending(), we can't quit charge() loop without
1832 	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1833 	 * under OOM is always welcomed, use TASK_KILLABLE here.
1834 	 */
1835 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1836 	if (!locked || memcg->oom_kill_disable)
1837 		need_to_kill = false;
1838 	if (locked)
1839 		mem_cgroup_oom_notify(memcg);
1840 	spin_unlock(&memcg_oom_lock);
1841 
1842 	if (need_to_kill) {
1843 		finish_wait(&memcg_oom_waitq, &owait.wait);
1844 		mem_cgroup_out_of_memory(memcg, mask);
1845 	} else {
1846 		schedule();
1847 		finish_wait(&memcg_oom_waitq, &owait.wait);
1848 	}
1849 	spin_lock(&memcg_oom_lock);
1850 	if (locked)
1851 		mem_cgroup_oom_unlock(memcg);
1852 	memcg_wakeup_oom(memcg);
1853 	spin_unlock(&memcg_oom_lock);
1854 
1855 	mem_cgroup_unmark_under_oom(memcg);
1856 
1857 	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1858 		return false;
1859 	/* Give the dying process a chance to exit */
1860 	schedule_timeout_uninterruptible(1);
1861 	return true;
1862 }
1863 
1864 /*
1865  * Currently used to update mapped file statistics, but the routine can be
1866  * generalized to update other statistics as well.
1867  *
1868  * Notes: Race condition
1869  *
1870  * We usually use page_cgroup_lock() for accessing page_cgroup member but
1871  * it tends to be costly. But considering some conditions, we don't need
1872  * to do so _always_.
1873  *
1874  * Considering "charge", lock_page_cgroup() is not required because all
1875  * file-stat operations happen after a page is attached to the radix-tree.
1876  * There is no race with "charge".
1877  *
1878  * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1879  * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
1880  * if there is a race with "uncharge". The statistics themselves are properly
1881  * handled by flags.
1882  *
1883  * Considering "move", this is the only case where we see a race. To make the
1884  * race window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect
1885  * whether there is a possibility of a race condition. If there is, we take a lock.
1886  */
1887 
1888 void mem_cgroup_update_page_stat(struct page *page,
1889 				 enum mem_cgroup_page_stat_item idx, int val)
1890 {
1891 	struct mem_cgroup *memcg;
1892 	struct page_cgroup *pc = lookup_page_cgroup(page);
1893 	bool need_unlock = false;
1894 	unsigned long uninitialized_var(flags);
1895 
1896 	if (mem_cgroup_disabled())
1897 		return;
1898 
1899 	rcu_read_lock();
1900 	memcg = pc->mem_cgroup;
1901 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
1902 		goto out;
1903 	/* pc->mem_cgroup is unstable ? */
1904 	if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) {
1905 		/* take a lock to protect access to pc->mem_cgroup */
1906 		move_lock_page_cgroup(pc, &flags);
1907 		need_unlock = true;
1908 		memcg = pc->mem_cgroup;
1909 		if (!memcg || !PageCgroupUsed(pc))
1910 			goto out;
1911 	}
1912 
1913 	switch (idx) {
1914 	case MEMCG_NR_FILE_MAPPED:
1915 		if (val > 0)
1916 			SetPageCgroupFileMapped(pc);
1917 		else if (!page_mapped(page))
1918 			ClearPageCgroupFileMapped(pc);
1919 		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1920 		break;
1921 	default:
1922 		BUG();
1923 	}
1924 
1925 	this_cpu_add(memcg->stat->count[idx], val);
1926 
1927 out:
1928 	if (unlikely(need_unlock))
1929 		move_unlock_page_cgroup(pc, &flags);
1930 	rcu_read_unlock();
1931 	return;
1932 }
1933 EXPORT_SYMBOL(mem_cgroup_update_page_stat);
1934 
1935 /*
1936  * size of first charge trial. "32" comes from vmscan.c's magic value.
1937  * TODO: it may be necessary to use bigger numbers on big iron.
1938  */
1939 #define CHARGE_BATCH	32U
1940 struct memcg_stock_pcp {
1941 	struct mem_cgroup *cached; /* this is never the root cgroup */
1942 	unsigned int nr_pages;
1943 	struct work_struct work;
1944 	unsigned long flags;
1945 #define FLUSHING_CACHED_CHARGE	(0)
1946 };
1947 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1948 static DEFINE_MUTEX(percpu_charge_mutex);
1949 
1950 /*
1951  * Try to consume stocked charge on this cpu. On success, one page is consumed
1952  * from the local stock and true is returned. If the stock is 0 or holds charges
1953  * from a cgroup which is not the current target, false is returned. The stock
1954  * will be refilled later.
1955  */
1956 static bool consume_stock(struct mem_cgroup *memcg)
1957 {
1958 	struct memcg_stock_pcp *stock;
1959 	bool ret = true;
1960 
1961 	stock = &get_cpu_var(memcg_stock);
1962 	if (memcg == stock->cached && stock->nr_pages)
1963 		stock->nr_pages--;
1964 	else /* need to call res_counter_charge */
1965 		ret = false;
1966 	put_cpu_var(memcg_stock);
1967 	return ret;
1968 }
1969 
1970 /*
1971  * Returns stocks cached in percpu to res_counter and reset cached information.
1972  */
1973 static void drain_stock(struct memcg_stock_pcp *stock)
1974 {
1975 	struct mem_cgroup *old = stock->cached;
1976 
1977 	if (stock->nr_pages) {
1978 		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
1979 
1980 		res_counter_uncharge(&old->res, bytes);
1981 		if (do_swap_account)
1982 			res_counter_uncharge(&old->memsw, bytes);
1983 		stock->nr_pages = 0;
1984 	}
1985 	stock->cached = NULL;
1986 }
1987 
1988 /*
1989  * This must be called with preemption disabled, or by a thread which is
1990  * pinned to the local cpu.
1991  */
1992 static void drain_local_stock(struct work_struct *dummy)
1993 {
1994 	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1995 	drain_stock(stock);
1996 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1997 }
1998 
1999 /*
2000  * Cache charges (nr_pages) obtained from the res_counter in the local per-cpu area.
2001  * These will be consumed later by the consume_stock() function.
2002  */
2003 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2004 {
2005 	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2006 
2007 	if (stock->cached != memcg) { /* reset if necessary */
2008 		drain_stock(stock);
2009 		stock->cached = memcg;
2010 	}
2011 	stock->nr_pages += nr_pages;
2012 	put_cpu_var(memcg_stock);
2013 }
2014 
2015 /*
2016  * Drains all per-CPU charge caches for the given root_memcg and the
2017  * hierarchy subtree under it. The sync flag says whether we should block
2018  * until the work is done.
2019  */
2020 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2021 {
2022 	int cpu, curcpu;
2023 
2024 	/* Notify other cpus that system-wide "drain" is running */
2025 	get_online_cpus();
2026 	curcpu = get_cpu();
2027 	for_each_online_cpu(cpu) {
2028 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2029 		struct mem_cgroup *memcg;
2030 
2031 		memcg = stock->cached;
2032 		if (!memcg || !stock->nr_pages)
2033 			continue;
2034 		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2035 			continue;
2036 		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2037 			if (cpu == curcpu)
2038 				drain_local_stock(&stock->work);
2039 			else
2040 				schedule_work_on(cpu, &stock->work);
2041 		}
2042 	}
2043 	put_cpu();
2044 
2045 	if (!sync)
2046 		goto out;
2047 
2048 	for_each_online_cpu(cpu) {
2049 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2050 		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2051 			flush_work(&stock->work);
2052 	}
2053 out:
2054  	put_online_cpus();
2055 }
2056 
2057 /*
2058  * Tries to drain stocked charges on other cpus. This function is asynchronous
2059  * and just schedules a work item per cpu to drain locally on each cpu. The
2060  * caller can expect some charges to go back to the res_counter later, but
2061  * cannot wait for it.
2062  */
2063 static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2064 {
2065 	/*
2066 	 * If someone calls draining, avoid adding more kworker runs.
2067 	 */
2068 	if (!mutex_trylock(&percpu_charge_mutex))
2069 		return;
2070 	drain_all_stock(root_memcg, false);
2071 	mutex_unlock(&percpu_charge_mutex);
2072 }
2073 
2074 /* This is a synchronous drain interface. */
2075 static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2076 {
2077 	/* called when force_empty is called */
2078 	mutex_lock(&percpu_charge_mutex);
2079 	drain_all_stock(root_memcg, true);
2080 	mutex_unlock(&percpu_charge_mutex);
2081 }
2082 
2083 /*
2084  * This function drains the percpu counter values from a DEAD cpu and
2085  * moves them to the local cpu. Note that this function can be preempted.
2086  */
2087 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2088 {
2089 	int i;
2090 
2091 	spin_lock(&memcg->pcp_counter_lock);
2092 	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
2093 		long x = per_cpu(memcg->stat->count[i], cpu);
2094 
2095 		per_cpu(memcg->stat->count[i], cpu) = 0;
2096 		memcg->nocpu_base.count[i] += x;
2097 	}
2098 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2099 		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2100 
2101 		per_cpu(memcg->stat->events[i], cpu) = 0;
2102 		memcg->nocpu_base.events[i] += x;
2103 	}
2104 	/* need to clear ON_MOVE value, works as a kind of lock. */
2105 	per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2106 	spin_unlock(&memcg->pcp_counter_lock);
2107 }
2108 
2109 static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
2110 {
2111 	int idx = MEM_CGROUP_ON_MOVE;
2112 
2113 	spin_lock(&memcg->pcp_counter_lock);
2114 	per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
2115 	spin_unlock(&memcg->pcp_counter_lock);
2116 }
2117 
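/*
 * CPU hotplug callback: on CPU_ONLINE, resynchronize the per-cpu ON_MOVE
 * counter; on CPU_DEAD/CPU_DEAD_FROZEN, fold the dead cpu's percpu statistics
 * into nocpu_base and drain its cached charges back to the res_counter.
 */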
2118 static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2119 					unsigned long action,
2120 					void *hcpu)
2121 {
2122 	int cpu = (unsigned long)hcpu;
2123 	struct memcg_stock_pcp *stock;
2124 	struct mem_cgroup *iter;
2125 
2126 	if ((action == CPU_ONLINE)) {
2127 		for_each_mem_cgroup(iter)
2128 			synchronize_mem_cgroup_on_move(iter, cpu);
2129 		return NOTIFY_OK;
2130 	}
2131 
2132 	if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN))
2133 		return NOTIFY_OK;
2134 
2135 	for_each_mem_cgroup(iter)
2136 		mem_cgroup_drain_pcp_counter(iter, cpu);
2137 
2138 	stock = &per_cpu(memcg_stock, cpu);
2139 	drain_stock(stock);
2140 	return NOTIFY_OK;
2141 }
2142 
2143 
2144 /* See __mem_cgroup_try_charge() for details */
2145 enum {
2146 	CHARGE_OK,		/* success */
2147 	CHARGE_RETRY,		/* need to retry but retry is not bad */
2148 	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2149 	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough resources */
2150 	CHARGE_OOM_DIE,		/* the current task is killed because of OOM */
2151 };
2152 
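/*
 * Charge @nr_pages to @memcg's res (and memsw) counters. On failure, try
 * direct reclaim from the memcg that hit its limit and report how the caller
 * should proceed via the CHARGE_* codes above.
 */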
2153 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2154 				unsigned int nr_pages, bool oom_check)
2155 {
2156 	unsigned long csize = nr_pages * PAGE_SIZE;
2157 	struct mem_cgroup *mem_over_limit;
2158 	struct res_counter *fail_res;
2159 	unsigned long flags = 0;
2160 	int ret;
2161 
2162 	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2163 
2164 	if (likely(!ret)) {
2165 		if (!do_swap_account)
2166 			return CHARGE_OK;
2167 		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2168 		if (likely(!ret))
2169 			return CHARGE_OK;
2170 
2171 		res_counter_uncharge(&memcg->res, csize);
2172 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2173 		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2174 	} else
2175 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2176 	/*
2177 	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2178 	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2179 	 *
2180 	 * Never reclaim on behalf of optional batching, retry with a
2181 	 * single page instead.
2182 	 */
2183 	if (nr_pages == CHARGE_BATCH)
2184 		return CHARGE_RETRY;
2185 
2186 	if (!(gfp_mask & __GFP_WAIT))
2187 		return CHARGE_WOULDBLOCK;
2188 
2189 	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2190 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2191 		return CHARGE_RETRY;
2192 	/*
2193 	 * Even though the limit is exceeded at this point, reclaim
2194 	 * may have been able to free some pages.  Retry the charge
2195 	 * before killing the task.
2196 	 *
2197 	 * Only for regular pages, though: huge pages are rather
2198 	 * unlikely to succeed so close to the limit, and we fall back
2199 	 * to regular pages anyway in case of failure.
2200 	 */
2201 	if (nr_pages == 1 && ret)
2202 		return CHARGE_RETRY;
2203 
2204 	/*
2205 	 * At task move, charge accounts can be doubly counted. So, it's
2206 	 * better to wait until the end of task_move if something is going on.
2207 	 */
2208 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2209 		return CHARGE_RETRY;
2210 
2211 	/* If we don't need to call the oom-killer at all, return immediately */
2212 	if (!oom_check)
2213 		return CHARGE_NOMEM;
2214 	/* check OOM */
2215 	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2216 		return CHARGE_OOM_DIE;
2217 
2218 	return CHARGE_RETRY;
2219 }
2220 
2221 /*
2222  * __mem_cgroup_try_charge() does
2223  * 1. detect memcg to be charged against from passed *mm and *ptr,
2224  * 2. update res_counter
2225  * 3. call memory reclaim if necessary.
2226  *
2227  * In some special cases, if the task is dying (fatal_signal_pending() or
2228  * TIF_MEMDIE set), this function returns -EINTR while writing root_mem_cgroup
2229  * to *ptr. There are two reasons for this. 1: dying threads should quit as soon
2230  * as possible without any hazards. 2: all pages should have a valid
2231  * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
2232  * pointer, that is treated as a charge to root_mem_cgroup.
2233  *
2234  * So __mem_cgroup_try_charge() will return
2235  *  0       ...  on success, filling *ptr with a valid memcg pointer.
2236  *  -ENOMEM ...  charge failure because of resource limits.
2237  *  -EINTR  ...  if the thread is dying. *ptr is filled with root_mem_cgroup.
2238  *
2239  * Unlike the exported interface, an "oom" parameter is added. if oom==true,
2240  * the oom-killer can be invoked.
2241  */
2242 static int __mem_cgroup_try_charge(struct mm_struct *mm,
2243 				   gfp_t gfp_mask,
2244 				   unsigned int nr_pages,
2245 				   struct mem_cgroup **ptr,
2246 				   bool oom)
2247 {
2248 	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2249 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2250 	struct mem_cgroup *memcg = NULL;
2251 	int ret;
2252 
2253 	/*
2254 	 * Unlike the global VM's OOM-kill, we're not in a memory shortage
2255 	 * at the system level. So, allow dying processes to proceed in
2256 	 * addition to MEMDIE processes.
2257 	 */
2258 	if (unlikely(test_thread_flag(TIF_MEMDIE)
2259 		     || fatal_signal_pending(current)))
2260 		goto bypass;
2261 
2262 	/*
2263 	 * We always charge the cgroup the mm_struct belongs to.
2264 	 * The mm_struct's mem_cgroup changes on task migration if the
2265 	 * thread group leader migrates. It's possible that mm is not
2266 	 * set, if so charge the init_mm (happens for pagecache usage).
2267 	 */
2268 	if (!*ptr && !mm)
2269 		*ptr = root_mem_cgroup;
2270 again:
2271 	if (*ptr) { /* css should be a valid one */
2272 		memcg = *ptr;
2273 		VM_BUG_ON(css_is_removed(&memcg->css));
2274 		if (mem_cgroup_is_root(memcg))
2275 			goto done;
2276 		if (nr_pages == 1 && consume_stock(memcg))
2277 			goto done;
2278 		css_get(&memcg->css);
2279 	} else {
2280 		struct task_struct *p;
2281 
2282 		rcu_read_lock();
2283 		p = rcu_dereference(mm->owner);
2284 		/*
2285 		 * Because we don't have task_lock(), "p" can exit.
2286 		 * In that case, "memcg" can point to root, or p can be NULL due to
2287 		 * a race with swapoff. Then, we have a small risk of mis-accounting.
2288 		 * But this kind of mis-accounting by races always happens because
2289 		 * we don't hold cgroup_mutex(). Taking it would be overkill, so we
2290 		 * allow that small race here.
2291 		 * (*) swapoff, for example, will charge against the mm_struct, not
2292 		 * the task_struct. So, mm->owner can be NULL.
2293 		 */
2294 		memcg = mem_cgroup_from_task(p);
2295 		if (!memcg)
2296 			memcg = root_mem_cgroup;
2297 		if (mem_cgroup_is_root(memcg)) {
2298 			rcu_read_unlock();
2299 			goto done;
2300 		}
2301 		if (nr_pages == 1 && consume_stock(memcg)) {
2302 			/*
2303 			 * It seems dangerous to access memcg without css_get().
2304 			 * But considering how consume_stock works, it's not
2305 			 * necessary. If consume_stock succeeds, some charges
2306 			 * from this memcg are cached on this cpu. So, we
2307 			 * don't need to call css_get()/css_tryget() before
2308 			 * calling consume_stock().
2309 			 */
2310 			rcu_read_unlock();
2311 			goto done;
2312 		}
2313 		/* after here, we may be blocked. we need to get refcnt */
2314 		if (!css_tryget(&memcg->css)) {
2315 			rcu_read_unlock();
2316 			goto again;
2317 		}
2318 		rcu_read_unlock();
2319 	}
2320 
2321 	do {
2322 		bool oom_check;
2323 
2324 		/* If killed, bypass charge */
2325 		if (fatal_signal_pending(current)) {
2326 			css_put(&memcg->css);
2327 			goto bypass;
2328 		}
2329 
2330 		oom_check = false;
2331 		if (oom && !nr_oom_retries) {
2332 			oom_check = true;
2333 			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2334 		}
2335 
2336 		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
2337 		switch (ret) {
2338 		case CHARGE_OK:
2339 			break;
2340 		case CHARGE_RETRY: /* not in OOM situation but retry */
2341 			batch = nr_pages;
2342 			css_put(&memcg->css);
2343 			memcg = NULL;
2344 			goto again;
2345 		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2346 			css_put(&memcg->css);
2347 			goto nomem;
2348 		case CHARGE_NOMEM: /* OOM routine works */
2349 			if (!oom) {
2350 				css_put(&memcg->css);
2351 				goto nomem;
2352 			}
2353 			/* If oom, we never return -ENOMEM */
2354 			nr_oom_retries--;
2355 			break;
2356 		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2357 			css_put(&memcg->css);
2358 			goto bypass;
2359 		}
2360 	} while (ret != CHARGE_OK);
2361 
2362 	if (batch > nr_pages)
2363 		refill_stock(memcg, batch - nr_pages);
2364 	css_put(&memcg->css);
2365 done:
2366 	*ptr = memcg;
2367 	return 0;
2368 nomem:
2369 	*ptr = NULL;
2370 	return -ENOMEM;
2371 bypass:
2372 	*ptr = root_mem_cgroup;
2373 	return -EINTR;
2374 }
2375 
2376 /*
2377  * Sometimes we have to undo a charge we got by try_charge().
2378  * This function is for that: it does the uncharge and puts the css refcount
2379  * gotten by try_charge().
2380  */
2381 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2382 				       unsigned int nr_pages)
2383 {
2384 	if (!mem_cgroup_is_root(memcg)) {
2385 		unsigned long bytes = nr_pages * PAGE_SIZE;
2386 
2387 		res_counter_uncharge(&memcg->res, bytes);
2388 		if (do_swap_account)
2389 			res_counter_uncharge(&memcg->memsw, bytes);
2390 	}
2391 }
2392 
2393 /*
2394  * A helper function to get a mem_cgroup from an ID. Must be called under
2395  * rcu_read_lock(). The caller must check css_is_removed() or similar if
2396  * that is a concern. (Dropping a refcount from swap can be called against a
2397  * removed memcg.)
2398  */
2399 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2400 {
2401 	struct cgroup_subsys_state *css;
2402 
2403 	/* ID 0 is unused ID */
2404 	if (!id)
2405 		return NULL;
2406 	css = css_lookup(&mem_cgroup_subsys, id);
2407 	if (!css)
2408 		return NULL;
2409 	return container_of(css, struct mem_cgroup, css);
2410 }
2411 
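/*
 * Look up the mem_cgroup that @page is charged to (or, for a SwapCache page,
 * the one recorded in swap_cgroup) and take a css reference on it. Returns
 * NULL if the page is uncharged or the reference cannot be taken.
 */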
2412 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2413 {
2414 	struct mem_cgroup *memcg = NULL;
2415 	struct page_cgroup *pc;
2416 	unsigned short id;
2417 	swp_entry_t ent;
2418 
2419 	VM_BUG_ON(!PageLocked(page));
2420 
2421 	pc = lookup_page_cgroup(page);
2422 	lock_page_cgroup(pc);
2423 	if (PageCgroupUsed(pc)) {
2424 		memcg = pc->mem_cgroup;
2425 		if (memcg && !css_tryget(&memcg->css))
2426 			memcg = NULL;
2427 	} else if (PageSwapCache(page)) {
2428 		ent.val = page_private(page);
2429 		id = lookup_swap_cgroup_id(ent);
2430 		rcu_read_lock();
2431 		memcg = mem_cgroup_lookup(id);
2432 		if (memcg && !css_tryget(&memcg->css))
2433 			memcg = NULL;
2434 		rcu_read_unlock();
2435 	}
2436 	unlock_page_cgroup(pc);
2437 	return memcg;
2438 }
2439 
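/*
 * Commit a charge obtained by try_charge to @page: set pc->mem_cgroup and the
 * USED/CACHE flags according to @ctype, then update charge statistics and
 * event counters. With @lrucare, the page is temporarily taken off the LRU so
 * pc->mem_cgroup can be switched safely.
 */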
2440 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2441 				       struct page *page,
2442 				       unsigned int nr_pages,
2443 				       struct page_cgroup *pc,
2444 				       enum charge_type ctype,
2445 				       bool lrucare)
2446 {
2447 	struct zone *uninitialized_var(zone);
2448 	bool was_on_lru = false;
2449 
2450 	lock_page_cgroup(pc);
2451 	if (unlikely(PageCgroupUsed(pc))) {
2452 		unlock_page_cgroup(pc);
2453 		__mem_cgroup_cancel_charge(memcg, nr_pages);
2454 		return;
2455 	}
2456 	/*
2457 	 * we don't need page_cgroup_lock for tail pages, because they are not
2458 	 * accessed by any other context at this point.
2459 	 */
2460 
2461 	/*
2462 	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2463 	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2464 	 */
2465 	if (lrucare) {
2466 		zone = page_zone(page);
2467 		spin_lock_irq(&zone->lru_lock);
2468 		if (PageLRU(page)) {
2469 			ClearPageLRU(page);
2470 			del_page_from_lru_list(zone, page, page_lru(page));
2471 			was_on_lru = true;
2472 		}
2473 	}
2474 
2475 	pc->mem_cgroup = memcg;
2476 	/*
2477 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2478 	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2479 	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2480 	 * before USED bit, we need memory barrier here.
2481 	 * See mem_cgroup_add_lru_list(), etc.
2482  	 */
2483 	smp_wmb();
2484 	switch (ctype) {
2485 	case MEM_CGROUP_CHARGE_TYPE_CACHE:
2486 	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2487 		SetPageCgroupCache(pc);
2488 		SetPageCgroupUsed(pc);
2489 		break;
2490 	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2491 		ClearPageCgroupCache(pc);
2492 		SetPageCgroupUsed(pc);
2493 		break;
2494 	default:
2495 		break;
2496 	}
2497 
2498 	if (lrucare) {
2499 		if (was_on_lru) {
2500 			VM_BUG_ON(PageLRU(page));
2501 			SetPageLRU(page);
2502 			add_page_to_lru_list(zone, page, page_lru(page));
2503 		}
2504 		spin_unlock_irq(&zone->lru_lock);
2505 	}
2506 
2507 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
2508 	unlock_page_cgroup(pc);
2509 
2510 	/*
2511 	 * "charge_statistics" updated the event counter. Then, check it.
2512 	 * Insert the ancestor (and the ancestor's ancestors) into the soft-limit
2513 	 * RB-tree if they exceed the soft limit.
2514 	 */
2515 	memcg_check_events(memcg, page);
2516 }
2517 
2518 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2519 
2520 #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2521 			(1 << PCG_MIGRATION))
2522 /*
2523  * Because tail pages are not marked as "used", set it. We're under
2524  * zone->lru_lock, 'splitting on pmd' and compound_lock.
2525  * charge/uncharge will never happen and move_account() is done under
2526  * compound_lock(), so we don't have to take care of races.
2527  */
2528 void mem_cgroup_split_huge_fixup(struct page *head)
2529 {
2530 	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2531 	struct page_cgroup *pc;
2532 	int i;
2533 
2534 	if (mem_cgroup_disabled())
2535 		return;
2536 	for (i = 1; i < HPAGE_PMD_NR; i++) {
2537 		pc = head_pc + i;
2538 		pc->mem_cgroup = head_pc->mem_cgroup;
2539 		smp_wmb();/* see __commit_charge() */
2540 		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2541 	}
2542 }
2543 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2544 
2545 /**
2546  * mem_cgroup_move_account - move account of the page
2547  * @page: the page
2548  * @nr_pages: number of regular pages (>1 for huge pages)
2549  * @pc:	page_cgroup of the page.
2550  * @from: mem_cgroup which the page is moved from.
2551  * @to:	mem_cgroup which the page is moved to. @from != @to.
2552  * @uncharge: whether we should call uncharge and css_put against @from.
2553  *
2554  * The caller must confirm following.
2555  * - page is not on LRU (isolate_page() is useful.)
2556  * - compound_lock is held when nr_pages > 1
2557  *
2558  * This function doesn't do "charge" or css_get to the new cgroup. That should
2559  * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge
2560  * is true, this function does "uncharge" from the old cgroup; if @uncharge is
2561  * false, it doesn't, so the caller should do the "uncharge" itself.
2562  */
2563 static int mem_cgroup_move_account(struct page *page,
2564 				   unsigned int nr_pages,
2565 				   struct page_cgroup *pc,
2566 				   struct mem_cgroup *from,
2567 				   struct mem_cgroup *to,
2568 				   bool uncharge)
2569 {
2570 	unsigned long flags;
2571 	int ret;
2572 
2573 	VM_BUG_ON(from == to);
2574 	VM_BUG_ON(PageLRU(page));
2575 	/*
2576 	 * The page is isolated from LRU. So, collapse function
2577 	 * will not handle this page. But page splitting can happen.
2578 	 * Do this check under compound_page_lock(). The caller should
2579 	 * hold it.
2580 	 */
2581 	ret = -EBUSY;
2582 	if (nr_pages > 1 && !PageTransHuge(page))
2583 		goto out;
2584 
2585 	lock_page_cgroup(pc);
2586 
2587 	ret = -EINVAL;
2588 	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2589 		goto unlock;
2590 
2591 	move_lock_page_cgroup(pc, &flags);
2592 
2593 	if (PageCgroupFileMapped(pc)) {
2594 		/* Update mapped_file data for mem_cgroup */
2595 		preempt_disable();
2596 		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2597 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2598 		preempt_enable();
2599 	}
2600 	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2601 	if (uncharge)
2602 		/* This is not "cancel", but cancel_charge does all we need. */
2603 		__mem_cgroup_cancel_charge(from, nr_pages);
2604 
2605 	/* caller should have done css_get */
2606 	pc->mem_cgroup = to;
2607 	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
2608 	/*
2609 	 * We charge against "to", which may not have any tasks. Then, "to"
2610 	 * can be under rmdir(). But in the current implementation, the callers of
2611 	 * this function are just force_empty() and move charge, so it's
2612 	 * guaranteed that "to" is never removed. So, we don't check the rmdir
2613 	 * status here.
2614 	 */
2615 	move_unlock_page_cgroup(pc, &flags);
2616 	ret = 0;
2617 unlock:
2618 	unlock_page_cgroup(pc);
2619 	/*
2620 	 * check events
2621 	 */
2622 	memcg_check_events(to, page);
2623 	memcg_check_events(from, page);
2624 out:
2625 	return ret;
2626 }
2627 
2628 /*
2629  * move charges to its parent.
2630  */
2631 
2632 static int mem_cgroup_move_parent(struct page *page,
2633 				  struct page_cgroup *pc,
2634 				  struct mem_cgroup *child,
2635 				  gfp_t gfp_mask)
2636 {
2637 	struct cgroup *cg = child->css.cgroup;
2638 	struct cgroup *pcg = cg->parent;
2639 	struct mem_cgroup *parent;
2640 	unsigned int nr_pages;
2641 	unsigned long uninitialized_var(flags);
2642 	int ret;
2643 
2644 	/* Is ROOT ? */
2645 	if (!pcg)
2646 		return -EINVAL;
2647 
2648 	ret = -EBUSY;
2649 	if (!get_page_unless_zero(page))
2650 		goto out;
2651 	if (isolate_lru_page(page))
2652 		goto put;
2653 
2654 	nr_pages = hpage_nr_pages(page);
2655 
2656 	parent = mem_cgroup_from_cont(pcg);
2657 	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
2658 	if (ret)
2659 		goto put_back;
2660 
2661 	if (nr_pages > 1)
2662 		flags = compound_lock_irqsave(page);
2663 
2664 	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
2665 	if (ret)
2666 		__mem_cgroup_cancel_charge(parent, nr_pages);
2667 
2668 	if (nr_pages > 1)
2669 		compound_unlock_irqrestore(page, flags);
2670 put_back:
2671 	putback_lru_page(page);
2672 put:
2673 	put_page(page);
2674 out:
2675 	return ret;
2676 }
2677 
2678 /*
2679  * Charge the memory controller for page usage.
2680  * Return
2681  * 0 if the charge was successful
2682  * < 0 if the cgroup is over its limit
2683  */
2684 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2685 				gfp_t gfp_mask, enum charge_type ctype)
2686 {
2687 	struct mem_cgroup *memcg = NULL;
2688 	unsigned int nr_pages = 1;
2689 	struct page_cgroup *pc;
2690 	bool oom = true;
2691 	int ret;
2692 
2693 	if (PageTransHuge(page)) {
2694 		nr_pages <<= compound_order(page);
2695 		VM_BUG_ON(!PageTransHuge(page));
2696 		/*
2697 		 * Never OOM-kill a process for a huge page.  The
2698 		 * fault handler will fall back to regular pages.
2699 		 */
2700 		oom = false;
2701 	}
2702 
2703 	pc = lookup_page_cgroup(page);
2704 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2705 	if (ret == -ENOMEM)
2706 		return ret;
2707 	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
2708 	return 0;
2709 }
2710 
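/*
 * Charge a newly allocated anonymous page to the memcg of @mm. May reclaim
 * and, for regular (non-huge) pages, invoke the memcg OOM handler.
 */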
2711 int mem_cgroup_newpage_charge(struct page *page,
2712 			      struct mm_struct *mm, gfp_t gfp_mask)
2713 {
2714 	if (mem_cgroup_disabled())
2715 		return 0;
2716 	VM_BUG_ON(page_mapped(page));
2717 	VM_BUG_ON(page->mapping && !PageAnon(page));
2718 	VM_BUG_ON(!mm);
2719 	return mem_cgroup_charge_common(page, mm, gfp_mask,
2720 					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2721 }
2722 
2723 static void
2724 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2725 					enum charge_type ctype);
2726 
2727 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2728 				gfp_t gfp_mask)
2729 {
2730 	struct mem_cgroup *memcg = NULL;
2731 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
2732 	int ret;
2733 
2734 	if (mem_cgroup_disabled())
2735 		return 0;
2736 	if (PageCompound(page))
2737 		return 0;
2738 
2739 	if (unlikely(!mm))
2740 		mm = &init_mm;
2741 	if (!page_is_file_cache(page))
2742 		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2743 
2744 	if (!PageSwapCache(page))
2745 		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
2746 	else { /* page is swapcache/shmem */
2747 		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
2748 		if (!ret)
2749 			__mem_cgroup_commit_charge_swapin(page, memcg, type);
2750 	}
2751 	return ret;
2752 }
2753 
2754 /*
2755  * While swap-in, try_charge -> commit or cancel, the page is locked.
2756  * And when try_charge() successfully returns, one refcnt to memcg without
2757  * struct page_cgroup is acquired. This refcnt will be consumed by
2758  * "commit()" or removed by "cancel()"
2759  */
2760 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2761 				 struct page *page,
2762 				 gfp_t mask, struct mem_cgroup **memcgp)
2763 {
2764 	struct mem_cgroup *memcg;
2765 	int ret;
2766 
2767 	*memcgp = NULL;
2768 
2769 	if (mem_cgroup_disabled())
2770 		return 0;
2771 
2772 	if (!do_swap_account)
2773 		goto charge_cur_mm;
2774 	/*
2775 	 * A racing thread's fault, or swapoff, may have already updated
2776 	 * the pte, and even removed page from swap cache: in those cases
2777 	 * do_swap_page()'s pte_same() test will fail; but there's also a
2778 	 * KSM case which does need to charge the page.
2779 	 */
2780 	if (!PageSwapCache(page))
2781 		goto charge_cur_mm;
2782 	memcg = try_get_mem_cgroup_from_page(page);
2783 	if (!memcg)
2784 		goto charge_cur_mm;
2785 	*memcgp = memcg;
2786 	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
2787 	css_put(&memcg->css);
2788 	if (ret == -EINTR)
2789 		ret = 0;
2790 	return ret;
2791 charge_cur_mm:
2792 	if (unlikely(!mm))
2793 		mm = &init_mm;
2794 	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
2795 	if (ret == -EINTR)
2796 		ret = 0;
2797 	return ret;
2798 }
2799 
2800 static void
2801 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
2802 					enum charge_type ctype)
2803 {
2804 	struct page_cgroup *pc;
2805 
2806 	if (mem_cgroup_disabled())
2807 		return;
2808 	if (!memcg)
2809 		return;
2810 	cgroup_exclude_rmdir(&memcg->css);
2811 
2812 	pc = lookup_page_cgroup(page);
2813 	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
2814 	/*
2815 	 * Now swap is on-memory. This means this page may be
2816 	 * counted both as mem and swap....double count.
2817 	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2818 	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2819 	 * may call delete_from_swap_cache() before we reach here.
2820 	 */
2821 	if (do_swap_account && PageSwapCache(page)) {
2822 		swp_entry_t ent = {.val = page_private(page)};
2823 		struct mem_cgroup *swap_memcg;
2824 		unsigned short id;
2825 
2826 		id = swap_cgroup_record(ent, 0);
2827 		rcu_read_lock();
2828 		swap_memcg = mem_cgroup_lookup(id);
2829 		if (swap_memcg) {
2830 			/*
2831 			 * This recorded memcg can be obsolete one. So, avoid
2832 			 * calling css_tryget
2833 			 */
2834 			if (!mem_cgroup_is_root(swap_memcg))
2835 				res_counter_uncharge(&swap_memcg->memsw,
2836 						     PAGE_SIZE);
2837 			mem_cgroup_swap_statistics(swap_memcg, false);
2838 			mem_cgroup_put(swap_memcg);
2839 		}
2840 		rcu_read_unlock();
2841 	}
2842 	 * At swapin, we may charge against a cgroup which has no tasks.
2843 	 * At swapin, we may charge account against cgroup which has no tasks.
2844 	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2845 	 * In that case, we need to call pre_destroy() again. check it here.
2846 	 */
2847 	cgroup_release_and_wakeup_rmdir(&memcg->css);
2848 }
2849 
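/*
 * Commit a swapin charge prepared by mem_cgroup_try_charge_swapin(),
 * accounting the page as anonymous (MAPPED).
 */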
2850 void mem_cgroup_commit_charge_swapin(struct page *page,
2851 				     struct mem_cgroup *memcg)
2852 {
2853 	__mem_cgroup_commit_charge_swapin(page, memcg,
2854 					  MEM_CGROUP_CHARGE_TYPE_MAPPED);
2855 }
2856 
2857 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
2858 {
2859 	if (mem_cgroup_disabled())
2860 		return;
2861 	if (!memcg)
2862 		return;
2863 	__mem_cgroup_cancel_charge(memcg, 1);
2864 }
2865 
2866 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
2867 				   unsigned int nr_pages,
2868 				   const enum charge_type ctype)
2869 {
2870 	struct memcg_batch_info *batch = NULL;
2871 	bool uncharge_memsw = true;
2872 
2873 	/* If swapout, usage of swap doesn't decrease */
2874 	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2875 		uncharge_memsw = false;
2876 
2877 	batch = &current->memcg_batch;
2878 	/*
2879 	 * Usually, we do css_get() when we remember a memcg pointer.
2880 	 * But in this case, we keep res->usage until end of a series of
2881 	 * uncharges. Then, it's ok to ignore memcg's refcnt.
2882 	 */
2883 	if (!batch->memcg)
2884 		batch->memcg = memcg;
2885 	/*
2886 	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2887 	 * In those cases, all pages freed continuously can be expected to be in
2888 	 * the same cgroup and we have a chance to coalesce uncharges.
2889 	 * But we uncharge one by one if the task is killed by OOM (TIF_MEMDIE)
2890 	 * because we want to do uncharge as soon as possible.
2891 	 */
2892 
2893 	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2894 		goto direct_uncharge;
2895 
2896 	if (nr_pages > 1)
2897 		goto direct_uncharge;
2898 
2899 	/*
2900 	 * In the typical case, batch->memcg == memcg. This means we can
2901 	 * merge a series of uncharges into one uncharge of the res_counter.
2902 	 * If not, we uncharge the res_counter one by one.
2903 	 */
2904 	if (batch->memcg != memcg)
2905 		goto direct_uncharge;
2906 	/* remember freed charge and uncharge it later */
2907 	batch->nr_pages++;
2908 	if (uncharge_memsw)
2909 		batch->memsw_nr_pages++;
2910 	return;
2911 direct_uncharge:
2912 	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
2913 	if (uncharge_memsw)
2914 		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
2915 	if (unlikely(batch->memcg != memcg))
2916 		memcg_oom_recover(memcg);
2917 	return;
2918 }
2919 
2920 /*
2921  * uncharge if !page_mapped(page)
2922  */
2923 static struct mem_cgroup *
2924 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2925 {
2926 	struct mem_cgroup *memcg = NULL;
2927 	unsigned int nr_pages = 1;
2928 	struct page_cgroup *pc;
2929 
2930 	if (mem_cgroup_disabled())
2931 		return NULL;
2932 
2933 	if (PageSwapCache(page))
2934 		return NULL;
2935 
2936 	if (PageTransHuge(page)) {
2937 		nr_pages <<= compound_order(page);
2938 		VM_BUG_ON(!PageTransHuge(page));
2939 	}
2940 	/*
2941 	 * Check if our page_cgroup is valid
2942 	 */
2943 	pc = lookup_page_cgroup(page);
2944 	if (unlikely(!PageCgroupUsed(pc)))
2945 		return NULL;
2946 
2947 	lock_page_cgroup(pc);
2948 
2949 	memcg = pc->mem_cgroup;
2950 
2951 	if (!PageCgroupUsed(pc))
2952 		goto unlock_out;
2953 
2954 	switch (ctype) {
2955 	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2956 	case MEM_CGROUP_CHARGE_TYPE_DROP:
2957 		/* See mem_cgroup_prepare_migration() */
2958 		if (page_mapped(page) || PageCgroupMigration(pc))
2959 			goto unlock_out;
2960 		break;
2961 	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2962 		if (!PageAnon(page)) {	/* Shared memory */
2963 			if (page->mapping && !page_is_file_cache(page))
2964 				goto unlock_out;
2965 		} else if (page_mapped(page)) /* Anon */
2966 				goto unlock_out;
2967 		break;
2968 	default:
2969 		break;
2970 	}
2971 
2972 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
2973 
2974 	ClearPageCgroupUsed(pc);
2975 	/*
2976 	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
2977 	 * freed from LRU. This is safe because uncharged page is expected not
2978 	 * to be reused (freed soon). Exception is SwapCache, it's handled by
2979 	 * special functions.
2980 	 */
2981 
2982 	unlock_page_cgroup(pc);
2983 	/*
2984 	 * even after unlock, we have memcg->res.usage here and this memcg
2985 	 * will never be freed.
2986 	 */
2987 	memcg_check_events(memcg, page);
2988 	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2989 		mem_cgroup_swap_statistics(memcg, true);
2990 		mem_cgroup_get(memcg);
2991 	}
2992 	if (!mem_cgroup_is_root(memcg))
2993 		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
2994 
2995 	return memcg;
2996 
2997 unlock_out:
2998 	unlock_page_cgroup(pc);
2999 	return NULL;
3000 }
3001 
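/*
 * Uncharge an anonymous page once it is no longer mapped; a no-op while the
 * page still has mappings.
 */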
3002 void mem_cgroup_uncharge_page(struct page *page)
3003 {
3004 	/* early check. */
3005 	if (page_mapped(page))
3006 		return;
3007 	VM_BUG_ON(page->mapping && !PageAnon(page));
3008 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3009 }
3010 
3011 void mem_cgroup_uncharge_cache_page(struct page *page)
3012 {
3013 	VM_BUG_ON(page_mapped(page));
3014 	VM_BUG_ON(page->mapping);
3015 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3016 }
3017 
3018 /*
3019  * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3020  * In those cases, pages are freed continuously and we can expect them to be
3021  * in the same memcg. Each of these calls itself limits the number of
3022  * pages freed at once, so uncharge_start/end() is called properly.
3023  * This may be called multiple (nested) times in one context.
3024  */
3025 
3026 void mem_cgroup_uncharge_start(void)
3027 {
3028 	current->memcg_batch.do_batch++;
3029 	/* We can do nest. */
3030 	if (current->memcg_batch.do_batch == 1) {
3031 		current->memcg_batch.memcg = NULL;
3032 		current->memcg_batch.nr_pages = 0;
3033 		current->memcg_batch.memsw_nr_pages = 0;
3034 	}
3035 }
3036 
3037 void mem_cgroup_uncharge_end(void)
3038 {
3039 	struct memcg_batch_info *batch = &current->memcg_batch;
3040 
3041 	if (!batch->do_batch)
3042 		return;
3043 
3044 	batch->do_batch--;
3045 	if (batch->do_batch) /* If stacked, do nothing. */
3046 		return;
3047 
3048 	if (!batch->memcg)
3049 		return;
3050 	/*
3051 	 * This "batch->memcg" is valid without any css_get/put etc...
3052 	 * because we hide charges behind us.
3053 	 */
3054 	if (batch->nr_pages)
3055 		res_counter_uncharge(&batch->memcg->res,
3056 				     batch->nr_pages * PAGE_SIZE);
3057 	if (batch->memsw_nr_pages)
3058 		res_counter_uncharge(&batch->memcg->memsw,
3059 				     batch->memsw_nr_pages * PAGE_SIZE);
3060 	memcg_oom_recover(batch->memcg);
3061 	/* forget this pointer (for sanity check) */
3062 	batch->memcg = NULL;
3063 }
3064 
3065 #ifdef CONFIG_SWAP
3066 /*
3067  * Called after __delete_from_swap_cache() to drop the "page" account.
3068  * The memcg information is recorded in the swap_cgroup of "ent".
3069  */
3070 void
3071 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3072 {
3073 	struct mem_cgroup *memcg;
3074 	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3075 
3076 	if (!swapout) /* this was a swap cache but the swap is unused ! */
3077 		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3078 
3079 	memcg = __mem_cgroup_uncharge_common(page, ctype);
3080 
3081 	/*
3082 	 * record memcg information,  if swapout && memcg != NULL,
3083 	 * mem_cgroup_get() was called in uncharge().
3084 	 */
3085 	if (do_swap_account && swapout && memcg)
3086 		swap_cgroup_record(ent, css_id(&memcg->css));
3087 }
3088 #endif
3089 
3090 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3091 /*
3092  * called from swap_entry_free(). remove record in swap_cgroup and
3093  * uncharge "memsw" account.
3094  */
3095 void mem_cgroup_uncharge_swap(swp_entry_t ent)
3096 {
3097 	struct mem_cgroup *memcg;
3098 	unsigned short id;
3099 
3100 	if (!do_swap_account)
3101 		return;
3102 
3103 	id = swap_cgroup_record(ent, 0);
3104 	rcu_read_lock();
3105 	memcg = mem_cgroup_lookup(id);
3106 	if (memcg) {
3107 		/*
3108 		 * We uncharge this because swap is freed.
3109 		 * This memcg can be obsolete one. We avoid calling css_tryget
3110 		 */
3111 		if (!mem_cgroup_is_root(memcg))
3112 			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3113 		mem_cgroup_swap_statistics(memcg, false);
3114 		mem_cgroup_put(memcg);
3115 	}
3116 	rcu_read_unlock();
3117 }
3118 
3119 /**
3120  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3121  * @entry: swap entry to be moved
3122  * @from:  mem_cgroup which the entry is moved from
3123  * @to:  mem_cgroup which the entry is moved to
3124  * @need_fixup: whether we should fixup res_counters and refcounts.
3125  *
3126  * It succeeds only when the swap_cgroup's record for this entry is the same
3127  * as the mem_cgroup's id of @from.
3128  *
3129  * Returns 0 on success, -EINVAL on failure.
3130  *
3131  * The caller must have charged to @to, IOW, called res_counter_charge() about
3132  * both res and memsw, and called css_get().
3133  */
3134 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3135 		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3136 {
3137 	unsigned short old_id, new_id;
3138 
3139 	old_id = css_id(&from->css);
3140 	new_id = css_id(&to->css);
3141 
3142 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3143 		mem_cgroup_swap_statistics(from, false);
3144 		mem_cgroup_swap_statistics(to, true);
3145 		/*
3146 		 * This function is only called from task migration context now.
3147 		 * It postpones res_counter and refcount handling till the end
3148 		 * of task migration(mem_cgroup_clear_mc()) for performance
3149 		 * improvement. But we cannot postpone mem_cgroup_get(to)
3150 		 * because if the process that has been moved to @to does
3151 		 * swap-in, the refcount of @to might be decreased to 0.
3152 		 */
3153 		mem_cgroup_get(to);
3154 		if (need_fixup) {
3155 			if (!mem_cgroup_is_root(from))
3156 				res_counter_uncharge(&from->memsw, PAGE_SIZE);
3157 			mem_cgroup_put(from);
3158 			/*
3159 			 * we charged both to->res and to->memsw, so we should
3160 			 * uncharge to->res.
3161 			 */
3162 			if (!mem_cgroup_is_root(to))
3163 				res_counter_uncharge(&to->res, PAGE_SIZE);
3164 		}
3165 		return 0;
3166 	}
3167 	return -EINVAL;
3168 }
3169 #else
3170 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3171 		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3172 {
3173 	return -EINVAL;
3174 }
3175 #endif
3176 
3177 /*
3178  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3179  * page belongs to.
3180  */
3181 int mem_cgroup_prepare_migration(struct page *page,
3182 	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
3183 {
3184 	struct mem_cgroup *memcg = NULL;
3185 	struct page_cgroup *pc;
3186 	enum charge_type ctype;
3187 	int ret = 0;
3188 
3189 	*memcgp = NULL;
3190 
3191 	VM_BUG_ON(PageTransHuge(page));
3192 	if (mem_cgroup_disabled())
3193 		return 0;
3194 
3195 	pc = lookup_page_cgroup(page);
3196 	lock_page_cgroup(pc);
3197 	if (PageCgroupUsed(pc)) {
3198 		memcg = pc->mem_cgroup;
3199 		css_get(&memcg->css);
3200 		/*
3201 		 * When migrating an anonymous page, its mapcount goes down
3202 		 * to 0 and uncharge() will be called. But, even if it's fully
3203 		 * unmapped, migration may fail and this page has to be
3204 		 * charged again. We set MIGRATION flag here and delay uncharge
3205 		 * until end_migration() is called
3206 		 *
3207 		 * Corner Case Thinking
3208 		 * A)
3209 		 * When the old page was mapped as Anon and it's unmap-and-freed
3210 		 * while migration was ongoing.
3211 		 * If unmap finds the old page, uncharge() of it will be delayed
3212 		 * until end_migration(). If unmap finds a new page, it's
3213 		 * uncharged when its mapcount goes from 1 to 0. If the unmap code
3214 		 * finds swap_migration_entry, the new page will not be mapped
3215 		 * and end_migration() will find it(mapcount==0).
3216 		 *
3217 		 * B)
3218 		 * When the old page was mapped but migration fails, the kernel
3219 		 * remaps it. A charge for it is kept by MIGRATION flag even
3220 		 * if mapcount goes down to 0. We can do remap successfully
3221 		 * without charging it again.
3222 		 *
3223 		 * C)
3224 		 * The "old" page is under lock_page() until the end of
3225 		 * migration, so, the old page itself will not be swapped-out.
3226 		 * If the new page is swapped out before end_migration, our
3227 		 * hook to usual swap-out path will catch the event.
3228 		 */
3229 		if (PageAnon(page))
3230 			SetPageCgroupMigration(pc);
3231 	}
3232 	unlock_page_cgroup(pc);
3233 	/*
3234 	 * If the page is not charged at this point,
3235 	 * we return here.
3236 	 */
3237 	if (!memcg)
3238 		return 0;
3239 
3240 	*memcgp = memcg;
3241 	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
3242 	css_put(&memcg->css);/* drop extra refcnt */
3243 	if (ret) {
3244 		if (PageAnon(page)) {
3245 			lock_page_cgroup(pc);
3246 			ClearPageCgroupMigration(pc);
3247 			unlock_page_cgroup(pc);
3248 			/*
3249 			 * The old page may be fully unmapped while we kept it.
3250 			 */
3251 			mem_cgroup_uncharge_page(page);
3252 		}
3253 		/* we'll need to revisit this error code (we have -EINTR) */
3254 		return -ENOMEM;
3255 	}
3256 	/*
3257 	 * We charge new page before it's used/mapped. So, even if unlock_page()
3258 	 * is called before end_migration, we can catch all events on this new
3259 	 * page. In the case new page is migrated but not remapped, new page's
3260 	 * mapcount will be finally 0 and we call uncharge in end_migration().
3261 	 */
3262 	pc = lookup_page_cgroup(newpage);
3263 	if (PageAnon(page))
3264 		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3265 	else if (page_is_file_cache(page))
3266 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3267 	else
3268 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3269 	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
3270 	return ret;
3271 }
3272 
3273 /* remove redundant charge if migration failed*/
3274 void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3275 	struct page *oldpage, struct page *newpage, bool migration_ok)
3276 {
3277 	struct page *used, *unused;
3278 	struct page_cgroup *pc;
3279 
3280 	if (!memcg)
3281 		return;
3282 	/* blocks rmdir() */
3283 	cgroup_exclude_rmdir(&memcg->css);
3284 	if (!migration_ok) {
3285 		used = oldpage;
3286 		unused = newpage;
3287 	} else {
3288 		used = newpage;
3289 		unused = oldpage;
3290 	}
3291 	/*
3292 	 * We disallowed uncharge of pages under migration because the mapcount
3293 	 * of the page goes down to zero, temporarily.
3294 	 * Clear the flag and check whether the page should be charged.
3295 	 */
3296 	pc = lookup_page_cgroup(oldpage);
3297 	lock_page_cgroup(pc);
3298 	ClearPageCgroupMigration(pc);
3299 	unlock_page_cgroup(pc);
3300 
3301 	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3302 
3303 	/*
3304 	 * If a page is file cache, the radix-tree replacement is atomic
3305 	 * and we can skip this check. When it was an Anon page, its mapcount
3306 	 * goes down to 0. But because we added the MIGRATION flag, it's not
3307 	 * uncharged yet. There are several cases, but the page->mapcount check
3308 	 * and the USED bit check in mem_cgroup_uncharge_page() will do enough
3309 	 * checking. (see prepare_charge() also)
3310 	 */
3311 	if (PageAnon(used))
3312 		mem_cgroup_uncharge_page(used);
3313 	/*
3314 	 * At migration, we may charge account against cgroup which has no
3315 	 * tasks.
3316 	 * So, rmdir()->pre_destroy() can be called while we do this charge.
3317 	 * In that case, we need to call pre_destroy() again. check it here.
3318 	 */
3319 	cgroup_release_and_wakeup_rmdir(&memcg->css);
3320 }
3321 
3322 /*
3323  * When replacing page cache, the newpage is not under any memcg but it's on
3324  * the LRU. So, this function doesn't touch the res_counter but handles the
3325  * LRU correctly. Both pages are locked so we cannot race with uncharge.
3326  */
3327 void mem_cgroup_replace_page_cache(struct page *oldpage,
3328 				  struct page *newpage)
3329 {
3330 	struct mem_cgroup *memcg;
3331 	struct page_cgroup *pc;
3332 	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3333 
3334 	if (mem_cgroup_disabled())
3335 		return;
3336 
3337 	pc = lookup_page_cgroup(oldpage);
3338 	/* fix accounting on old pages */
3339 	lock_page_cgroup(pc);
3340 	memcg = pc->mem_cgroup;
3341 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
3342 	ClearPageCgroupUsed(pc);
3343 	unlock_page_cgroup(pc);
3344 
3345 	if (PageSwapBacked(oldpage))
3346 		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3347 
3348 	/*
3349 	 * Even if newpage->mapping was NULL before starting replacement,
3350 	 * the newpage may already be on the LRU (or a pagevec for the LRU).
3351 	 * We lock the LRU while we overwrite pc->mem_cgroup.
3352 	 */
3353 	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
3354 }
3355 
3356 #ifdef CONFIG_DEBUG_VM
3357 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3358 {
3359 	struct page_cgroup *pc;
3360 
3361 	pc = lookup_page_cgroup(page);
3362 	/*
3363 	 * Can be NULL while feeding pages into the page allocator for
3364 	 * the first time, i.e. during boot or memory hotplug;
3365 	 * or when mem_cgroup_disabled().
3366 	 */
3367 	if (likely(pc) && PageCgroupUsed(pc))
3368 		return pc;
3369 	return NULL;
3370 }
3371 
3372 bool mem_cgroup_bad_page_check(struct page *page)
3373 {
3374 	if (mem_cgroup_disabled())
3375 		return false;
3376 
3377 	return lookup_page_cgroup_used(page) != NULL;
3378 }
3379 
3380 void mem_cgroup_print_bad_page(struct page *page)
3381 {
3382 	struct page_cgroup *pc;
3383 
3384 	pc = lookup_page_cgroup_used(page);
3385 	if (pc) {
3386 		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
3387 		       pc, pc->flags, pc->mem_cgroup);
3388 	}
3389 }
3390 #endif
3391 
3392 static DEFINE_MUTEX(set_limit_mutex);
3393 
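/*
 * Shrink (or grow) the memory limit of @memcg to @val.  The new value must
 * not exceed the current memsw limit.  If usage is still above the new
 * limit, keep reclaiming, bounded by a retry count scaled to the number of
 * children, and return the res_counter error if no further progress is made.
 */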
3394 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3395 				unsigned long long val)
3396 {
3397 	int retry_count;
3398 	u64 memswlimit, memlimit;
3399 	int ret = 0;
3400 	int children = mem_cgroup_count_children(memcg);
3401 	u64 curusage, oldusage;
3402 	int enlarge;
3403 
3404 	/*
3405 	 * To keep hierarchical_reclaim simple, how long we should retry
3406 	 * depends on the caller. We set our retry count to be a function
3407 	 * of the number of children we have to visit in this loop.
3408 	 */
3409 	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3410 
3411 	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3412 
3413 	enlarge = 0;
3414 	while (retry_count) {
3415 		if (signal_pending(current)) {
3416 			ret = -EINTR;
3417 			break;
3418 		}
3419 		/*
3420 		 * Rather than hiding all of this in some function, do it in an
3421 		 * open-coded manner so you can see what it really does.
3422 		 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
3423 		 */
3424 		mutex_lock(&set_limit_mutex);
3425 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3426 		if (memswlimit < val) {
3427 			ret = -EINVAL;
3428 			mutex_unlock(&set_limit_mutex);
3429 			break;
3430 		}
3431 
3432 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3433 		if (memlimit < val)
3434 			enlarge = 1;
3435 
3436 		ret = res_counter_set_limit(&memcg->res, val);
3437 		if (!ret) {
3438 			if (memswlimit == val)
3439 				memcg->memsw_is_minimum = true;
3440 			else
3441 				memcg->memsw_is_minimum = false;
3442 		}
3443 		mutex_unlock(&set_limit_mutex);
3444 
3445 		if (!ret)
3446 			break;
3447 
3448 		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3449 				   MEM_CGROUP_RECLAIM_SHRINK);
3450 		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3451 		/* Usage is reduced ? */
3452   		if (curusage >= oldusage)
3453 			retry_count--;
3454 		else
3455 			oldusage = curusage;
3456 	}
3457 	if (!ret && enlarge)
3458 		memcg_oom_recover(memcg);
3459 
3460 	return ret;
3461 }
3462 
3463 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3464 					unsigned long long val)
3465 {
3466 	int retry_count;
3467 	u64 memlimit, memswlimit, oldusage, curusage;
3468 	int children = mem_cgroup_count_children(memcg);
3469 	int ret = -EBUSY;
3470 	int enlarge = 0;
3471 
3472 	/* see mem_cgroup_resize_res_limit */
3473  	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3474 	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3475 	while (retry_count) {
3476 		if (signal_pending(current)) {
3477 			ret = -EINTR;
3478 			break;
3479 		}
3480 		/*
3481 		 * Rather than hiding all of this in some function, do it in an
3482 		 * open-coded manner so you can see what it really does.
3483 		 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
3484 		 */
3485 		mutex_lock(&set_limit_mutex);
3486 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3487 		if (memlimit > val) {
3488 			ret = -EINVAL;
3489 			mutex_unlock(&set_limit_mutex);
3490 			break;
3491 		}
3492 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3493 		if (memswlimit < val)
3494 			enlarge = 1;
3495 		ret = res_counter_set_limit(&memcg->memsw, val);
3496 		if (!ret) {
3497 			if (memlimit == val)
3498 				memcg->memsw_is_minimum = true;
3499 			else
3500 				memcg->memsw_is_minimum = false;
3501 		}
3502 		mutex_unlock(&set_limit_mutex);
3503 
3504 		if (!ret)
3505 			break;
3506 
3507 		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3508 				   MEM_CGROUP_RECLAIM_NOSWAP |
3509 				   MEM_CGROUP_RECLAIM_SHRINK);
3510 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3511 		/* Usage is reduced ? */
3512 		if (curusage >= oldusage)
3513 			retry_count--;
3514 		else
3515 			oldusage = curusage;
3516 	}
3517 	if (!ret && enlarge)
3518 		memcg_oom_recover(memcg);
3519 	return ret;
3520 }
3521 
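/*
 * Reclaim from the memcgs that exceed their soft limit on this zone.
 * Walks the per-zone soft-limit rb-tree, reclaims from the worst offender,
 * re-inserts it keyed by its remaining excess, and repeats until something
 * is reclaimed or the candidates (and the loop budget) are exhausted.
 * Only used for order-0 allocations.
 */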
3522 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3523 					    gfp_t gfp_mask,
3524 					    unsigned long *total_scanned)
3525 {
3526 	unsigned long nr_reclaimed = 0;
3527 	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3528 	unsigned long reclaimed;
3529 	int loop = 0;
3530 	struct mem_cgroup_tree_per_zone *mctz;
3531 	unsigned long long excess;
3532 	unsigned long nr_scanned;
3533 
3534 	if (order > 0)
3535 		return 0;
3536 
3537 	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3538 	/*
3539 	 * This loop can run for a while, especially if mem_cgroups continuously
3540 	 * keep exceeding their soft limit and putting the system under
3541 	 * pressure.
3542 	 */
3543 	do {
3544 		if (next_mz)
3545 			mz = next_mz;
3546 		else
3547 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3548 		if (!mz)
3549 			break;
3550 
3551 		nr_scanned = 0;
3552 		reclaimed = mem_cgroup_soft_reclaim(mz->mem, zone,
3553 						    gfp_mask, &nr_scanned);
3554 		nr_reclaimed += reclaimed;
3555 		*total_scanned += nr_scanned;
3556 		spin_lock(&mctz->lock);
3557 
3558 		/*
3559 		 * If we failed to reclaim anything from this memory cgroup
3560 		 * it is time to move on to the next cgroup
3561 		 */
3562 		next_mz = NULL;
3563 		if (!reclaimed) {
3564 			do {
3565 				/*
3566 				 * Loop until we find yet another one.
3567 				 *
3568 				 * By the time we get the soft_limit lock
3569 				 * again, someone might have added the
3570 				 * group back on the RB tree. Iterate to
3571 				 * make sure we get a different mem.
3572 				 * mem_cgroup_largest_soft_limit_node returns
3573 				 * NULL if no other cgroup is present on
3574 				 * the tree
3575 				 */
3576 				next_mz =
3577 				__mem_cgroup_largest_soft_limit_node(mctz);
3578 				if (next_mz == mz)
3579 					css_put(&next_mz->mem->css);
3580 				else /* next_mz == NULL or other memcg */
3581 					break;
3582 			} while (1);
3583 		}
3584 		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3585 		excess = res_counter_soft_limit_excess(&mz->mem->res);
3586 		/*
3587 		 * One school of thought says that we should not add
3588 		 * back the node to the tree if reclaim returns 0.
3589 		 * But our reclaim could return 0 simply because, due
3590 		 * to the priority, we are exposing a smaller subset of
3591 		 * memory to reclaim from. Consider this as a longer
3592 		 * term TODO.
3593 		 */
3594 		/* If excess == 0, no tree ops */
3595 		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3596 		spin_unlock(&mctz->lock);
3597 		css_put(&mz->mem->css);
3598 		loop++;
3599 		/*
3600 		 * Could not reclaim anything and there are no more
3601 		 * mem cgroups to try or we seem to be looping without
3602 		 * reclaiming anything.
3603 		 */
3604 		if (!nr_reclaimed &&
3605 			(next_mz == NULL ||
3606 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3607 			break;
3608 	} while (!nr_reclaimed);
3609 	if (next_mz)
3610 		css_put(&next_mz->mem->css);
3611 	return nr_reclaimed;
3612 }
3613 
3614 /*
3615  * This routine traverses the page_cgroups in the given list and drops them all.
3616  * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
3617  */
3618 static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
3619 				int node, int zid, enum lru_list lru)
3620 {
3621 	struct mem_cgroup_per_zone *mz;
3622 	unsigned long flags, loop;
3623 	struct list_head *list;
3624 	struct page *busy;
3625 	struct zone *zone;
3626 	int ret = 0;
3627 
3628 	zone = &NODE_DATA(node)->node_zones[zid];
3629 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
3630 	list = &mz->lruvec.lists[lru];
3631 
3632 	loop = MEM_CGROUP_ZSTAT(mz, lru);
3633 	/* give some margin against EBUSY etc...*/
3634 	loop += 256;
3635 	busy = NULL;
3636 	while (loop--) {
3637 		struct page_cgroup *pc;
3638 		struct page *page;
3639 
3640 		ret = 0;
3641 		spin_lock_irqsave(&zone->lru_lock, flags);
3642 		if (list_empty(list)) {
3643 			spin_unlock_irqrestore(&zone->lru_lock, flags);
3644 			break;
3645 		}
3646 		page = list_entry(list->prev, struct page, lru);
3647 		if (busy == page) {
3648 			list_move(&page->lru, list);
3649 			busy = NULL;
3650 			spin_unlock_irqrestore(&zone->lru_lock, flags);
3651 			continue;
3652 		}
3653 		spin_unlock_irqrestore(&zone->lru_lock, flags);
3654 
3655 		pc = lookup_page_cgroup(page);
3656 
3657 		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
3658 		if (ret == -ENOMEM || ret == -EINTR)
3659 			break;
3660 
3661 		if (ret == -EBUSY || ret == -EINVAL) {
3662 			/* found lock contention or "pc" is obsolete. */
3663 			busy = page;
3664 			cond_resched();
3665 		} else
3666 			busy = NULL;
3667 	}
3668 
3669 	if (!ret && !list_empty(list))
3670 		return -EBUSY;
3671 	return ret;
3672 }
3673 
3674 /*
3675  * Make the mem_cgroup's charge 0 if there is no task.
3676  * This enables deleting this mem_cgroup.
3677  */
3678 static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
3679 {
3680 	int ret;
3681 	int node, zid, shrink;
3682 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3683 	struct cgroup *cgrp = memcg->css.cgroup;
3684 
3685 	css_get(&memcg->css);
3686 
3687 	shrink = 0;
3688 	/* should free all ? */
3689 	if (free_all)
3690 		goto try_to_free;
3691 move_account:
3692 	do {
3693 		ret = -EBUSY;
3694 		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3695 			goto out;
3696 		ret = -EINTR;
3697 		if (signal_pending(current))
3698 			goto out;
3699 		/* This is for making sure all *used* pages are on the LRU. */
3700 		lru_add_drain_all();
3701 		drain_all_stock_sync(memcg);
3702 		ret = 0;
3703 		mem_cgroup_start_move(memcg);
3704 		for_each_node_state(node, N_HIGH_MEMORY) {
3705 			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3706 				enum lru_list l;
3707 				for_each_lru(l) {
3708 					ret = mem_cgroup_force_empty_list(memcg,
3709 							node, zid, l);
3710 					if (ret)
3711 						break;
3712 				}
3713 			}
3714 			if (ret)
3715 				break;
3716 		}
3717 		mem_cgroup_end_move(memcg);
3718 		memcg_oom_recover(memcg);
3719 		/* it seems parent cgroup doesn't have enough mem */
3720 		if (ret == -ENOMEM)
3721 			goto try_to_free;
3722 		cond_resched();
3723 	/* "ret" should also be checked to ensure all lists are empty. */
3724 	} while (memcg->res.usage > 0 || ret);
3725 out:
3726 	css_put(&memcg->css);
3727 	return ret;
3728 
3729 try_to_free:
3730 	/* returns EBUSY if there is a task or if we come here twice. */
3731 	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3732 		ret = -EBUSY;
3733 		goto out;
3734 	}
3735 	/* we call try-to-free pages to make this cgroup empty */
3736 	lru_add_drain_all();
3737 	/* try to free all pages in this cgroup */
3738 	shrink = 1;
3739 	while (nr_retries && memcg->res.usage > 0) {
3740 		int progress;
3741 
3742 		if (signal_pending(current)) {
3743 			ret = -EINTR;
3744 			goto out;
3745 		}
3746 		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
3747 						false);
3748 		if (!progress) {
3749 			nr_retries--;
3750 			/* maybe some writeback is necessary */
3751 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3752 		}
3753 
3754 	}
3755 	lru_add_drain();
3756 	/* try move_account...there may be some *locked* pages. */
3757 	goto move_account;
3758 }
3759 
3760 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3761 {
3762 	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3763 }
3764 
3765 
3766 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3767 {
3768 	return mem_cgroup_from_cont(cont)->use_hierarchy;
3769 }
3770 
3771 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3772 					u64 val)
3773 {
3774 	int retval = 0;
3775 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3776 	struct cgroup *parent = cont->parent;
3777 	struct mem_cgroup *parent_memcg = NULL;
3778 
3779 	if (parent)
3780 		parent_memcg = mem_cgroup_from_cont(parent);
3781 
3782 	cgroup_lock();
3783 	/*
3784 	 * If parent's use_hierarchy is set, we can't make any modifications
3785 	 * in the child subtrees. If it is unset, then the change can
3786 	 * occur, provided the current cgroup has no children.
3787 	 *
3788 	 * For the root cgroup, parent_memcg is NULL; we allow the value to
3789 	 * be set if there are no children.
3790 	 */
3791 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3792 				(val == 1 || val == 0)) {
3793 		if (list_empty(&cont->children))
3794 			memcg->use_hierarchy = val;
3795 		else
3796 			retval = -EBUSY;
3797 	} else
3798 		retval = -EINVAL;
3799 	cgroup_unlock();
3800 
3801 	return retval;
3802 }
3803 
3804 
3805 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
3806 					       enum mem_cgroup_stat_index idx)
3807 {
3808 	struct mem_cgroup *iter;
3809 	long val = 0;
3810 
3811 	/* Per-cpu values can be negative, use a signed accumulator */
3812 	for_each_mem_cgroup_tree(iter, memcg)
3813 		val += mem_cgroup_read_stat(iter, idx);
3814 
3815 	if (val < 0) /* race ? */
3816 		val = 0;
3817 	return val;
3818 }
3819 
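/*
 * Return the usage of @memcg in bytes, optionally including swap.  For the
 * root cgroup, usage is derived from the recursive cache + rss (+ swap)
 * statistics rather than from the res_counter.
 */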
3820 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3821 {
3822 	u64 val;
3823 
3824 	if (!mem_cgroup_is_root(memcg)) {
3825 		if (!swap)
3826 			return res_counter_read_u64(&memcg->res, RES_USAGE);
3827 		else
3828 			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
3829 	}
3830 
3831 	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
3832 	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
3833 
3834 	if (swap)
3835 		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
3836 
3837 	return val << PAGE_SHIFT;
3838 }
3839 
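/*
 * read_u64 handler for the memory.* and memory.memsw.* control files.  The
 * cftype's private field encodes which res_counter (_MEM or _MEMSWAP) and
 * which attribute to read; RES_USAGE is special-cased through
 * mem_cgroup_usage() so that the root cgroup is handled correctly.
 */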
3840 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3841 {
3842 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3843 	u64 val;
3844 	int type, name;
3845 
3846 	type = MEMFILE_TYPE(cft->private);
3847 	name = MEMFILE_ATTR(cft->private);
3848 	switch (type) {
3849 	case _MEM:
3850 		if (name == RES_USAGE)
3851 			val = mem_cgroup_usage(memcg, false);
3852 		else
3853 			val = res_counter_read_u64(&memcg->res, name);
3854 		break;
3855 	case _MEMSWAP:
3856 		if (name == RES_USAGE)
3857 			val = mem_cgroup_usage(memcg, true);
3858 		else
3859 			val = res_counter_read_u64(&memcg->memsw, name);
3860 		break;
3861 	default:
3862 		BUG();
3863 		break;
3864 	}
3865 	return val;
3866 }
3867 /*
3868  * This function is the write handler for
3869  * RES_LIMIT and RES_SOFT_LIMIT.
3870  */
3871 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3872 			    const char *buffer)
3873 {
3874 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3875 	int type, name;
3876 	unsigned long long val;
3877 	int ret;
3878 
3879 	type = MEMFILE_TYPE(cft->private);
3880 	name = MEMFILE_ATTR(cft->private);
3881 	switch (name) {
3882 	case RES_LIMIT:
3883 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3884 			ret = -EINVAL;
3885 			break;
3886 		}
3887 		/* This function does all necessary parse...reuse it */
3888 		ret = res_counter_memparse_write_strategy(buffer, &val);
3889 		if (ret)
3890 			break;
3891 		if (type == _MEM)
3892 			ret = mem_cgroup_resize_limit(memcg, val);
3893 		else
3894 			ret = mem_cgroup_resize_memsw_limit(memcg, val);
3895 		break;
3896 	case RES_SOFT_LIMIT:
3897 		ret = res_counter_memparse_write_strategy(buffer, &val);
3898 		if (ret)
3899 			break;
3900 		/*
3901 		 * For memsw, soft limits are hard to implement in terms
3902 		 * of semantics. For now, we only support soft limits for
3903 		 * memory control without swap.
3904 		 */
3905 		if (type == _MEM)
3906 			ret = res_counter_set_soft_limit(&memcg->res, val);
3907 		else
3908 			ret = -EINVAL;
3909 		break;
3910 	default:
3911 		ret = -EINVAL; /* should be BUG() ? */
3912 		break;
3913 	}
3914 	return ret;
3915 }
3916 
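/*
 * Compute the effective limits for @memcg: walk up the hierarchy (as long
 * as use_hierarchy is set) and report the smallest memory and memsw limits
 * found along the way.
 */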
3917 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3918 		unsigned long long *mem_limit, unsigned long long *memsw_limit)
3919 {
3920 	struct cgroup *cgroup;
3921 	unsigned long long min_limit, min_memsw_limit, tmp;
3922 
3923 	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3924 	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3925 	cgroup = memcg->css.cgroup;
3926 	if (!memcg->use_hierarchy)
3927 		goto out;
3928 
3929 	while (cgroup->parent) {
3930 		cgroup = cgroup->parent;
3931 		memcg = mem_cgroup_from_cont(cgroup);
3932 		if (!memcg->use_hierarchy)
3933 			break;
3934 		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3935 		min_limit = min(min_limit, tmp);
3936 		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3937 		min_memsw_limit = min(min_memsw_limit, tmp);
3938 	}
3939 out:
3940 	*mem_limit = min_limit;
3941 	*memsw_limit = min_memsw_limit;
3942 	return;
3943 }
3944 
3945 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3946 {
3947 	struct mem_cgroup *memcg;
3948 	int type, name;
3949 
3950 	memcg = mem_cgroup_from_cont(cont);
3951 	type = MEMFILE_TYPE(event);
3952 	name = MEMFILE_ATTR(event);
3953 	switch (name) {
3954 	case RES_MAX_USAGE:
3955 		if (type == _MEM)
3956 			res_counter_reset_max(&memcg->res);
3957 		else
3958 			res_counter_reset_max(&memcg->memsw);
3959 		break;
3960 	case RES_FAILCNT:
3961 		if (type == _MEM)
3962 			res_counter_reset_failcnt(&memcg->res);
3963 		else
3964 			res_counter_reset_failcnt(&memcg->memsw);
3965 		break;
3966 	}
3967 
3968 	return 0;
3969 }
3970 
3971 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3972 					struct cftype *cft)
3973 {
3974 	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3975 }
3976 
3977 #ifdef CONFIG_MMU
3978 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3979 					struct cftype *cft, u64 val)
3980 {
3981 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3982 
3983 	if (val >= (1 << NR_MOVE_TYPE))
3984 		return -EINVAL;
3985 	/*
3986 	 * We check this value several times both in can_attach() and
3987 	 * attach(), so we need cgroup lock to prevent this value from being
3988 	 * inconsistent.
3989 	 */
3990 	cgroup_lock();
3991 	memcg->move_charge_at_immigrate = val;
3992 	cgroup_unlock();
3993 
3994 	return 0;
3995 }
3996 #else
3997 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3998 					struct cftype *cft, u64 val)
3999 {
4000 	return -ENOSYS;
4001 }
4002 #endif
4003 
4004 
4005 /* For read statistics */
4006 enum {
4007 	MCS_CACHE,
4008 	MCS_RSS,
4009 	MCS_FILE_MAPPED,
4010 	MCS_PGPGIN,
4011 	MCS_PGPGOUT,
4012 	MCS_SWAP,
4013 	MCS_PGFAULT,
4014 	MCS_PGMAJFAULT,
4015 	MCS_INACTIVE_ANON,
4016 	MCS_ACTIVE_ANON,
4017 	MCS_INACTIVE_FILE,
4018 	MCS_ACTIVE_FILE,
4019 	MCS_UNEVICTABLE,
4020 	NR_MCS_STAT,
4021 };
4022 
4023 struct mcs_total_stat {
4024 	s64 stat[NR_MCS_STAT];
4025 };
4026 
4027 struct {
4028 	char *local_name;
4029 	char *total_name;
4030 } memcg_stat_strings[NR_MCS_STAT] = {
4031 	{"cache", "total_cache"},
4032 	{"rss", "total_rss"},
4033 	{"mapped_file", "total_mapped_file"},
4034 	{"pgpgin", "total_pgpgin"},
4035 	{"pgpgout", "total_pgpgout"},
4036 	{"swap", "total_swap"},
4037 	{"pgfault", "total_pgfault"},
4038 	{"pgmajfault", "total_pgmajfault"},
4039 	{"inactive_anon", "total_inactive_anon"},
4040 	{"active_anon", "total_active_anon"},
4041 	{"inactive_file", "total_inactive_file"},
4042 	{"active_file", "total_active_file"},
4043 	{"unevictable", "total_unevictable"}
4044 };
4045 
4046 
4047 static void
4048 mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4049 {
4050 	s64 val;
4051 
4052 	/* per cpu stat */
4053 	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
4054 	s->stat[MCS_CACHE] += val * PAGE_SIZE;
4055 	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
4056 	s->stat[MCS_RSS] += val * PAGE_SIZE;
4057 	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
4058 	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4059 	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
4060 	s->stat[MCS_PGPGIN] += val;
4061 	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
4062 	s->stat[MCS_PGPGOUT] += val;
4063 	if (do_swap_account) {
4064 		val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
4065 		s->stat[MCS_SWAP] += val * PAGE_SIZE;
4066 	}
4067 	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
4068 	s->stat[MCS_PGFAULT] += val;
4069 	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
4070 	s->stat[MCS_PGMAJFAULT] += val;
4071 
4072 	/* per zone stat */
4073 	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
4074 	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4075 	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
4076 	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4077 	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
4078 	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4079 	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
4080 	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4081 	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
4082 	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
4083 }
4084 
4085 static void
4086 mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4087 {
4088 	struct mem_cgroup *iter;
4089 
4090 	for_each_mem_cgroup_tree(iter, memcg)
4091 		mem_cgroup_get_local_stat(iter, s);
4092 }
4093 
4094 #ifdef CONFIG_NUMA
4095 static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4096 {
4097 	int nid;
4098 	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4099 	unsigned long node_nr;
4100 	struct cgroup *cont = m->private;
4101 	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4102 
4103 	total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
4104 	seq_printf(m, "total=%lu", total_nr);
4105 	for_each_node_state(nid, N_HIGH_MEMORY) {
4106 		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
4107 		seq_printf(m, " N%d=%lu", nid, node_nr);
4108 	}
4109 	seq_putc(m, '\n');
4110 
4111 	file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
4112 	seq_printf(m, "file=%lu", file_nr);
4113 	for_each_node_state(nid, N_HIGH_MEMORY) {
4114 		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4115 				LRU_ALL_FILE);
4116 		seq_printf(m, " N%d=%lu", nid, node_nr);
4117 	}
4118 	seq_putc(m, '\n');
4119 
4120 	anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
4121 	seq_printf(m, "anon=%lu", anon_nr);
4122 	for_each_node_state(nid, N_HIGH_MEMORY) {
4123 		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4124 				LRU_ALL_ANON);
4125 		seq_printf(m, " N%d=%lu", nid, node_nr);
4126 	}
4127 	seq_putc(m, '\n');
4128 
4129 	unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
4130 	seq_printf(m, "unevictable=%lu", unevictable_nr);
4131 	for_each_node_state(nid, N_HIGH_MEMORY) {
4132 		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4133 				BIT(LRU_UNEVICTABLE));
4134 		seq_printf(m, " N%d=%lu", nid, node_nr);
4135 	}
4136 	seq_putc(m, '\n');
4137 	return 0;
4138 }
4139 #endif /* CONFIG_NUMA */
4140 
4141 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4142 				 struct cgroup_map_cb *cb)
4143 {
4144 	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4145 	struct mcs_total_stat mystat;
4146 	int i;
4147 
4148 	memset(&mystat, 0, sizeof(mystat));
4149 	mem_cgroup_get_local_stat(mem_cont, &mystat);
4150 
4151 
4152 	for (i = 0; i < NR_MCS_STAT; i++) {
4153 		if (i == MCS_SWAP && !do_swap_account)
4154 			continue;
4155 		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
4156 	}
4157 
4158 	/* Hierarchical information */
4159 	{
4160 		unsigned long long limit, memsw_limit;
4161 		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4162 		cb->fill(cb, "hierarchical_memory_limit", limit);
4163 		if (do_swap_account)
4164 			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4165 	}
4166 
4167 	memset(&mystat, 0, sizeof(mystat));
4168 	mem_cgroup_get_total_stat(mem_cont, &mystat);
4169 	for (i = 0; i < NR_MCS_STAT; i++) {
4170 		if (i == MCS_SWAP && !do_swap_account)
4171 			continue;
4172 		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
4173 	}
4174 
4175 #ifdef CONFIG_DEBUG_VM
4176 	{
4177 		int nid, zid;
4178 		struct mem_cgroup_per_zone *mz;
4179 		unsigned long recent_rotated[2] = {0, 0};
4180 		unsigned long recent_scanned[2] = {0, 0};
4181 
4182 		for_each_online_node(nid)
4183 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4184 				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4185 
4186 				recent_rotated[0] +=
4187 					mz->reclaim_stat.recent_rotated[0];
4188 				recent_rotated[1] +=
4189 					mz->reclaim_stat.recent_rotated[1];
4190 				recent_scanned[0] +=
4191 					mz->reclaim_stat.recent_scanned[0];
4192 				recent_scanned[1] +=
4193 					mz->reclaim_stat.recent_scanned[1];
4194 			}
4195 		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4196 		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4197 		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4198 		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4199 	}
4200 #endif
4201 
4202 	return 0;
4203 }
4204 
4205 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4206 {
4207 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4208 
4209 	return mem_cgroup_swappiness(memcg);
4210 }
4211 
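/*
 * Set the per-memcg swappiness.  Valid values are 0..100; the root cgroup
 * is excluded, and the value cannot be changed for a group whose parent
 * uses hierarchy, or which itself uses hierarchy and has children.
 */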
4212 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4213 				       u64 val)
4214 {
4215 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4216 	struct mem_cgroup *parent;
4217 
4218 	if (val > 100)
4219 		return -EINVAL;
4220 
4221 	if (cgrp->parent == NULL)
4222 		return -EINVAL;
4223 
4224 	parent = mem_cgroup_from_cont(cgrp->parent);
4225 
4226 	cgroup_lock();
4227 
4228 	/* If under hierarchy, only empty-root can set this value */
4229 	if ((parent->use_hierarchy) ||
4230 	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4231 		cgroup_unlock();
4232 		return -EINVAL;
4233 	}
4234 
4235 	memcg->swappiness = val;
4236 
4237 	cgroup_unlock();
4238 
4239 	return 0;
4240 }
4241 
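/*
 * Signal every usage-threshold eventfd that has been crossed since the
 * last invocation, in either direction, and advance current_threshold so
 * it again points at the highest threshold not above the current usage.
 */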
4242 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4243 {
4244 	struct mem_cgroup_threshold_ary *t;
4245 	u64 usage;
4246 	int i;
4247 
4248 	rcu_read_lock();
4249 	if (!swap)
4250 		t = rcu_dereference(memcg->thresholds.primary);
4251 	else
4252 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4253 
4254 	if (!t)
4255 		goto unlock;
4256 
4257 	usage = mem_cgroup_usage(memcg, swap);
4258 
4259 	/*
4260 	 * current_threshold points to threshold just below usage.
4261 	 * If it's not true, a threshold was crossed after last
4262 	 * call of __mem_cgroup_threshold().
4263 	 */
4264 	i = t->current_threshold;
4265 
4266 	/*
4267 	 * Iterate backward over array of thresholds starting from
4268 	 * current_threshold and check if a threshold is crossed.
4269 	 * If none of thresholds below usage is crossed, we read
4270 	 * only one element of the array here.
4271 	 */
4272 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4273 		eventfd_signal(t->entries[i].eventfd, 1);
4274 
4275 	/* i = current_threshold + 1 */
4276 	i++;
4277 
4278 	/*
4279 	 * Iterate forward over array of thresholds starting from
4280 	 * current_threshold+1 and check if a threshold is crossed.
4281 	 * If none of thresholds above usage is crossed, we read
4282 	 * only one element of the array here.
4283 	 */
4284 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4285 		eventfd_signal(t->entries[i].eventfd, 1);
4286 
4287 	/* Update current_threshold */
4288 	t->current_threshold = i - 1;
4289 unlock:
4290 	rcu_read_unlock();
4291 }
4292 
4293 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4294 {
4295 	while (memcg) {
4296 		__mem_cgroup_threshold(memcg, false);
4297 		if (do_swap_account)
4298 			__mem_cgroup_threshold(memcg, true);
4299 
4300 		memcg = parent_mem_cgroup(memcg);
4301 	}
4302 }
4303 
4304 static int compare_thresholds(const void *a, const void *b)
4305 {
4306 	const struct mem_cgroup_threshold *_a = a;
4307 	const struct mem_cgroup_threshold *_b = b;
4308 
4309 	return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
4310 }
4311 
4312 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4313 {
4314 	struct mem_cgroup_eventfd_list *ev;
4315 
4316 	list_for_each_entry(ev, &memcg->oom_notify, list)
4317 		eventfd_signal(ev->eventfd, 1);
4318 	return 0;
4319 }
4320 
4321 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4322 {
4323 	struct mem_cgroup *iter;
4324 
4325 	for_each_mem_cgroup_tree(iter, memcg)
4326 		mem_cgroup_oom_notify_cb(iter);
4327 }
4328 
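/*
 * Register an eventfd to be signalled when memory (or memsw) usage crosses
 * the threshold given in @args.  The thresholds are kept in a sorted,
 * RCU-protected array; registration allocates a new array with the extra
 * entry and swaps it in under thresholds_lock.
 */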
4329 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4330 	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4331 {
4332 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4333 	struct mem_cgroup_thresholds *thresholds;
4334 	struct mem_cgroup_threshold_ary *new;
4335 	int type = MEMFILE_TYPE(cft->private);
4336 	u64 threshold, usage;
4337 	int i, size, ret;
4338 
4339 	ret = res_counter_memparse_write_strategy(args, &threshold);
4340 	if (ret)
4341 		return ret;
4342 
4343 	mutex_lock(&memcg->thresholds_lock);
4344 
4345 	if (type == _MEM)
4346 		thresholds = &memcg->thresholds;
4347 	else if (type == _MEMSWAP)
4348 		thresholds = &memcg->memsw_thresholds;
4349 	else
4350 		BUG();
4351 
4352 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4353 
4354 	/* Check if a threshold crossed before adding a new one */
4355 	if (thresholds->primary)
4356 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4357 
4358 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4359 
4360 	/* Allocate memory for new array of thresholds */
4361 	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4362 			GFP_KERNEL);
4363 	if (!new) {
4364 		ret = -ENOMEM;
4365 		goto unlock;
4366 	}
4367 	new->size = size;
4368 
4369 	/* Copy thresholds (if any) to new array */
4370 	if (thresholds->primary) {
4371 		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4372 				sizeof(struct mem_cgroup_threshold));
4373 	}
4374 
4375 	/* Add new threshold */
4376 	new->entries[size - 1].eventfd = eventfd;
4377 	new->entries[size - 1].threshold = threshold;
4378 
4379 	/* Sort thresholds. Registering of new threshold isn't time-critical */
4380 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4381 			compare_thresholds, NULL);
4382 
4383 	/* Find current threshold */
4384 	new->current_threshold = -1;
4385 	for (i = 0; i < size; i++) {
4386 		if (new->entries[i].threshold < usage) {
4387 			/*
4388 			 * new->current_threshold will not be used until
4389 			 * rcu_assign_pointer(), so it's safe to increment
4390 			 * it here.
4391 			 */
4392 			++new->current_threshold;
4393 		}
4394 	}
4395 
4396 	/* Free old spare buffer and save old primary buffer as spare */
4397 	kfree(thresholds->spare);
4398 	thresholds->spare = thresholds->primary;
4399 
4400 	rcu_assign_pointer(thresholds->primary, new);
4401 
4402 	/* To be sure that nobody uses thresholds */
4403 	synchronize_rcu();
4404 
4405 unlock:
4406 	mutex_unlock(&memcg->thresholds_lock);
4407 
4408 	return ret;
4409 }
4410 
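/*
 * Remove all thresholds registered for @eventfd.  The spare array saved by
 * the register path is reused for the shrunken copy, so this path does not
 * need to allocate memory.
 */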
4411 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4412 	struct cftype *cft, struct eventfd_ctx *eventfd)
4413 {
4414 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4415 	struct mem_cgroup_thresholds *thresholds;
4416 	struct mem_cgroup_threshold_ary *new;
4417 	int type = MEMFILE_TYPE(cft->private);
4418 	u64 usage;
4419 	int i, j, size;
4420 
4421 	mutex_lock(&memcg->thresholds_lock);
4422 	if (type == _MEM)
4423 		thresholds = &memcg->thresholds;
4424 	else if (type == _MEMSWAP)
4425 		thresholds = &memcg->memsw_thresholds;
4426 	else
4427 		BUG();
4428 
4429 	/*
4430 	 * Something went wrong if we are trying to unregister a threshold
4431 	 * when we don't have any thresholds.
4432 	 */
4433 	BUG_ON(!thresholds);
4434 
4435 	if (!thresholds->primary)
4436 		goto unlock;
4437 
4438 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4439 
4440 	/* Check if a threshold crossed before removing */
4441 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4442 
4443 	/* Calculate the new number of thresholds */
4444 	size = 0;
4445 	for (i = 0; i < thresholds->primary->size; i++) {
4446 		if (thresholds->primary->entries[i].eventfd != eventfd)
4447 			size++;
4448 	}
4449 
4450 	new = thresholds->spare;
4451 
4452 	/* Set thresholds array to NULL if we don't have thresholds */
4453 	if (!size) {
4454 		kfree(new);
4455 		new = NULL;
4456 		goto swap_buffers;
4457 	}
4458 
4459 	new->size = size;
4460 
4461 	/* Copy thresholds and find current threshold */
4462 	new->current_threshold = -1;
4463 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4464 		if (thresholds->primary->entries[i].eventfd == eventfd)
4465 			continue;
4466 
4467 		new->entries[j] = thresholds->primary->entries[i];
4468 		if (new->entries[j].threshold < usage) {
4469 			/*
4470 			 * new->current_threshold will not be used
4471 			 * until rcu_assign_pointer(), so it's safe to increment
4472 			 * it here.
4473 			 */
4474 			++new->current_threshold;
4475 		}
4476 		j++;
4477 	}
4478 
4479 swap_buffers:
4480 	/* Swap primary and spare array */
4481 	thresholds->spare = thresholds->primary;
4482 	rcu_assign_pointer(thresholds->primary, new);
4483 
4484 	/* To be sure that nobody uses thresholds */
4485 	synchronize_rcu();
4486 unlock:
4487 	mutex_unlock(&memcg->thresholds_lock);
4488 }
4489 
4490 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4491 	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4492 {
4493 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4494 	struct mem_cgroup_eventfd_list *event;
4495 	int type = MEMFILE_TYPE(cft->private);
4496 
4497 	BUG_ON(type != _OOM_TYPE);
4498 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4499 	if (!event)
4500 		return -ENOMEM;
4501 
4502 	spin_lock(&memcg_oom_lock);
4503 
4504 	event->eventfd = eventfd;
4505 	list_add(&event->list, &memcg->oom_notify);
4506 
4507 	/* already in OOM ? */
4508 	if (atomic_read(&memcg->under_oom))
4509 		eventfd_signal(eventfd, 1);
4510 	spin_unlock(&memcg_oom_lock);
4511 
4512 	return 0;
4513 }
4514 
4515 static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4516 	struct cftype *cft, struct eventfd_ctx *eventfd)
4517 {
4518 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4519 	struct mem_cgroup_eventfd_list *ev, *tmp;
4520 	int type = MEMFILE_TYPE(cft->private);
4521 
4522 	BUG_ON(type != _OOM_TYPE);
4523 
4524 	spin_lock(&memcg_oom_lock);
4525 
4526 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4527 		if (ev->eventfd == eventfd) {
4528 			list_del(&ev->list);
4529 			kfree(ev);
4530 		}
4531 	}
4532 
4533 	spin_unlock(&memcg_oom_lock);
4534 }
4535 
4536 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4537 	struct cftype *cft,  struct cgroup_map_cb *cb)
4538 {
4539 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4540 
4541 	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
4542 
4543 	if (atomic_read(&memcg->under_oom))
4544 		cb->fill(cb, "under_oom", 1);
4545 	else
4546 		cb->fill(cb, "under_oom", 0);
4547 	return 0;
4548 }
4549 
4550 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4551 	struct cftype *cft, u64 val)
4552 {
4553 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4554 	struct mem_cgroup *parent;
4555 
4556 	/* cannot set to root cgroup and only 0 and 1 are allowed */
4557 	if (!cgrp->parent || !((val == 0) || (val == 1)))
4558 		return -EINVAL;
4559 
4560 	parent = mem_cgroup_from_cont(cgrp->parent);
4561 
4562 	cgroup_lock();
4563 	/* oom-kill-disable is a flag for subhierarchy. */
4564 	if ((parent->use_hierarchy) ||
4565 	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4566 		cgroup_unlock();
4567 		return -EINVAL;
4568 	}
4569 	memcg->oom_kill_disable = val;
4570 	if (!val)
4571 		memcg_oom_recover(memcg);
4572 	cgroup_unlock();
4573 	return 0;
4574 }
4575 
4576 #ifdef CONFIG_NUMA
4577 static const struct file_operations mem_control_numa_stat_file_operations = {
4578 	.read = seq_read,
4579 	.llseek = seq_lseek,
4580 	.release = single_release,
4581 };
4582 
4583 static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4584 {
4585 	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4586 
4587 	file->f_op = &mem_control_numa_stat_file_operations;
4588 	return single_open(file, mem_control_numa_stat_show, cont);
4589 }
4590 #endif /* CONFIG_NUMA */
4591 
4592 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
4593 static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4594 {
4595 	/*
4596 	 * Part of this would be better living in a separate allocation
4597 	 * function, leaving us with just the cgroup tree population work.
4598 	 * We, however, depend on state such as network's proto_list that
4599 	 * is only initialized after cgroup creation. The least
4600 	 * cumbersome way to deal with it is to defer it all to populate time.
4601 	 */
4602 	return mem_cgroup_sockets_init(cont, ss);
4603 };
4604 
4605 static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4606 				struct cgroup *cont)
4607 {
4608 	mem_cgroup_sockets_destroy(cont, ss);
4609 }
4610 #else
4611 static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4612 {
4613 	return 0;
4614 }
4615 
4616 static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4617 				struct cgroup *cont)
4618 {
4619 }
4620 #endif
4621 
4622 static struct cftype mem_cgroup_files[] = {
4623 	{
4624 		.name = "usage_in_bytes",
4625 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4626 		.read_u64 = mem_cgroup_read,
4627 		.register_event = mem_cgroup_usage_register_event,
4628 		.unregister_event = mem_cgroup_usage_unregister_event,
4629 	},
4630 	{
4631 		.name = "max_usage_in_bytes",
4632 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4633 		.trigger = mem_cgroup_reset,
4634 		.read_u64 = mem_cgroup_read,
4635 	},
4636 	{
4637 		.name = "limit_in_bytes",
4638 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4639 		.write_string = mem_cgroup_write,
4640 		.read_u64 = mem_cgroup_read,
4641 	},
4642 	{
4643 		.name = "soft_limit_in_bytes",
4644 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4645 		.write_string = mem_cgroup_write,
4646 		.read_u64 = mem_cgroup_read,
4647 	},
4648 	{
4649 		.name = "failcnt",
4650 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4651 		.trigger = mem_cgroup_reset,
4652 		.read_u64 = mem_cgroup_read,
4653 	},
4654 	{
4655 		.name = "stat",
4656 		.read_map = mem_control_stat_show,
4657 	},
4658 	{
4659 		.name = "force_empty",
4660 		.trigger = mem_cgroup_force_empty_write,
4661 	},
4662 	{
4663 		.name = "use_hierarchy",
4664 		.write_u64 = mem_cgroup_hierarchy_write,
4665 		.read_u64 = mem_cgroup_hierarchy_read,
4666 	},
4667 	{
4668 		.name = "swappiness",
4669 		.read_u64 = mem_cgroup_swappiness_read,
4670 		.write_u64 = mem_cgroup_swappiness_write,
4671 	},
4672 	{
4673 		.name = "move_charge_at_immigrate",
4674 		.read_u64 = mem_cgroup_move_charge_read,
4675 		.write_u64 = mem_cgroup_move_charge_write,
4676 	},
4677 	{
4678 		.name = "oom_control",
4679 		.read_map = mem_cgroup_oom_control_read,
4680 		.write_u64 = mem_cgroup_oom_control_write,
4681 		.register_event = mem_cgroup_oom_register_event,
4682 		.unregister_event = mem_cgroup_oom_unregister_event,
4683 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4684 	},
4685 #ifdef CONFIG_NUMA
4686 	{
4687 		.name = "numa_stat",
4688 		.open = mem_control_numa_stat_open,
4689 		.mode = S_IRUGO,
4690 	},
4691 #endif
4692 };
4693 
4694 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4695 static struct cftype memsw_cgroup_files[] = {
4696 	{
4697 		.name = "memsw.usage_in_bytes",
4698 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4699 		.read_u64 = mem_cgroup_read,
4700 		.register_event = mem_cgroup_usage_register_event,
4701 		.unregister_event = mem_cgroup_usage_unregister_event,
4702 	},
4703 	{
4704 		.name = "memsw.max_usage_in_bytes",
4705 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4706 		.trigger = mem_cgroup_reset,
4707 		.read_u64 = mem_cgroup_read,
4708 	},
4709 	{
4710 		.name = "memsw.limit_in_bytes",
4711 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4712 		.write_string = mem_cgroup_write,
4713 		.read_u64 = mem_cgroup_read,
4714 	},
4715 	{
4716 		.name = "memsw.failcnt",
4717 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4718 		.trigger = mem_cgroup_reset,
4719 		.read_u64 = mem_cgroup_read,
4720 	},
4721 };
4722 
4723 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4724 {
4725 	if (!do_swap_account)
4726 		return 0;
4727 	return cgroup_add_files(cont, ss, memsw_cgroup_files,
4728 				ARRAY_SIZE(memsw_cgroup_files));
4729 };
4730 #else
4731 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4732 {
4733 	return 0;
4734 }
4735 #endif
4736 
4737 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4738 {
4739 	struct mem_cgroup_per_node *pn;
4740 	struct mem_cgroup_per_zone *mz;
4741 	enum lru_list l;
4742 	int zone, tmp = node;
4743 	/*
4744 	 * This routine is called against possible nodes.
4745 	 * But it's a BUG to call kmalloc() against an offline node.
4746 	 *
4747 	 * TODO: this routine can waste a lot of memory for nodes which will
4748 	 *       never be onlined. It would be better to use a memory hotplug callback
4749 	 *       function.
4750 	 */
4751 	if (!node_state(node, N_NORMAL_MEMORY))
4752 		tmp = -1;
4753 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4754 	if (!pn)
4755 		return 1;
4756 
4757 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4758 		mz = &pn->zoneinfo[zone];
4759 		for_each_lru(l)
4760 			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
4761 		mz->usage_in_excess = 0;
4762 		mz->on_tree = false;
4763 		mz->mem = memcg;
4764 	}
4765 	memcg->info.nodeinfo[node] = pn;
4766 	return 0;
4767 }
4768 
4769 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4770 {
4771 	kfree(memcg->info.nodeinfo[node]);
4772 }
4773 
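/*
 * Allocate a zeroed struct mem_cgroup plus its per-cpu statistics.  The
 * structure can be large when MAX_NUMNODES is big, so fall back to
 * vzalloc() when it no longer fits in a single page.
 */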
4774 static struct mem_cgroup *mem_cgroup_alloc(void)
4775 {
4776 	struct mem_cgroup *mem;
4777 	int size = sizeof(struct mem_cgroup);
4778 
4779 	/* Can be very big if MAX_NUMNODES is very big */
4780 	if (size < PAGE_SIZE)
4781 		mem = kzalloc(size, GFP_KERNEL);
4782 	else
4783 		mem = vzalloc(size);
4784 
4785 	if (!mem)
4786 		return NULL;
4787 
4788 	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4789 	if (!mem->stat)
4790 		goto out_free;
4791 	spin_lock_init(&mem->pcp_counter_lock);
4792 	return mem;
4793 
4794 out_free:
4795 	if (size < PAGE_SIZE)
4796 		kfree(mem);
4797 	else
4798 		vfree(mem);
4799 	return NULL;
4800 }
4801 
4802 /*
4803  * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
4804  * but in process context.  The work_freeing structure is overlaid
4805  * on the rcu_freeing structure, which itself is overlaid on memsw.
4806  */
4807 static void vfree_work(struct work_struct *work)
4808 {
4809 	struct mem_cgroup *memcg;
4810 
4811 	memcg = container_of(work, struct mem_cgroup, work_freeing);
4812 	vfree(memcg);
4813 }
4814 static void vfree_rcu(struct rcu_head *rcu_head)
4815 {
4816 	struct mem_cgroup *memcg;
4817 
4818 	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
4819 	INIT_WORK(&memcg->work_freeing, vfree_work);
4820 	schedule_work(&memcg->work_freeing);
4821 }
4822 
4823 /*
4824  * When destroying a mem_cgroup, references from swap_cgroup can remain.
4825  * (scanning them all at force_empty is too costly...)
4826  *
4827  * Instead of clearing all references at force_empty, we remember
4828  * the number of references from swap_cgroup and free the mem_cgroup when
4829  * it goes down to 0.
4830  *
4831  * Removal of cgroup itself succeeds regardless of refs from swap.
4832  */
4833 
4834 static void __mem_cgroup_free(struct mem_cgroup *memcg)
4835 {
4836 	int node;
4837 
4838 	mem_cgroup_remove_from_trees(memcg);
4839 	free_css_id(&mem_cgroup_subsys, &memcg->css);
4840 
4841 	for_each_node(node)
4842 		free_mem_cgroup_per_zone_info(memcg, node);
4843 
4844 	free_percpu(memcg->stat);
4845 	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4846 		kfree_rcu(memcg, rcu_freeing);
4847 	else
4848 		call_rcu(&memcg->rcu_freeing, vfree_rcu);
4849 }
4850 
4851 static void mem_cgroup_get(struct mem_cgroup *memcg)
4852 {
4853 	atomic_inc(&memcg->refcnt);
4854 }
4855 
4856 static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4857 {
4858 	if (atomic_sub_and_test(count, &memcg->refcnt)) {
4859 		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4860 		__mem_cgroup_free(memcg);
4861 		if (parent)
4862 			mem_cgroup_put(parent);
4863 	}
4864 }
4865 
4866 static void mem_cgroup_put(struct mem_cgroup *memcg)
4867 {
4868 	__mem_cgroup_put(memcg, 1);
4869 }
4870 
4871 /*
4872  * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
4873  */
4874 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4875 {
4876 	if (!memcg->res.parent)
4877 		return NULL;
4878 	return mem_cgroup_from_res_counter(memcg->res.parent, res);
4879 }
4880 EXPORT_SYMBOL(parent_mem_cgroup);
4881 
4882 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4883 static void __init enable_swap_cgroup(void)
4884 {
4885 	if (!mem_cgroup_disabled() && really_do_swap_account)
4886 		do_swap_account = 1;
4887 }
4888 #else
4889 static void __init enable_swap_cgroup(void)
4890 {
4891 }
4892 #endif
4893 
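/*
 * Allocate the per-node/per-zone rb-trees used for soft limit reclaim.
 * Returns 0 on success; on allocation failure, everything allocated so far
 * is freed and 1 is returned.
 */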
4894 static int mem_cgroup_soft_limit_tree_init(void)
4895 {
4896 	struct mem_cgroup_tree_per_node *rtpn;
4897 	struct mem_cgroup_tree_per_zone *rtpz;
4898 	int tmp, node, zone;
4899 
4900 	for_each_node(node) {
4901 		tmp = node;
4902 		if (!node_state(node, N_NORMAL_MEMORY))
4903 			tmp = -1;
4904 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4905 		if (!rtpn)
4906 			goto err_cleanup;
4907 
4908 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
4909 
4910 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4911 			rtpz = &rtpn->rb_tree_per_zone[zone];
4912 			rtpz->rb_root = RB_ROOT;
4913 			spin_lock_init(&rtpz->lock);
4914 		}
4915 	}
4916 	return 0;
4917 
4918 err_cleanup:
4919 	for_each_node(node) {
4920 		if (!soft_limit_tree.rb_tree_per_node[node])
4921 			break;
4922 		kfree(soft_limit_tree.rb_tree_per_node[node]);
4923 		soft_limit_tree.rb_tree_per_node[node] = NULL;
4924 	}
4925 	return 1;
4926 
4927 }
4928 
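/*
 * cgroup_subsys create() callback: allocate and initialise a new memcg.
 * The root cgroup additionally sets up the soft-limit trees, the per-cpu
 * charge stocks and the CPU hotplug notifier; children inherit
 * use_hierarchy, oom_kill_disable and swappiness from their parent.
 */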
4929 static struct cgroup_subsys_state * __ref
4930 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4931 {
4932 	struct mem_cgroup *memcg, *parent;
4933 	long error = -ENOMEM;
4934 	int node;
4935 
4936 	memcg = mem_cgroup_alloc();
4937 	if (!memcg)
4938 		return ERR_PTR(error);
4939 
4940 	for_each_node(node)
4941 		if (alloc_mem_cgroup_per_zone_info(memcg, node))
4942 			goto free_out;
4943 
4944 	/* root ? */
4945 	if (cont->parent == NULL) {
4946 		int cpu;
4947 		enable_swap_cgroup();
4948 		parent = NULL;
4949 		if (mem_cgroup_soft_limit_tree_init())
4950 			goto free_out;
4951 		root_mem_cgroup = memcg;
4952 		for_each_possible_cpu(cpu) {
4953 			struct memcg_stock_pcp *stock =
4954 						&per_cpu(memcg_stock, cpu);
4955 			INIT_WORK(&stock->work, drain_local_stock);
4956 		}
4957 		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4958 	} else {
4959 		parent = mem_cgroup_from_cont(cont->parent);
4960 		memcg->use_hierarchy = parent->use_hierarchy;
4961 		memcg->oom_kill_disable = parent->oom_kill_disable;
4962 	}
4963 
4964 	if (parent && parent->use_hierarchy) {
4965 		res_counter_init(&memcg->res, &parent->res);
4966 		res_counter_init(&memcg->memsw, &parent->memsw);
4967 		/*
4968 		 * We increment refcnt of the parent to ensure that we can
4969 		 * safely access it on res_counter_charge/uncharge.
4970 		 * This refcnt will be decremented when freeing this
4971 		 * mem_cgroup (see mem_cgroup_put()).
4972 		 */
4973 		mem_cgroup_get(parent);
4974 	} else {
4975 		res_counter_init(&memcg->res, NULL);
4976 		res_counter_init(&memcg->memsw, NULL);
4977 	}
4978 	memcg->last_scanned_node = MAX_NUMNODES;
4979 	INIT_LIST_HEAD(&memcg->oom_notify);
4980 
4981 	if (parent)
4982 		memcg->swappiness = mem_cgroup_swappiness(parent);
4983 	atomic_set(&memcg->refcnt, 1);
4984 	memcg->move_charge_at_immigrate = 0;
4985 	mutex_init(&memcg->thresholds_lock);
4986 	return &memcg->css;
4987 free_out:
4988 	__mem_cgroup_free(memcg);
4989 	return ERR_PTR(error);
4990 }
4991 
4992 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4993 					struct cgroup *cont)
4994 {
4995 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4996 
4997 	return mem_cgroup_force_empty(memcg, false);
4998 }
4999 
5000 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
5001 				struct cgroup *cont)
5002 {
5003 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5004 
5005 	kmem_cgroup_destroy(ss, cont);
5006 
5007 	mem_cgroup_put(memcg);
5008 }
5009 
5010 static int mem_cgroup_populate(struct cgroup_subsys *ss,
5011 				struct cgroup *cont)
5012 {
5013 	int ret;
5014 
5015 	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
5016 				ARRAY_SIZE(mem_cgroup_files));
5017 
5018 	if (!ret)
5019 		ret = register_memsw_files(cont, ss);
5020 
5021 	if (!ret)
5022 		ret = register_kmem_files(cont, ss);
5023 
5024 	return ret;
5025 }
5026 
5027 #ifdef CONFIG_MMU
5028 /* Handlers for move charge at task migration. */
5029 #define PRECHARGE_COUNT_AT_ONCE	256
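/*
 * Charge @count pages to the destination memcg (mc.to) up front.  Try one
 * bulk res_counter charge first; if that fails, fall back to charging page
 * by page, rescheduling every PRECHARGE_COUNT_AT_ONCE iterations.
 */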
5030 static int mem_cgroup_do_precharge(unsigned long count)
5031 {
5032 	int ret = 0;
5033 	int batch_count = PRECHARGE_COUNT_AT_ONCE;
5034 	struct mem_cgroup *memcg = mc.to;
5035 
5036 	if (mem_cgroup_is_root(memcg)) {
5037 		mc.precharge += count;
5038 		/* we don't need css_get for root */
5039 		return ret;
5040 	}
5041 	/* try to charge at once */
5042 	if (count > 1) {
5043 		struct res_counter *dummy;
5044 		/*
5045 		 * "memcg" cannot be under rmdir() because we've already checked
5046 		 * by cgroup_lock_live_cgroup() that it is not removed and we
5047 		 * are still under the same cgroup_mutex. So we can postpone
5048 		 * css_get().
5049 		 */
5050 		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
5051 			goto one_by_one;
5052 		if (do_swap_account && res_counter_charge(&memcg->memsw,
5053 						PAGE_SIZE * count, &dummy)) {
5054 			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
5055 			goto one_by_one;
5056 		}
5057 		mc.precharge += count;
5058 		return ret;
5059 	}
5060 one_by_one:
5061 	/* fall back to one by one charge */
5062 	while (count--) {
5063 		if (signal_pending(current)) {
5064 			ret = -EINTR;
5065 			break;
5066 		}
5067 		if (!batch_count--) {
5068 			batch_count = PRECHARGE_COUNT_AT_ONCE;
5069 			cond_resched();
5070 		}
5071 		ret = __mem_cgroup_try_charge(NULL,
5072 					GFP_KERNEL, 1, &memcg, false);
5073 		if (ret)
5074 			/* mem_cgroup_clear_mc() will do uncharge later */
5075 			return ret;
5076 		mc.precharge++;
5077 	}
5078 	return ret;
5079 }
5080 
5081 /**
5082  * is_target_pte_for_mc - check a pte whether it is valid for move charge
5083  * @vma: the vma the pte to be checked belongs to
5084  * @addr: the address corresponding to the pte to be checked
5085  * @ptent: the pte to be checked
5086  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5087  *
5088  * Returns
5089  *   0 (MC_TARGET_NONE): the pte is not a target for move charge.
5090  *   1 (MC_TARGET_PAGE): the page corresponding to this pte is a target for
5091  *     move charge. If @target is not NULL, the page is stored in target->page
5092  *     with an extra refcount taken (callers must handle it).
5093  *   2 (MC_TARGET_SWAP): the swap entry corresponding to this pte is a
5094  *     target for charge migration. If @target is not NULL, the entry is stored
5095  *     in target->ent.
5096  *
5097  * Called with pte lock held.
5098  */
5099 union mc_target {
5100 	struct page	*page;
5101 	swp_entry_t	ent;
5102 };
5103 
5104 enum mc_target_type {
5105 	MC_TARGET_NONE,	/* not used */
5106 	MC_TARGET_PAGE,
5107 	MC_TARGET_SWAP,
5108 };
5109 
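/*
 * The mc_handle_*_pte() helpers below resolve a pte to the page (and,
 * for swap, the swap entry) whose charge would be moved, taking a
 * reference on any page they return.  They return NULL when the pte is
 * not eligible, e.g. shared anonymous pages or page types excluded by
 * the move_charge_at_immigrate mask (see move_anon()/move_file()).
 */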
5110 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5111 						unsigned long addr, pte_t ptent)
5112 {
5113 	struct page *page = vm_normal_page(vma, addr, ptent);
5114 
5115 	if (!page || !page_mapped(page))
5116 		return NULL;
5117 	if (PageAnon(page)) {
5118 		/* we don't move shared anon (mapcount > 1) */
5119 		if (!move_anon() || page_mapcount(page) > 1)
5120 			return NULL;
5121 	} else if (!move_file())
5122 		/* we ignore mapcount for file pages */
5123 		return NULL;
5124 	if (!get_page_unless_zero(page))
5125 		return NULL;
5126 
5127 	return page;
5128 }
5129 
5130 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5131 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5132 {
5133 	int usage_count;
5134 	struct page *page = NULL;
5135 	swp_entry_t ent = pte_to_swp_entry(ptent);
5136 
5137 	if (!move_anon() || non_swap_entry(ent))
5138 		return NULL;
5139 	usage_count = mem_cgroup_count_swap_user(ent, &page);
5140 	if (usage_count > 1) { /* we don't move shared anon */
5141 		if (page)
5142 			put_page(page);
5143 		return NULL;
5144 	}
5145 	if (do_swap_account)
5146 		entry->val = ent.val;
5147 
5148 	return page;
5149 }
5150 
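/*
 * For file-backed vmas the page is looked up in the page cache by file
 * offset, covering both not-yet-faulted ptes (pte_none) and nonlinear
 * mappings (pte_file).  A shmem/tmpfs page that has been swapped out
 * appears as an exceptional radix-tree entry and is looked up in
 * swapper_space instead.
 */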
5151 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5152 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5153 {
5154 	struct page *page = NULL;
5155 	struct inode *inode;
5156 	struct address_space *mapping;
5157 	pgoff_t pgoff;
5158 
5159 	if (!vma->vm_file) /* anonymous vma */
5160 		return NULL;
5161 	if (!move_file())
5162 		return NULL;
5163 
5164 	inode = vma->vm_file->f_path.dentry->d_inode;
5165 	mapping = vma->vm_file->f_mapping;
5166 	if (pte_none(ptent))
5167 		pgoff = linear_page_index(vma, addr);
5168 	else /* pte_file(ptent) is true */
5169 		pgoff = pte_to_pgoff(ptent);
5170 
5171 	/* page is moved even if it's not RSS of this task (page-faulted). */
5172 	page = find_get_page(mapping, pgoff);
5173 
5174 #ifdef CONFIG_SWAP
5175 	/* shmem/tmpfs may report page out on swap: account for that too. */
5176 	if (radix_tree_exceptional_entry(page)) {
5177 		swp_entry_t swap = radix_to_swp_entry(page);
5178 		if (do_swap_account)
5179 			*entry = swap;
5180 		page = find_get_page(&swapper_space, swap.val);
5181 	}
5182 #endif
5183 	return page;
5184 }
5185 
5186 static int is_target_pte_for_mc(struct vm_area_struct *vma,
5187 		unsigned long addr, pte_t ptent, union mc_target *target)
5188 {
5189 	struct page *page = NULL;
5190 	struct page_cgroup *pc;
5191 	int ret = 0;
5192 	swp_entry_t ent = { .val = 0 };
5193 
5194 	if (pte_present(ptent))
5195 		page = mc_handle_present_pte(vma, addr, ptent);
5196 	else if (is_swap_pte(ptent))
5197 		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5198 	else if (pte_none(ptent) || pte_file(ptent))
5199 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5200 
5201 	if (!page && !ent.val)
5202 		return 0;
5203 	if (page) {
5204 		pc = lookup_page_cgroup(page);
5205 		/*
5206 		 * Do only loose check w/o page_cgroup lock.
5207 		 * mem_cgroup_move_account() checks the pc is valid or not under
5208 		 * the lock.
5209 		 */
5210 		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5211 			ret = MC_TARGET_PAGE;
5212 			if (target)
5213 				target->page = page;
5214 		}
5215 		if (!ret || !target)
5216 			put_page(page);
5217 	}
5218 	/* There is a swap entry and the page either doesn't exist or isn't charged */
5219 	if (ent.val && !ret &&
5220 			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
5221 		ret = MC_TARGET_SWAP;
5222 		if (target)
5223 			target->ent = ent;
5224 	}
5225 	return ret;
5226 }
5227 
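/*
 * First pass over the target mm: walk every pte and count how many
 * charges would have to be moved.  The count is accumulated in
 * mc.precharge and then handed to mem_cgroup_do_precharge() by
 * mem_cgroup_precharge_mc().
 */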
5228 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5229 					unsigned long addr, unsigned long end,
5230 					struct mm_walk *walk)
5231 {
5232 	struct vm_area_struct *vma = walk->private;
5233 	pte_t *pte;
5234 	spinlock_t *ptl;
5235 
5236 	split_huge_page_pmd(walk->mm, pmd);
5237 
5238 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5239 	for (; addr != end; pte++, addr += PAGE_SIZE)
5240 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
5241 			mc.precharge++;	/* increment precharge temporarily */
5242 	pte_unmap_unlock(pte - 1, ptl);
5243 	cond_resched();
5244 
5245 	return 0;
5246 }
5247 
5248 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5249 {
5250 	unsigned long precharge;
5251 	struct vm_area_struct *vma;
5252 
5253 	down_read(&mm->mmap_sem);
5254 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5255 		struct mm_walk mem_cgroup_count_precharge_walk = {
5256 			.pmd_entry = mem_cgroup_count_precharge_pte_range,
5257 			.mm = mm,
5258 			.private = vma,
5259 		};
5260 		if (is_vm_hugetlb_page(vma))
5261 			continue;
5262 		walk_page_range(vma->vm_start, vma->vm_end,
5263 					&mem_cgroup_count_precharge_walk);
5264 	}
5265 	up_read(&mm->mmap_sem);
5266 
5267 	precharge = mc.precharge;
5268 	mc.precharge = 0;
5269 
5270 	return precharge;
5271 }
5272 
5273 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5274 {
5275 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5276 
5277 	VM_BUG_ON(mc.moving_task);
5278 	mc.moving_task = current;
5279 	return mem_cgroup_do_precharge(precharge);
5280 }
5281 
5282 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5283 static void __mem_cgroup_clear_mc(void)
5284 {
5285 	struct mem_cgroup *from = mc.from;
5286 	struct mem_cgroup *to = mc.to;
5287 
5288 	/* we must uncharge all the leftover precharges from mc.to */
5289 	if (mc.precharge) {
5290 		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
5291 		mc.precharge = 0;
5292 	}
5293 	/*
5294 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5295 	 * we must uncharge here.
5296 	 */
5297 	if (mc.moved_charge) {
5298 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5299 		mc.moved_charge = 0;
5300 	}
5301 	/* we must fixup refcnts and charges */
5302 	if (mc.moved_swap) {
5303 		/* uncharge swap account from the old cgroup */
5304 		if (!mem_cgroup_is_root(mc.from))
5305 			res_counter_uncharge(&mc.from->memsw,
5306 						PAGE_SIZE * mc.moved_swap);
5307 		__mem_cgroup_put(mc.from, mc.moved_swap);
5308 
5309 		if (!mem_cgroup_is_root(mc.to)) {
5310 			/*
5311 			 * we charged both to->res and to->memsw, so we should
5312 			 * uncharge to->res.
5313 			 */
5314 			res_counter_uncharge(&mc.to->res,
5315 						PAGE_SIZE * mc.moved_swap);
5316 		}
5317 		/* we've already done mem_cgroup_get(mc.to) */
5318 		mc.moved_swap = 0;
5319 	}
5320 	memcg_oom_recover(from);
5321 	memcg_oom_recover(to);
5322 	wake_up_all(&mc.waitq);
5323 }
5324 
5325 static void mem_cgroup_clear_mc(void)
5326 {
5327 	struct mem_cgroup *from = mc.from;
5328 
5329 	/*
5330 	 * we must clear moving_task before waking up waiters at the end of
5331 	 * task migration.
5332 	 */
5333 	mc.moving_task = NULL;
5334 	__mem_cgroup_clear_mc();
5335 	spin_lock(&mc.lock);
5336 	mc.from = NULL;
5337 	mc.to = NULL;
5338 	spin_unlock(&mc.lock);
5339 	mem_cgroup_end_move(from);
5340 }
5341 
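/*
 * cgroup attach protocol for charge moving:
 *   can_attach()    - record mc.from/mc.to and pre-charge mc.to
 *   cancel_attach() - drop all pre-charges if the attach is aborted
 *   attach()        - mem_cgroup_move_task() performs the actual move
 *                     and then clears the move-charge state.
 * Charges are moved only when the migrating task owns the mm and the
 * destination cgroup has move_charge_at_immigrate set.
 */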
5342 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5343 				struct cgroup *cgroup,
5344 				struct cgroup_taskset *tset)
5345 {
5346 	struct task_struct *p = cgroup_taskset_first(tset);
5347 	int ret = 0;
5348 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
5349 
5350 	if (memcg->move_charge_at_immigrate) {
5351 		struct mm_struct *mm;
5352 		struct mem_cgroup *from = mem_cgroup_from_task(p);
5353 
5354 		VM_BUG_ON(from == memcg);
5355 
5356 		mm = get_task_mm(p);
5357 		if (!mm)
5358 			return 0;
5359 		/* We move charges only when we move the owner of the mm */
5360 		if (mm->owner == p) {
5361 			VM_BUG_ON(mc.from);
5362 			VM_BUG_ON(mc.to);
5363 			VM_BUG_ON(mc.precharge);
5364 			VM_BUG_ON(mc.moved_charge);
5365 			VM_BUG_ON(mc.moved_swap);
5366 			mem_cgroup_start_move(from);
5367 			spin_lock(&mc.lock);
5368 			mc.from = from;
5369 			mc.to = memcg;
5370 			spin_unlock(&mc.lock);
5371 			/* We set mc.moving_task later */
5372 
5373 			ret = mem_cgroup_precharge_mc(mm);
5374 			if (ret)
5375 				mem_cgroup_clear_mc();
5376 		}
5377 		mmput(mm);
5378 	}
5379 	return ret;
5380 }
5381 
5382 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5383 				struct cgroup *cgroup,
5384 				struct cgroup_taskset *tset)
5385 {
5386 	mem_cgroup_clear_mc();
5387 }
5388 
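/*
 * Second pass: for each target pte, move the page's charge (after
 * isolating the page from the LRU) or the swap entry's charge from
 * mc.from to mc.to, consuming one precharge per successful move.  If
 * the precharges run out mid-range, try to pre-charge one more page
 * and retry the remainder of the range.
 */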
5389 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5390 				unsigned long addr, unsigned long end,
5391 				struct mm_walk *walk)
5392 {
5393 	int ret = 0;
5394 	struct vm_area_struct *vma = walk->private;
5395 	pte_t *pte;
5396 	spinlock_t *ptl;
5397 
5398 	split_huge_page_pmd(walk->mm, pmd);
5399 retry:
5400 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5401 	for (; addr != end; addr += PAGE_SIZE) {
5402 		pte_t ptent = *(pte++);
5403 		union mc_target target;
5404 		int type;
5405 		struct page *page;
5406 		struct page_cgroup *pc;
5407 		swp_entry_t ent;
5408 
5409 		if (!mc.precharge)
5410 			break;
5411 
5412 		type = is_target_pte_for_mc(vma, addr, ptent, &target);
5413 		switch (type) {
5414 		case MC_TARGET_PAGE:
5415 			page = target.page;
5416 			if (isolate_lru_page(page))
5417 				goto put;
5418 			pc = lookup_page_cgroup(page);
5419 			if (!mem_cgroup_move_account(page, 1, pc,
5420 						     mc.from, mc.to, false)) {
5421 				mc.precharge--;
5422 				/* we uncharge from mc.from later. */
5423 				mc.moved_charge++;
5424 			}
5425 			putback_lru_page(page);
5426 put:			/* is_target_pte_for_mc() gets the page */
5427 			put_page(page);
5428 			break;
5429 		case MC_TARGET_SWAP:
5430 			ent = target.ent;
5431 			if (!mem_cgroup_move_swap_account(ent,
5432 						mc.from, mc.to, false)) {
5433 				mc.precharge--;
5434 				/* we fixup refcnts and charges later. */
5435 				mc.moved_swap++;
5436 			}
5437 			break;
5438 		default:
5439 			break;
5440 		}
5441 	}
5442 	pte_unmap_unlock(pte - 1, ptl);
5443 	cond_resched();
5444 
5445 	if (addr != end) {
5446 		/*
5447 		 * We have consumed all precharges we got in can_attach().
5448 		 * We try to charge one by one, but don't do any additional
5449 		 * charging of mc.to once a charge has failed during the
5450 		 * attach() phase.
5451 		 */
5452 		ret = mem_cgroup_do_precharge(1);
5453 		if (!ret)
5454 			goto retry;
5455 	}
5456 
5457 	return ret;
5458 }
5459 
5460 static void mem_cgroup_move_charge(struct mm_struct *mm)
5461 {
5462 	struct vm_area_struct *vma;
5463 
5464 	lru_add_drain_all();
5465 retry:
5466 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5467 		/*
5468 		 * Someone holding the mmap_sem might be waiting on the
5469 		 * waitq. So we cancel all extra charges, wake up all waiters,
5470 		 * and retry. Because we cancel precharges, we might not be able
5471 		 * to move enough charges, but moving charge is a best-effort
5472 		 * feature anyway, so it wouldn't be a big problem.
5473 		 */
5474 		__mem_cgroup_clear_mc();
5475 		cond_resched();
5476 		goto retry;
5477 	}
5478 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5479 		int ret;
5480 		struct mm_walk mem_cgroup_move_charge_walk = {
5481 			.pmd_entry = mem_cgroup_move_charge_pte_range,
5482 			.mm = mm,
5483 			.private = vma,
5484 		};
5485 		if (is_vm_hugetlb_page(vma))
5486 			continue;
5487 		ret = walk_page_range(vma->vm_start, vma->vm_end,
5488 						&mem_cgroup_move_charge_walk);
5489 		if (ret)
5490 			/*
5491 			 * This means we have consumed all precharges and failed
5492 			 * to do any additional charging. Just abandon here.
5493 			 */
5494 			break;
5495 	}
5496 	up_read(&mm->mmap_sem);
5497 }
5498 
5499 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5500 				struct cgroup *cont,
5501 				struct cgroup_taskset *tset)
5502 {
5503 	struct task_struct *p = cgroup_taskset_first(tset);
5504 	struct mm_struct *mm = get_task_mm(p);
5505 
5506 	if (mm) {
5507 		if (mc.to)
5508 			mem_cgroup_move_charge(mm);
5509 		put_swap_token(mm);
5510 		mmput(mm);
5511 	}
5512 	if (mc.to)
5513 		mem_cgroup_clear_mc();
5514 }
5515 #else	/* !CONFIG_MMU */
5516 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5517 				struct cgroup *cgroup,
5518 				struct cgroup_taskset *tset)
5519 {
5520 	return 0;
5521 }
5522 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5523 				struct cgroup *cgroup,
5524 				struct cgroup_taskset *tset)
5525 {
5526 }
5527 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5528 				struct cgroup *cont,
5529 				struct cgroup_taskset *tset)
5530 {
5531 }
5532 #endif
5533 
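/*
 * Registration of the "memory" cgroup subsystem.  use_id enables css id
 * allocation; css_id() is used above (in is_target_pte_for_mc()) to
 * match swap entries recorded by swap_cgroup against mc.from.
 */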
5534 struct cgroup_subsys mem_cgroup_subsys = {
5535 	.name = "memory",
5536 	.subsys_id = mem_cgroup_subsys_id,
5537 	.create = mem_cgroup_create,
5538 	.pre_destroy = mem_cgroup_pre_destroy,
5539 	.destroy = mem_cgroup_destroy,
5540 	.populate = mem_cgroup_populate,
5541 	.can_attach = mem_cgroup_can_attach,
5542 	.cancel_attach = mem_cgroup_cancel_attach,
5543 	.attach = mem_cgroup_move_task,
5544 	.early_init = 0,
5545 	.use_id = 1,
5546 };
5547 
5548 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
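/*
 * Boot-time override for swap accounting: "swapaccount=1" enables and
 * "swapaccount=0" disables really_do_swap_account.  Only compiled in
 * when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is set.
 */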
5549 static int __init enable_swap_account(char *s)
5550 {
5551 	/* enable swap accounting if "1" is given, disable if "0" is given */
5552 	if (!strcmp(s, "1"))
5553 		really_do_swap_account = 1;
5554 	else if (!strcmp(s, "0"))
5555 		really_do_swap_account = 0;
5556 	return 1;
5557 }
5558 __setup("swapaccount=", enable_swap_account);
5559 
5560 #endif
5561