/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

/*
 * Workqueue for cpuset related tasks.
 *
 * Using kevent workqueue may cause deadlock when memory_migrate
 * is set. So we create a separate workqueue thread for cpuset.
 */
static struct workqueue_struct *cpuset_wq;

/*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
struct cpuset;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	struct cpuset *parent;		/* my parent */

	struct fmeter fmeter;		/* memory_pressure filter */

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* used for walking a cpuset hierarchy */
	struct list_head stack_list;
};

/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
			    struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuset_subsys_id),
			    struct cpuset, css);
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
};

/*
 * There are two global mutexes guarding cpuset structures.  The first
 * is the main control groups cgroup_mutex, accessed via
 * cgroup_lock()/cgroup_unlock().  The second is the cpuset-specific
 * callback_mutex, below. They can nest.  It is ok to first take
 * cgroup_mutex, then nest callback_mutex.  We also require taking
 * task_lock() when dereferencing a task's cpuset pointer.  See "The
 * task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task
 * holds cgroup_mutex, then it blocks others wanting that mutex,
 * ensuring that it is the only task able to also acquire callback_mutex
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding cgroup_mutex.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_mutex to query cpusets.  Once it is ready to make
 * the changes, it takes callback_mutex, blocking everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task; we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */
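
/*
 * For illustration, a writer modifying a cpuset follows the locking
 * order above roughly like this (sketch only; validation and error
 * handling omitted):
 *
 *	cgroup_lock();			take cgroup_mutex first
 *	... check and allocate while nothing can change ...
 *	mutex_lock(&callback_mutex);	then nest callback_mutex
 *	... publish new cpus_allowed/mems_allowed values ...
 *	mutex_unlock(&callback_mutex);
 *	cgroup_unlock();
 *
 * This is the pattern update_cpumask() and update_nodemask() below
 * follow.
 */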

static DEFINE_MUTEX(callback_mutex);

/*
 * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
 * buffers.  They are statically allocated to prevent using excess stack
 * when calling cpuset_print_task_mems_allowed().
 */
#define CPUSET_NAME_LEN		(128)
#define	CPUSET_NODELIST_LEN	(256)
static char cpuset_name[CPUSET_NAME_LEN];
static char cpuset_nodelist[CPUSET_NODELIST_LEN];
static DEFINE_SPINLOCK(cpuset_buffer_lock);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead.
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};
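
/*
 * In practice this means a legacy
 *
 *	mount -t cpuset none /dev/cpuset
 *
 * behaves roughly as if the user had asked for something like
 *
 *	mount -t cgroup -o cpuset,noprefix \
 *		-o release_agent=/sbin/cpuset_release_agent none /dev/cpuset
 *
 * (illustrative equivalent only; the switch happens internally in
 * cpuset_mount() above).
 */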

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_cpus(const struct cpuset *cs,
				  struct cpumask *pmask)
{
	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
		cs = cs->parent;
	if (cs)
		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
	else
		cpumask_copy(pmask, cpu_online_mask);
	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  If we get all the way to the top and still haven't
 * found any online mems, return node_states[N_HIGH_MEMORY].
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_HIGH_MEMORY].
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed,
					node_states[N_HIGH_MEMORY]))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed,
					node_states[N_HIGH_MEMORY]);
	else
		*pmask = node_states[N_HIGH_MEMORY];
	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cgroup_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		tsk->flags |= PF_SPREAD_PAGE;
	else
		tsk->flags &= ~PF_SPREAD_PAGE;
	if (is_spread_slab(cs))
		tsk->flags |= PF_SPREAD_SLAB;
	else
		tsk->flags &= ~PF_SPREAD_SLAB;
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cgroup_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
		kfree(trial);
		return NULL;
	}
	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

	return trial;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cgroup_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cgroup *cont;
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
		if (!is_cpuset_subset(cgroup_cs(cont), trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if (cur == &top_cpuset)
		return 0;

	par = cur->parent;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
		c = cgroup_cs(cont);
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
	if (cgroup_task_count(cur->css.cgroup)) {
		if (cpumask_empty(trial->cpus_allowed) ||
		    nodes_empty(trial->mems_allowed)) {
			return -ENOSPC;
		}
	}

	return 0;
}
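
/*
 * For example (hypothetical hierarchy): shrinking a parent to cpus 0-1
 * while a child still uses cpus 0-3 leaves the child no longer a
 * subset, so the change fails with -EBUSY; widening a child beyond its
 * parent's masks fails with -EACCES; and giving two exclusive siblings
 * overlapping masks fails with -EINVAL.
 */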

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void
update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
{
	LIST_HEAD(q);

	list_add(&c->stack_list, &q);
	while (!list_empty(&q)) {
		struct cpuset *cp;
		struct cgroup *cont;
		struct cpuset *child;

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		if (cpumask_empty(cp->cpus_allowed))
			continue;

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);

		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &q);
		}
	}
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the kernel/sched.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cgroup_lock held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the scheduler's
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but different 'pn' partition numbers, and merges
 *	such pairs into the same partition number.  It keeps looping
 *	on the 'restart' label until it can no longer find any such
 *	pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
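/*
 * A small worked example (hypothetical cpusets): if cpusets A
 * (cpus 0-1), B (cpus 1-2) and C (cpus 4-5) all have
 * sched_load_balance set, then A and B overlap and are merged into
 * one partition element covering cpus 0-2, while C stays on its own;
 * the function would return ndoms == 2 with doms[0] == 0-2 and
 * doms[1] == 4-5.
 */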
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	LIST_HEAD(q);		/* queue of cpusets to be scanned */
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

		goto done;
	}

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	list_add(&top_cpuset.stack_list, &q);
	while (!list_empty(&q)) {
		struct cgroup *cont;
		struct cpuset *child;   /* scans child cpusets of cp */

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		if (cpumask_empty(cp->cpus_allowed))
			continue;

		/*
		 * All child cpusets contain a subset of the parent's cpus, so
		 * just skip them, and then we call update_domain_attr_tree()
		 * to calc relax_domain_level of the corresponding sched
		 * domain.
		 */
		if (is_sched_load_balance(cp)) {
			csa[csn++] = cp;
			continue;
		}

		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &q);
		}
	}

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				printk(KERN_WARNING
				 "rebuild_sched_domains confused:"
				  " nslot %d, ndoms %d, csn %d, i %d,"
				  " apn %d\n",
				  nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->cpus_allowed);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * Call with neither cgroup_mutex held nor within get_online_cpus().
 * Takes both cgroup_mutex and get_online_cpus().
 *
 * Cannot be directly called from cpuset code handling changes
 * to the cpuset pseudo-filesystem, because it cannot be called
 * from code that already holds cgroup_mutex.
 */
static void do_rebuild_sched_domains(struct work_struct *unused)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	get_online_cpus();

	/* Generate domain masks and attrs */
	cgroup_lock();
	ndoms = generate_sched_domains(&doms, &attr);
	cgroup_unlock();

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);

	put_online_cpus();
}
#else /* !CONFIG_SMP */
static void do_rebuild_sched_domains(struct work_struct *unused)
{
}

static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	*domains = NULL;
	return 1;
}
#endif /* CONFIG_SMP */

static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);

/*
 * Rebuild scheduler domains, asynchronously via workqueue.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * The rebuild_sched_domains() and partition_sched_domains()
 * routines must nest cgroup_lock() inside get_online_cpus(),
 * but such cpuset changes as these must nest that locking the
 * other way, holding cgroup_lock() for much of the code.
 *
 * So in order to avoid an ABBA deadlock, the cpuset code handling
 * these user changes delegates the actual sched domain rebuilding
 * to a separate workqueue thread, which ends up processing the
 * above do_rebuild_sched_domains() function.
 */
static void async_rebuild_sched_domains(void)
{
	queue_work(cpuset_wq, &rebuild_sched_domains_work);
}

/*
 * Accomplishes the same scheduler domain rebuild as the above
 * async_rebuild_sched_domains(), however it directly calls the
 * rebuild routine synchronously rather than calling it via an
 * asynchronous work thread.
 *
 * This can only be called from code that is not holding
 * cgroup_mutex (not nested in a cgroup_lock() call.)
 */
void rebuild_sched_domains(void)
{
	do_rebuild_sched_domains(NULL);
}

/**
 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 *
 * Call with cgroup_mutex held.  May take callback_mutex during call.
 * Called for each task in a cgroup by cgroup_scan_tasks().
 * Return nonzero if this task's cpus_allowed mask should be changed (in other
 * words, if its mask is not equal to its cpuset's mask).
 */
static int cpuset_test_cpumask(struct task_struct *tsk,
			       struct cgroup_scanner *scan)
{
	return !cpumask_equal(&tsk->cpus_allowed,
			(cgroup_cs(scan->cg))->cpus_allowed);
}

/**
 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 * cpus_allowed mask needs to be changed.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cgroup_lock() at this point.
 */
static void cpuset_change_cpumask(struct task_struct *tsk,
				  struct cgroup_scanner *scan)
{
	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cgroup_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	scan.cg = cs->css.cgroup;
	scan.test_task = cpuset_test_cpumask;
	scan.process_task = cpuset_change_cpumask;
	scan.heap = heap;
	cgroup_scan_tasks(&scan);
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	struct ptr_heap heap;
	int retval;
	int is_load_balanced;

	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
			return -EINVAL;
	}
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval)
		return retval;

	is_load_balanced = is_sched_load_balance(trialcs);

	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	/*
	 * Scan tasks in the cpuset, and update the cpumasks of any
	 * that need an update.
	 */
	update_tasks_cpumask(cs, &heap);

	heap_free(&heap);

	if (is_load_balanced)
		async_rebuild_sched_domains();
	return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set task's mems_allowed to target nodes of migration,
 *    so that the migration code can allocate pages on these nodes.
 *
 *    Call holding cgroup_mutex, so current's cpuset won't change
 *    during this call, as cgroup_mutex holds off any cpuset_attach()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: the new nodes to be set for the task
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
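/*
 * For example (hypothetical masks): when moving a task from mems 0-1
 * to mems 2-3, mems_allowed is first widened to the union 0-3, and
 * only then are the newly disallowed nodes 0-1 cleared, so a
 * concurrent allocator never observes an empty nodemask.
 */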
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	bool need_loop;

repeat:
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/*
	 * Determine if a loop is necessary if another thread is doing
	 * get_mems_allowed().  If at least one node remains unchanged and
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);
	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	/*
	 * Ensure that ->mems_allowed_change_disable is checked only after
	 * all new allowed nodes have been set.
	 *
	 * The read-side task may then see a nodemask containing both the
	 * new and the old allowed nodes, and if it allocates pages while
	 * the newly disallowed nodes are being cleared, it can still see
	 * the new allowed bits.
	 *
	 * If instead the new allowed nodes were set after the check, then
	 * setting the new nodes and clearing the newly disallowed ones
	 * could happen back to back, and the read-side task might find no
	 * node at all from which to allocate a page.
	 */
	smp_mb();

	/*
	 * Allocation of memory is very fast; we needn't sleep while waiting
	 * for the read-side.
	 */
	while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
		task_unlock(tsk);
		if (!task_curr(tsk))
			yield();
		goto repeat;
	}

	/*
	 * Ensure that ->mems_allowed_change_disable is checked before
	 * clearing the newly disallowed nodes.
	 *
	 * If the newly disallowed bits were cleared before that check, the
	 * read-side task might find no node from which to allocate a page.
	 */
	smp_mb();

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;
	task_unlock(tsk);
}

/*
 * Update a task's mems_allowed, rebind its mempolicy and its vmas'
 * mempolicies to the cpuset's new mems_allowed, and migrate pages to the
 * new nodes if the memory_migrate flag is set. Called with cgroup_mutex held.
 */
static void cpuset_change_nodemask(struct task_struct *p,
				   struct cgroup_scanner *scan)
{
	struct mm_struct *mm;
	struct cpuset *cs;
	int migrate;
	const nodemask_t *oldmem = scan->data;
	static nodemask_t newmems;	/* protected by cgroup_mutex */

	cs = cgroup_cs(scan->cg);
	guarantee_online_mems(cs, &newmems);

	cpuset_change_task_nodemask(p, &newmems);

	mm = get_task_mm(p);
	if (!mm)
		return;

	migrate = is_memory_migrate(cs);

	mpol_rebind_mm(mm, &cs->mems_allowed);
	if (migrate)
		cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
	mmput(mm);
}

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 * @oldmem: old mems_allowed of cpuset cs
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cgroup_mutex held
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
				 struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_nodemask;
	scan.heap = heap;
	scan.data = (nodemask_t *)oldmem;

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cgroup_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	cgroup_scan_tasks(&scan);

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind the task's mempolicy and any vma
 * mempolicies, and if the cpuset is marked 'memory_migrate',
 * migrate the task's pages to the new memory.
 *
 * Call with cgroup_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
{
	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
	int retval;
	struct ptr_heap heap;

	if (!oldmem)
		return -ENOMEM;

	/*
	 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
	 * it's read-only
	 */
	if (cs == &top_cpuset) {
		retval = -EACCES;
		goto done;
	}

	/*
	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
	 * Since nodelist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have memory.
	 */
	if (!*buf) {
		nodes_clear(trialcs->mems_allowed);
	} else {
		retval = nodelist_parse(buf, trialcs->mems_allowed);
		if (retval < 0)
			goto done;

		if (!nodes_subset(trialcs->mems_allowed,
				node_states[N_HIGH_MEMORY])) {
			retval = -EINVAL;
			goto done;
		}
	}
	*oldmem = cs->mems_allowed;
	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		goto done;

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval < 0)
		goto done;

	mutex_lock(&callback_mutex);
	cs->mems_allowed = trialcs->mems_allowed;
	mutex_unlock(&callback_mutex);

	update_tasks_nodemask(cs, oldmem, &heap);

	heap_free(&heap);
done:
	NODEMASK_FREE(oldmem);
	return retval;
}

int current_cpuset_is_being_rebound(void)
{
	return task_cs(current) == cpuset_being_rebound;
}

static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
#ifdef CONFIG_SMP
	if (val < -1 || val >= sched_domain_level_max)
		return -EINVAL;
#endif

	if (val != cs->relax_domain_level) {
		cs->relax_domain_level = val;
		if (!cpumask_empty(cs->cpus_allowed) &&
		    is_sched_load_balance(cs))
			async_rebuild_sched_domains();
	}

	return 0;
}

/*
 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
 * @tsk: task to be updated
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cgroup_lock() at this point.
 */
static void cpuset_change_flag(struct task_struct *tsk,
				struct cgroup_scanner *scan)
{
	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
}

/*
 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 * @cs: the cpuset in which each task's spread flags need to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cgroup_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_flag;
	scan.heap = heap;
	cgroup_scan_tasks(&scan);
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:		the bit to update (see cpuset_flagbits_t)
 * cs:		the cpuset to update
 * turning_on:	whether the flag is being set or cleared
 *
 * Call with cgroup_mutex held.
 */

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
		       int turning_on)
{
	struct cpuset *trialcs;
	int balance_flag_changed;
	int spread_flag_changed;
	struct ptr_heap heap;
	int err;

	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs)
		return -ENOMEM;

	if (turning_on)
		set_bit(bit, &trialcs->flags);
	else
		clear_bit(bit, &trialcs->flags);

	err = validate_change(cs, trialcs);
	if (err < 0)
		goto out;

	err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (err < 0)
		goto out;

	balance_flag_changed = (is_sched_load_balance(cs) !=
				is_sched_load_balance(trialcs));

	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
			|| (is_spread_page(cs) != is_spread_page(trialcs)));

	mutex_lock(&callback_mutex);
	cs->flags = trialcs->flags;
	mutex_unlock(&callback_mutex);

	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
		async_rebuild_sched_domains();

	if (spread_flag_changed)
		update_tasks_flags(cs, &heap);
	heap_free(&heap);
out:
	free_trial_cpuset(trialcs);
	return err;
}

/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stable.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
 */

#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */

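/*
 * Worked example of the decay arithmetic (values follow from the
 * constants above, nothing new): each one-second tick multiplies val
 * by FM_COEF/FM_SCALE = 0.933, and 0.933^10 ~= 0.5, which is where
 * the 10 second half-life comes from.  A steady rate of one event per
 * second adds ((FM_SCALE - FM_COEF) * 1000) / FM_SCALE = 67 per tick
 * and settles at val = 67 / (1 - 0.933) ~= 1000, i.e. N*1000 with
 * N == 1 as described above.
 */
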
/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
	fmp->cnt = 0;
	fmp->val = 0;
	fmp->time = 0;
	spin_lock_init(&fmp->lock);
}

/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
	time_t now = get_seconds();
	time_t ticks = now - fmp->time;

	if (ticks == 0)
		return;

	ticks = min(FM_MAXTICKS, ticks);
	while (ticks-- > 0)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->time = now;

	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
	fmp->cnt = 0;
}

/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);
}

/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
	int val;

	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	val = fmp->val;
	spin_unlock(&fmp->lock);
	return val;
}

/*
 * Protected by cgroup_lock. The nodemasks must be stored globally because
 * dynamically allocating them is not allowed in can_attach, and they must
 * persist until attach.
 */
static cpumask_var_t cpus_attach;
static nodemask_t cpuset_attach_nodemask_from;
static nodemask_t cpuset_attach_nodemask_to;

/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			     struct cgroup_taskset *tset)
{
	struct cpuset *cs = cgroup_cs(cgrp);
	struct task_struct *task;
	int ret;

	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
		return -ENOSPC;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/*
		 * Kthreads bound to specific cpus cannot be moved to a new
		 * cpuset; we cannot change their cpu affinity and
		 * isolating such threads by their set of allowed nodes is
		 * unnecessary.  Thus, cpusets are not applicable for such
		 * threads.  This prevents checking for success of
		 * set_cpus_allowed_ptr() on all attached tasks before
		 * cpus_allowed may be changed.
		 */
		if (task->flags & PF_THREAD_BOUND)
			return -EINVAL;
		if ((ret = security_task_setscheduler(task)))
			return ret;
	}

	/* prepare for attach */
	if (cs == &top_cpuset)
		cpumask_copy(cpus_attach, cpu_possible_mask);
	else
		guarantee_online_cpus(cs, cpus_attach);

	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);

	return 0;
}

static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			  struct cgroup_taskset *tset)
{
	struct mm_struct *mm;
	struct task_struct *task;
	struct task_struct *leader = cgroup_taskset_first(tset);
	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
	struct cpuset *cs = cgroup_cs(cgrp);
	struct cpuset *oldcs = cgroup_cs(oldcgrp);

	cgroup_taskset_for_each(task, cgrp, tset) {
		/*
		 * can_attach beforehand should guarantee that this doesn't
		 * fail.  TODO: have a better way to handle failure here
		 */
		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));

		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
		cpuset_update_task_spread_flag(cs, task);
	}

	/*
	 * Change mm, possibly for multiple threads in a threadgroup. This is
	 * expensive and may sleep.
	 */
	cpuset_attach_nodemask_from = oldcs->mems_allowed;
	cpuset_attach_nodemask_to = cs->mems_allowed;
	mm = get_task_mm(leader);
	if (mm) {
		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
		if (is_memory_migrate(cs))
			cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
					  &cpuset_attach_nodemask_to);
		mmput(mm);
	}
}

/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_MEM_HARDWALL,
	FILE_SCHED_LOAD_BALANCE,
	FILE_SCHED_RELAX_DOMAIN_LEVEL,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
} cpuset_filetype_t;

static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	int retval = 0;
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	switch (type) {
	case FILE_CPU_EXCLUSIVE:
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
		break;
	case FILE_MEM_EXCLUSIVE:
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
		break;
	case FILE_MEM_HARDWALL:
		retval = update_flag(CS_MEM_HARDWALL, cs, val);
		break;
	case FILE_SCHED_LOAD_BALANCE:
		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
		break;
	case FILE_MEMORY_MIGRATE:
		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		cpuset_memory_pressure_enabled = !!val;
		break;
	case FILE_MEMORY_PRESSURE:
		retval = -EACCES;
		break;
	case FILE_SPREAD_PAGE:
		retval = update_flag(CS_SPREAD_PAGE, cs, val);
		break;
	case FILE_SPREAD_SLAB:
		retval = update_flag(CS_SPREAD_SLAB, cs, val);
		break;
	default:
		retval = -EINVAL;
		break;
	}
	cgroup_unlock();
	return retval;
}

static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
{
	int retval = 0;
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	switch (type) {
	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		retval = update_relax_domain_level(cs, val);
		break;
	default:
		retval = -EINVAL;
		break;
	}
	cgroup_unlock();
	return retval;
}

/*
 * Common handling for a write to a "cpus" or "mems" file.
 */
static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	int retval = 0;
	struct cpuset *cs = cgroup_cs(cgrp);
	struct cpuset *trialcs;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs) {
		retval = -ENOMEM;
		goto out;
	}

	switch (cft->private) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, trialcs, buf);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, trialcs, buf);
		break;
	default:
		retval = -EINVAL;
		break;
	}

	free_trial_cpuset(trialcs);
out:
	cgroup_unlock();
	return retval;
}

/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */

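/*
 * For example, a userspace reader wanting a consistent snapshot
 * should issue one large read (sketch only; "fd" stands for an open
 * cpuset "cpus" or "mems" file):
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * rather than looping over many small reads of the same file.
 */
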
static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
	size_t count;

	mutex_lock(&callback_mutex);
	count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	return count;
}

static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
	size_t count;

	mutex_lock(&callback_mutex);
	count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
	mutex_unlock(&callback_mutex);

	return count;
}

static ssize_t cpuset_common_file_read(struct cgroup *cont,
				       struct cftype *cft,
				       struct file *file,
				       char __user *buf,
				       size_t nbytes, loff_t *ppos)
{
	struct cpuset *cs = cgroup_cs(cont);
	cpuset_filetype_t type = cft->private;
	char *page;
	ssize_t retval = 0;
	char *s;

	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	s = page;

	switch (type) {
	case FILE_CPULIST:
		s += cpuset_sprintf_cpulist(s, cs);
		break;
	case FILE_MEMLIST:
		s += cpuset_sprintf_memlist(s, cs);
		break;
	default:
		retval = -EINVAL;
		goto out;
	}
	*s++ = '\n';

	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
	free_page((unsigned long)page);
	return retval;
}
1665 
cpuset_read_u64(struct cgroup * cont,struct cftype * cft)1666 static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1667 {
1668 	struct cpuset *cs = cgroup_cs(cont);
1669 	cpuset_filetype_t type = cft->private;
1670 	switch (type) {
1671 	case FILE_CPU_EXCLUSIVE:
1672 		return is_cpu_exclusive(cs);
1673 	case FILE_MEM_EXCLUSIVE:
1674 		return is_mem_exclusive(cs);
1675 	case FILE_MEM_HARDWALL:
1676 		return is_mem_hardwall(cs);
1677 	case FILE_SCHED_LOAD_BALANCE:
1678 		return is_sched_load_balance(cs);
1679 	case FILE_MEMORY_MIGRATE:
1680 		return is_memory_migrate(cs);
1681 	case FILE_MEMORY_PRESSURE_ENABLED:
1682 		return cpuset_memory_pressure_enabled;
1683 	case FILE_MEMORY_PRESSURE:
1684 		return fmeter_getrate(&cs->fmeter);
1685 	case FILE_SPREAD_PAGE:
1686 		return is_spread_page(cs);
1687 	case FILE_SPREAD_SLAB:
1688 		return is_spread_slab(cs);
1689 	default:
1690 		BUG();
1691 	}
1692 
1693 	/* Unreachable but makes gcc happy */
1694 	return 0;
1695 }
1696 
cpuset_read_s64(struct cgroup * cont,struct cftype * cft)1697 static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1698 {
1699 	struct cpuset *cs = cgroup_cs(cont);
1700 	cpuset_filetype_t type = cft->private;
1701 	switch (type) {
1702 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1703 		return cs->relax_domain_level;
1704 	default:
1705 		BUG();
1706 	}
1707 
	/* Unreachable but makes gcc happy */
	return 0;
}


/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype files[] = {
	{
		.name = "cpus",
		.read = cpuset_common_file_read,
		.write_string = cpuset_write_resmask,
		.max_write_len = (100U + 6 * NR_CPUS),
		.private = FILE_CPULIST,
	},

	{
		.name = "mems",
		.read = cpuset_common_file_read,
		.write_string = cpuset_write_resmask,
		.max_write_len = (100U + 6 * MAX_NUMNODES),
		.private = FILE_MEMLIST,
	},

	{
		.name = "cpu_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_CPU_EXCLUSIVE,
	},

	{
		.name = "mem_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_EXCLUSIVE,
	},

	{
		.name = "mem_hardwall",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_HARDWALL,
	},

	{
		.name = "sched_load_balance",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SCHED_LOAD_BALANCE,
	},

	{
		.name = "sched_relax_domain_level",
		.read_s64 = cpuset_read_s64,
		.write_s64 = cpuset_write_s64,
		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
	},

	{
		.name = "memory_migrate",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_MIGRATE,
	},

	{
		.name = "memory_pressure",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_PRESSURE,
		.mode = S_IRUGO,
	},

	{
		.name = "memory_spread_page",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_PAGE,
	},

	{
		.name = "memory_spread_slab",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_SLAB,
	},
};

static struct cftype cft_memory_pressure_enabled = {
	.name = "memory_pressure_enabled",
	.read_u64 = cpuset_read_u64,
	.write_u64 = cpuset_write_u64,
	.private = FILE_MEMORY_PRESSURE_ENABLED,
};

static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	int err;

	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
	if (err)
		return err;
	/* memory_pressure_enabled is in root cpuset only */
	if (!cont->parent)
		err = cgroup_add_file(cont, ss,
				      &cft_memory_pressure_enabled);
	return err;
}

/*
 * post_clone() is called during cgroup_create() when the
 * clone_children mount argument was specified.  The cgroup
 * cannot yet have any tasks.
 *
 * Currently we refuse to set up the cgroup - thereby refusing to let
 * any task enter it, and as a result failing the sys_unshare() or
 * clone() which initiated the operation - if any sibling cpuset has
 * exclusive cpus or mems.
 *
 * If this becomes a problem for some users who wish to
 * allow that scenario, then cpuset_post_clone() could be
 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
 * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
 * held.
 */
static void cpuset_post_clone(struct cgroup_subsys *ss,
			      struct cgroup *cgroup)
{
	struct cgroup *parent, *child;
	struct cpuset *cs, *parent_cs;

	parent = cgroup->parent;
	list_for_each_entry(child, &parent->children, sibling) {
		cs = cgroup_cs(child);
		if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
			return;
	}
	cs = cgroup_cs(cgroup);
	parent_cs = cgroup_cs(parent);

	mutex_lock(&callback_mutex);
	cs->mems_allowed = parent_cs->mems_allowed;
	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
	mutex_unlock(&callback_mutex);
}
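
/*
 * Illustrative userspace sketch (the mount point and group name are
 * assumptions): with the cgroup "clone_children" behaviour enabled in
 * a parent, a newly created child cpuset arrives pre-populated by
 * cpuset_post_clone() above:
 *
 *	int fd = open("/dev/cpuset/cgroup.clone_children", O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 *	mkdir("/dev/cpuset/child", 0755);	(child inherits cpus/mems)
 */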

/*
 *	cpuset_create - create a cpuset
 *	ss:	cpuset cgroup subsystem
 *	cont:	control group that the new cpuset will be part of
 */

static struct cgroup_subsys_state *cpuset_create(
	struct cgroup_subsys *ss,
	struct cgroup *cont)
{
	struct cpuset *cs;
	struct cpuset *parent;

	if (!cont->parent)
		return &top_cpuset.css;

	parent = cgroup_cs(cont->parent);
	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
		kfree(cs);
		return ERR_PTR(-ENOMEM);
	}

	cs->flags = 0;
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);
	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
	cpumask_clear(cs->cpus_allowed);
	nodes_clear(cs->mems_allowed);
	fmeter_init(&cs->fmeter);
	cs->relax_domain_level = -1;

	cs->parent = parent;
	number_of_cpusets++;
	return &cs->css;
}

/*
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call async_rebuild_sched_domains().
 */

static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct cpuset *cs = cgroup_cs(cont);

	if (is_sched_load_balance(cs))
		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

	number_of_cpusets--;
	free_cpumask_var(cs->cpus_allowed);
	kfree(cs);
}

struct cgroup_subsys cpuset_subsys = {
	.name = "cpuset",
	.create = cpuset_create,
	.destroy = cpuset_destroy,
	.can_attach = cpuset_can_attach,
	.attach = cpuset_attach,
	.populate = cpuset_populate,
	.post_clone = cpuset_post_clone,
	.subsys_id = cpuset_subsys_id,
	.early_init = 1,
};

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system.
 **/

int __init cpuset_init(void)
{
	int err = 0;

	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
		BUG();

	cpumask_setall(top_cpuset.cpus_allowed);
	nodes_setall(top_cpuset.mems_allowed);

	fmeter_init(&top_cpuset.fmeter);
	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
	top_cpuset.relax_domain_level = -1;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		return err;

	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
		BUG();

	number_of_cpusets = 1;
	return 0;
}

/**
 * cpuset_do_move_task - move a given task to another cpuset
 * @tsk: pointer to the task_struct of the task to move
 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup.
 */
static void cpuset_do_move_task(struct task_struct *tsk,
				struct cgroup_scanner *scan)
{
	struct cgroup *new_cgroup = scan->data;

	cgroup_attach_task(new_cgroup, tsk);
}

/**
 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
 * @from: cpuset in which the tasks currently reside
 * @to: cpuset to which the tasks will be moved
 *
 * Called with cgroup_mutex held
 * callback_mutex must not be held, as cpuset_attach() will take it.
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 */
static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
{
	struct cgroup_scanner scan;

	scan.cg = from->css.cgroup;
	scan.test_task = NULL; /* select all tasks in cgroup */
	scan.process_task = cpuset_do_move_task;
	scan.heap = NULL;
	scan.data = to->css.cgroup;

	if (cgroup_scan_tasks(&scan))
		printk(KERN_ERR "move_member_tasks_to_cpuset: "
				"cgroup_scan_tasks failed\n");
}

/*
 * If the CPU and/or memory hotplug handlers, below, unplug any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then move the tasks in the empty
 * cpuset to its next-highest non-empty parent.
 *
 * Called with cgroup_mutex held
 * callback_mutex must not be held, as cpuset_attach() will take it.
 */
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
{
	struct cpuset *parent;

	/*
	 * The cgroup's css_sets list is in use if there are tasks
	 * in the cpuset; the list is empty if there are none;
	 * the cs->css.refcnt seems always 0.
	 */
	if (list_empty(&cs->css.cgroup->css_sets))
		return;

	/*
	 * Find its next-highest non-empty parent (the top cpuset
	 * has online cpus, so it can't be empty).
	 */
	parent = cs->parent;
	while (cpumask_empty(parent->cpus_allowed) ||
			nodes_empty(parent->mems_allowed))
		parent = parent->parent;

	move_member_tasks_to_cpuset(cs, parent);
}

/*
 * Walk the specified cpuset subtree and look for empty cpusets.
 * The tasks of such a cpuset must be moved to a parent cpuset.
 *
 * Called with cgroup_mutex held.  We take callback_mutex to modify
 * cpus_allowed and mems_allowed.
 *
 * This walk processes the tree from top to bottom, completing one layer
 * before dropping down to the next.  It always processes a node before
 * any of its children.
 *
 * For now, since we lack memory hot unplug, we'll never see a cpuset
 * that has tasks along with an empty 'mems'.  But if we did see such
 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
 */
static void scan_for_empty_cpusets(struct cpuset *root)
{
	LIST_HEAD(queue);
	struct cpuset *cp;	/* scans cpusets being updated */
	struct cpuset *child;	/* scans child cpusets of cp */
	struct cgroup *cont;
	static nodemask_t oldmems;	/* protected by cgroup_mutex */

	list_add_tail((struct list_head *)&root->stack_list, &queue);

	while (!list_empty(&queue)) {
		cp = list_first_entry(&queue, struct cpuset, stack_list);
		list_del(queue.next);
		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &queue);
		}

		/* Continue past cpusets with all cpus, mems online */
		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
			continue;

		oldmems = cp->mems_allowed;

		/* Remove offline cpus and mems from this cpuset. */
		mutex_lock(&callback_mutex);
		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
			    cpu_active_mask);
		nodes_and(cp->mems_allowed, cp->mems_allowed,
						node_states[N_HIGH_MEMORY]);
		mutex_unlock(&callback_mutex);

		/* Move tasks from the empty cpuset to a parent */
		if (cpumask_empty(cp->cpus_allowed) ||
		     nodes_empty(cp->mems_allowed))
			remove_tasks_in_empty_cpuset(cp);
		else {
			update_tasks_cpumask(cp, NULL);
			update_tasks_nodemask(cp, &oldmems, NULL);
		}
	}
}

/*
 * The top_cpuset tracks what CPUs and Memory Nodes are online,
 * period.  This is necessary in order to make cpusets transparent
 * (of no effect) on systems that are actively using CPU hotplug
 * but making no active use of cpusets.
 *
 * This routine ensures that top_cpuset.cpus_allowed tracks
 * cpu_active_mask on each CPU hotplug (cpuhp) event.
 *
 * Called within get_online_cpus().  Needs to call cgroup_lock()
 * before calling generate_sched_domains().
 */
void cpuset_update_active_cpus(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	cgroup_lock();
	mutex_lock(&callback_mutex);
	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
	mutex_unlock(&callback_mutex);
	scan_for_empty_cpusets(&top_cpuset);
	ndoms = generate_sched_domains(&doms, &attr);
	cgroup_unlock();

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
}
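
/*
 * Illustrative caller sketch (an assumption about the hook site --
 * in kernels of this era the scheduler's CPU hotplug notifiers in
 * kernel/sched.c invoke this routine on activate/deactivate events):
 *
 *	static int cpuset_cpu_active(struct notifier_block *nfb,
 *				     unsigned long action, void *hcpu)
 *	{
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			cpuset_update_active_cpus();
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 */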

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
 * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
 * See also cpuset_update_active_cpus(), above, which does the same
 * for CPUs.
 */
static int cpuset_track_online_nodes(struct notifier_block *self,
				unsigned long action, void *arg)
{
	static nodemask_t oldmems;	/* protected by cgroup_mutex */

	cgroup_lock();
	switch (action) {
	case MEM_ONLINE:
		oldmems = top_cpuset.mems_allowed;
		mutex_lock(&callback_mutex);
		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
		mutex_unlock(&callback_mutex);
		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
		break;
	case MEM_OFFLINE:
		/*
		 * needn't update top_cpuset.mems_allowed explicitly because
		 * scan_for_empty_cpusets() will update it.
		 */
		scan_for_empty_cpusets(&top_cpuset);
		break;
	default:
		break;
	}
	cgroup_unlock();

	return NOTIFY_OK;
}
#endif

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized.
 **/

void __init cpuset_init_smp(void)
{
	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];

	hotplug_memory_notifier(cpuset_track_online_nodes, 10);

	cpuset_wq = create_singlethread_workqueue("cpuset");
	BUG_ON(!cpuset_wq);
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * task's cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_cpus(task_cs(tsk), pmask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);
}

int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	const struct cpuset *cs;
	int cpu;

	rcu_read_lock();
	cs = task_cs(tsk);
	if (cs)
		do_set_cpus_allowed(tsk, cs->cpus_allowed);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed locklessly and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 */

	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
	if (cpu >= nr_cpu_ids) {
		/*
		 * Either tsk->cpus_allowed is wrong (see above) or it
		 * is actually empty. The latter case is only possible
		 * if we are racing with remove_tasks_in_empty_cpuset().
		 * Like above we can temporarily set any mask and rely on
		 * set_cpus_allowed_ptr() as the synchronization point.
		 */
		do_set_cpus_allowed(tsk, cpu_possible_mask);
		cpu = cpumask_any(cpu_active_mask);
	}

	return cpu;
}

void cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;

	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_mems(task_cs(tsk), &mask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);

	return mask;
}

/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}

/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset.  Call holding
 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
		cs = cs->parent;
	return cs;
}
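
/*
 * Worked example: in a hierarchy  top -> A (mem_hardwall) -> B -> C,
 * nearest_hardwall_ancestor(C) walks C -> B -> A and returns A.  If
 * no ancestor is mem_exclusive or mem_hardwall, the walk stops at the
 * root and top_cpuset itself is returned.
 */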

/**
 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
 * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
 * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
 * flag, yes.
 * Otherwise, no.
 *
 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
 * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
 * might sleep, and might allow a node from an enclosing cpuset.
 *
 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
 * cpusets, and never sleeps.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed and is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_mutex.  The
 * __alloc_pages() routine only calls here with the __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking callback_mutex.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	TIF_MEMDIE   - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 *
 * Rule:
 *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
 *    pass in the __GFP_HARDWALL flag set in gfp_mask, which disables
 *    the code that might scan up ancestor cpusets and sleep.
 */
int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	const struct cpuset *cs;	/* current cpuset ancestors */
	int allowed;			/* is allocation on @node allowed? */

	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
		return 1;
	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
	if (node_isset(node, current->mems_allowed))
		return 1;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return 0;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return 1;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	mutex_lock(&callback_mutex);

	task_lock(current);
	cs = nearest_hardwall_ancestor(task_cs(current));
	task_unlock(current);

	allowed = node_isset(node, cs->mems_allowed);
	mutex_unlock(&callback_mutex);
	return allowed;
}
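
/*
 * Illustrative caller sketch (an assumption about the call site -- in
 * kernels of this era the zonelist scan in mm/page_alloc.c consults
 * the cpuset_zone_allowed_softwall() wrapper, which forwards to
 * __cpuset_node_allowed_softwall() for the zone's node):
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 *					high_zoneidx, nodemask) {
 *		if ((alloc_flags & ALLOC_CPUSET) &&
 *		    !cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;	(skip zones on disallowed nodes)
 *		...
 *	}
 */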

/*
 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
 * yes.  If the task has been OOM killed and has access to memory reserves as
 * specified by the TIF_MEMDIE flag, yes.
 * Otherwise, no.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * Unlike the cpuset_node_allowed_softwall() variant, above,
 * this variant requires that the node be in the current task's
 * mems_allowed or that we're in interrupt.  It does not scan up the
 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 * It never sleeps.
 */
int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
		return 1;
	if (node_isset(node, current->mems_allowed))
		return 1;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;
	return 0;
}

/**
 * cpuset_unlock - release lock on cpuset changes
 *
 * Undo the lock taken in a previous cpuset_lock() call.
 */

void cpuset_unlock(void)
{
	mutex_unlock(&callback_mutex);
}

/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 * cpuset_slab_spread_node() - On which node to begin search for a slab page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, the starting node is rotated
 * around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

static int cpuset_spread_node(int *rotor)
{
	int node;

	node = next_node(*rotor, current->mems_allowed);
	if (node == MAX_NUMNODES)
		node = first_node(current->mems_allowed);
	*rotor = node;
	return node;
}
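
/*
 * Worked example: with mems_allowed = {1,3,5} and *rotor == 3,
 * next_node() returns 5; on the following call there is no node
 * above 5, next_node() returns MAX_NUMNODES, and we wrap to
 * first_node(), node 1.  Successive calls thus cycle 1 -> 3 -> 5 -> 1.
 */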

int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

int cpuset_slab_spread_node(void)
{
	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
		current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}
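
/*
 * Illustrative caller sketch (an assumption about the call site --
 * page cache allocation in mm/filemap.c of this era spreads file
 * pages this way when the task has PF_SPREAD_PAGE set):
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */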

EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 * one task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

/**
 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
 * @tsk: pointer to task_struct of some task.
 *
 * Description: Prints @tsk's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.  Must hold task_lock(tsk) to allow
 * dereferencing task_cs(tsk).
 */
void cpuset_print_task_mems_allowed(struct task_struct *tsk)
{
	struct dentry *dentry;

	dentry = task_cs(tsk)->css.cgroup->dentry;
	spin_lock(&cpuset_buffer_lock);
	snprintf(cpuset_name, CPUSET_NAME_LEN, "%s",
		 dentry ? (const char *)dentry->d_name.name : "/");
	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
			   tsk->mems_allowed);
	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
	       tsk->comm, cpuset_name, cpuset_nodelist);
	spin_unlock(&cpuset_buffer_lock);
}
/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
	task_lock(current);
	fmeter_markevent(&task_cs(current)->fmeter);
	task_unlock(current);
}
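
/*
 * Illustrative wrapper sketch (an assumption about the header-side
 * helper -- include/linux/cpuset.h of this era provides an inline
 * that checks the enable flag before taking the locks above):
 *
 *	static inline void cpuset_memory_pressure_bump(void)
 *	{
 *		if (cpuset_memory_pressure_enabled)
 *			__cpuset_memory_pressure_bump();
 *	}
 *
 * Direct reclaim entry points such as try_to_free_pages() call the
 * wrapper, so the fmeter only ticks when accounting is enabled.
 */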

#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
static int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	cgroup_lock();
	css = task_subsys_state(tsk, cpuset_subsys_id);
	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
	if (retval < 0)
		goto out_unlock;
	seq_puts(m, buf);
	seq_putc(m, '\n');
out_unlock:
	cgroup_unlock();
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
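
/*
 * Example: "cat /proc/self/cpuset" prints the cgroup path of the
 * calling task, e.g. "/" for a task in the root cpuset, or
 * "/mygroup" (illustrative name) for a task attached to a child
 * cpuset.
 */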

static int cpuset_open(struct inode *inode, struct file *file)
{
	struct pid *pid = PROC_I(inode)->pid;
	return single_open(file, proc_cpuset_show, pid);
}

const struct file_operations proc_cpuset_operations = {
	.open		= cpuset_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_PID_CPUSET */

/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t");
	seq_nodemask(m, &task->mems_allowed);
	seq_printf(m, "\n");
	seq_printf(m, "Mems_allowed_list:\t");
	seq_nodemask_list(m, &task->mems_allowed);
	seq_printf(m, "\n");
}
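
/*
 * Example output in /proc/<pid>/status (illustrative values for a
 * two-node machine with both nodes allowed):
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 */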