1 /*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24 #include "cpuset-internal.h"
25
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/kernel.h>
29 #include <linux/mempolicy.h>
30 #include <linux/mm.h>
31 #include <linux/memory.h>
32 #include <linux/export.h>
33 #include <linux/rcupdate.h>
34 #include <linux/sched.h>
35 #include <linux/sched/deadline.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/task.h>
38 #include <linux/security.h>
39 #include <linux/oom.h>
40 #include <linux/sched/isolation.h>
41 #include <linux/wait.h>
42 #include <linux/workqueue.h>
43
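/*
 * Static keys that let hot paths cheaply test whether any non-default
 * cpusets are in use; they are toggled via cpuset_inc()/cpuset_dec()
 * in include/linux/cpuset.h.
 */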
44 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
45 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
46
47 /*
48  * There could be abnormal cpuset configurations for cpu or memory
49  * node binding; this key provides a quick, low-cost way to check for
50  * such a situation.
51 */
52 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
53
54 static const char * const perr_strings[] = {
55 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
56 [PERR_INVPARENT] = "Parent is an invalid partition root",
57 [PERR_NOTPART] = "Parent is not a partition root",
58 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
59 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
60 [PERR_HOTPLUG] = "No cpu available due to hotplug",
61 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
62 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
63 [PERR_ACCESS] = "Enable partition not permitted",
64 [PERR_REMOTE] = "Have remote partition underneath",
65 };
66
67 /*
68  * For local partitions, updates to subpartitions_cpus & isolated_cpus are done
69  * in update_parent_effective_cpumask(). For remote partitions, they are done in
70 * the remote_partition_*() and remote_cpus_update() helpers.
71 */
72 /*
73 * Exclusive CPUs distributed out to local or remote sub-partitions of
74 * top_cpuset
75 */
76 static cpumask_var_t subpartitions_cpus;
77
78 /*
79 * Exclusive CPUs in isolated partitions
80 */
81 static cpumask_var_t isolated_cpus;
82
83 /*
84 * Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
85 */
86 static cpumask_var_t boot_hk_cpus;
87 static bool have_boot_isolcpus;
88
89 /* List of remote partition root children */
90 static struct list_head remote_children;
91
92 /*
93 * A flag to force sched domain rebuild at the end of an operation.
94 * It can be set in
95 * - update_partition_sd_lb()
96 * - update_cpumasks_hier()
97 * - cpuset_update_flag()
98 * - cpuset_hotplug_update_tasks()
99 * - cpuset_handle_hotplug()
100 *
101 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
102 *
103 * Note that update_relax_domain_level() in cpuset-v1.c can still call
104 * rebuild_sched_domains_locked() directly without using this flag.
105 */
106 static bool force_sd_rebuild;
107
108 /*
109 * Partition root states:
110 *
111 * 0 - member (not a partition root)
112 * 1 - partition root
113 * 2 - partition root without load balancing (isolated)
114 * -1 - invalid partition root
115 * -2 - invalid isolated partition root
116 *
117  * There are 2 types of partitions - local or remote. Local partitions are
118  * those whose parents are partition roots themselves. Setting
119  * cpuset.cpus.exclusive is optional when setting up local partitions.
120  * Remote partitions are those whose parents are not partition roots. Passing
121  * down exclusive CPUs by setting cpuset.cpus.exclusive along the ancestor
122  * nodes is mandatory when creating a remote partition.
123 *
124 * For simplicity, a local partition can be created under a local or remote
125 * partition but a remote partition cannot have any partition root in its
126 * ancestor chain except the cgroup root.
127 */
128 #define PRS_MEMBER 0
129 #define PRS_ROOT 1
130 #define PRS_ISOLATED 2
131 #define PRS_INVALID_ROOT -1
132 #define PRS_INVALID_ISOLATED -2
133
134 static inline bool is_prs_invalid(int prs_state)
135 {
136 return prs_state < 0;
137 }
138
139 /*
140  * Temporary cpumasks used when working with partitions; they are passed
141  * among functions to avoid memory allocation in inner functions.
142 */
143 struct tmpmasks {
144 cpumask_var_t addmask, delmask; /* For partition root */
145 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
146 };
147
148 void inc_dl_tasks_cs(struct task_struct *p)
149 {
150 struct cpuset *cs = task_cs(p);
151
152 cs->nr_deadline_tasks++;
153 }
154
155 void dec_dl_tasks_cs(struct task_struct *p)
156 {
157 struct cpuset *cs = task_cs(p);
158
159 cs->nr_deadline_tasks--;
160 }
161
162 static inline int is_partition_valid(const struct cpuset *cs)
163 {
164 return cs->partition_root_state > 0;
165 }
166
167 static inline int is_partition_invalid(const struct cpuset *cs)
168 {
169 return cs->partition_root_state < 0;
170 }
171
172 /*
173 * Callers should hold callback_lock to modify partition_root_state.
174 */
175 static inline void make_partition_invalid(struct cpuset *cs)
176 {
177 if (cs->partition_root_state > 0)
178 cs->partition_root_state = -cs->partition_root_state;
179 }
180
181 /*
182  * Send a notification event whenever partition_root_state changes.
183 */
184 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
185 {
186 if (old_prs == cs->partition_root_state)
187 return;
188 cgroup_file_notify(&cs->partition_file);
189
190 /* Reset prs_err if not invalid */
191 if (is_partition_valid(cs))
192 WRITE_ONCE(cs->prs_err, PERR_NONE);
193 }
194
195 static struct cpuset top_cpuset = {
196 .flags = BIT(CS_ONLINE) | BIT(CS_CPU_EXCLUSIVE) |
197 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
198 .partition_root_state = PRS_ROOT,
199 .relax_domain_level = -1,
200 .remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
201 };
202
203 /*
204 * There are two global locks guarding cpuset structures - cpuset_mutex and
205 * callback_lock. The cpuset code uses only cpuset_mutex. Other kernel
206 * subsystems can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
207 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
208 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
209 * correctness.
210 *
211 * A task must hold both locks to modify cpusets. If a task holds
212 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
213 * also acquire callback_lock and be able to modify cpusets. It can perform
214 * various checks on the cpuset structure first, knowing nothing will change.
215 * It can also allocate memory while just holding cpuset_mutex. While it is
216 * performing these checks, various callback routines can briefly acquire
217 * callback_lock to query cpusets. Once it is ready to make the changes, it
218 * takes callback_lock, blocking everyone else.
219 *
220 * Calls to the kernel memory allocator can not be made while holding
221 * callback_lock, as that would risk double tripping on callback_lock
222 * from one of the callbacks into the cpuset code from within
223 * __alloc_pages().
224 *
225 * If a task is only holding callback_lock, then it has read-only
226 * access to cpusets.
227 *
228  * The task_struct fields mems_allowed and mempolicy may be changed
229  * by another task, so we use alloc_lock in the task_struct to protect
230  * them.
231 *
232 * The cpuset_common_seq_show() handlers only hold callback_lock across
233 * small pieces of code, such as when reading out possibly multi-word
234 * cpumasks and nodemasks.
235 */
236
237 static DEFINE_MUTEX(cpuset_mutex);
238
239 void cpuset_lock(void)
240 {
241 mutex_lock(&cpuset_mutex);
242 }
243
244 void cpuset_unlock(void)
245 {
246 mutex_unlock(&cpuset_mutex);
247 }
248
249 static DEFINE_SPINLOCK(callback_lock);
250
251 void cpuset_callback_lock_irq(void)
252 {
253 spin_lock_irq(&callback_lock);
254 }
255
256 void cpuset_callback_unlock_irq(void)
257 {
258 spin_unlock_irq(&callback_lock);
259 }
260
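/* Workqueue used by cpuset_migrate_mm() to migrate an mm's memory asynchronously */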
261 static struct workqueue_struct *cpuset_migrate_mm_wq;
262
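/* Woken up whenever a cpuset's attach_in_progress count drops to zero */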
263 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
264
265 static inline void check_insane_mems_config(nodemask_t *nodes)
266 {
267 if (!cpusets_insane_config() &&
268 movable_only_nodes(nodes)) {
269 static_branch_enable(&cpusets_insane_config_key);
270 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
271 "Cpuset allocations might fail even with a lot of memory available.\n",
272 nodemask_pr_args(nodes));
273 }
274 }
275
276 /*
277  * Decrease cs->attach_in_progress and wake up cpuset_attach_wq
278  * when cs->attach_in_progress drops to 0.
279 */
280 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
281 {
282 lockdep_assert_held(&cpuset_mutex);
283
284 cs->attach_in_progress--;
285 if (!cs->attach_in_progress)
286 wake_up(&cpuset_attach_wq);
287 }
288
289 static inline void dec_attach_in_progress(struct cpuset *cs)
290 {
291 mutex_lock(&cpuset_mutex);
292 dec_attach_in_progress_locked(cs);
293 mutex_unlock(&cpuset_mutex);
294 }
295
296 static inline bool cpuset_v2(void)
297 {
298 return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
299 cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
300 }
301
302 /*
303 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
304 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
305 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
306 * With v2 behavior, "cpus" and "mems" are always what the users have
307 * requested and won't be changed by hotplug events. Only the effective
308 * cpus or mems will be affected.
309 */
310 static inline bool is_in_v2_mode(void)
311 {
312 return cpuset_v2() ||
313 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
314 }
315
316 /**
317 * partition_is_populated - check if partition has tasks
318 * @cs: partition root to be checked
319 * @excluded_child: a child cpuset to be excluded in task checking
320 * Return: true if there are tasks, false otherwise
321 *
322 * It is assumed that @cs is a valid partition root. @excluded_child should
323 * be non-NULL when this cpuset is going to become a partition itself.
324 */
325 static inline bool partition_is_populated(struct cpuset *cs,
326 struct cpuset *excluded_child)
327 {
328 struct cgroup_subsys_state *css;
329 struct cpuset *child;
330
331 if (cs->css.cgroup->nr_populated_csets)
332 return true;
333 if (!excluded_child && !cs->nr_subparts)
334 return cgroup_is_populated(cs->css.cgroup);
335
336 rcu_read_lock();
337 cpuset_for_each_child(child, css, cs) {
338 if (child == excluded_child)
339 continue;
340 if (is_partition_valid(child))
341 continue;
342 if (cgroup_is_populated(child->css.cgroup)) {
343 rcu_read_unlock();
344 return true;
345 }
346 }
347 rcu_read_unlock();
348 return false;
349 }
350
351 /*
352  * Return in pmask the portion of a task's cpuset's cpus_allowed that
353 * are online and are capable of running the task. If none are found,
354 * walk up the cpuset hierarchy until we find one that does have some
355 * appropriate cpus.
356 *
357 * One way or another, we guarantee to return some non-empty subset
358 * of cpu_online_mask.
359 *
360 * Call with callback_lock or cpuset_mutex held.
361 */
362 static void guarantee_online_cpus(struct task_struct *tsk,
363 struct cpumask *pmask)
364 {
365 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
366 struct cpuset *cs;
367
368 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
369 cpumask_copy(pmask, cpu_online_mask);
370
371 rcu_read_lock();
372 cs = task_cs(tsk);
373
374 while (!cpumask_intersects(cs->effective_cpus, pmask))
375 cs = parent_cs(cs);
376
377 cpumask_and(pmask, pmask, cs->effective_cpus);
378 rcu_read_unlock();
379 }
380
381 /*
382  * Return in *pmask the portion of a cpuset's mems_allowed that
383 * are online, with memory. If none are online with memory, walk
384 * up the cpuset hierarchy until we find one that does have some
385 * online mems. The top cpuset always has some mems online.
386 *
387 * One way or another, we guarantee to return some non-empty subset
388 * of node_states[N_MEMORY].
389 *
390 * Call with callback_lock or cpuset_mutex held.
391 */
392 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
393 {
394 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
395 cs = parent_cs(cs);
396 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
397 }
398
399 /**
400  * alloc_cpumasks - allocate cpumasks for a cpuset or a tmpmasks structure
401  * @cs: the cpuset that has cpumasks to be allocated.
402 * @tmp: the tmpmasks structure pointer
403 * Return: 0 if successful, -ENOMEM otherwise.
404 *
405 * Only one of the two input arguments should be non-NULL.
406 */
407 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
408 {
409 cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4;
410
411 if (cs) {
412 pmask1 = &cs->cpus_allowed;
413 pmask2 = &cs->effective_cpus;
414 pmask3 = &cs->effective_xcpus;
415 pmask4 = &cs->exclusive_cpus;
416 } else {
417 pmask1 = &tmp->new_cpus;
418 pmask2 = &tmp->addmask;
419 pmask3 = &tmp->delmask;
420 pmask4 = NULL;
421 }
422
423 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
424 return -ENOMEM;
425
426 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
427 goto free_one;
428
429 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
430 goto free_two;
431
432 if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL))
433 goto free_three;
434
435
436 return 0;
437
438 free_three:
439 free_cpumask_var(*pmask3);
440 free_two:
441 free_cpumask_var(*pmask2);
442 free_one:
443 free_cpumask_var(*pmask1);
444 return -ENOMEM;
445 }
446
447 /**
448  * free_cpumasks - free cpumasks in a cpuset and/or a tmpmasks structure
449  * @cs: the cpuset that has cpumasks to be freed.
450 * @tmp: the tmpmasks structure pointer
451 */
452 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
453 {
454 if (cs) {
455 free_cpumask_var(cs->cpus_allowed);
456 free_cpumask_var(cs->effective_cpus);
457 free_cpumask_var(cs->effective_xcpus);
458 free_cpumask_var(cs->exclusive_cpus);
459 }
460 if (tmp) {
461 free_cpumask_var(tmp->new_cpus);
462 free_cpumask_var(tmp->addmask);
463 free_cpumask_var(tmp->delmask);
464 }
465 }
466
467 /**
468 * alloc_trial_cpuset - allocate a trial cpuset
469 * @cs: the cpuset that the trial cpuset duplicates
470 */
471 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
472 {
473 struct cpuset *trial;
474
475 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
476 if (!trial)
477 return NULL;
478
479 if (alloc_cpumasks(trial, NULL)) {
480 kfree(trial);
481 return NULL;
482 }
483
484 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
485 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
486 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
487 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
488 return trial;
489 }
490
491 /**
492 * free_cpuset - free the cpuset
493 * @cs: the cpuset to be freed
494 */
495 static inline void free_cpuset(struct cpuset *cs)
496 {
497 free_cpumasks(cs, NULL);
498 kfree(cs);
499 }
500
501 /* Return user specified exclusive CPUs */
502 static inline struct cpumask *user_xcpus(struct cpuset *cs)
503 {
504 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
505 : cs->exclusive_cpus;
506 }
507
508 static inline bool xcpus_empty(struct cpuset *cs)
509 {
510 return cpumask_empty(cs->cpus_allowed) &&
511 cpumask_empty(cs->exclusive_cpus);
512 }
513
514 /*
515 * cpusets_are_exclusive() - check if two cpusets are exclusive
516 *
517 * Return true if exclusive, false if not
518 */
519 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
520 {
521 struct cpumask *xcpus1 = user_xcpus(cs1);
522 struct cpumask *xcpus2 = user_xcpus(cs2);
523
524 if (cpumask_intersects(xcpus1, xcpus2))
525 return false;
526 return true;
527 }
528
529 /*
530 * validate_change() - Used to validate that any proposed cpuset change
531 * follows the structural rules for cpusets.
532 *
533 * If we replaced the flag and mask values of the current cpuset
534 * (cur) with those values in the trial cpuset (trial), would
535 * our various subset and exclusive rules still be valid? Presumes
536 * cpuset_mutex held.
537 *
538 * 'cur' is the address of an actual, in-use cpuset. Operations
539 * such as list traversal that depend on the actual address of the
540 * cpuset in the list must use cur below, not trial.
541 *
542 * 'trial' is the address of bulk structure copy of cur, with
543 * perhaps one or more of the fields cpus_allowed, mems_allowed,
544 * or flags changed to new, trial values.
545 *
546 * Return 0 if valid, -errno if not.
547 */
548
549 static int validate_change(struct cpuset *cur, struct cpuset *trial)
550 {
551 struct cgroup_subsys_state *css;
552 struct cpuset *c, *par;
553 int ret = 0;
554
555 rcu_read_lock();
556
557 if (!is_in_v2_mode())
558 ret = cpuset1_validate_change(cur, trial);
559 if (ret)
560 goto out;
561
562 /* Remaining checks don't apply to root cpuset */
563 if (cur == &top_cpuset)
564 goto out;
565
566 par = parent_cs(cur);
567
568 /*
569 * Cpusets with tasks - existing or newly being attached - can't
570 * be changed to have empty cpus_allowed or mems_allowed.
571 */
572 ret = -ENOSPC;
573 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
574 if (!cpumask_empty(cur->cpus_allowed) &&
575 cpumask_empty(trial->cpus_allowed))
576 goto out;
577 if (!nodes_empty(cur->mems_allowed) &&
578 nodes_empty(trial->mems_allowed))
579 goto out;
580 }
581
582 /*
583 * We can't shrink if we won't have enough room for SCHED_DEADLINE
584 * tasks. This check is not done when scheduling is disabled as the
585 * users should know what they are doing.
586 *
587 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
588 * cpus_allowed.
589 *
590 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
591 * for non-isolated partition root. At this point, the target
592 * effective_cpus isn't computed yet. user_xcpus() is the best
593 * approximation.
594 *
595 * TBD: May need to precompute the real effective_cpus here in case
596 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
597 * becomes an issue.
598 */
599 ret = -EBUSY;
600 if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
601 !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
602 goto out;
603
604 /*
605 * If either I or some sibling (!= me) is exclusive, we can't
606 * overlap. exclusive_cpus cannot overlap with each other if set.
607 */
608 ret = -EINVAL;
609 cpuset_for_each_child(c, css, par) {
610 bool txset, cxset; /* Are exclusive_cpus set? */
611
612 if (c == cur)
613 continue;
614
615 txset = !cpumask_empty(trial->exclusive_cpus);
616 cxset = !cpumask_empty(c->exclusive_cpus);
617 if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) ||
618 (txset && cxset)) {
619 if (!cpusets_are_exclusive(trial, c))
620 goto out;
621 } else if (txset || cxset) {
622 struct cpumask *xcpus, *acpus;
623
624 /*
625 * When just one of the exclusive_cpus's is set,
626 * cpus_allowed of the other cpuset, if set, cannot be
627 * a subset of it or none of those CPUs will be
628 * available if these exclusive CPUs are activated.
629 */
630 if (txset) {
631 xcpus = trial->exclusive_cpus;
632 acpus = c->cpus_allowed;
633 } else {
634 xcpus = c->exclusive_cpus;
635 acpus = trial->cpus_allowed;
636 }
637 if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus))
638 goto out;
639 }
640 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
641 nodes_intersects(trial->mems_allowed, c->mems_allowed))
642 goto out;
643 }
644
645 ret = 0;
646 out:
647 rcu_read_unlock();
648 return ret;
649 }
650
651 #ifdef CONFIG_SMP
652 /*
653 * Helper routine for generate_sched_domains().
654  * Do cpusets a, b have overlapping effective_cpus masks?
655 */
656 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
657 {
658 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
659 }
660
661 static void
662 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
663 {
664 if (dattr->relax_domain_level < c->relax_domain_level)
665 dattr->relax_domain_level = c->relax_domain_level;
666 return;
667 }
668
669 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
670 struct cpuset *root_cs)
671 {
672 struct cpuset *cp;
673 struct cgroup_subsys_state *pos_css;
674
675 rcu_read_lock();
676 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
677 /* skip the whole subtree if @cp doesn't have any CPU */
678 if (cpumask_empty(cp->cpus_allowed)) {
679 pos_css = css_rightmost_descendant(pos_css);
680 continue;
681 }
682
683 if (is_sched_load_balance(cp))
684 update_domain_attr(dattr, cp);
685 }
686 rcu_read_unlock();
687 }
688
689 /* Must be called with cpuset_mutex held. */
690 static inline int nr_cpusets(void)
691 {
692 /* jump label reference count + the top-level cpuset */
693 return static_key_count(&cpusets_enabled_key.key) + 1;
694 }
695
696 /*
697 * generate_sched_domains()
698 *
699  * This function builds a partial partition of the system's CPUs.
700 * A 'partial partition' is a set of non-overlapping subsets whose
701 * union is a subset of that set.
702 * The output of this function needs to be passed to kernel/sched/core.c
703 * partition_sched_domains() routine, which will rebuild the scheduler's
704 * load balancing domains (sched domains) as specified by that partial
705 * partition.
706 *
707 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
708 * for a background explanation of this.
709 *
710 * Does not return errors, on the theory that the callers of this
711 * routine would rather not worry about failures to rebuild sched
712 * domains when operating in the severe memory shortage situations
713 * that could cause allocation failures below.
714 *
715 * Must be called with cpuset_mutex held.
716 *
717 * The three key local variables below are:
718 * cp - cpuset pointer, used (together with pos_css) to perform a
719 * top-down scan of all cpusets. For our purposes, rebuilding
720  * the scheduler's sched domains, we can ignore
721  * !is_sched_load_balance cpusets.
722 * csa - (for CpuSet Array) Array of pointers to all the cpusets
723 * that need to be load balanced, for convenient iterative
724 * access by the subsequent code that finds the best partition,
725  * i.e. the set of domains (subsets) of CPUs such that the
726 * cpus_allowed of every cpuset marked is_sched_load_balance
727 * is a subset of one of these domains, while there are as
728 * many such domains as possible, each as small as possible.
729 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
730 * the kernel/sched/core.c routine partition_sched_domains() in a
731 * convenient format, that can be easily compared to the prior
732 * value to determine what partition elements (sched domains)
733 * were changed (added or removed.)
734 *
735 * Finding the best partition (set of domains):
736 * The double nested loops below over i, j scan over the load
737 * balanced cpusets (using the array of cpuset pointers in csa[])
738 * looking for pairs of cpusets that have overlapping cpus_allowed
739 * and merging them using a union-find algorithm.
740 *
741 * The union of the cpus_allowed masks from the set of all cpusets
742 * having the same root then form the one element of the partition
743 * (one sched domain) to be passed to partition_sched_domains().
744 *
745 */
746 static int generate_sched_domains(cpumask_var_t **domains,
747 struct sched_domain_attr **attributes)
748 {
749 struct cpuset *cp; /* top-down scan of cpusets */
750 struct cpuset **csa; /* array of all cpuset ptrs */
751 int csn; /* how many cpuset ptrs in csa so far */
752 int i, j; /* indices for partition finding loops */
753 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
754 struct sched_domain_attr *dattr; /* attributes for custom domains */
755 int ndoms = 0; /* number of sched domains in result */
756 int nslot; /* next empty doms[] struct cpumask slot */
757 struct cgroup_subsys_state *pos_css;
758 bool root_load_balance = is_sched_load_balance(&top_cpuset);
759 bool cgrpv2 = cpuset_v2();
760 int nslot_update;
761
762 doms = NULL;
763 dattr = NULL;
764 csa = NULL;
765
766 /* Special case for the 99% of systems with one, full, sched domain */
767 if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
768 single_root_domain:
769 ndoms = 1;
770 doms = alloc_sched_domains(ndoms);
771 if (!doms)
772 goto done;
773
774 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
775 if (dattr) {
776 *dattr = SD_ATTR_INIT;
777 update_domain_attr_tree(dattr, &top_cpuset);
778 }
779 cpumask_and(doms[0], top_cpuset.effective_cpus,
780 housekeeping_cpumask(HK_TYPE_DOMAIN));
781
782 goto done;
783 }
784
785 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
786 if (!csa)
787 goto done;
788 csn = 0;
789
790 rcu_read_lock();
791 if (root_load_balance)
792 csa[csn++] = &top_cpuset;
793 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
794 if (cp == &top_cpuset)
795 continue;
796
797 if (cgrpv2)
798 goto v2;
799
800 /*
801 * v1:
802 * Continue traversing beyond @cp iff @cp has some CPUs and
803 * isn't load balancing. The former is obvious. The
804 * latter: All child cpusets contain a subset of the
805 * parent's cpus, so just skip them, and then we call
806 * update_domain_attr_tree() to calc relax_domain_level of
807 * the corresponding sched domain.
808 */
809 if (!cpumask_empty(cp->cpus_allowed) &&
810 !(is_sched_load_balance(cp) &&
811 cpumask_intersects(cp->cpus_allowed,
812 housekeeping_cpumask(HK_TYPE_DOMAIN))))
813 continue;
814
815 if (is_sched_load_balance(cp) &&
816 !cpumask_empty(cp->effective_cpus))
817 csa[csn++] = cp;
818
819 /* skip @cp's subtree */
820 pos_css = css_rightmost_descendant(pos_css);
821 continue;
822
823 v2:
824 /*
825 * Only valid partition roots that are not isolated and with
826  * non-empty effective_cpus will be saved into csa[].
827 */
828 if ((cp->partition_root_state == PRS_ROOT) &&
829 !cpumask_empty(cp->effective_cpus))
830 csa[csn++] = cp;
831
832 /*
833 * Skip @cp's subtree if not a partition root and has no
834 * exclusive CPUs to be granted to child cpusets.
835 */
836 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
837 pos_css = css_rightmost_descendant(pos_css);
838 }
839 rcu_read_unlock();
840
841 /*
842 * If there are only isolated partitions underneath the cgroup root,
843 * we can optimize out unneeded sched domains scanning.
844 */
845 if (root_load_balance && (csn == 1))
846 goto single_root_domain;
847
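	/* Initialize a union-find node for each candidate cpuset before merging */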
848 for (i = 0; i < csn; i++)
849 uf_node_init(&csa[i]->node);
850
851 /* Merge overlapping cpusets */
852 for (i = 0; i < csn; i++) {
853 for (j = i + 1; j < csn; j++) {
854 if (cpusets_overlap(csa[i], csa[j])) {
855 /*
856 * Cgroup v2 shouldn't pass down overlapping
857 * partition root cpusets.
858 */
859 WARN_ON_ONCE(cgrpv2);
860 uf_union(&csa[i]->node, &csa[j]->node);
861 }
862 }
863 }
864
865 /* Count the total number of domains */
866 for (i = 0; i < csn; i++) {
867 if (uf_find(&csa[i]->node) == &csa[i]->node)
868 ndoms++;
869 }
870
871 /*
872 * Now we know how many domains to create.
873 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
874 */
875 doms = alloc_sched_domains(ndoms);
876 if (!doms)
877 goto done;
878
879 /*
880 * The rest of the code, including the scheduler, can deal with
881 * dattr==NULL case. No need to abort if alloc fails.
882 */
883 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
884 GFP_KERNEL);
885
886 /*
887 * Cgroup v2 doesn't support domain attributes, just set all of them
888 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
889 * subset of HK_TYPE_DOMAIN housekeeping CPUs.
890 */
891 if (cgrpv2) {
892 for (i = 0; i < ndoms; i++) {
893 /*
894 * The top cpuset may contain some boot time isolated
895 * CPUs that need to be excluded from the sched domain.
896 */
897 if (csa[i] == &top_cpuset)
898 cpumask_and(doms[i], csa[i]->effective_cpus,
899 housekeeping_cpumask(HK_TYPE_DOMAIN));
900 else
901 cpumask_copy(doms[i], csa[i]->effective_cpus);
902 if (dattr)
903 dattr[i] = SD_ATTR_INIT;
904 }
905 goto done;
906 }
907
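	/* v1: build one sched domain per union-find root, OR-ing in its members' effective_cpus */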
908 for (nslot = 0, i = 0; i < csn; i++) {
909 nslot_update = 0;
910 for (j = i; j < csn; j++) {
911 if (uf_find(&csa[j]->node) == &csa[i]->node) {
912 struct cpumask *dp = doms[nslot];
913
914 if (i == j) {
915 nslot_update = 1;
916 cpumask_clear(dp);
917 if (dattr)
918 *(dattr + nslot) = SD_ATTR_INIT;
919 }
920 cpumask_or(dp, dp, csa[j]->effective_cpus);
921 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
922 if (dattr)
923 update_domain_attr_tree(dattr + nslot, csa[j]);
924 }
925 }
926 if (nslot_update)
927 nslot++;
928 }
929 BUG_ON(nslot != ndoms);
930
931 done:
932 kfree(csa);
933
934 /*
935 * Fallback to the default domain if kmalloc() failed.
936 * See comments in partition_sched_domains().
937 */
938 if (doms == NULL)
939 ndoms = 1;
940
941 *domains = doms;
942 *attributes = dattr;
943 return ndoms;
944 }
945
946 static void dl_update_tasks_root_domain(struct cpuset *cs)
947 {
948 struct css_task_iter it;
949 struct task_struct *task;
950
951 if (cs->nr_deadline_tasks == 0)
952 return;
953
954 css_task_iter_start(&cs->css, 0, &it);
955
956 while ((task = css_task_iter_next(&it)))
957 dl_add_task_root_domain(task);
958
959 css_task_iter_end(&it);
960 }
961
962 void dl_rebuild_rd_accounting(void)
963 {
964 struct cpuset *cs = NULL;
965 struct cgroup_subsys_state *pos_css;
966 int cpu;
967 u64 cookie = ++dl_cookie;
968
969 lockdep_assert_held(&cpuset_mutex);
970 lockdep_assert_cpus_held();
971 lockdep_assert_held(&sched_domains_mutex);
972
973 rcu_read_lock();
974
975 for_each_possible_cpu(cpu) {
976 if (dl_bw_visited(cpu, cookie))
977 continue;
978
979 dl_clear_root_domain_cpu(cpu);
980 }
981
982 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
983
984 if (cpumask_empty(cs->effective_cpus)) {
985 pos_css = css_rightmost_descendant(pos_css);
986 continue;
987 }
988
989 css_get(&cs->css);
990
991 rcu_read_unlock();
992
993 dl_update_tasks_root_domain(cs);
994
995 rcu_read_lock();
996 css_put(&cs->css);
997 }
998 rcu_read_unlock();
999 }
1000
1001 /*
1002 * Rebuild scheduler domains.
1003 *
1004 * If the flag 'sched_load_balance' of any cpuset with non-empty
1005 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1006 * which has that flag enabled, or if any cpuset with a non-empty
1007 * 'cpus' is removed, then call this routine to rebuild the
1008 * scheduler's dynamic sched domains.
1009 *
1010  * Call with cpuset_mutex held; cpus_read_lock() must also be held by the caller.
1011 */
1012 void rebuild_sched_domains_locked(void)
1013 {
1014 struct cgroup_subsys_state *pos_css;
1015 struct sched_domain_attr *attr;
1016 cpumask_var_t *doms;
1017 struct cpuset *cs;
1018 int ndoms;
1019
1020 lockdep_assert_cpus_held();
1021 lockdep_assert_held(&cpuset_mutex);
1022 force_sd_rebuild = false;
1023
1024 /*
1025 * If we have raced with CPU hotplug, return early to avoid
1026 * passing doms with offlined cpu to partition_sched_domains().
1027 * Anyways, cpuset_handle_hotplug() will rebuild sched domains.
1028 *
1029 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1030 * should be the same as the active CPUs, so checking only top_cpuset
1031 * is enough to detect racing CPU offlines.
1032 */
1033 if (cpumask_empty(subpartitions_cpus) &&
1034 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1035 return;
1036
1037 /*
1038 * With subpartition CPUs, however, the effective CPUs of a partition
1039 * root should be only a subset of the active CPUs. Since a CPU in any
1040 * partition root could be offlined, all must be checked.
1041 */
1042 if (!cpumask_empty(subpartitions_cpus)) {
1043 rcu_read_lock();
1044 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1045 if (!is_partition_valid(cs)) {
1046 pos_css = css_rightmost_descendant(pos_css);
1047 continue;
1048 }
1049 if (!cpumask_subset(cs->effective_cpus,
1050 cpu_active_mask)) {
1051 rcu_read_unlock();
1052 return;
1053 }
1054 }
1055 rcu_read_unlock();
1056 }
1057
1058 /* Generate domain masks and attrs */
1059 ndoms = generate_sched_domains(&doms, &attr);
1060
1061 /* Have scheduler rebuild the domains */
1062 partition_sched_domains(ndoms, doms, attr);
1063 }
1064 #else /* !CONFIG_SMP */
1065 void rebuild_sched_domains_locked(void)
1066 {
1067 }
1068 #endif /* CONFIG_SMP */
1069
1070 static void rebuild_sched_domains_cpuslocked(void)
1071 {
1072 mutex_lock(&cpuset_mutex);
1073 rebuild_sched_domains_locked();
1074 mutex_unlock(&cpuset_mutex);
1075 }
1076
1077 void rebuild_sched_domains(void)
1078 {
1079 cpus_read_lock();
1080 rebuild_sched_domains_cpuslocked();
1081 cpus_read_unlock();
1082 }
1083
1084 void cpuset_reset_sched_domains(void)
1085 {
1086 mutex_lock(&cpuset_mutex);
1087 partition_sched_domains(1, NULL, NULL);
1088 mutex_unlock(&cpuset_mutex);
1089 }
1090
1091 /**
1092 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1093 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1094 * @new_cpus: the temp variable for the new effective_cpus mask
1095 *
1096 * Iterate through each task of @cs updating its cpus_allowed to the
1097 * effective cpuset's. As this function is called with cpuset_mutex held,
1098 * cpuset membership stays stable.
1099 *
1100 * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1101 * to make sure all offline CPUs are also included as hotplug code won't
1102 * update cpumasks for tasks in top_cpuset.
1103 *
1104 * As task_cpu_possible_mask() can be task dependent in arm64, we have to
1105 * do cpu masking per task instead of doing it once for all.
1106 */
1107 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1108 {
1109 struct css_task_iter it;
1110 struct task_struct *task;
1111 bool top_cs = cs == &top_cpuset;
1112
1113 css_task_iter_start(&cs->css, 0, &it);
1114 while ((task = css_task_iter_next(&it))) {
1115 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1116
1117 if (top_cs) {
1118 /*
1119 * PF_NO_SETAFFINITY tasks are ignored.
1120 * All per cpu kthreads should have PF_NO_SETAFFINITY
1121 * flag set, see kthread_set_per_cpu().
1122 */
1123 if (task->flags & PF_NO_SETAFFINITY)
1124 continue;
1125 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1126 } else {
1127 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1128 }
1129 set_cpus_allowed_ptr(task, new_cpus);
1130 }
1131 css_task_iter_end(&it);
1132 }
1133
1134 /**
1135 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1136 * @new_cpus: the temp variable for the new effective_cpus mask
1137  * @cs: the cpuset that needs its new effective_cpus mask recomputed
1138 * @parent: the parent cpuset
1139 *
1140 * The result is valid only if the given cpuset isn't a partition root.
1141 */
1142 static void compute_effective_cpumask(struct cpumask *new_cpus,
1143 struct cpuset *cs, struct cpuset *parent)
1144 {
1145 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1146 }
1147
1148 /*
1149 * Commands for update_parent_effective_cpumask
1150 */
1151 enum partition_cmd {
1152 partcmd_enable, /* Enable partition root */
1153 partcmd_enablei, /* Enable isolated partition root */
1154 partcmd_disable, /* Disable partition root */
1155 partcmd_update, /* Update parent's effective_cpus */
1156 partcmd_invalidate, /* Make partition invalid */
1157 };
1158
1159 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1160 struct tmpmasks *tmp);
1161
1162 /*
1163 * Update partition exclusive flag
1164 *
1165 * Return: 0 if successful, an error code otherwise
1166 */
1167 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1168 {
1169 bool exclusive = (new_prs > PRS_MEMBER);
1170
1171 if (exclusive && !is_cpu_exclusive(cs)) {
1172 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1173 return PERR_NOTEXCL;
1174 } else if (!exclusive && is_cpu_exclusive(cs)) {
1175 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1176 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1177 }
1178 return 0;
1179 }
1180
1181 /*
1182 * Update partition load balance flag and/or rebuild sched domain
1183 *
1184 * Changing load balance flag will automatically call
1185 * rebuild_sched_domains_locked().
1186 * This function is for cgroup v2 only.
1187 */
1188 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1189 {
1190 int new_prs = cs->partition_root_state;
1191 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1192 bool new_lb;
1193
1194 /*
1195 * If cs is not a valid partition root, the load balance state
1196 * will follow its parent.
1197 */
1198 if (new_prs > 0) {
1199 new_lb = (new_prs != PRS_ISOLATED);
1200 } else {
1201 new_lb = is_sched_load_balance(parent_cs(cs));
1202 }
1203 if (new_lb != !!is_sched_load_balance(cs)) {
1204 rebuild_domains = true;
1205 if (new_lb)
1206 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1207 else
1208 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1209 }
1210
1211 if (rebuild_domains)
1212 cpuset_force_rebuild();
1213 }
1214
1215 /*
1216 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1217 */
1218 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1219 struct cpumask *xcpus)
1220 {
1221 /*
1222 * A populated partition (cs or parent) can't have empty effective_cpus
1223 */
1224 return (cpumask_subset(parent->effective_cpus, xcpus) &&
1225 partition_is_populated(parent, cs)) ||
1226 (!cpumask_intersects(xcpus, cpu_active_mask) &&
1227 partition_is_populated(cs, NULL));
1228 }
1229
1230 static void reset_partition_data(struct cpuset *cs)
1231 {
1232 struct cpuset *parent = parent_cs(cs);
1233
1234 if (!cpuset_v2())
1235 return;
1236
1237 lockdep_assert_held(&callback_lock);
1238
1239 cs->nr_subparts = 0;
1240 if (cpumask_empty(cs->exclusive_cpus)) {
1241 cpumask_clear(cs->effective_xcpus);
1242 if (is_cpu_exclusive(cs))
1243 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1244 }
1245 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1246 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1247 }
1248
1249 /*
1250 * isolated_cpus_update - Update the isolated_cpus mask
1251 * @old_prs: old partition_root_state
1252 * @new_prs: new partition_root_state
1253 * @xcpus: exclusive CPUs with state change
1254 */
1255 static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1256 {
1257 WARN_ON_ONCE(old_prs == new_prs);
1258 if (new_prs == PRS_ISOLATED)
1259 cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1260 else
1261 cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1262 }
1263
1264 /*
1265 * partition_xcpus_add - Add new exclusive CPUs to partition
1266 * @new_prs: new partition_root_state
1267 * @parent: parent cpuset
1268 * @xcpus: exclusive CPUs to be added
1269 * Return: true if isolated_cpus modified, false otherwise
1270 *
1271 * Remote partition if parent == NULL
1272 */
1273 static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
1274 struct cpumask *xcpus)
1275 {
1276 bool isolcpus_updated;
1277
1278 WARN_ON_ONCE(new_prs < 0);
1279 lockdep_assert_held(&callback_lock);
1280 if (!parent)
1281 parent = &top_cpuset;
1282
1283
1284 if (parent == &top_cpuset)
1285 cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1286
1287 isolcpus_updated = (new_prs != parent->partition_root_state);
1288 if (isolcpus_updated)
1289 isolated_cpus_update(parent->partition_root_state, new_prs,
1290 xcpus);
1291
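	/* The newly exclusive CPUs are no longer part of the parent's effective_cpus */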
1292 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1293 return isolcpus_updated;
1294 }
1295
1296 /*
1297 * partition_xcpus_del - Remove exclusive CPUs from partition
1298 * @old_prs: old partition_root_state
1299 * @parent: parent cpuset
1300 * @xcpus: exclusive CPUs to be removed
1301 * Return: true if isolated_cpus modified, false otherwise
1302 *
1303 * Remote partition if parent == NULL
1304 */
1305 static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
1306 struct cpumask *xcpus)
1307 {
1308 bool isolcpus_updated;
1309
1310 WARN_ON_ONCE(old_prs < 0);
1311 lockdep_assert_held(&callback_lock);
1312 if (!parent)
1313 parent = &top_cpuset;
1314
1315 if (parent == &top_cpuset)
1316 cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1317
1318 isolcpus_updated = (old_prs != parent->partition_root_state);
1319 if (isolcpus_updated)
1320 isolated_cpus_update(old_prs, parent->partition_root_state,
1321 xcpus);
1322
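	/* Return the released CPUs (active ones only) to the parent's effective_cpus */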
1323 cpumask_and(xcpus, xcpus, cpu_active_mask);
1324 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1325 return isolcpus_updated;
1326 }
1327
1328 static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
1329 {
1330 int ret;
1331
1332 lockdep_assert_cpus_held();
1333
1334 if (!isolcpus_updated)
1335 return;
1336
1337 ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
1338 WARN_ON_ONCE(ret < 0);
1339 }
1340
1341 /**
1342 * cpuset_cpu_is_isolated - Check if the given CPU is isolated
1343 * @cpu: the CPU number to be checked
1344 * Return: true if CPU is used in an isolated partition, false otherwise
1345 */
1346 bool cpuset_cpu_is_isolated(int cpu)
1347 {
1348 return cpumask_test_cpu(cpu, isolated_cpus);
1349 }
1350 EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
1351
1352 /*
1353 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs
1354 * @cs: cpuset
1355 * @xcpus: effective exclusive CPUs value to be set
1356 * @real_cs: the real cpuset (can be NULL)
1357 * Return: 0 if there is no sibling conflict, > 0 otherwise
1358 *
1359 * If exclusive_cpus isn't explicitly set or a real_cs is provided, we have to
1360 * scan the sibling cpusets and exclude their exclusive_cpus or effective_xcpus
1361 * as well. The provision of real_cs means that a cpumask is being changed and
1362 * the given cs is a trial one.
1363 */
1364 static int compute_effective_exclusive_cpumask(struct cpuset *cs,
1365 struct cpumask *xcpus,
1366 struct cpuset *real_cs)
1367 {
1368 struct cgroup_subsys_state *css;
1369 struct cpuset *parent = parent_cs(cs);
1370 struct cpuset *sibling;
1371 int retval = 0;
1372
1373 if (!xcpus)
1374 xcpus = cs->effective_xcpus;
1375
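	/* Start from the user-requested exclusive CPUs, clipped to the parent's effective_xcpus */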
1376 cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus);
1377
1378 if (!real_cs) {
1379 if (!cpumask_empty(cs->exclusive_cpus))
1380 return 0;
1381 } else {
1382 cs = real_cs;
1383 }
1384
1385 /*
1386 * Exclude exclusive CPUs from siblings
1387 */
1388 rcu_read_lock();
1389 cpuset_for_each_child(sibling, css, parent) {
1390 if (sibling == cs)
1391 continue;
1392
1393 if (!cpumask_empty(sibling->exclusive_cpus) &&
1394 cpumask_intersects(xcpus, sibling->exclusive_cpus)) {
1395 cpumask_andnot(xcpus, xcpus, sibling->exclusive_cpus);
1396 retval++;
1397 continue;
1398 }
1399 if (!cpumask_empty(sibling->effective_xcpus) &&
1400 cpumask_intersects(xcpus, sibling->effective_xcpus)) {
1401 cpumask_andnot(xcpus, xcpus, sibling->effective_xcpus);
1402 retval++;
1403 }
1404 }
1405 rcu_read_unlock();
1406 return retval;
1407 }
1408
1409 static inline bool is_remote_partition(struct cpuset *cs)
1410 {
1411 return !list_empty(&cs->remote_sibling);
1412 }
1413
1414 static inline bool is_local_partition(struct cpuset *cs)
1415 {
1416 return is_partition_valid(cs) && !is_remote_partition(cs);
1417 }
1418
1419 /*
1420 * remote_partition_enable - Enable current cpuset as a remote partition root
1421 * @cs: the cpuset to update
1422 * @new_prs: new partition_root_state
1423 * @tmp: temporary masks
1424 * Return: 0 if successful, errcode if error
1425 *
1426 * Enable the current cpuset to become a remote partition root taking CPUs
1427 * directly from the top cpuset. cpuset_mutex must be held by the caller.
1428 */
1429 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1430 struct tmpmasks *tmp)
1431 {
1432 bool isolcpus_updated;
1433
1434 /*
1435 * The user must have sysadmin privilege.
1436 */
1437 if (!capable(CAP_SYS_ADMIN))
1438 return PERR_ACCESS;
1439
1440 /*
1441 * The requested exclusive_cpus must not be allocated to other
1442 * partitions and it can't use up all the root's effective_cpus.
1443 *
1444 * Note that if there is any local partition root above it or
1445 * remote partition root underneath it, its exclusive_cpus must
1446 * have overlapped with subpartitions_cpus.
1447 */
1448 compute_effective_exclusive_cpumask(cs, tmp->new_cpus, NULL);
1449 if (cpumask_empty(tmp->new_cpus) ||
1450 cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
1451 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1452 return PERR_INVCPUS;
1453
1454 spin_lock_irq(&callback_lock);
1455 isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1456 list_add(&cs->remote_sibling, &remote_children);
1457 cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1458 spin_unlock_irq(&callback_lock);
1459 update_unbound_workqueue_cpumask(isolcpus_updated);
1460 cpuset_force_rebuild();
1461 cs->prs_err = 0;
1462
1463 /*
1464 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1465 */
1466 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1467 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1468 return 0;
1469 }
1470
1471 /*
1472 * remote_partition_disable - Remove current cpuset from remote partition list
1473 * @cs: the cpuset to update
1474 * @tmp: temporary masks
1475 *
1476 * The effective_cpus is also updated.
1477 *
1478 * cpuset_mutex must be held by the caller.
1479 */
1480 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1481 {
1482 bool isolcpus_updated;
1483
1484 WARN_ON_ONCE(!is_remote_partition(cs));
1485 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1486
1487 spin_lock_irq(&callback_lock);
1488 list_del_init(&cs->remote_sibling);
1489 isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
1490 NULL, cs->effective_xcpus);
1491 if (cs->prs_err)
1492 cs->partition_root_state = -cs->partition_root_state;
1493 else
1494 cs->partition_root_state = PRS_MEMBER;
1495
1496 /* effective_xcpus may need to be changed */
1497 compute_effective_exclusive_cpumask(cs, NULL, NULL);
1498 reset_partition_data(cs);
1499 spin_unlock_irq(&callback_lock);
1500 update_unbound_workqueue_cpumask(isolcpus_updated);
1501 cpuset_force_rebuild();
1502
1503 /*
1504 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1505 */
1506 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1507 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1508 }
1509
1510 /*
1511 * remote_cpus_update - cpus_exclusive change of remote partition
1512 * @cs: the cpuset to be updated
1513 * @xcpus: the new exclusive_cpus mask, if non-NULL
1514 * @excpus: the new effective_xcpus mask
1515 * @tmp: temporary masks
1516 *
1517 * top_cpuset and subpartitions_cpus will be updated or partition can be
1518 * invalidated.
1519 */
1520 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1521 struct cpumask *excpus, struct tmpmasks *tmp)
1522 {
1523 bool adding, deleting;
1524 int prs = cs->partition_root_state;
1525 int isolcpus_updated = 0;
1526
1527 if (WARN_ON_ONCE(!is_remote_partition(cs)))
1528 return;
1529
1530 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1531
1532 if (cpumask_empty(excpus)) {
1533 cs->prs_err = PERR_CPUSEMPTY;
1534 goto invalidate;
1535 }
1536
1537 adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1538 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1539
1540 /*
1541  * Addition of remote CPUs is only allowed if those CPUs are
1542 * not allocated to other partitions and there are effective_cpus
1543 * left in the top cpuset.
1544 */
1545 if (adding) {
1546 if (!capable(CAP_SYS_ADMIN))
1547 cs->prs_err = PERR_ACCESS;
1548 else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1549 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1550 cs->prs_err = PERR_NOCPUS;
1551 if (cs->prs_err)
1552 goto invalidate;
1553 }
1554
1555 spin_lock_irq(&callback_lock);
1556 if (adding)
1557 isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
1558 if (deleting)
1559 isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
1560 /*
1561 * Need to update effective_xcpus and exclusive_cpus now as
1562 * update_sibling_cpumasks() below may iterate back to the same cs.
1563 */
1564 cpumask_copy(cs->effective_xcpus, excpus);
1565 if (xcpus)
1566 cpumask_copy(cs->exclusive_cpus, xcpus);
1567 spin_unlock_irq(&callback_lock);
1568 update_unbound_workqueue_cpumask(isolcpus_updated);
1569 if (adding || deleting)
1570 cpuset_force_rebuild();
1571
1572 /*
1573 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1574 */
1575 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1576 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1577 return;
1578
1579 invalidate:
1580 remote_partition_disable(cs, tmp);
1581 }
1582
1583 /*
1584 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1585 * @prstate: partition root state to be checked
1586 * @new_cpus: cpu mask
1587 * Return: true if there is conflict, false otherwise
1588 *
1589 * CPUs outside of boot_hk_cpus, if defined, can only be used in an
1590 * isolated partition.
1591 */
1592 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1593 {
1594 if (!have_boot_isolcpus)
1595 return false;
1596
1597 if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
1598 return true;
1599
1600 return false;
1601 }
1602
1603 /**
1604 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1605 * @cs: The cpuset that requests change in partition root state
1606 * @cmd: Partition root state change command
1607 * @newmask: Optional new cpumask for partcmd_update
1608 * @tmp: Temporary addmask and delmask
1609 * Return: 0 or a partition root state error code
1610 *
1611 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1612 * root to a partition root. The effective_xcpus (cpus_allowed if
1613 * effective_xcpus not set) mask of the given cpuset will be taken away from
1614 * parent's effective_cpus. The function will return 0 if all the CPUs listed
1615 * in effective_xcpus can be granted or an error code will be returned.
1616 *
1617 * For partcmd_disable, the cpuset is being transformed from a partition
1618 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1619 * given back to parent's effective_cpus. 0 will always be returned.
1620 *
1621 * For partcmd_update, if the optional newmask is specified, the cpu list is
1622 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1623 * assumed to remain the same. The cpuset should either be a valid or invalid
1624 * partition root. The partition root state may change from valid to invalid
1625 * or vice versa. An error code will be returned if transitioning from
1626 * invalid to valid violates the exclusivity rule.
1627 *
1628 * For partcmd_invalidate, the current partition will be made invalid.
1629 *
1630 * The partcmd_enable* and partcmd_disable commands are used by
1631 * update_prstate(). An error code may be returned and the caller will check
1632 * for error.
1633 *
1634 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1635 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1636 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1637 * check for error and so partition_root_state and prs_err will be updated
1638 * directly.
1639 */
1640 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1641 struct cpumask *newmask,
1642 struct tmpmasks *tmp)
1643 {
1644 struct cpuset *parent = parent_cs(cs);
1645 int adding; /* Adding cpus to parent's effective_cpus */
1646 int deleting; /* Deleting cpus from parent's effective_cpus */
1647 int old_prs, new_prs;
1648 int part_error = PERR_NONE; /* Partition error? */
1649 int subparts_delta = 0;
1650 int isolcpus_updated = 0;
1651 struct cpumask *xcpus = user_xcpus(cs);
1652 bool nocpu;
1653
1654 lockdep_assert_held(&cpuset_mutex);
1655 WARN_ON_ONCE(is_remote_partition(cs));
1656
1657 /*
1658 * new_prs will only be changed for the partcmd_update and
1659 * partcmd_invalidate commands.
1660 */
1661 adding = deleting = false;
1662 old_prs = new_prs = cs->partition_root_state;
1663
1664 if (cmd == partcmd_invalidate) {
1665 if (is_prs_invalid(old_prs))
1666 return 0;
1667
1668 /*
1669 * Make the current partition invalid.
1670 */
1671 if (is_partition_valid(parent))
1672 adding = cpumask_and(tmp->addmask,
1673 xcpus, parent->effective_xcpus);
1674 if (old_prs > 0) {
1675 new_prs = -old_prs;
1676 subparts_delta--;
1677 }
1678 goto write_error;
1679 }
1680
1681 /*
1682 * The parent must be a partition root.
1683 * The new cpumask, if present, or the current cpus_allowed must
1684 * not be empty.
1685 */
1686 if (!is_partition_valid(parent)) {
1687 return is_partition_invalid(parent)
1688 ? PERR_INVPARENT : PERR_NOTPART;
1689 }
1690 if (!newmask && xcpus_empty(cs))
1691 return PERR_CPUSEMPTY;
1692
1693 nocpu = tasks_nocpu_error(parent, cs, xcpus);
1694
1695 if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1696 /*
1697 * Need to call compute_effective_exclusive_cpumask() in case
1698 * exclusive_cpus not set. Sibling conflict should only happen
1699 * if exclusive_cpus isn't set.
1700 */
1701 xcpus = tmp->new_cpus;
1702 if (compute_effective_exclusive_cpumask(cs, xcpus, NULL))
1703 WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1704
1705 /*
1706 * Enabling partition root is not allowed if its
1707 * effective_xcpus is empty.
1708 */
1709 if (cpumask_empty(xcpus))
1710 return PERR_INVCPUS;
1711
1712 if (prstate_housekeeping_conflict(new_prs, xcpus))
1713 return PERR_HKEEPING;
1714
1715 /*
1716 * A parent can be left with no CPU as long as there is no
1717 * task directly associated with the parent partition.
1718 */
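/*
 * Illustrative example (hypothetical values, not from the source): a
 * parent with effective_xcpus = 0-7 and no task of its own can hand all
 * of 0-7 to a child partition and be left with an empty effective_cpus.
 * If the parent did have tasks, tasks_nocpu_error() above would have set
 * nocpu and the request fails with PERR_NOCPUS just below.
 */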
1719 if (nocpu)
1720 return PERR_NOCPUS;
1721
1722 deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus);
1723 if (deleting)
1724 subparts_delta++;
1725 new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1726 } else if (cmd == partcmd_disable) {
1727 /*
1728 * May need to add cpus back to parent's effective_cpus
1729  * (and maybe remove them from subpartitions_cpus/isolated_cpus)
1730  * for a valid partition root. xcpus may contain CPUs that
1731 * shouldn't be removed from the two global cpumasks.
1732 */
1733 if (is_partition_valid(cs)) {
1734 cpumask_copy(tmp->addmask, cs->effective_xcpus);
1735 adding = true;
1736 subparts_delta--;
1737 }
1738 new_prs = PRS_MEMBER;
1739 } else if (newmask) {
1740 /*
1741 * Empty cpumask is not allowed
1742 */
1743 if (cpumask_empty(newmask)) {
1744 part_error = PERR_CPUSEMPTY;
1745 goto write_error;
1746 }
1747
1748  /* Check again with newmask whether CPUs are available for parent/cs */
1749 nocpu |= tasks_nocpu_error(parent, cs, newmask);
1750
1751 /*
1752 * partcmd_update with newmask:
1753 *
1754 * Compute add/delete mask to/from effective_cpus
1755 *
1756 * For valid partition:
1757 * addmask = exclusive_cpus & ~newmask
1758 * & parent->effective_xcpus
1759 * delmask = newmask & ~exclusive_cpus
1760 * & parent->effective_xcpus
1761 *
1762 * For invalid partition:
1763 * delmask = newmask & parent->effective_xcpus
1764 */
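/*
 * Hypothetical worked example of the formulas above (illustrative values
 * only): with effective_xcpus = 2-5, newmask = 4-7 and
 * parent->effective_xcpus = 0-7, a valid partition computes
 *	addmask = (2-5 & ~(4-7)) & 0-7 = 2-3	(returned to the parent)
 *	delmask = (4-7 & ~(2-5)) & 0-7 = 6-7	(taken from the parent)
 */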
1765 if (is_prs_invalid(old_prs)) {
1766 adding = false;
1767 deleting = cpumask_and(tmp->delmask,
1768 newmask, parent->effective_xcpus);
1769 } else {
1770 cpumask_andnot(tmp->addmask, xcpus, newmask);
1771 adding = cpumask_and(tmp->addmask, tmp->addmask,
1772 parent->effective_xcpus);
1773
1774 cpumask_andnot(tmp->delmask, newmask, xcpus);
1775 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1776 parent->effective_xcpus);
1777 }
1778 /*
1779 * Make partition invalid if parent's effective_cpus could
1780 * become empty and there are tasks in the parent.
1781 */
1782 if (nocpu && (!adding ||
1783 !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1784 part_error = PERR_NOCPUS;
1785 deleting = false;
1786 adding = cpumask_and(tmp->addmask,
1787 xcpus, parent->effective_xcpus);
1788 }
1789 } else {
1790 /*
1791 * partcmd_update w/o newmask
1792 *
1793 * delmask = effective_xcpus & parent->effective_cpus
1794 *
1795 * This can be called from:
1796 * 1) update_cpumasks_hier()
1797 * 2) cpuset_hotplug_update_tasks()
1798 *
1799 * Check to see if it can be transitioned from valid to
1800 * invalid partition or vice versa.
1801 *
1802 * A partition error happens when parent has tasks and all
1803 * its effective CPUs will have to be distributed out.
1804 */
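/*
 * Hypothetical example of the formula above: an invalid partition with
 * effective_xcpus = 2-3 under a parent whose effective_cpus = 0-3 gets
 * delmask = 2-3. If the sibling exclusivity check below passes, those
 * CPUs are pulled out of the parent and the partition can turn valid
 * again; if taking them would leave the parent's tasks with no CPU
 * (nocpu), the partition instead becomes or stays invalid (PERR_NOCPUS).
 */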
1805 WARN_ON_ONCE(!is_partition_valid(parent));
1806 if (nocpu) {
1807 part_error = PERR_NOCPUS;
1808 if (is_partition_valid(cs))
1809 adding = cpumask_and(tmp->addmask,
1810 xcpus, parent->effective_xcpus);
1811 } else if (is_partition_invalid(cs) &&
1812 cpumask_subset(xcpus, parent->effective_xcpus)) {
1813 struct cgroup_subsys_state *css;
1814 struct cpuset *child;
1815 bool exclusive = true;
1816
1817 /*
1818  * Converting an invalid partition to a valid one has
1819  * to pass the cpu exclusivity test.
1820 */
1821 rcu_read_lock();
1822 cpuset_for_each_child(child, css, parent) {
1823 if (child == cs)
1824 continue;
1825 if (!cpusets_are_exclusive(cs, child)) {
1826 exclusive = false;
1827 break;
1828 }
1829 }
1830 rcu_read_unlock();
1831 if (exclusive)
1832 deleting = cpumask_and(tmp->delmask,
1833 xcpus, parent->effective_cpus);
1834 else
1835 part_error = PERR_NOTEXCL;
1836 }
1837 }
1838
1839 write_error:
1840 if (part_error)
1841 WRITE_ONCE(cs->prs_err, part_error);
1842
1843 if (cmd == partcmd_update) {
1844 /*
1845 * Check for possible transition between valid and invalid
1846 * partition root.
1847 */
1848 switch (cs->partition_root_state) {
1849 case PRS_ROOT:
1850 case PRS_ISOLATED:
1851 if (part_error) {
1852 new_prs = -old_prs;
1853 subparts_delta--;
1854 }
1855 break;
1856 case PRS_INVALID_ROOT:
1857 case PRS_INVALID_ISOLATED:
1858 if (!part_error) {
1859 new_prs = -old_prs;
1860 subparts_delta++;
1861 }
1862 break;
1863 }
1864 }
1865
1866 if (!adding && !deleting && (new_prs == old_prs))
1867 return 0;
1868
1869 /*
1870 * Transitioning between invalid to valid or vice versa may require
1871 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1872 * validate_change() has already been successfully called and
1873 * CPU lists in cs haven't been updated yet. So defer it to later.
1874 */
1875 if ((old_prs != new_prs) && (cmd != partcmd_update)) {
1876 int err = update_partition_exclusive_flag(cs, new_prs);
1877
1878 if (err)
1879 return err;
1880 }
1881
1882 /*
1883 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1884 * only).
1885 *
1886  * CPUs newly added to the partition will be removed from the parent's
1887  * effective_cpus and newly deleted ones will be added back to it.
1888 */
1889 spin_lock_irq(&callback_lock);
1890 if (old_prs != new_prs) {
1891 cs->partition_root_state = new_prs;
1892 if (new_prs <= 0)
1893 cs->nr_subparts = 0;
1894 }
1895 /*
1896  * Adding to parent's effective_cpus means deleting CPUs from cs
1897 * and vice versa.
1898 */
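/*
 * Illustrative example (hypothetical CPUs): a delmask of 6-7 means those
 * CPUs are being handed to this partition, so partition_xcpus_add() below
 * removes 6-7 from the parent's effective_cpus (and, for sub-partitions
 * of top_cpuset, records them in subpartitions_cpus and possibly
 * isolated_cpus). Conversely, an addmask of 2-3 returns CPUs 2-3 to the
 * parent via partition_xcpus_del().
 */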
1899 if (adding)
1900 isolcpus_updated += partition_xcpus_del(old_prs, parent,
1901 tmp->addmask);
1902 if (deleting)
1903 isolcpus_updated += partition_xcpus_add(new_prs, parent,
1904 tmp->delmask);
1905
1906 if (is_partition_valid(parent)) {
1907 parent->nr_subparts += subparts_delta;
1908 WARN_ON_ONCE(parent->nr_subparts < 0);
1909 }
1910 spin_unlock_irq(&callback_lock);
1911 update_unbound_workqueue_cpumask(isolcpus_updated);
1912
1913 if ((old_prs != new_prs) && (cmd == partcmd_update))
1914 update_partition_exclusive_flag(cs, new_prs);
1915
1916 if (adding || deleting) {
1917 cpuset_update_tasks_cpumask(parent, tmp->addmask);
1918 update_sibling_cpumasks(parent, cs, tmp);
1919 }
1920
1921 /*
1922 * For partcmd_update without newmask, it is being called from
1923 * cpuset_handle_hotplug(). Update the load balance flag and
1924 * scheduling domain accordingly.
1925 */
1926 if ((cmd == partcmd_update) && !newmask)
1927 update_partition_sd_lb(cs, old_prs);
1928
1929 notify_partition_change(cs, old_prs);
1930 return 0;
1931 }
1932
1933 /**
1934 * compute_partition_effective_cpumask - compute effective_cpus for partition
1935 * @cs: partition root cpuset
1936 * @new_ecpus: previously computed effective_cpus to be updated
1937 *
1938 * Compute the effective_cpus of a partition root by scanning effective_xcpus
1939 * of child partition roots and excluding their effective_xcpus.
1940 *
1941 * This has the side effect of invalidating valid child partition roots,
1942 * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
1943 * or update_cpumasks_hier() where parent and children are modified
1944 * successively, we don't need to call update_parent_effective_cpumask()
1945 * and the child's effective_cpus will be updated in later iterations.
1946 *
1947 * Note that rcu_read_lock() is assumed to be held.
1948 */
1949 static void compute_partition_effective_cpumask(struct cpuset *cs,
1950 struct cpumask *new_ecpus)
1951 {
1952 struct cgroup_subsys_state *css;
1953 struct cpuset *child;
1954 bool populated = partition_is_populated(cs, NULL);
1955
1956 /*
1957 * Check child partition roots to see if they should be
1958 * invalidated when
1959  * 1) the child's effective_xcpus is not a subset of the new
1960  *    exclusive_cpus
1961  * 2) all the effective_cpus will be used up and cs
1962  *    has tasks
1963 */
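/*
 * Hypothetical example of the two conditions above: if cs->effective_xcpus
 * shrinks to 0-3, a child partition holding effective_xcpus = 4-5 hits
 * condition 1 (no longer a subset) and is invalidated below, while a child
 * holding 0-3 hits condition 2 instead when cs still has tasks, since it
 * would consume every remaining effective CPU.
 */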
1964 compute_effective_exclusive_cpumask(cs, new_ecpus, NULL);
1965 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
1966
1967 rcu_read_lock();
1968 cpuset_for_each_child(child, css, cs) {
1969 if (!is_partition_valid(child))
1970 continue;
1971
1972 /*
1973 * There shouldn't be a remote partition underneath another
1974 * partition root.
1975 */
1976 WARN_ON_ONCE(is_remote_partition(child));
1977 child->prs_err = 0;
1978 if (!cpumask_subset(child->effective_xcpus,
1979 cs->effective_xcpus))
1980 child->prs_err = PERR_INVCPUS;
1981 else if (populated &&
1982 cpumask_subset(new_ecpus, child->effective_xcpus))
1983 child->prs_err = PERR_NOCPUS;
1984
1985 if (child->prs_err) {
1986 int old_prs = child->partition_root_state;
1987
1988 /*
1989 * Invalidate child partition
1990 */
1991 spin_lock_irq(&callback_lock);
1992 make_partition_invalid(child);
1993 cs->nr_subparts--;
1994 child->nr_subparts = 0;
1995 spin_unlock_irq(&callback_lock);
1996 notify_partition_change(child, old_prs);
1997 continue;
1998 }
1999 cpumask_andnot(new_ecpus, new_ecpus,
2000 child->effective_xcpus);
2001 }
2002 rcu_read_unlock();
2003 }
2004
2005 /*
2006 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2007 * @cs: the cpuset to consider
2008 * @tmp: temp variables for calculating effective_cpus & partition setup
2009 * @force: don't skip any descendant cpusets if set
2010 *
2011 * When configured cpumask is changed, the effective cpumasks of this cpuset
2012 * and all its descendants need to be updated.
2013 *
2014  * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
2015 *
2016 * Called with cpuset_mutex held
2017 */
2018 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2019 bool force)
2020 {
2021 struct cpuset *cp;
2022 struct cgroup_subsys_state *pos_css;
2023 bool need_rebuild_sched_domains = false;
2024 int old_prs, new_prs;
2025
2026 rcu_read_lock();
2027 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2028 struct cpuset *parent = parent_cs(cp);
2029 bool remote = is_remote_partition(cp);
2030 bool update_parent = false;
2031
2032 old_prs = new_prs = cp->partition_root_state;
2033
2034 /*
2035 * For child remote partition root (!= cs), we need to call
2036 * remote_cpus_update() if effective_xcpus will be changed.
2037 * Otherwise, we can skip the whole subtree.
2038 *
2039 * remote_cpus_update() will reuse tmp->new_cpus only after
2040 * its value is being processed.
2041 */
2042 if (remote && (cp != cs)) {
2043 compute_effective_exclusive_cpumask(cp, tmp->new_cpus, NULL);
2044 if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2045 pos_css = css_rightmost_descendant(pos_css);
2046 continue;
2047 }
2048 rcu_read_unlock();
2049 remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2050 rcu_read_lock();
2051
2052 /* Remote partition may be invalidated */
2053 new_prs = cp->partition_root_state;
2054 remote = (new_prs == old_prs);
2055 }
2056
2057 if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2058 compute_partition_effective_cpumask(cp, tmp->new_cpus);
2059 else
2060 compute_effective_cpumask(tmp->new_cpus, cp, parent);
2061
2062 if (remote)
2063 goto get_css; /* Ready to update cpuset data */
2064
2065 /*
2066 * A partition with no effective_cpus is allowed as long as
2067 * there is no task associated with it. Call
2068 * update_parent_effective_cpumask() to check it.
2069 */
2070 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2071 update_parent = true;
2072 goto update_parent_effective;
2073 }
2074
2075 /*
2076 * If it becomes empty, inherit the effective mask of the
2077 * parent, which is guaranteed to have some CPUs unless
2078 * it is a partition root that has explicitly distributed
2079 * out all its CPUs.
2080 */
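/*
 * Illustrative example (hypothetical masks): a v2 cpuset with
 * cpus_allowed = 8-9 under a parent whose effective_cpus = 0-3 computes
 * an empty intersection, so it falls back to the parent's 0-3 here
 * rather than being left with no CPU for its tasks.
 */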
2081 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2082 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2083
2084 /*
2085 * Skip the whole subtree if
2086 * 1) the cpumask remains the same,
2087  * 2) it has no partition root state,
2088  * 3) the force flag is not set, and
2089  * 4) for v2, its load balance state is the same as its parent's.
2090 */
2091 if (!cp->partition_root_state && !force &&
2092 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2093 (!cpuset_v2() ||
2094 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2095 pos_css = css_rightmost_descendant(pos_css);
2096 continue;
2097 }
2098
2099 update_parent_effective:
2100 /*
2101 * update_parent_effective_cpumask() should have been called
2102 * for cs already in update_cpumask(). We should also call
2103 * cpuset_update_tasks_cpumask() again for tasks in the parent
2104 * cpuset if the parent's effective_cpus changes.
2105 */
2106 if ((cp != cs) && old_prs) {
2107 switch (parent->partition_root_state) {
2108 case PRS_ROOT:
2109 case PRS_ISOLATED:
2110 update_parent = true;
2111 break;
2112
2113 default:
2114 /*
2115 * When parent is not a partition root or is
2116 * invalid, child partition roots become
2117 * invalid too.
2118 */
2119 if (is_partition_valid(cp))
2120 new_prs = -cp->partition_root_state;
2121 WRITE_ONCE(cp->prs_err,
2122 is_partition_invalid(parent)
2123 ? PERR_INVPARENT : PERR_NOTPART);
2124 break;
2125 }
2126 }
2127 get_css:
2128 if (!css_tryget_online(&cp->css))
2129 continue;
2130 rcu_read_unlock();
2131
2132 if (update_parent) {
2133 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2134 /*
2135 * The cpuset partition_root_state may become
2136 * invalid. Capture it.
2137 */
2138 new_prs = cp->partition_root_state;
2139 }
2140
2141 spin_lock_irq(&callback_lock);
2142 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2143 cp->partition_root_state = new_prs;
2144 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs))
2145 compute_effective_exclusive_cpumask(cp, NULL, NULL);
2146
2147 /*
2148 * Make sure effective_xcpus is properly set for a valid
2149 * partition root.
2150 */
2151 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
2152 cpumask_and(cp->effective_xcpus,
2153 cp->cpus_allowed, parent->effective_xcpus);
2154 else if (new_prs < 0)
2155 reset_partition_data(cp);
2156 spin_unlock_irq(&callback_lock);
2157
2158 notify_partition_change(cp, old_prs);
2159
2160 WARN_ON(!is_in_v2_mode() &&
2161 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2162
2163 cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
2164
2165 /*
2166 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2167 * from parent if current cpuset isn't a valid partition root
2168 * and their load balance states differ.
2169 */
2170 if (cpuset_v2() && !is_partition_valid(cp) &&
2171 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2172 if (is_sched_load_balance(parent))
2173 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2174 else
2175 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2176 }
2177
2178 /*
2179 * On legacy hierarchy, if the effective cpumask of any non-
2180 * empty cpuset is changed, we need to rebuild sched domains.
2181 * On default hierarchy, the cpuset needs to be a partition
2182 * root as well.
2183 */
2184 if (!cpumask_empty(cp->cpus_allowed) &&
2185 is_sched_load_balance(cp) &&
2186 (!cpuset_v2() || is_partition_valid(cp)))
2187 need_rebuild_sched_domains = true;
2188
2189 rcu_read_lock();
2190 css_put(&cp->css);
2191 }
2192 rcu_read_unlock();
2193
2194 if (need_rebuild_sched_domains)
2195 cpuset_force_rebuild();
2196 }
2197
2198 /**
2199 * update_sibling_cpumasks - Update siblings cpumasks
2200 * @parent: Parent cpuset
2201 * @cs: Current cpuset
2202 * @tmp: Temp variables
2203 */
2204 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2205 struct tmpmasks *tmp)
2206 {
2207 struct cpuset *sibling;
2208 struct cgroup_subsys_state *pos_css;
2209
2210 lockdep_assert_held(&cpuset_mutex);
2211
2212 /*
2213 * Check all its siblings and call update_cpumasks_hier()
2214 * if their effective_cpus will need to be changed.
2215 *
2216 * It is possible a change in parent's effective_cpus
2217 * due to a change in a child partition's effective_xcpus will impact
2218 * its siblings even if they do not inherit parent's effective_cpus
2219 * directly.
2220 *
2221 * The update_cpumasks_hier() function may sleep. So we have to
2222 * release the RCU read lock before calling it.
2223 */
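/*
 * Hypothetical example: parent P has effective_cpus = 0-7 and two
 * children, partition A and member B (B inherits its effective_cpus from
 * P). If A is granted CPUs 4-7, P's effective_cpus shrinks to 0-3 and
 * B's effective_cpus must be recomputed below even though B itself was
 * never written to.
 */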
2224 rcu_read_lock();
2225 cpuset_for_each_child(sibling, pos_css, parent) {
2226 if (sibling == cs)
2227 continue;
2228 if (!is_partition_valid(sibling)) {
2229 compute_effective_cpumask(tmp->new_cpus, sibling,
2230 parent);
2231 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2232 continue;
2233 } else if (is_remote_partition(sibling)) {
2234 /*
2235 * Change in a sibling cpuset won't affect a remote
2236 * partition root.
2237 */
2238 continue;
2239 }
2240
2241 if (!css_tryget_online(&sibling->css))
2242 continue;
2243
2244 rcu_read_unlock();
2245 update_cpumasks_hier(sibling, tmp, false);
2246 rcu_read_lock();
2247 css_put(&sibling->css);
2248 }
2249 rcu_read_unlock();
2250 }
2251
2252 /**
2253 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2254 * @cs: the cpuset to consider
2255 * @trialcs: trial cpuset
2256 * @buf: buffer of cpu numbers written to this cpuset
2257 */
2258 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2259 const char *buf)
2260 {
2261 int retval;
2262 struct tmpmasks tmp;
2263 struct cpuset *parent = parent_cs(cs);
2264 bool invalidate = false;
2265 bool force = false;
2266 int old_prs = cs->partition_root_state;
2267
2268 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
2269 if (cs == &top_cpuset)
2270 return -EACCES;
2271
2272 /*
2273 * An empty cpus_allowed is ok only if the cpuset has no tasks.
2274 * Since cpulist_parse() fails on an empty mask, we special case
2275 * that parsing. The validate_change() call ensures that cpusets
2276 * with tasks have cpus.
2277 */
2278 if (!*buf) {
2279 cpumask_clear(trialcs->cpus_allowed);
2280 if (cpumask_empty(trialcs->exclusive_cpus))
2281 cpumask_clear(trialcs->effective_xcpus);
2282 } else {
2283 retval = cpulist_parse(buf, trialcs->cpus_allowed);
2284 if (retval < 0)
2285 return retval;
2286
2287 if (!cpumask_subset(trialcs->cpus_allowed,
2288 top_cpuset.cpus_allowed))
2289 return -EINVAL;
2290
2291 /*
2292 * When exclusive_cpus isn't explicitly set, it is constrained
2293 * by cpus_allowed and parent's effective_xcpus. Otherwise,
2294 * trialcs->effective_xcpus is used as a temporary cpumask
2295 * for checking validity of the partition root.
2296 */
2297 trialcs->partition_root_state = PRS_MEMBER;
2298 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs))
2299 compute_effective_exclusive_cpumask(trialcs, NULL, cs);
2300 }
2301
2302 /* Nothing to do if the cpus didn't change */
2303 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2304 return 0;
2305
2306 if (alloc_cpumasks(NULL, &tmp))
2307 return -ENOMEM;
2308
2309 if (old_prs) {
2310 if (is_partition_valid(cs) &&
2311 cpumask_empty(trialcs->effective_xcpus)) {
2312 invalidate = true;
2313 cs->prs_err = PERR_INVCPUS;
2314 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2315 invalidate = true;
2316 cs->prs_err = PERR_HKEEPING;
2317 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2318 invalidate = true;
2319 cs->prs_err = PERR_NOCPUS;
2320 }
2321 }
2322
2323 /*
2324 * Check all the descendants in update_cpumasks_hier() if
2325 * effective_xcpus is to be changed.
2326 */
2327 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2328
2329 retval = validate_change(cs, trialcs);
2330
2331 if ((retval == -EINVAL) && cpuset_v2()) {
2332 struct cgroup_subsys_state *css;
2333 struct cpuset *cp;
2334
2335 /*
2336  * The -EINVAL error code indicates that the partition sibling
2337 * CPU exclusivity rule has been violated. We still allow
2338 * the cpumask change to proceed while invalidating the
2339 * partition. However, any conflicting sibling partitions
2340 * have to be marked as invalid too.
2341 */
2342 invalidate = true;
2343 rcu_read_lock();
2344 cpuset_for_each_child(cp, css, parent) {
2345 struct cpumask *xcpus = user_xcpus(trialcs);
2346
2347 if (is_partition_valid(cp) &&
2348 cpumask_intersects(xcpus, cp->effective_xcpus)) {
2349 rcu_read_unlock();
2350 update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp);
2351 rcu_read_lock();
2352 }
2353 }
2354 rcu_read_unlock();
2355 retval = 0;
2356 }
2357
2358 if (retval < 0)
2359 goto out_free;
2360
2361 if (is_partition_valid(cs) ||
2362 (is_partition_invalid(cs) && !invalidate)) {
2363 struct cpumask *xcpus = trialcs->effective_xcpus;
2364
2365 if (cpumask_empty(xcpus) && is_partition_invalid(cs))
2366 xcpus = trialcs->cpus_allowed;
2367
2368 /*
2369 * Call remote_cpus_update() to handle valid remote partition
2370 */
2371 if (is_remote_partition(cs))
2372 remote_cpus_update(cs, NULL, xcpus, &tmp);
2373 else if (invalidate)
2374 update_parent_effective_cpumask(cs, partcmd_invalidate,
2375 NULL, &tmp);
2376 else
2377 update_parent_effective_cpumask(cs, partcmd_update,
2378 xcpus, &tmp);
2379 }
2380
2381 spin_lock_irq(&callback_lock);
2382 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2383 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2384 if ((old_prs > 0) && !is_partition_valid(cs))
2385 reset_partition_data(cs);
2386 spin_unlock_irq(&callback_lock);
2387
2388 /* effective_cpus/effective_xcpus will be updated here */
2389 update_cpumasks_hier(cs, &tmp, force);
2390
2391 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2392 if (cs->partition_root_state)
2393 update_partition_sd_lb(cs, old_prs);
2394 out_free:
2395 free_cpumasks(NULL, &tmp);
2396 return retval;
2397 }
2398
2399 /**
2400 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2401 * @cs: the cpuset to consider
2402 * @trialcs: trial cpuset
2403 * @buf: buffer of cpu numbers written to this cpuset
2404 *
2405 * The tasks' cpumask will be updated if cs is a valid partition root.
2406 */
2407 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2408 const char *buf)
2409 {
2410 int retval;
2411 struct tmpmasks tmp;
2412 struct cpuset *parent = parent_cs(cs);
2413 bool invalidate = false;
2414 bool force = false;
2415 int old_prs = cs->partition_root_state;
2416
2417 if (!*buf) {
2418 cpumask_clear(trialcs->exclusive_cpus);
2419 cpumask_clear(trialcs->effective_xcpus);
2420 } else {
2421 retval = cpulist_parse(buf, trialcs->exclusive_cpus);
2422 if (retval < 0)
2423 return retval;
2424 }
2425
2426 /* Nothing to do if the CPUs didn't change */
2427 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2428 return 0;
2429
2430 if (*buf) {
2431 trialcs->partition_root_state = PRS_MEMBER;
2432 /*
2433  * Reject the change if the exclusive CPUs conflict with those
2434  * of the siblings.
2435 */
2436 if (compute_effective_exclusive_cpumask(trialcs, NULL, cs))
2437 return -EINVAL;
2438 }
2439
2440 /*
2441 * Check all the descendants in update_cpumasks_hier() if
2442 * effective_xcpus is to be changed.
2443 */
2444 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2445
2446 retval = validate_change(cs, trialcs);
2447 if (retval)
2448 return retval;
2449
2450 if (alloc_cpumasks(NULL, &tmp))
2451 return -ENOMEM;
2452
2453 if (old_prs) {
2454 if (cpumask_empty(trialcs->effective_xcpus)) {
2455 invalidate = true;
2456 cs->prs_err = PERR_INVCPUS;
2457 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2458 invalidate = true;
2459 cs->prs_err = PERR_HKEEPING;
2460 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2461 invalidate = true;
2462 cs->prs_err = PERR_NOCPUS;
2463 }
2464
2465 if (is_remote_partition(cs)) {
2466 if (invalidate)
2467 remote_partition_disable(cs, &tmp);
2468 else
2469 remote_cpus_update(cs, trialcs->exclusive_cpus,
2470 trialcs->effective_xcpus, &tmp);
2471 } else if (invalidate) {
2472 update_parent_effective_cpumask(cs, partcmd_invalidate,
2473 NULL, &tmp);
2474 } else {
2475 update_parent_effective_cpumask(cs, partcmd_update,
2476 trialcs->effective_xcpus, &tmp);
2477 }
2478 }
2479 spin_lock_irq(&callback_lock);
2480 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2481 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2482 if ((old_prs > 0) && !is_partition_valid(cs))
2483 reset_partition_data(cs);
2484 spin_unlock_irq(&callback_lock);
2485
2486 /*
2487 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2488 * of the subtree when it is a valid partition root or effective_xcpus
2489 * is updated.
2490 */
2491 if (is_partition_valid(cs) || force)
2492 update_cpumasks_hier(cs, &tmp, force);
2493
2494 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2495 if (cs->partition_root_state)
2496 update_partition_sd_lb(cs, old_prs);
2497
2498 free_cpumasks(NULL, &tmp);
2499 return 0;
2500 }
2501
2502 /*
2503 * Migrate memory region from one set of nodes to another. This is
2504 * performed asynchronously as it can be called from process migration path
2505 * holding locks involved in process management. All mm migrations are
2506 * performed in the queued order and can be waited for by flushing
2507 * cpuset_migrate_mm_wq.
2508 */
2509
2510 struct cpuset_migrate_mm_work {
2511 struct work_struct work;
2512 struct mm_struct *mm;
2513 nodemask_t from;
2514 nodemask_t to;
2515 };
2516
2517 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2518 {
2519 struct cpuset_migrate_mm_work *mwork =
2520 container_of(work, struct cpuset_migrate_mm_work, work);
2521
2522 /* on a wq worker, no need to worry about %current's mems_allowed */
2523 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2524 mmput(mwork->mm);
2525 kfree(mwork);
2526 }
2527
2528 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2529 const nodemask_t *to)
2530 {
2531 struct cpuset_migrate_mm_work *mwork;
2532
2533 if (nodes_equal(*from, *to)) {
2534 mmput(mm);
2535 return;
2536 }
2537
2538 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
2539 if (mwork) {
2540 mwork->mm = mm;
2541 mwork->from = *from;
2542 mwork->to = *to;
2543 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2544 queue_work(cpuset_migrate_mm_wq, &mwork->work);
2545 } else {
2546 mmput(mm);
2547 }
2548 }
2549
2550 static void cpuset_post_attach(void)
2551 {
2552 flush_workqueue(cpuset_migrate_mm_wq);
2553 }
2554
2555 /*
2556 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2557 * @tsk: the task to change
2558 * @newmems: new nodes that the task will be set
2559 *
2560 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2561  * and rebind the task's mempolicy, if any. If the task is allocating in
2562 * parallel, it might temporarily see an empty intersection, which results in
2563 * a seqlock check and retry before OOM or allocation failure.
2564 */
2565 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2566 nodemask_t *newmems)
2567 {
2568 task_lock(tsk);
2569
2570 local_irq_disable();
2571 write_seqcount_begin(&tsk->mems_allowed_seq);
2572
2573 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2574 mpol_rebind_task(tsk, newmems);
2575 tsk->mems_allowed = *newmems;
2576
2577 write_seqcount_end(&tsk->mems_allowed_seq);
2578 local_irq_enable();
2579
2580 task_unlock(tsk);
2581 }
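/*
 * The writer side above pairs with seqcount readers in the allocation
 * paths. A minimal sketch of the reader pattern (illustrative only; the
 * real helpers are read_mems_allowed_begin()/read_mems_allowed_retry()):
 *
 *	unsigned int seq;
 *	do {
 *		seq = read_seqcount_begin(&current->mems_allowed_seq);
 *		... allocate using current->mems_allowed ...
 *	} while (read_seqcount_retry(&current->mems_allowed_seq, seq));
 */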
2582
2583 static void *cpuset_being_rebound;
2584
2585 /**
2586 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2587 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2588 *
2589 * Iterate through each task of @cs updating its mems_allowed to the
2590 * effective cpuset's. As this function is called with cpuset_mutex held,
2591 * cpuset membership stays stable.
2592 */
2593 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2594 {
2595 static nodemask_t newmems; /* protected by cpuset_mutex */
2596 struct css_task_iter it;
2597 struct task_struct *task;
2598
2599 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2600
2601 guarantee_online_mems(cs, &newmems);
2602
2603 /*
2604 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2605 * take while holding tasklist_lock. Forks can happen - the
2606 * mpol_dup() cpuset_being_rebound check will catch such forks,
2607 * and rebind their vma mempolicies too. Because we still hold
2608 * the global cpuset_mutex, we know that no other rebind effort
2609 * will be contending for the global variable cpuset_being_rebound.
2610 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2611 * is idempotent. Also migrate pages in each mm to new nodes.
2612 */
2613 css_task_iter_start(&cs->css, 0, &it);
2614 while ((task = css_task_iter_next(&it))) {
2615 struct mm_struct *mm;
2616 bool migrate;
2617
2618 cpuset_change_task_nodemask(task, &newmems);
2619
2620 mm = get_task_mm(task);
2621 if (!mm)
2622 continue;
2623
2624 migrate = is_memory_migrate(cs);
2625
2626 mpol_rebind_mm(mm, &cs->mems_allowed);
2627 if (migrate)
2628 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2629 else
2630 mmput(mm);
2631 }
2632 css_task_iter_end(&it);
2633
2634 /*
2635 * All the tasks' nodemasks have been updated, update
2636 * cs->old_mems_allowed.
2637 */
2638 cs->old_mems_allowed = newmems;
2639
2640 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2641 cpuset_being_rebound = NULL;
2642 }
2643
2644 /*
2645 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2646 * @cs: the cpuset to consider
2647 * @new_mems: a temp variable for calculating new effective_mems
2648 *
2649 * When configured nodemask is changed, the effective nodemasks of this cpuset
2650 * and all its descendants need to be updated.
2651 *
2652  * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2653 *
2654 * Called with cpuset_mutex held
2655 */
2656 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2657 {
2658 struct cpuset *cp;
2659 struct cgroup_subsys_state *pos_css;
2660
2661 rcu_read_lock();
2662 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2663 struct cpuset *parent = parent_cs(cp);
2664
2665 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2666
2667 /*
2668 * If it becomes empty, inherit the effective mask of the
2669 * parent, which is guaranteed to have some MEMs.
2670 */
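/*
 * Hypothetical example: mems_allowed = 1-2 with a parent whose
 * effective_mems = 0-1 yields effective_mems = 1; if mems_allowed were
 * node 3 only, the intersection would be empty and, in v2 mode, the
 * cpuset would fall back to the parent's 0-1 just below.
 */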
2671 if (is_in_v2_mode() && nodes_empty(*new_mems))
2672 *new_mems = parent->effective_mems;
2673
2674 /* Skip the whole subtree if the nodemask remains the same. */
2675 if (nodes_equal(*new_mems, cp->effective_mems)) {
2676 pos_css = css_rightmost_descendant(pos_css);
2677 continue;
2678 }
2679
2680 if (!css_tryget_online(&cp->css))
2681 continue;
2682 rcu_read_unlock();
2683
2684 spin_lock_irq(&callback_lock);
2685 cp->effective_mems = *new_mems;
2686 spin_unlock_irq(&callback_lock);
2687
2688 WARN_ON(!is_in_v2_mode() &&
2689 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2690
2691 cpuset_update_tasks_nodemask(cp);
2692
2693 rcu_read_lock();
2694 css_put(&cp->css);
2695 }
2696 rcu_read_unlock();
2697 }
2698
2699 /*
2700 * Handle user request to change the 'mems' memory placement
2701 * of a cpuset. Needs to validate the request, update the
2702  * cpuset's mems_allowed, and for each task in the cpuset,
2703  * update mems_allowed, rebind the task's mempolicy and any vma
2704  * mempolicies, and if the cpuset is marked 'memory_migrate',
2705  * migrate the task's pages to the new memory.
2706 *
2707 * Call with cpuset_mutex held. May take callback_lock during call.
2708 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2709  * lock each such task's mm->mmap_lock, scan its vma's and rebind
2710  * their mempolicies to the cpuset's new mems_allowed.
2711 */
2712 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2713 const char *buf)
2714 {
2715 int retval;
2716
2717 /*
2718  * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2719 * it's read-only
2720 */
2721 if (cs == &top_cpuset) {
2722 retval = -EACCES;
2723 goto done;
2724 }
2725
2726 /*
2727 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2728 * Since nodelist_parse() fails on an empty mask, we special case
2729 * that parsing. The validate_change() call ensures that cpusets
2730 * with tasks have memory.
2731 */
2732 if (!*buf) {
2733 nodes_clear(trialcs->mems_allowed);
2734 } else {
2735 retval = nodelist_parse(buf, trialcs->mems_allowed);
2736 if (retval < 0)
2737 goto done;
2738
2739 if (!nodes_subset(trialcs->mems_allowed,
2740 top_cpuset.mems_allowed)) {
2741 retval = -EINVAL;
2742 goto done;
2743 }
2744 }
2745
2746 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2747 retval = 0; /* Too easy - nothing to do */
2748 goto done;
2749 }
2750 retval = validate_change(cs, trialcs);
2751 if (retval < 0)
2752 goto done;
2753
2754 check_insane_mems_config(&trialcs->mems_allowed);
2755
2756 spin_lock_irq(&callback_lock);
2757 cs->mems_allowed = trialcs->mems_allowed;
2758 spin_unlock_irq(&callback_lock);
2759
2760 /* use trialcs->mems_allowed as a temp variable */
2761 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2762 done:
2763 return retval;
2764 }
2765
2766 bool current_cpuset_is_being_rebound(void)
2767 {
2768 bool ret;
2769
2770 rcu_read_lock();
2771 ret = task_cs(current) == cpuset_being_rebound;
2772 rcu_read_unlock();
2773
2774 return ret;
2775 }
2776
2777 /*
2778 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2779 * bit: the bit to update (see cpuset_flagbits_t)
2780 * cs: the cpuset to update
2781 * turning_on: whether the flag is being set or cleared
2782 *
2783 * Call with cpuset_mutex held.
2784 */
2785
2786 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2787 int turning_on)
2788 {
2789 struct cpuset *trialcs;
2790 int balance_flag_changed;
2791 int spread_flag_changed;
2792 int err;
2793
2794 trialcs = alloc_trial_cpuset(cs);
2795 if (!trialcs)
2796 return -ENOMEM;
2797
2798 if (turning_on)
2799 set_bit(bit, &trialcs->flags);
2800 else
2801 clear_bit(bit, &trialcs->flags);
2802
2803 err = validate_change(cs, trialcs);
2804 if (err < 0)
2805 goto out;
2806
2807 balance_flag_changed = (is_sched_load_balance(cs) !=
2808 is_sched_load_balance(trialcs));
2809
2810 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2811 || (is_spread_page(cs) != is_spread_page(trialcs)));
2812
2813 spin_lock_irq(&callback_lock);
2814 cs->flags = trialcs->flags;
2815 spin_unlock_irq(&callback_lock);
2816
2817 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2818 if (cpuset_v2())
2819 cpuset_force_rebuild();
2820 else
2821 rebuild_sched_domains_locked();
2822 }
2823
2824 if (spread_flag_changed)
2825 cpuset1_update_tasks_flags(cs);
2826 out:
2827 free_cpuset(trialcs);
2828 return err;
2829 }
2830
2831 /**
2832 * update_prstate - update partition_root_state
2833 * @cs: the cpuset to update
2834 * @new_prs: new partition root state
2835 * Return: 0 if successful, != 0 if error
2836 *
2837 * Call with cpuset_mutex held.
2838 */
2839 static int update_prstate(struct cpuset *cs, int new_prs)
2840 {
2841 int err = PERR_NONE, old_prs = cs->partition_root_state;
2842 struct cpuset *parent = parent_cs(cs);
2843 struct tmpmasks tmpmask;
2844 bool isolcpus_updated = false;
2845
2846 if (old_prs == new_prs)
2847 return 0;
2848
2849 /*
2850 * Treat a previously invalid partition root as if it is a "member".
2851 */
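/*
 * Note (follows from the PRS_* definitions and the code below): an
 * invalid partition state is the negation of the corresponding valid
 * one, e.g. PRS_INVALID_ROOT == -PRS_ROOT, which is why transitions use
 * "new_prs = -old_prs" and is_prs_invalid() can simply test for a
 * negative value.
 */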
2852 if (new_prs && is_prs_invalid(old_prs))
2853 old_prs = PRS_MEMBER;
2854
2855 if (alloc_cpumasks(NULL, &tmpmask))
2856 return -ENOMEM;
2857
2858 err = update_partition_exclusive_flag(cs, new_prs);
2859 if (err)
2860 goto out;
2861
2862 if (!old_prs) {
2863 /*
2864  * cpus_allowed and exclusive_cpus cannot both be empty.
2865 */
2866 if (xcpus_empty(cs)) {
2867 err = PERR_CPUSEMPTY;
2868 goto out;
2869 }
2870
2871 /*
2872 * We don't support the creation of a new local partition with
2873 * a remote partition underneath it. This unsupported
2874 * setting can happen only if parent is the top_cpuset because
2875 * a remote partition cannot be created underneath an existing
2876 * local or remote partition.
2877 */
2878 if ((parent == &top_cpuset) &&
2879 cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
2880 err = PERR_REMOTE;
2881 goto out;
2882 }
2883
2884 /*
2885  * If the parent is a valid partition, enable a local partition.
2886 * Otherwise, enable a remote partition.
2887 */
2888 if (is_partition_valid(parent)) {
2889 enum partition_cmd cmd = (new_prs == PRS_ROOT)
2890 ? partcmd_enable : partcmd_enablei;
2891
2892 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2893 } else {
2894 err = remote_partition_enable(cs, new_prs, &tmpmask);
2895 }
2896 } else if (old_prs && new_prs) {
2897 /*
2898 * A change in load balance state only, no change in cpumasks.
2899 * Need to update isolated_cpus.
2900 */
2901 isolcpus_updated = true;
2902 } else {
2903 /*
2904 * Switching back to member is always allowed even if it
2905 * disables child partitions.
2906 */
2907 if (is_remote_partition(cs))
2908 remote_partition_disable(cs, &tmpmask);
2909 else
2910 update_parent_effective_cpumask(cs, partcmd_disable,
2911 NULL, &tmpmask);
2912
2913 /*
2914 * Invalidation of child partitions will be done in
2915 * update_cpumasks_hier().
2916 */
2917 }
2918 out:
2919 /*
2920 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2921 * happens.
2922 */
2923 if (err) {
2924 new_prs = -new_prs;
2925 update_partition_exclusive_flag(cs, new_prs);
2926 }
2927
2928 spin_lock_irq(&callback_lock);
2929 cs->partition_root_state = new_prs;
2930 WRITE_ONCE(cs->prs_err, err);
2931 if (!is_partition_valid(cs))
2932 reset_partition_data(cs);
2933 else if (isolcpus_updated)
2934 isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
2935 spin_unlock_irq(&callback_lock);
2936 update_unbound_workqueue_cpumask(isolcpus_updated);
2937
2938 /* Force update if switching back to member & update effective_xcpus */
2939 update_cpumasks_hier(cs, &tmpmask, !new_prs);
2940
2941 /* A newly created partition must have effective_xcpus set */
2942 WARN_ON_ONCE(!old_prs && (new_prs > 0)
2943 && cpumask_empty(cs->effective_xcpus));
2944
2945 /* Update sched domains and load balance flag */
2946 update_partition_sd_lb(cs, old_prs);
2947
2948 notify_partition_change(cs, old_prs);
2949 if (force_sd_rebuild)
2950 rebuild_sched_domains_locked();
2951 free_cpumasks(NULL, &tmpmask);
2952 return 0;
2953 }
2954
2955 static struct cpuset *cpuset_attach_old_cs;
2956
2957 /*
2958 * Check to see if a cpuset can accept a new task
2959 * For v1, cpus_allowed and mems_allowed can't be empty.
2960 * For v2, effective_cpus can't be empty.
2961 * Note that in v1, effective_cpus = cpus_allowed.
2962 */
2963 static int cpuset_can_attach_check(struct cpuset *cs)
2964 {
2965 if (cpumask_empty(cs->effective_cpus) ||
2966 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2967 return -ENOSPC;
2968 return 0;
2969 }
2970
2971 static void reset_migrate_dl_data(struct cpuset *cs)
2972 {
2973 cs->nr_migrate_dl_tasks = 0;
2974 cs->sum_migrate_dl_bw = 0;
2975 }
2976
2977 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2978 static int cpuset_can_attach(struct cgroup_taskset *tset)
2979 {
2980 struct cgroup_subsys_state *css;
2981 struct cpuset *cs, *oldcs;
2982 struct task_struct *task;
2983 bool cpus_updated, mems_updated;
2984 int ret;
2985
2986 /* used later by cpuset_attach() */
2987 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2988 oldcs = cpuset_attach_old_cs;
2989 cs = css_cs(css);
2990
2991 mutex_lock(&cpuset_mutex);
2992
2993 /* Check to see if task is allowed in the cpuset */
2994 ret = cpuset_can_attach_check(cs);
2995 if (ret)
2996 goto out_unlock;
2997
2998 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2999 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3000
3001 cgroup_taskset_for_each(task, css, tset) {
3002 ret = task_can_attach(task);
3003 if (ret)
3004 goto out_unlock;
3005
3006 /*
3007  * Skip the rights-over-task check in v2 when nothing changes;
3008  * migration permission derives from hierarchy ownership in
3009  * cgroup_procs_write_permission().
3010 */
3011 if (!cpuset_v2() || (cpus_updated || mems_updated)) {
3012 ret = security_task_setscheduler(task);
3013 if (ret)
3014 goto out_unlock;
3015 }
3016
3017 if (dl_task(task)) {
3018 cs->nr_migrate_dl_tasks++;
3019 cs->sum_migrate_dl_bw += task->dl.dl_bw;
3020 }
3021 }
3022
3023 if (!cs->nr_migrate_dl_tasks)
3024 goto out_success;
3025
3026 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3027 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3028
3029 if (unlikely(cpu >= nr_cpu_ids)) {
3030 reset_migrate_dl_data(cs);
3031 ret = -EINVAL;
3032 goto out_unlock;
3033 }
3034
3035 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3036 if (ret) {
3037 reset_migrate_dl_data(cs);
3038 goto out_unlock;
3039 }
3040 }
3041
3042 out_success:
3043 /*
3044  * Mark that attach is in progress. This makes validate_change() fail
3045 * changes which zero cpus/mems_allowed.
3046 */
3047 cs->attach_in_progress++;
3048 out_unlock:
3049 mutex_unlock(&cpuset_mutex);
3050 return ret;
3051 }
3052
3053 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3054 {
3055 struct cgroup_subsys_state *css;
3056 struct cpuset *cs;
3057
3058 cgroup_taskset_first(tset, &css);
3059 cs = css_cs(css);
3060
3061 mutex_lock(&cpuset_mutex);
3062 dec_attach_in_progress_locked(cs);
3063
3064 if (cs->nr_migrate_dl_tasks) {
3065 int cpu = cpumask_any(cs->effective_cpus);
3066
3067 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3068 reset_migrate_dl_data(cs);
3069 }
3070
3071 mutex_unlock(&cpuset_mutex);
3072 }
3073
3074 /*
3075 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3076  * but we can't allocate it dynamically there. Define it globally and
3077  * allocate it from cpuset_init().
3078 */
3079 static cpumask_var_t cpus_attach;
3080 static nodemask_t cpuset_attach_nodemask_to;
3081
3082 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3083 {
3084 lockdep_assert_held(&cpuset_mutex);
3085
3086 if (cs != &top_cpuset)
3087 guarantee_online_cpus(task, cpus_attach);
3088 else
3089 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3090 subpartitions_cpus);
3091 /*
3092 * can_attach beforehand should guarantee that this doesn't
3093 * fail. TODO: have a better way to handle failure here
3094 */
3095 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3096
3097 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3098 cpuset1_update_task_spread_flags(cs, task);
3099 }
3100
3101 static void cpuset_attach(struct cgroup_taskset *tset)
3102 {
3103 struct task_struct *task;
3104 struct task_struct *leader;
3105 struct cgroup_subsys_state *css;
3106 struct cpuset *cs;
3107 struct cpuset *oldcs = cpuset_attach_old_cs;
3108 bool cpus_updated, mems_updated;
3109
3110 cgroup_taskset_first(tset, &css);
3111 cs = css_cs(css);
3112
3113 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
3114 mutex_lock(&cpuset_mutex);
3115 cpus_updated = !cpumask_equal(cs->effective_cpus,
3116 oldcs->effective_cpus);
3117 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3118
3119 /*
3120 * In the default hierarchy, enabling cpuset in the child cgroups
3121 * will trigger a number of cpuset_attach() calls with no change
3122 * in effective cpus and mems. In that case, we can optimize out
3123 * by skipping the task iteration and update.
3124 */
3125 if (cpuset_v2() && !cpus_updated && !mems_updated) {
3126 cpuset_attach_nodemask_to = cs->effective_mems;
3127 goto out;
3128 }
3129
3130 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3131
3132 cgroup_taskset_for_each(task, css, tset)
3133 cpuset_attach_task(cs, task);
3134
3135 /*
3136 * Change mm for all threadgroup leaders. This is expensive and may
3137 * sleep and should be moved outside migration path proper. Skip it
3138 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3139 * not set.
3140 */
3141 cpuset_attach_nodemask_to = cs->effective_mems;
3142 if (!is_memory_migrate(cs) && !mems_updated)
3143 goto out;
3144
3145 cgroup_taskset_for_each_leader(leader, css, tset) {
3146 struct mm_struct *mm = get_task_mm(leader);
3147
3148 if (mm) {
3149 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3150
3151 /*
3152  * old_mems_allowed is the same as mems_allowed
3153  * here, except if this task is being moved
3154  * automatically due to hotplug. In that case
3155  * @mems_allowed has been updated and is empty, so
3156  * @old_mems_allowed is the right nodemask to
3157  * migrate the mm from.
3158 */
3159 if (is_memory_migrate(cs))
3160 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3161 &cpuset_attach_nodemask_to);
3162 else
3163 mmput(mm);
3164 }
3165 }
3166
3167 out:
3168 cs->old_mems_allowed = cpuset_attach_nodemask_to;
3169
3170 if (cs->nr_migrate_dl_tasks) {
3171 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3172 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3173 reset_migrate_dl_data(cs);
3174 }
3175
3176 dec_attach_in_progress_locked(cs);
3177
3178 mutex_unlock(&cpuset_mutex);
3179 }
3180
3181 /*
3182 * Common handling for a write to a "cpus" or "mems" file.
3183 */
3184 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3185 char *buf, size_t nbytes, loff_t off)
3186 {
3187 struct cpuset *cs = css_cs(of_css(of));
3188 struct cpuset *trialcs;
3189 int retval = -ENODEV;
3190
3191 buf = strstrip(buf);
3192 cpus_read_lock();
3193 mutex_lock(&cpuset_mutex);
3194 if (!is_cpuset_online(cs))
3195 goto out_unlock;
3196
3197 trialcs = alloc_trial_cpuset(cs);
3198 if (!trialcs) {
3199 retval = -ENOMEM;
3200 goto out_unlock;
3201 }
3202
3203 switch (of_cft(of)->private) {
3204 case FILE_CPULIST:
3205 retval = update_cpumask(cs, trialcs, buf);
3206 break;
3207 case FILE_EXCLUSIVE_CPULIST:
3208 retval = update_exclusive_cpumask(cs, trialcs, buf);
3209 break;
3210 case FILE_MEMLIST:
3211 retval = update_nodemask(cs, trialcs, buf);
3212 break;
3213 default:
3214 retval = -EINVAL;
3215 break;
3216 }
3217
3218 free_cpuset(trialcs);
3219 if (force_sd_rebuild)
3220 rebuild_sched_domains_locked();
3221 out_unlock:
3222 mutex_unlock(&cpuset_mutex);
3223 cpus_read_unlock();
3224 flush_workqueue(cpuset_migrate_mm_wq);
3225 return retval ?: nbytes;
3226 }
3227
3228 /*
3229 * These ascii lists should be read in a single call, by using a user
3230 * buffer large enough to hold the entire map. If read in smaller
3231 * chunks, there is no guarantee of atomicity. Since the display format
3232 * used, list of ranges of sequential numbers, is variable length,
3233 * and since these maps can change value dynamically, one could read
3234 * gibberish by doing partial reads while a list was changing.
3235 */
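/*
 * A minimal userspace sketch of the single-read advice above (illustrative
 * only; it assumes a cgroup v2 hierarchy mounted at /sys/fs/cgroup, a
 * hypothetical cgroup named "mygrp", and <fcntl.h>, <unistd.h>, <stdio.h>):
 *
 *	char buf[4096];
 *	int fd = open("/sys/fs/cgroup/mygrp/cpuset.cpus.effective", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("%s", buf);
 *	}
 *	close(fd);
 */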
3236 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3237 {
3238 struct cpuset *cs = css_cs(seq_css(sf));
3239 cpuset_filetype_t type = seq_cft(sf)->private;
3240 int ret = 0;
3241
3242 spin_lock_irq(&callback_lock);
3243
3244 switch (type) {
3245 case FILE_CPULIST:
3246 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3247 break;
3248 case FILE_MEMLIST:
3249 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3250 break;
3251 case FILE_EFFECTIVE_CPULIST:
3252 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3253 break;
3254 case FILE_EFFECTIVE_MEMLIST:
3255 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3256 break;
3257 case FILE_EXCLUSIVE_CPULIST:
3258 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3259 break;
3260 case FILE_EFFECTIVE_XCPULIST:
3261 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3262 break;
3263 case FILE_SUBPARTS_CPULIST:
3264 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3265 break;
3266 case FILE_ISOLATED_CPULIST:
3267 seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3268 break;
3269 default:
3270 ret = -EINVAL;
3271 }
3272
3273 spin_unlock_irq(&callback_lock);
3274 return ret;
3275 }
3276
3277 static int cpuset_partition_show(struct seq_file *seq, void *v)
3278 {
3279 struct cpuset *cs = css_cs(seq_css(seq));
3280 const char *err, *type = NULL;
3281
3282 switch (cs->partition_root_state) {
3283 case PRS_ROOT:
3284 seq_puts(seq, "root\n");
3285 break;
3286 case PRS_ISOLATED:
3287 seq_puts(seq, "isolated\n");
3288 break;
3289 case PRS_MEMBER:
3290 seq_puts(seq, "member\n");
3291 break;
3292 case PRS_INVALID_ROOT:
3293 type = "root";
3294 fallthrough;
3295 case PRS_INVALID_ISOLATED:
3296 if (!type)
3297 type = "isolated";
3298 err = perr_strings[READ_ONCE(cs->prs_err)];
3299 if (err)
3300 seq_printf(seq, "%s invalid (%s)\n", type, err);
3301 else
3302 seq_printf(seq, "%s invalid\n", type);
3303 break;
3304 }
3305 return 0;
3306 }
3307
3308 static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3309 size_t nbytes, loff_t off)
3310 {
3311 struct cpuset *cs = css_cs(of_css(of));
3312 int val;
3313 int retval = -ENODEV;
3314
3315 buf = strstrip(buf);
3316
3317 if (!strcmp(buf, "root"))
3318 val = PRS_ROOT;
3319 else if (!strcmp(buf, "member"))
3320 val = PRS_MEMBER;
3321 else if (!strcmp(buf, "isolated"))
3322 val = PRS_ISOLATED;
3323 else
3324 return -EINVAL;
3325
3326 css_get(&cs->css);
3327 cpus_read_lock();
3328 mutex_lock(&cpuset_mutex);
3329 if (is_cpuset_online(cs))
3330 retval = update_prstate(cs, val);
3331 mutex_unlock(&cpuset_mutex);
3332 cpus_read_unlock();
3333 css_put(&cs->css);
3334 return retval ?: nbytes;
3335 }
3336
3337 /*
3338 * This is currently a minimal set for the default hierarchy. It can be
3339 * expanded later on by migrating more features and control files from v1.
3340 */
3341 static struct cftype dfl_files[] = {
3342 {
3343 .name = "cpus",
3344 .seq_show = cpuset_common_seq_show,
3345 .write = cpuset_write_resmask,
3346 .max_write_len = (100U + 6 * NR_CPUS),
3347 .private = FILE_CPULIST,
3348 .flags = CFTYPE_NOT_ON_ROOT,
3349 },
3350
3351 {
3352 .name = "mems",
3353 .seq_show = cpuset_common_seq_show,
3354 .write = cpuset_write_resmask,
3355 .max_write_len = (100U + 6 * MAX_NUMNODES),
3356 .private = FILE_MEMLIST,
3357 .flags = CFTYPE_NOT_ON_ROOT,
3358 },
3359
3360 {
3361 .name = "cpus.effective",
3362 .seq_show = cpuset_common_seq_show,
3363 .private = FILE_EFFECTIVE_CPULIST,
3364 },
3365
3366 {
3367 .name = "mems.effective",
3368 .seq_show = cpuset_common_seq_show,
3369 .private = FILE_EFFECTIVE_MEMLIST,
3370 },
3371
3372 {
3373 .name = "cpus.partition",
3374 .seq_show = cpuset_partition_show,
3375 .write = cpuset_partition_write,
3376 .private = FILE_PARTITION_ROOT,
3377 .flags = CFTYPE_NOT_ON_ROOT,
3378 .file_offset = offsetof(struct cpuset, partition_file),
3379 },
3380
3381 {
3382 .name = "cpus.exclusive",
3383 .seq_show = cpuset_common_seq_show,
3384 .write = cpuset_write_resmask,
3385 .max_write_len = (100U + 6 * NR_CPUS),
3386 .private = FILE_EXCLUSIVE_CPULIST,
3387 .flags = CFTYPE_NOT_ON_ROOT,
3388 },
3389
3390 {
3391 .name = "cpus.exclusive.effective",
3392 .seq_show = cpuset_common_seq_show,
3393 .private = FILE_EFFECTIVE_XCPULIST,
3394 .flags = CFTYPE_NOT_ON_ROOT,
3395 },
3396
3397 {
3398 .name = "cpus.subpartitions",
3399 .seq_show = cpuset_common_seq_show,
3400 .private = FILE_SUBPARTS_CPULIST,
3401 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3402 },
3403
3404 {
3405 .name = "cpus.isolated",
3406 .seq_show = cpuset_common_seq_show,
3407 .private = FILE_ISOLATED_CPULIST,
3408 .flags = CFTYPE_ONLY_ON_ROOT,
3409 },
3410
3411 { } /* terminate */
3412 };
3413
3414
3415 /**
3416 * cpuset_css_alloc - Allocate a cpuset css
3417 * @parent_css: Parent css of the control group that the new cpuset will be
3418 * part of
3419 * Return: cpuset css on success, -ENOMEM on failure.
3420 *
3421  * Allocate and initialize a new cpuset css for non-NULL @parent_css; return
3422  * the top cpuset css otherwise.
3423 */
3424 static struct cgroup_subsys_state *
3425 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3426 {
3427 struct cpuset *cs;
3428
3429 if (!parent_css)
3430 return &top_cpuset.css;
3431
3432 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3433 if (!cs)
3434 return ERR_PTR(-ENOMEM);
3435
3436 if (alloc_cpumasks(cs, NULL)) {
3437 kfree(cs);
3438 return ERR_PTR(-ENOMEM);
3439 }
3440
3441 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3442 fmeter_init(&cs->fmeter);
3443 cs->relax_domain_level = -1;
3444 INIT_LIST_HEAD(&cs->remote_sibling);
3445
3446 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3447 if (cpuset_v2())
3448 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3449
3450 return &cs->css;
3451 }
3452
3453 static int cpuset_css_online(struct cgroup_subsys_state *css)
3454 {
3455 struct cpuset *cs = css_cs(css);
3456 struct cpuset *parent = parent_cs(cs);
3457 struct cpuset *tmp_cs;
3458 struct cgroup_subsys_state *pos_css;
3459
3460 if (!parent)
3461 return 0;
3462
3463 cpus_read_lock();
3464 mutex_lock(&cpuset_mutex);
3465
3466 set_bit(CS_ONLINE, &cs->flags);
3467 if (is_spread_page(parent))
3468 set_bit(CS_SPREAD_PAGE, &cs->flags);
3469 if (is_spread_slab(parent))
3470 set_bit(CS_SPREAD_SLAB, &cs->flags);
3471 /*
3472 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3473 */
3474 if (cpuset_v2() && !is_sched_load_balance(parent))
3475 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3476
3477 cpuset_inc();
3478
3479 spin_lock_irq(&callback_lock);
3480 if (is_in_v2_mode()) {
3481 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3482 cs->effective_mems = parent->effective_mems;
3483 }
3484 spin_unlock_irq(&callback_lock);
3485
3486 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3487 goto out_unlock;
3488
3489 /*
3490 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3491 * set. This flag handling is implemented in cgroup core for
3492 * historical reasons - the flag may be specified during mount.
3493 *
3494 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3495  * refuse to clone the configuration - thereby refusing to let the
3496  * task enter, and as a result failing the sys_unshare() or
3497 * clone() which initiated it. If this becomes a problem for some
3498 * users who wish to allow that scenario, then this could be
3499 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3500 * (and likewise for mems) to the new cgroup.
3501 */
3502 rcu_read_lock();
3503 cpuset_for_each_child(tmp_cs, pos_css, parent) {
3504 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3505 rcu_read_unlock();
3506 goto out_unlock;
3507 }
3508 }
3509 rcu_read_unlock();
3510
3511 spin_lock_irq(&callback_lock);
3512 cs->mems_allowed = parent->mems_allowed;
3513 cs->effective_mems = parent->mems_allowed;
3514 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3515 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3516 spin_unlock_irq(&callback_lock);
3517 out_unlock:
3518 mutex_unlock(&cpuset_mutex);
3519 cpus_read_unlock();
3520 return 0;
3521 }
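/*
 * Illustrative note (legacy hierarchy only): CGRP_CPUSET_CLONE_CHILDREN
 * is typically enabled by writing 1 to the parent's cgroup.clone_children
 * file (or at mount time, as noted above), after which each newly created
 * sibling cpuset starts out with the parent's cpus_allowed and
 * mems_allowed copied in as done in cpuset_css_online().
 */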
3522
3523 /*
3524 * If the cpuset being removed has its flag 'sched_load_balance'
3525 * enabled, then simulate turning sched_load_balance off, which
3526 * will call rebuild_sched_domains_locked(). That is not needed
3527 * in the default hierarchy where only changes in partition
3528 * will cause repartitioning.
3529 *
3530 * If the cpuset has the 'sched.partition' flag enabled, simulate
3531 * turning 'sched.partition' off.
3532 */
3533
3534 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3535 {
3536 struct cpuset *cs = css_cs(css);
3537
3538 cpus_read_lock();
3539 mutex_lock(&cpuset_mutex);
3540
3541 if (!cpuset_v2() && is_sched_load_balance(cs))
3542 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3543
3544 cpuset_dec();
3545 clear_bit(CS_ONLINE, &cs->flags);
3546
3547 mutex_unlock(&cpuset_mutex);
3548 cpus_read_unlock();
3549 }
3550
3551 static void cpuset_css_killed(struct cgroup_subsys_state *css)
3552 {
3553 struct cpuset *cs = css_cs(css);
3554
3555 cpus_read_lock();
3556 mutex_lock(&cpuset_mutex);
3557
3558 /* Reset valid partition back to member */
3559 if (is_partition_valid(cs))
3560 update_prstate(cs, PRS_MEMBER);
3561
3562 mutex_unlock(&cpuset_mutex);
3563 cpus_read_unlock();
3564
3565 }
3566
3567 static void cpuset_css_free(struct cgroup_subsys_state *css)
3568 {
3569 struct cpuset *cs = css_cs(css);
3570
3571 free_cpuset(cs);
3572 }
3573
3574 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3575 {
3576 mutex_lock(&cpuset_mutex);
3577 spin_lock_irq(&callback_lock);
3578
3579 if (is_in_v2_mode()) {
3580 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3581 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3582 top_cpuset.mems_allowed = node_possible_map;
3583 } else {
3584 cpumask_copy(top_cpuset.cpus_allowed,
3585 top_cpuset.effective_cpus);
3586 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3587 }
3588
3589 spin_unlock_irq(&callback_lock);
3590 mutex_unlock(&cpuset_mutex);
3591 }
3592
3593 /*
3594 * In case the child is cloned into a cpuset different from its parent,
3595 * additional checks are done to see if the move is allowed.
3596 */
3597 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3598 {
3599 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3600 bool same_cs;
3601 int ret;
3602
3603 rcu_read_lock();
3604 same_cs = (cs == task_cs(current));
3605 rcu_read_unlock();
3606
3607 if (same_cs)
3608 return 0;
3609
3610 lockdep_assert_held(&cgroup_mutex);
3611 mutex_lock(&cpuset_mutex);
3612
3613 /* Check to see if task is allowed in the cpuset */
3614 ret = cpuset_can_attach_check(cs);
3615 if (ret)
3616 goto out_unlock;
3617
3618 ret = task_can_attach(task);
3619 if (ret)
3620 goto out_unlock;
3621
3622 ret = security_task_setscheduler(task);
3623 if (ret)
3624 goto out_unlock;
3625
3626 /*
3627 * Mark attach is in progress. This makes validate_change() fail
3628 * changes which zero cpus/mems_allowed.
3629 */
3630 cs->attach_in_progress++;
3631 out_unlock:
3632 mutex_unlock(&cpuset_mutex);
3633 return ret;
3634 }
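/*
 * Illustrative sketch (userspace side, not part of this file): the
 * can_fork/cancel_fork/fork callbacks here are exercised when a task is
 * cloned directly into another cpuset with CLONE_INTO_CGROUP, roughly:
 *
 *	int cgfd = open("/sys/fs/cgroup/mycpuset", O_RDONLY | O_DIRECTORY);
 *	struct clone_args args = {
 *		.flags	= CLONE_INTO_CGROUP,
 *		.cgroup	= cgfd,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *
 * "/sys/fs/cgroup/mycpuset" is a hypothetical cgroup path used only for
 * illustration; cpuset_can_fork() then performs the same permission and
 * validity checks that a cgroup migration would.
 */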
3635
3636 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3637 {
3638 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3639 bool same_cs;
3640
3641 rcu_read_lock();
3642 same_cs = (cs == task_cs(current));
3643 rcu_read_unlock();
3644
3645 if (same_cs)
3646 return;
3647
3648 dec_attach_in_progress(cs);
3649 }
3650
3651 /*
3652 * Make sure the new task conforms to the current state of its parent,
3653 * which could have been changed by cpuset just after it inherits the
3654 * state from the parent and before it sits on the cgroup's task list.
3655 */
3656 static void cpuset_fork(struct task_struct *task)
3657 {
3658 struct cpuset *cs;
3659 bool same_cs;
3660
3661 rcu_read_lock();
3662 cs = task_cs(task);
3663 same_cs = (cs == task_cs(current));
3664 rcu_read_unlock();
3665
3666 if (same_cs) {
3667 if (cs == &top_cpuset)
3668 return;
3669
3670 set_cpus_allowed_ptr(task, current->cpus_ptr);
3671 task->mems_allowed = current->mems_allowed;
3672 return;
3673 }
3674
3675 /* CLONE_INTO_CGROUP */
3676 mutex_lock(&cpuset_mutex);
3677 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3678 cpuset_attach_task(cs, task);
3679
3680 dec_attach_in_progress_locked(cs);
3681 mutex_unlock(&cpuset_mutex);
3682 }
3683
3684 struct cgroup_subsys cpuset_cgrp_subsys = {
3685 .css_alloc = cpuset_css_alloc,
3686 .css_online = cpuset_css_online,
3687 .css_offline = cpuset_css_offline,
3688 .css_killed = cpuset_css_killed,
3689 .css_free = cpuset_css_free,
3690 .can_attach = cpuset_can_attach,
3691 .cancel_attach = cpuset_cancel_attach,
3692 .attach = cpuset_attach,
3693 .post_attach = cpuset_post_attach,
3694 .bind = cpuset_bind,
3695 .can_fork = cpuset_can_fork,
3696 .cancel_fork = cpuset_cancel_fork,
3697 .fork = cpuset_fork,
3698 #ifdef CONFIG_CPUSETS_V1
3699 .legacy_cftypes = cpuset1_files,
3700 #endif
3701 .dfl_cftypes = dfl_files,
3702 .early_init = true,
3703 .threaded = true,
3704 };
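/*
 * Illustrative note (rough ordering, see cgroup core for the exact
 * sequence): for a cpuset these callbacks typically run as
 * css_alloc -> css_online when the cgroup is created,
 * can_attach -> attach -> post_attach (or can_fork -> fork for
 * CLONE_INTO_CGROUP) as tasks enter, and
 * css_killed -> css_offline -> css_free when the cgroup is removed.
 */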
3705
3706 /**
3707 * cpuset_init - initialize cpusets at system boot
3708 *
3709 * Description: Initialize top_cpuset
3710 **/
3711
3712 int __init cpuset_init(void)
3713 {
3714 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3715 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3716 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3717 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3718 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3719 BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3720
3721 cpumask_setall(top_cpuset.cpus_allowed);
3722 nodes_setall(top_cpuset.mems_allowed);
3723 cpumask_setall(top_cpuset.effective_cpus);
3724 cpumask_setall(top_cpuset.effective_xcpus);
3725 cpumask_setall(top_cpuset.exclusive_cpus);
3726 nodes_setall(top_cpuset.effective_mems);
3727
3728 fmeter_init(&top_cpuset.fmeter);
3729 INIT_LIST_HEAD(&remote_children);
3730
3731 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3732
3733 have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN);
3734 if (have_boot_isolcpus) {
3735 BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL));
3736 cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN));
3737 cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus);
3738 }
3739
3740 return 0;
3741 }
3742
3743 static void
3744 hotplug_update_tasks(struct cpuset *cs,
3745 struct cpumask *new_cpus, nodemask_t *new_mems,
3746 bool cpus_updated, bool mems_updated)
3747 {
3748 /* A partition root is allowed to have empty effective cpus */
3749 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3750 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3751 if (nodes_empty(*new_mems))
3752 *new_mems = parent_cs(cs)->effective_mems;
3753
3754 spin_lock_irq(&callback_lock);
3755 cpumask_copy(cs->effective_cpus, new_cpus);
3756 cs->effective_mems = *new_mems;
3757 spin_unlock_irq(&callback_lock);
3758
3759 if (cpus_updated)
3760 cpuset_update_tasks_cpumask(cs, new_cpus);
3761 if (mems_updated)
3762 cpuset_update_tasks_nodemask(cs);
3763 }
3764
3765 void cpuset_force_rebuild(void)
3766 {
3767 force_sd_rebuild = true;
3768 }
3769
3770 /**
3771 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3772 * @cs: cpuset in interest
3773 * @tmp: the tmpmasks structure pointer
3774 *
3775 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3776 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3777 * all its tasks are moved to the nearest ancestor with both resources.
3778 */
3779 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3780 {
3781 static cpumask_t new_cpus;
3782 static nodemask_t new_mems;
3783 bool cpus_updated;
3784 bool mems_updated;
3785 bool remote;
3786 int partcmd = -1;
3787 struct cpuset *parent;
3788 retry:
3789 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3790
3791 mutex_lock(&cpuset_mutex);
3792
3793 /*
3794 * We have raced with task attaching. We wait until attaching
3795 * is finished, so we won't attach a task to an empty cpuset.
3796 */
3797 if (cs->attach_in_progress) {
3798 mutex_unlock(&cpuset_mutex);
3799 goto retry;
3800 }
3801
3802 parent = parent_cs(cs);
3803 compute_effective_cpumask(&new_cpus, cs, parent);
3804 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3805
3806 if (!tmp || !cs->partition_root_state)
3807 goto update_tasks;
3808
3809 /*
3810 * Compute effective_cpus for a valid partition root. This may
3811 * invalidate child partition roots if necessary.
3812 */
3813 remote = is_remote_partition(cs);
3814 if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3815 compute_partition_effective_cpumask(cs, &new_cpus);
3816
3817 if (remote && cpumask_empty(&new_cpus) &&
3818 partition_is_populated(cs, NULL)) {
3819 cs->prs_err = PERR_HOTPLUG;
3820 remote_partition_disable(cs, tmp);
3821 compute_effective_cpumask(&new_cpus, cs, parent);
3822 remote = false;
3823 }
3824
3825 /*
3826 * Force the partition to become invalid if either of the
3827 * following conditions holds:
3828 * 1) empty effective cpus but not valid empty partition.
3829 * 2) parent is invalid or doesn't grant any cpus to child
3830 * partitions.
3831 */
3832 if (is_local_partition(cs) && (!is_partition_valid(parent) ||
3833 tasks_nocpu_error(parent, cs, &new_cpus)))
3834 partcmd = partcmd_invalidate;
3835 /*
3836 * On the other hand, an invalid partition root may be transitioned
3837 * back to a regular one.
3838 */
3839 else if (is_partition_valid(parent) && is_partition_invalid(cs))
3840 partcmd = partcmd_update;
3841
3842 if (partcmd >= 0) {
3843 update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3844 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3845 compute_partition_effective_cpumask(cs, &new_cpus);
3846 cpuset_force_rebuild();
3847 }
3848 }
3849
3850 update_tasks:
3851 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3852 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3853 if (!cpus_updated && !mems_updated)
3854 goto unlock; /* Hotplug doesn't affect this cpuset */
3855
3856 if (mems_updated)
3857 check_insane_mems_config(&new_mems);
3858
3859 if (is_in_v2_mode())
3860 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3861 cpus_updated, mems_updated);
3862 else
3863 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3864 cpus_updated, mems_updated);
3865
3866 unlock:
3867 mutex_unlock(&cpuset_mutex);
3868 }
3869
3870 /**
3871 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3872 *
3873 * This function is called after either CPU or memory configuration has
3874 * changed and updates cpuset accordingly. The top_cpuset is always
3875 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3876 * order to make cpusets transparent (of no effect) on systems that are
3877 * actively using CPU hotplug but making no active use of cpusets.
3878 *
3879 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3880 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3881 * all descendants.
3882 *
3883 * Note that CPU offlining during suspend is ignored. We don't modify
3884 * cpusets across suspend/resume cycles at all.
3885 *
3886 * CPU / memory hotplug is handled synchronously.
3887 */
3888 static void cpuset_handle_hotplug(void)
3889 {
3890 static cpumask_t new_cpus;
3891 static nodemask_t new_mems;
3892 bool cpus_updated, mems_updated;
3893 bool on_dfl = is_in_v2_mode();
3894 struct tmpmasks tmp, *ptmp = NULL;
3895
3896 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3897 ptmp = &tmp;
3898
3899 lockdep_assert_cpus_held();
3900 mutex_lock(&cpuset_mutex);
3901
3902 /* fetch the available cpus/mems and find out which changed how */
3903 cpumask_copy(&new_cpus, cpu_active_mask);
3904 new_mems = node_states[N_MEMORY];
3905
3906 /*
3907 * If subpartitions_cpus is populated, it is likely that the check
3908 * below will produce a false positive on cpus_updated when the cpu
3909 * list isn't changed. It is extra work, but it is better to be safe.
3910 */
3911 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3912 !cpumask_empty(subpartitions_cpus);
3913 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3914
3915 /* For v1, synchronize cpus_allowed to cpu_active_mask */
3916 if (cpus_updated) {
3917 cpuset_force_rebuild();
3918 spin_lock_irq(&callback_lock);
3919 if (!on_dfl)
3920 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3921 /*
3922 * Make sure that CPUs allocated to child partitions
3923 * do not show up in effective_cpus. If no CPU is left,
3924 * we clear the subpartitions_cpus & let the child partitions
3925 * fight for the CPUs again.
3926 */
3927 if (!cpumask_empty(subpartitions_cpus)) {
3928 if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3929 top_cpuset.nr_subparts = 0;
3930 cpumask_clear(subpartitions_cpus);
3931 } else {
3932 cpumask_andnot(&new_cpus, &new_cpus,
3933 subpartitions_cpus);
3934 }
3935 }
3936 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3937 spin_unlock_irq(&callback_lock);
3938 /* we don't mess with cpumasks of tasks in top_cpuset */
3939 }
3940
3941 /* synchronize mems_allowed to N_MEMORY */
3942 if (mems_updated) {
3943 spin_lock_irq(&callback_lock);
3944 if (!on_dfl)
3945 top_cpuset.mems_allowed = new_mems;
3946 top_cpuset.effective_mems = new_mems;
3947 spin_unlock_irq(&callback_lock);
3948 cpuset_update_tasks_nodemask(&top_cpuset);
3949 }
3950
3951 mutex_unlock(&cpuset_mutex);
3952
3953 /* if cpus or mems changed, we need to propagate to descendants */
3954 if (cpus_updated || mems_updated) {
3955 struct cpuset *cs;
3956 struct cgroup_subsys_state *pos_css;
3957
3958 rcu_read_lock();
3959 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3960 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3961 continue;
3962 rcu_read_unlock();
3963
3964 cpuset_hotplug_update_tasks(cs, ptmp);
3965
3966 rcu_read_lock();
3967 css_put(&cs->css);
3968 }
3969 rcu_read_unlock();
3970 }
3971
3972 /* rebuild sched domains if necessary */
3973 if (force_sd_rebuild)
3974 rebuild_sched_domains_cpuslocked();
3975
3976 free_cpumasks(NULL, ptmp);
3977 }
3978
3979 void cpuset_update_active_cpus(void)
3980 {
3981 /*
3982 * We're inside a cpu hotplug critical region which usually nests
3983 * inside cgroup synchronization, so cpus_read_lock is already held.
3984 * Hotplug processing is handled synchronously by cpuset_handle_hotplug().
3985 */
3986 cpuset_handle_hotplug();
3987 }
3988
3989 /*
3990 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3991 * Call this routine anytime after node_states[N_MEMORY] changes.
3992 * See cpuset_update_active_cpus() for CPU hotplug handling.
3993 */
3994 static int cpuset_track_online_nodes(struct notifier_block *self,
3995 unsigned long action, void *arg)
3996 {
3997 cpuset_handle_hotplug();
3998 return NOTIFY_OK;
3999 }
4000
4001 /**
4002 * cpuset_init_smp - initialize cpus_allowed
4003 *
4004 * Description: Finish top cpuset after cpu, node maps are initialized
4005 */
4006 void __init cpuset_init_smp(void)
4007 {
4008 /*
4009 * cpus_allowed/mems_allowed set to v2 values in the initial
4010 * cpuset_bind() call will be reset to v1 values in another
4011 * cpuset_bind() call when v1 cpuset is mounted.
4012 */
4013 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
4014
4015 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
4016 top_cpuset.effective_mems = node_states[N_MEMORY];
4017
4018 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
4019
4020 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
4021 BUG_ON(!cpuset_migrate_mm_wq);
4022 }
4023
4024 /**
4025 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
4026 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4027 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4028 *
4029 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
4030 * attached to the specified @tsk. Guaranteed to return some non-empty
4031 * subset of cpu_online_mask, even if this means going outside the
4032 * task's cpuset, except when the task is in the top cpuset.
4033 **/
4034
4035 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
4036 {
4037 unsigned long flags;
4038 struct cpuset *cs;
4039
4040 spin_lock_irqsave(&callback_lock, flags);
4041 rcu_read_lock();
4042
4043 cs = task_cs(tsk);
4044 if (cs != &top_cpuset)
4045 guarantee_online_cpus(tsk, pmask);
4046 /*
4047 * Tasks in the top cpuset won't have their cpumasks updated
4048 * when a hotplug online/offline event happens. So we include all
4049 * offline cpus in the allowed cpu list.
4050 */
4051 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4052 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4053
4054 /*
4055 * We first exclude cpus allocated to partitions. If there is no
4056 * allowable online cpu left, we fall back to all possible cpus.
4057 */
4058 cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4059 if (!cpumask_intersects(pmask, cpu_online_mask))
4060 cpumask_copy(pmask, possible_mask);
4061 }
4062
4063 rcu_read_unlock();
4064 spin_unlock_irqrestore(&callback_lock, flags);
4065 }
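/*
 * Illustrative sketch (hypothetical in-kernel caller, not code from this
 * file): a typical user copies the result into a temporary cpumask before
 * restricting a task's affinity, relying on the non-empty guarantee above:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpuset_cpus_allowed(p, mask);
 *	cpumask_and(mask, mask, requested);
 *	ret = set_cpus_allowed_ptr(p, mask);
 *	free_cpumask_var(mask);
 *
 * "p", "requested" and "ret" are placeholders for the caller's own state.
 */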
4066
4067 /**
4068 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4069 * @tsk: pointer to task_struct with which the scheduler is struggling
4070 *
4071 * Description: In the case that the scheduler cannot find an allowed cpu in
4072 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4073 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4074 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4075 * This is the absolute last resort for the scheduler and it is only used if
4076 * _every_ other avenue has been traveled.
4077 *
4078 * Returns true if the affinity of @tsk was changed, false otherwise.
4079 **/
4080
4081 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4082 {
4083 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4084 const struct cpumask *cs_mask;
4085 bool changed = false;
4086
4087 rcu_read_lock();
4088 cs_mask = task_cs(tsk)->cpus_allowed;
4089 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4090 do_set_cpus_allowed(tsk, cs_mask);
4091 changed = true;
4092 }
4093 rcu_read_unlock();
4094
4095 /*
4096 * We own tsk->cpus_allowed, nobody can change it under us.
4097 *
4098 * But we accessed cs and cs->cpus_allowed locklessly and thus can
4099 * race with cgroup_attach_task() or update_cpumask() and get
4100 * the wrong tsk->cpus_allowed. However, both cases imply the
4101 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4102 * which takes task_rq_lock().
4103 *
4104 * If we are called after it dropped the lock we must see all
4105 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4106 * set any mask even if it is not right from the task_cs() point of
4107 * view; the pending set_cpus_allowed_ptr() will fix things.
4108 *
4109 * select_fallback_rq() will fix things up and set cpu_possible_mask
4110 * if required.
4111 */
4112 return changed;
4113 }
4114
4115 void __init cpuset_init_current_mems_allowed(void)
4116 {
4117 nodes_setall(current->mems_allowed);
4118 }
4119
4120 /**
4121 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4122 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4123 *
4124 * Description: Returns the nodemask_t mems_allowed of the cpuset
4125 * attached to the specified @tsk. Guaranteed to return some non-empty
4126 * subset of node_states[N_MEMORY], even if this means going outside the
4127 * task's cpuset.
4128 **/
4129
4130 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4131 {
4132 nodemask_t mask;
4133 unsigned long flags;
4134
4135 spin_lock_irqsave(&callback_lock, flags);
4136 rcu_read_lock();
4137 guarantee_online_mems(task_cs(tsk), &mask);
4138 rcu_read_unlock();
4139 spin_unlock_irqrestore(&callback_lock, flags);
4140
4141 return mask;
4142 }
4143
4144 /**
4145 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4146 * @nodemask: the nodemask to be checked
4147 *
4148 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4149 */
4150 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4151 {
4152 return nodes_intersects(*nodemask, current->mems_allowed);
4153 }
4154
4155 /*
4156 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4157 * mem_hardwall ancestor to the specified cpuset. Call holding
4158 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
4159 * (an unusual configuration), then returns the root cpuset.
4160 */
4161 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4162 {
4163 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4164 cs = parent_cs(cs);
4165 return cs;
4166 }
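/*
 * Worked example (hypothetical hierarchy): with cpusets A/B/C where only
 * A has mem_hardwall set and current sits in C, the walk above visits
 * C -> B -> A and returns A; if no ancestor were hardwalled it would
 * keep walking and return the root cpuset.
 */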
4167
4168 /*
4169 * cpuset_node_allowed - Can we allocate on a memory node?
4170 * @node: is this an allowed node?
4171 * @gfp_mask: memory allocation flags
4172 *
4173 * If we're in interrupt, yes, we can always allocate. If @node is set in
4174 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4175 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4176 * yes. If current has access to memory reserves as an oom victim, yes.
4177 * Otherwise, no.
4178 *
4179 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4180 * and do not allow allocations outside the current task's cpuset
4181 * unless the task has been OOM killed.
4182 * GFP_KERNEL allocations are not so marked, so can escape to the
4183 * nearest enclosing hardwalled ancestor cpuset.
4184 *
4185 * Scanning up parent cpusets requires callback_lock. The
4186 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4187 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4188 * current tasks mems_allowed came up empty on the first pass over
4189 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4190 * cpuset are short of memory, might require taking the callback_lock.
4191 *
4192 * The first call here from mm/page_alloc:get_page_from_freelist()
4193 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4194 * so no allocation on a node outside the cpuset is allowed (unless
4195 * in interrupt, of course).
4196 *
4197 * The second pass through get_page_from_freelist() doesn't even call
4198 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4199 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4200 * in alloc_flags. That logic and the checks below have the combined
4201 * effect that:
4202 * in_interrupt - any node ok (current task context irrelevant)
4203 * GFP_ATOMIC - any node ok
4204 * tsk_is_oom_victim - any node ok
4205 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4206 * GFP_USER - only nodes in the current task's mems_allowed ok.
4207 */
4208 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4209 {
4210 struct cpuset *cs; /* current cpuset ancestors */
4211 bool allowed; /* is allocation in zone z allowed? */
4212 unsigned long flags;
4213
4214 if (in_interrupt())
4215 return true;
4216 if (node_isset(node, current->mems_allowed))
4217 return true;
4218 /*
4219 * Allow tasks that have access to memory reserves because they have
4220 * been OOM killed to get memory anywhere.
4221 */
4222 if (unlikely(tsk_is_oom_victim(current)))
4223 return true;
4224 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4225 return false;
4226
4227 if (current->flags & PF_EXITING) /* Let dying task have memory */
4228 return true;
4229
4230 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4231 spin_lock_irqsave(&callback_lock, flags);
4232
4233 rcu_read_lock();
4234 cs = nearest_hardwall_ancestor(task_cs(current));
4235 allowed = node_isset(node, cs->mems_allowed);
4236 rcu_read_unlock();
4237
4238 spin_unlock_irqrestore(&callback_lock, flags);
4239 return allowed;
4240 }
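/*
 * Illustrative sketch (hypothetical caller and node id) of how the GFP
 * distinction documented above plays out:
 *
 *	int nid = 3;
 *
 *	cpuset_node_allowed(nid, GFP_USER);
 *		- normally false unless nid is in current->mems_allowed
 *	cpuset_node_allowed(nid, GFP_KERNEL);
 *		- may additionally be true if nid is allowed in the nearest
 *		  hardwalled ancestor cpuset
 */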
4241
4242 /**
4243 * cpuset_spread_node() - On which node to begin search for a page
4244 * @rotor: round robin rotor
4245 *
4246 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4247 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4248 * and if the memory allocation used cpuset_mem_spread_node()
4249 * to determine on which node to start looking, as it will for
4250 * certain page cache or slab cache pages such as used for file
4251 * system buffers and inode caches, then instead of starting on the
4252 * local node to look for a free page, rather spread the starting
4253 * node around the task's mems_allowed nodes.
4254 *
4255 * We don't have to worry about the returned node being offline
4256 * because "it can't happen", and even if it did, it would be ok.
4257 *
4258 * The routines calling guarantee_online_mems() are careful to
4259 * only set nodes in task->mems_allowed that are online. So it
4260 * should not be possible for the following code to return an
4261 * offline node. But if it did, that would be ok, as this routine
4262 * is not returning the node where the allocation must be, only
4263 * the node where the search should start. The zonelist passed to
4264 * __alloc_pages() will include all nodes. If the slab allocator
4265 * is passed an offline node, it will fall back to the local node.
4266 * See kmem_cache_alloc_node().
4267 */
4268 static int cpuset_spread_node(int *rotor)
4269 {
4270 return *rotor = next_node_in(*rotor, current->mems_allowed);
4271 }
4272
4273 /**
4274 * cpuset_mem_spread_node() - On which node to begin search for a file page
4275 */
4276 int cpuset_mem_spread_node(void)
4277 {
4278 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4279 current->cpuset_mem_spread_rotor =
4280 node_random(&current->mems_allowed);
4281
4282 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4283 }
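/*
 * Illustrative sketch (hypothetical mems_allowed = {0,2,3}, rotor seeded
 * at node 0 by node_random()): each call returns the next allowed node,
 * wrapping around, i.e. successive calls yield 2, 3, 0, 2, ...
 */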
4284
4285 /**
4286 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4287 * @tsk1: pointer to task_struct of some task.
4288 * @tsk2: pointer to task_struct of some other task.
4289 *
4290 * Description: Return true if @tsk1's mems_allowed intersects the
4291 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4292 * one task's memory usage might impact the memory available
4293 * to the other.
4294 **/
4295
4296 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4297 const struct task_struct *tsk2)
4298 {
4299 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4300 }
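/*
 * Illustrative sketch (hypothetical OOM-style caller, not code from this
 * file): a candidate victim is only worth considering when killing it
 * could free memory on nodes the allocating task can actually use:
 *
 *	if (!cpuset_mems_allowed_intersects(current, victim))
 *		continue;
 *
 * where "victim" is a placeholder task_struct pointer in the caller's
 * iteration loop.
 */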
4301
4302 /**
4303 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4304 *
4305 * Description: Prints current's name, cpuset name, and cached copy of its
4306 * mems_allowed to the kernel log.
4307 */
4308 void cpuset_print_current_mems_allowed(void)
4309 {
4310 struct cgroup *cgrp;
4311
4312 rcu_read_lock();
4313
4314 cgrp = task_cs(current)->css.cgroup;
4315 pr_cont(",cpuset=");
4316 pr_cont_cgroup_name(cgrp);
4317 pr_cont(",mems_allowed=%*pbl",
4318 nodemask_pr_args(&current->mems_allowed));
4319
4320 rcu_read_unlock();
4321 }
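/*
 * Illustrative sketch (hypothetical values): the continuation printed by
 * cpuset_print_current_mems_allowed() looks roughly like
 *
 *	,cpuset=workload,mems_allowed=0-1
 *
 * appended to whatever log line the caller (e.g. the OOM report) has
 * already started with pr_cont().
 */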
4322
4323 /* Display task mems_allowed in /proc/<pid>/status file. */
4324 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4325 {
4326 seq_printf(m, "Mems_allowed:\t%*pb\n",
4327 nodemask_pr_args(&task->mems_allowed));
4328 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4329 nodemask_pr_args(&task->mems_allowed));
4330 }
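/*
 * Illustrative sketch (hypothetical values): the two lines emitted into
 * /proc/<pid>/status look like
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 *
 * where the exact width of the Mems_allowed bitmap depends on the
 * kernel's MAX_NUMNODES configuration.
 */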
4331