Lines Matching +full:cs +full:- +full:1

7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
59 #include <linux/backing-dev.h>
89 * The user-configured masks can only be changed by writing to
103 * The user-configured masks are always the same as the effective masks.
106 /* user-configured CPUs and Memory Nodes allowed to tasks */
115 * CPUs allocated to child sub-partitions (default hierarchy only)
116 * - CPUs granted by the parent = effective_cpus U subparts_cpus
117 * - effective_cpus and subparts_cpus are mutually exclusive.
127 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
128 * - A new cpuset's old_mems_allowed is initialized when some
130 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
140 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
158 * use_parent_ecpus - set if using parent's effective_cpus
159 * child_ecpus_count - # of children with use_parent_ecpus set
168 * 0 - not a partition root
170 * 1 - partition root
172 * -1 - invalid partition root
180 #define PRS_ENABLED 1
181 #define PRS_ERROR -1
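
For orientation, these three states correspond to the strings cgroup v2 reports through cpuset.cpus.partition; a minimal sketch (the helper prs_name() is hypothetical and not part of the source):

	static const char *prs_name(int prs)
	{
		switch (prs) {
		case PRS_ENABLED:	/*  1: valid partition root */
			return "root";
		case PRS_ERROR:		/* -1: partition root in error state */
			return "root invalid";
		default:		/*  0: ordinary member cpuset */
			return "member";
		}
	}

Note that is_partition_root() below returns true only for a state greater than zero, so a cpuset in PRS_ERROR is not treated as a live partition root.
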
203 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
205 return css_cs(cs->css.parent); in parent_cs()
221 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
223 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
226 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
228 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
231 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
233 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
236 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
238 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
241 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
243 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
246 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
248 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
251 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
253 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
256 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
258 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
261 static inline int is_partition_root(const struct cpuset *cs) in is_partition_root() argument
263 return cs->partition_root_state > 0; in is_partition_root()
267 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
268 (1 << CS_MEM_EXCLUSIVE)),
273 * cpuset_for_each_child - traverse online children of a cpuset
282 css_for_each_child((pos_css), &(parent_cs)->css) \
286 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
297 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
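
A minimal usage sketch of the pre-order iterator (walk_descendants() is a hypothetical caller; as with the real users further down in this file, the iteration must run under rcu_read_lock()):

	static void walk_descendants(struct cpuset *root_cs)
	{
		struct cpuset *cp;
		struct cgroup_subsys_state *pos_css;

		rcu_read_lock();
		cpuset_for_each_descendant_pre(cp, pos_css, root_cs)
			pr_debug("cpuset %*pbl\n",
				 cpumask_pr_args(cp->effective_cpus));
		rcu_read_unlock();
	}
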
301 * There are two global locks guarding cpuset structures - cpuset_mutex and
321 * If a task is only holding callback_lock, then it has read-only
329 * small pieces of code, such as when reading out possibly multi-word
371 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); in is_in_v2_mode()
379 * One way or another, we guarantee to return some non-empty subset
384 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
386 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { in guarantee_online_cpus()
387 cs = parent_cs(cs); in guarantee_online_cpus()
388 if (unlikely(!cs)) { in guarantee_online_cpus()
400 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
409 * One way or another, we guarantee to return some non-empty subset
414 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
416 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
417 cs = parent_cs(cs); in guarantee_online_mems()
418 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
426 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
429 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
434 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
441 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
450 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && in is_cpuset_subset()
451 nodes_subset(p->mems_allowed, q->mems_allowed) && in is_cpuset_subset()
457 * alloc_cpumasks - allocate three cpumasks for cpuset
458 * @cs: the cpuset that has cpumasks to be allocated.
460 * Return: 0 if successful, -ENOMEM otherwise.
462 * Only one of the two input arguments should be non-NULL.
464 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
468 if (cs) { in alloc_cpumasks()
469 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
470 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
471 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
473 pmask1 = &tmp->new_cpus; in alloc_cpumasks()
474 pmask2 = &tmp->addmask; in alloc_cpumasks()
475 pmask3 = &tmp->delmask; in alloc_cpumasks()
479 return -ENOMEM; in alloc_cpumasks()
493 return -ENOMEM; in alloc_cpumasks()
497 * free_cpumasks - free cpumasks in a tmpmasks structure
498 * @cs: the cpuset whose cpumasks are to be freed.
501 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
503 if (cs) { in free_cpumasks()
504 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
505 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
506 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
509 free_cpumask_var(tmp->new_cpus); in free_cpumasks()
510 free_cpumask_var(tmp->addmask); in free_cpumasks()
511 free_cpumask_var(tmp->delmask); in free_cpumasks()
516 * alloc_trial_cpuset - allocate a trial cpuset
517 * @cs: the cpuset that the trial cpuset duplicates
519 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
523 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
532 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
533 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
538 * free_cpuset - free the cpuset
539 * @cs: the cpuset to be freed
541 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
543 free_cpumasks(cs, NULL); in free_cpuset()
544 kfree(cs); in free_cpuset()
548 * validate_change() - Used to validate that any proposed cpuset change
556 * 'cur' is the address of an actual, in-use cpuset. Operations
564 * Return 0 if valid, -errno if not.
576 ret = -EBUSY; in validate_change()
589 ret = -EACCES; in validate_change()
597 ret = -EINVAL; in validate_change()
601 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) in validate_change()
605 nodes_intersects(trial->mems_allowed, c->mems_allowed)) in validate_change()
610 * Cpusets with tasks - existing or newly being attached - can't in validate_change()
613 ret = -ENOSPC; in validate_change()
614 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { in validate_change()
615 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
616 cpumask_empty(trial->cpus_allowed)) in validate_change()
618 if (!nodes_empty(cur->mems_allowed) && in validate_change()
619 nodes_empty(trial->mems_allowed)) in validate_change()
627 ret = -EBUSY; in validate_change()
629 !cpuset_cpumask_can_shrink(cur->cpus_allowed, in validate_change()
630 trial->cpus_allowed)) in validate_change()
646 return cpumask_intersects(a->effective_cpus, b->effective_cpus); in cpusets_overlap()
652 if (dattr->relax_domain_level < c->relax_domain_level) in update_domain_attr()
653 dattr->relax_domain_level = c->relax_domain_level; in update_domain_attr()
666 if (cpumask_empty(cp->cpus_allowed)) { in update_domain_attr_tree()
680 /* jump label reference count + the top-level cpuset */ in nr_cpusets()
681 return static_key_count(&cpusets_enabled_key.key) + 1; in nr_cpusets()
688 * A 'partial partition' is a set of non-overlapping subsets whose
695 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
706 * cp - cpuset pointer, used (together with pos_css) to perform a
707 * top-down scan of all cpusets. For our purposes, rebuilding
710 * csa - (for CpuSet Array) Array of pointers to all the cpusets
717 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
740 struct cpuset *cp; /* top-down scan of cpusets */ in generate_sched_domains()
757 ndoms = 1; in generate_sched_domains()
792 * If root is load-balancing, we can skip @cp if it in generate_sched_domains()
795 if (!cpumask_empty(cp->cpus_allowed) && in generate_sched_domains()
797 cpumask_intersects(cp->cpus_allowed, in generate_sched_domains()
802 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) in generate_sched_domains()
806 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
816 csa[i]->pn = i; in generate_sched_domains()
823 int apn = a->pn; in generate_sched_domains()
827 int bpn = b->pn; in generate_sched_domains()
833 if (c->pn == bpn) in generate_sched_domains()
834 c->pn = apn; in generate_sched_domains()
836 ndoms--; /* one less element */ in generate_sched_domains()
860 int apn = a->pn; in generate_sched_domains()
874 warnings--; in generate_sched_domains()
885 if (apn == b->pn) { in generate_sched_domains()
886 cpumask_or(dp, dp, b->effective_cpus); in generate_sched_domains()
892 b->pn = -1; in generate_sched_domains()
907 ndoms = 1; in generate_sched_domains()
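
The merge step above can be illustrated with a standalone userspace sketch (plain bitmasks instead of cpumasks, illustrative only): overlapping cpusets are relabelled to share a partition number, and the count of distinct numbers left is ndoms.

	#include <stdio.h>

	int main(void)
	{
		/* three load-balanced cpusets: {0,1}, {1,2}, {4,5} */
		unsigned long cs_mask[] = { 0x3, 0x6, 0x30 };
		int pn[] = { 0, 1, 2 };
		int n = 3, ndoms = n;

		for (int i = 0; i < n; i++)
			for (int j = 0; j < i; j++)
				if (pn[i] != pn[j] && (cs_mask[i] & cs_mask[j])) {
					int old = pn[j];

					for (int k = 0; k < n; k++)
						if (pn[k] == old)
							pn[k] = pn[i];
					ndoms--;
				}

		/* {0,1} and {1,2} merge into one domain, {4,5} stays alone */
		printf("ndoms = %d\n", ndoms);	/* prints: ndoms = 2 */
		return 0;
	}
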
914 static void update_tasks_root_domain(struct cpuset *cs) in update_tasks_root_domain() argument
919 css_task_iter_start(&cs->css, 0, &it); in update_tasks_root_domain()
929 struct cpuset *cs = NULL; in rebuild_root_domains() local
944 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_root_domains()
946 if (cpumask_empty(cs->effective_cpus)) { in rebuild_root_domains()
951 css_get(&cs->css); in rebuild_root_domains()
955 update_tasks_root_domain(cs); in rebuild_root_domains()
958 css_put(&cs->css); in rebuild_root_domains()
976 * If the flag 'sched_load_balance' of any cpuset with non-empty
978 * which has that flag enabled, or if any cpuset with a non-empty
1028 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1029 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1031 * Iterate through each task of @cs updating its cpus_allowed to the
1035 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1040 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1042 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
1047 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1049 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1058 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1060 if (parent->nr_subparts_cpus) { in compute_effective_cpumask()
1061 cpumask_or(new_cpus, parent->effective_cpus, in compute_effective_cpumask()
1062 parent->subparts_cpus); in compute_effective_cpumask()
1063 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1066 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1080 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1085 * Return: 0, 1 or an error code
1087 * For partcmd_enable, the cpuset is being transformed from a non-partition
1094 * root back to a non-partition root. any CPUs in cpus_allowed that are in
1103 * be granted by the parent. The function will return 1 if changes to
1105 * Error code should only be returned when newmask is non-NULL.
1139 (!newmask && cpumask_empty(cpuset->cpus_allowed))) in update_parent_subparts_cpumask()
1140 return -EINVAL; in update_parent_subparts_cpumask()
1146 if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) in update_parent_subparts_cpumask()
1147 return -EBUSY; in update_parent_subparts_cpumask()
1155 (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || in update_parent_subparts_cpumask()
1156 cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) in update_parent_subparts_cpumask()
1157 return -EINVAL; in update_parent_subparts_cpumask()
1164 cpumask_copy(tmp->addmask, cpuset->cpus_allowed); in update_parent_subparts_cpumask()
1167 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1168 parent->subparts_cpus); in update_parent_subparts_cpumask()
1173 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus in update_parent_subparts_cpumask()
1174 * addmask = newmask & parent->effective_cpus in update_parent_subparts_cpumask()
1175 * & ~parent->subparts_cpus in update_parent_subparts_cpumask()
1177 cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1178 deleting = cpumask_and(tmp->delmask, tmp->delmask, in update_parent_subparts_cpumask()
1179 parent->subparts_cpus); in update_parent_subparts_cpumask()
1181 cpumask_and(tmp->addmask, newmask, parent->effective_cpus); in update_parent_subparts_cpumask()
1182 adding = cpumask_andnot(tmp->addmask, tmp->addmask, in update_parent_subparts_cpumask()
1183 parent->subparts_cpus); in update_parent_subparts_cpumask()
1188 cpumask_equal(parent->effective_cpus, tmp->addmask)) { in update_parent_subparts_cpumask()
1190 return -EINVAL; in update_parent_subparts_cpumask()
1196 if (!cpumask_and(tmp->addmask, tmp->delmask, in update_parent_subparts_cpumask()
1198 return -EINVAL; in update_parent_subparts_cpumask()
1199 cpumask_copy(tmp->addmask, parent->effective_cpus); in update_parent_subparts_cpumask()
1205 * addmask = cpus_allowed & parent->effective_cpus in update_parent_subparts_cpumask()
1208 * pre-shrunk in case there is a change in the cpu list. in update_parent_subparts_cpumask()
1211 adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1212 parent->effective_cpus); in update_parent_subparts_cpumask()
1213 part_error = cpumask_equal(tmp->addmask, in update_parent_subparts_cpumask()
1214 parent->effective_cpus); in update_parent_subparts_cpumask()
1218 int prev_prs = cpuset->partition_root_state; in update_parent_subparts_cpumask()
1224 switch (cpuset->partition_root_state) { in update_parent_subparts_cpumask()
1227 cpuset->partition_root_state = PRS_ERROR; in update_parent_subparts_cpumask()
1231 cpuset->partition_root_state = PRS_ENABLED; in update_parent_subparts_cpumask()
1240 if (!part_error && (cpuset->partition_root_state == PRS_ERROR)) in update_parent_subparts_cpumask()
1243 if (cpuset->partition_root_state == PRS_ERROR) { in update_parent_subparts_cpumask()
1248 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1249 parent->subparts_cpus); in update_parent_subparts_cpumask()
1262 cpumask_or(parent->subparts_cpus, in update_parent_subparts_cpumask()
1263 parent->subparts_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1264 cpumask_andnot(parent->effective_cpus, in update_parent_subparts_cpumask()
1265 parent->effective_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1268 cpumask_andnot(parent->subparts_cpus, in update_parent_subparts_cpumask()
1269 parent->subparts_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1273 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); in update_parent_subparts_cpumask()
1274 cpumask_or(parent->effective_cpus, in update_parent_subparts_cpumask()
1275 parent->effective_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1278 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); in update_parent_subparts_cpumask()
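
A worked example of the addmask/delmask arithmetic above, using hypothetical CPU numbers (not taken from the source):

	/*
	 * partcmd_update with:
	 *	parent->effective_cpus = 4-7
	 *	parent->subparts_cpus  = 0-3	(all donated by this child)
	 *	cpuset->cpus_allowed   = 0-3
	 *	newmask                = 0-1,4
	 *
	 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus = 2-3
	 * addmask = newmask & parent->effective_cpus
	 *		     & ~parent->subparts_cpus              = 4
	 *
	 * CPUs 2-3 are handed back to the parent and CPU 4 is newly taken,
	 * leaving parent->effective_cpus = 2-3,5-7 and
	 * parent->subparts_cpus = 0-1,4.
	 */
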
1285 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1286 * @cs: the cpuset to consider
1296 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) in update_cpumasks_hier() argument
1303 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1306 compute_effective_cpumask(tmp->new_cpus, cp, parent); in update_cpumasks_hier()
1312 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { in update_cpumasks_hier()
1313 cpumask_copy(tmp->new_cpus, parent->effective_cpus); in update_cpumasks_hier()
1314 if (!cp->use_parent_ecpus) { in update_cpumasks_hier()
1315 cp->use_parent_ecpus = true; in update_cpumasks_hier()
1316 parent->child_ecpus_count++; in update_cpumasks_hier()
1318 } else if (cp->use_parent_ecpus) { in update_cpumasks_hier()
1319 cp->use_parent_ecpus = false; in update_cpumasks_hier()
1320 WARN_ON_ONCE(!parent->child_ecpus_count); in update_cpumasks_hier()
1321 parent->child_ecpus_count--; in update_cpumasks_hier()
1328 if (!cp->partition_root_state && in update_cpumasks_hier()
1329 cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { in update_cpumasks_hier()
1336 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1340 if ((cp != cs) && cp->partition_root_state) { in update_cpumasks_hier()
1341 switch (parent->partition_root_state) { in update_cpumasks_hier()
1348 WARN_ON_ONCE(cp->partition_root_state in update_cpumasks_hier()
1350 cp->partition_root_state = 0; in update_cpumasks_hier()
1359 clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); in update_cpumasks_hier()
1371 cp->partition_root_state = PRS_ERROR; in update_cpumasks_hier()
1372 if (cp->nr_subparts_cpus) { in update_cpumasks_hier()
1373 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1374 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1380 if (!css_tryget_online(&cp->css)) in update_cpumasks_hier()
1386 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
1387 if (cp->nr_subparts_cpus && in update_cpumasks_hier()
1388 (cp->partition_root_state != PRS_ENABLED)) { in update_cpumasks_hier()
1389 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1390 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1391 } else if (cp->nr_subparts_cpus) { in update_cpumasks_hier()
1397 * becomes empty. we clear cp->nr_subparts_cpus and in update_cpumasks_hier()
1401 cpumask_andnot(cp->effective_cpus, cp->effective_cpus, in update_cpumasks_hier()
1402 cp->subparts_cpus); in update_cpumasks_hier()
1403 if (cpumask_empty(cp->effective_cpus)) { in update_cpumasks_hier()
1404 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
1405 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1406 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1407 } else if (!cpumask_subset(cp->subparts_cpus, in update_cpumasks_hier()
1408 tmp->new_cpus)) { in update_cpumasks_hier()
1409 cpumask_andnot(cp->subparts_cpus, in update_cpumasks_hier()
1410 cp->subparts_cpus, tmp->new_cpus); in update_cpumasks_hier()
1411 cp->nr_subparts_cpus in update_cpumasks_hier()
1412 = cpumask_weight(cp->subparts_cpus); in update_cpumasks_hier()
1418 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); in update_cpumasks_hier()
1423 * On legacy hierarchy, if the effective cpumask of any non- in update_cpumasks_hier()
1428 if (!cpumask_empty(cp->cpus_allowed) && in update_cpumasks_hier()
1435 css_put(&cp->css); in update_cpumasks_hier()
1444 * update_sibling_cpumasks - Update siblings cpumasks
1446 * @cs: Current cpuset
1449 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1462 if (sibling == cs) in update_sibling_cpumasks()
1464 if (!sibling->use_parent_ecpus) in update_sibling_cpumasks()
1473 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1474 * @cs: the cpuset to consider
1478 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1484 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ in update_cpumask()
1485 if (cs == &top_cpuset) in update_cpumask()
1486 return -EACCES; in update_cpumask()
1495 cpumask_clear(trialcs->cpus_allowed); in update_cpumask()
1497 retval = cpulist_parse(buf, trialcs->cpus_allowed); in update_cpumask()
1501 if (!cpumask_subset(trialcs->cpus_allowed, in update_cpumask()
1503 return -EINVAL; in update_cpumask()
1507 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1510 retval = validate_change(cs, trialcs); in update_cpumask()
1519 tmp.addmask = trialcs->subparts_cpus; in update_cpumask()
1520 tmp.delmask = trialcs->effective_cpus; in update_cpumask()
1521 tmp.new_cpus = trialcs->cpus_allowed; in update_cpumask()
1524 if (cs->partition_root_state) { in update_cpumask()
1526 if (cpumask_empty(trialcs->cpus_allowed)) in update_cpumask()
1527 return -EINVAL; in update_cpumask()
1528 if (update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1529 trialcs->cpus_allowed, &tmp) < 0) in update_cpumask()
1530 return -EINVAL; in update_cpumask()
1534 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1539 if (cs->nr_subparts_cpus) { in update_cpumask()
1540 cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1541 cs->cpus_allowed); in update_cpumask()
1542 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1546 update_cpumasks_hier(cs, &tmp); in update_cpumask()
1548 if (cs->partition_root_state) { in update_cpumask()
1549 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1555 if (parent->child_ecpus_count) in update_cpumask()
1556 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1582 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); in cpuset_migrate_mm_workfn()
1583 mmput(mwork->mm); in cpuset_migrate_mm_workfn()
1594 mwork->mm = mm; in cpuset_migrate_mm()
1595 mwork->from = *from; in cpuset_migrate_mm()
1596 mwork->to = *to; in cpuset_migrate_mm()
1597 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); in cpuset_migrate_mm()
1598 queue_work(cpuset_migrate_mm_wq, &mwork->work); in cpuset_migrate_mm()
1610 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1614 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1625 write_seqcount_begin(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
1627 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); in cpuset_change_task_nodemask()
1629 tsk->mems_allowed = *newmems; in cpuset_change_task_nodemask()
1631 write_seqcount_end(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
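
For context, the reader side pairs with this write section through the read_mems_allowed_begin()/read_mems_allowed_retry() helpers from <linux/cpuset.h>; a minimal sketch of that retry loop (illustrative only, the wrapper function is hypothetical):

	static nodemask_t snapshot_mems_allowed(void)
	{
		nodemask_t nodes;
		unsigned int seq;

		do {
			seq = read_mems_allowed_begin();
			nodes = current->mems_allowed;
		} while (read_mems_allowed_retry(seq));

		return nodes;
	}
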
1640 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1641 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1643 * Iterate through each task of @cs updating its mems_allowed to the
1647 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1653 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1655 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1659 * take while holding tasklist_lock. Forks can happen - the in update_tasks_nodemask()
1667 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1678 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1680 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1682 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1690 * cs->old_mems_allowed. in update_tasks_nodemask()
1692 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1699 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1700 * @cs: the cpuset to consider
1710 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1716 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1719 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); in update_nodemasks_hier()
1726 *new_mems = parent->effective_mems; in update_nodemasks_hier()
1729 if (nodes_equal(*new_mems, cp->effective_mems)) { in update_nodemasks_hier()
1734 if (!css_tryget_online(&cp->css)) in update_nodemasks_hier()
1739 cp->effective_mems = *new_mems; in update_nodemasks_hier()
1743 !nodes_equal(cp->mems_allowed, cp->effective_mems)); in update_nodemasks_hier()
1748 css_put(&cp->css); in update_nodemasks_hier()
1762 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1763 * lock each such task's mm->mmap_lock, scan its vmas and rebind in update_nodemask()
1766 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1773 * it's read-only in update_nodemask()
1775 if (cs == &top_cpuset) { in update_nodemask()
1776 retval = -EACCES; in update_nodemask()
1787 nodes_clear(trialcs->mems_allowed); in update_nodemask()
1789 retval = nodelist_parse(buf, trialcs->mems_allowed); in update_nodemask()
1793 if (!nodes_subset(trialcs->mems_allowed, in update_nodemask()
1795 retval = -EINVAL; in update_nodemask()
1800 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1801 retval = 0; /* Too easy - nothing to do */ in update_nodemask()
1804 retval = validate_change(cs, trialcs); in update_nodemask()
1809 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1812 /* use trialcs->mems_allowed as a temp variable */ in update_nodemask()
1813 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1829 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1832 if (val < -1 || val >= sched_domain_level_max) in update_relax_domain_level()
1833 return -EINVAL; in update_relax_domain_level()
1836 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1837 cs->relax_domain_level = val; in update_relax_domain_level()
1838 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1839 is_sched_load_balance(cs)) in update_relax_domain_level()
1847 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1848 * @cs: the cpuset in which each task's spread flags need to be changed
1850 * Iterate through each task of @cs updating its spread flags. As this
1854 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1859 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
1861 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1866 * update_flag - read a 0 or a 1 in a file and update associated flag
1868 * cs: the cpuset to update
1874 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1882 trialcs = alloc_trial_cpuset(cs); in update_flag()
1884 return -ENOMEM; in update_flag()
1887 set_bit(bit, &trialcs->flags); in update_flag()
1889 clear_bit(bit, &trialcs->flags); in update_flag()
1891 err = validate_change(cs, trialcs); in update_flag()
1895 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1898 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1899 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1902 cs->flags = trialcs->flags; in update_flag()
1905 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) in update_flag()
1909 update_tasks_flags(cs); in update_flag()
1916 * update_prstate - update partition_root_state
1917 * cs: the cpuset to update
1918 * val: 0 - disabled, 1 - enabled
1922 static int update_prstate(struct cpuset *cs, int val) in update_prstate() argument
1925 struct cpuset *parent = parent_cs(cs); in update_prstate()
1928 if ((val != 0) && (val != 1)) in update_prstate()
1929 return -EINVAL; in update_prstate()
1930 if (val == cs->partition_root_state) in update_prstate()
1937 if (val && cs->partition_root_state) in update_prstate()
1938 return -EINVAL; in update_prstate()
1941 return -ENOMEM; in update_prstate()
1943 err = -EINVAL; in update_prstate()
1944 if (!cs->partition_root_state) { in update_prstate()
1950 if (cpumask_empty(cs->cpus_allowed)) in update_prstate()
1953 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
1957 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
1960 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1963 cs->partition_root_state = PRS_ENABLED; in update_prstate()
1969 if (cs->partition_root_state == PRS_ERROR) { in update_prstate()
1970 cs->partition_root_state = 0; in update_prstate()
1971 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1976 err = update_parent_subparts_cpumask(cs, partcmd_disable, in update_prstate()
1981 cs->partition_root_state = 0; in update_prstate()
1984 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1994 if (parent->child_ecpus_count) in update_prstate()
1995 update_sibling_cpumasks(parent, cs, &tmp); in update_prstate()
2004 * Frequency meter - How fast is some event occurring?
2008 * fmeter_init() - initialize a frequency meter.
2009 * fmeter_markevent() - called each time the event happens.
2010 * fmeter_getrate() - returns the recent rate of such events.
2011 * fmeter_update() - internal routine used to update fmeter.
2018 * The filter is single-pole low-pass recursive (IIR). The time unit
2019 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2022 * With an FM_COEF of 933, and a time base of 1 second, the filter
2023 * has a half-life of 10 seconds, meaning that if the events quit
2038 * per msec it maxes out at values just under 1,000,000. At constant
2048 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
2056 fmp->cnt = 0; in fmeter_init()
2057 fmp->val = 0; in fmeter_init()
2058 fmp->time = 0; in fmeter_init()
2059 spin_lock_init(&fmp->lock); in fmeter_init()
2062 /* Internal meter update - process cnt events and update value */
2069 ticks = now - fmp->time; in fmeter_update()
2075 while (ticks-- > 0) in fmeter_update()
2076 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; in fmeter_update()
2077 fmp->time = now; in fmeter_update()
2079 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; in fmeter_update()
2080 fmp->cnt = 0; in fmeter_update()
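
A standalone check of the half-life claim above (userspace sketch, not kernel code): applying the per-second decay ten times leaves roughly half of an FM_SCALE-scaled value, since 0.933^10 ~= 0.5.

	#include <stdio.h>

	#define FM_COEF		933	/* as in the kernel source */
	#define FM_SCALE	1000	/* fixed-point scale, 1 part per 1000 */

	int main(void)
	{
		int val = FM_SCALE;	/* start at 1.000 */

		for (int second = 0; second < 10; second++)
			val = (FM_COEF * val) / FM_SCALE;

		printf("after 10 seconds: %d/1000\n", val);	/* ~495, about half */
		return 0;
	}
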
2086 spin_lock(&fmp->lock); in fmeter_markevent()
2088 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); in fmeter_markevent()
2089 spin_unlock(&fmp->lock); in fmeter_markevent()
2097 spin_lock(&fmp->lock); in fmeter_getrate()
2099 val = fmp->val; in fmeter_getrate()
2100 spin_unlock(&fmp->lock); in fmeter_getrate()
2110 struct cpuset *cs; in cpuset_can_attach() local
2116 cs = css_cs(css); in cpuset_can_attach()
2121 ret = -ENOSPC; in cpuset_can_attach()
2123 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2127 ret = task_can_attach(task, cs->cpus_allowed); in cpuset_can_attach()
2139 cs->attach_in_progress++; in cpuset_can_attach()
2153 css_cs(css)->attach_in_progress--; in cpuset_cancel_attach()
2171 struct cpuset *cs; in cpuset_attach() local
2175 cs = css_cs(css); in cpuset_attach()
2180 if (cs == &top_cpuset) in cpuset_attach()
2183 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
2185 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2195 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2202 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2217 if (is_memory_migrate(cs)) in cpuset_attach()
2218 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, in cpuset_attach()
2225 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2227 cs->attach_in_progress--; in cpuset_attach()
2228 if (!cs->attach_in_progress) in cpuset_attach()
2258 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2259 cpuset_filetype_t type = cft->private; in cpuset_write_u64()
2264 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2265 retval = -ENODEV; in cpuset_write_u64()
2271 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2274 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2277 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2280 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2283 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2289 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2292 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2295 retval = -EINVAL; in cpuset_write_u64()
2307 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2308 cpuset_filetype_t type = cft->private; in cpuset_write_s64()
2309 int retval = -ENODEV; in cpuset_write_s64()
2313 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2318 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2321 retval = -EINVAL; in cpuset_write_s64()
2336 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2338 int retval = -ENODEV; in cpuset_write_resmask()
2343 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2348 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2357 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2361 css_get(&cs->css); in cpuset_write_resmask()
2362 kernfs_break_active_protection(of->kn); in cpuset_write_resmask()
2367 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2370 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2372 retval = -ENOMEM; in cpuset_write_resmask()
2376 switch (of_cft(of)->private) { in cpuset_write_resmask()
2378 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2381 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2384 retval = -EINVAL; in cpuset_write_resmask()
2392 kernfs_unbreak_active_protection(of->kn); in cpuset_write_resmask()
2393 css_put(&cs->css); in cpuset_write_resmask()
2408 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2409 cpuset_filetype_t type = seq_cft(sf)->private; in cpuset_common_seq_show()
2416 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2419 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2422 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2425 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2428 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2431 ret = -EINVAL; in cpuset_common_seq_show()
2440 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2441 cpuset_filetype_t type = cft->private; in cpuset_read_u64()
2444 return is_cpu_exclusive(cs); in cpuset_read_u64()
2446 return is_mem_exclusive(cs); in cpuset_read_u64()
2448 return is_mem_hardwall(cs); in cpuset_read_u64()
2450 return is_sched_load_balance(cs); in cpuset_read_u64()
2452 return is_memory_migrate(cs); in cpuset_read_u64()
2456 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2458 return is_spread_page(cs); in cpuset_read_u64()
2460 return is_spread_slab(cs); in cpuset_read_u64()
2471 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2472 cpuset_filetype_t type = cft->private; in cpuset_read_s64()
2475 return cs->relax_domain_level; in cpuset_read_s64()
2486 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2488 switch (cs->partition_root_state) { in sched_partition_show()
2505 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2507 int retval = -ENODEV; in sched_partition_write()
2519 return -EINVAL; in sched_partition_write()
2521 css_get(&cs->css); in sched_partition_write()
2524 if (!is_cpuset_online(cs)) in sched_partition_write()
2527 retval = update_prstate(cs, val); in sched_partition_write()
2531 css_put(&cs->css); in sched_partition_write()
2696 * cpuset_css_alloc - allocate a cpuset css
2703 struct cpuset *cs; in cpuset_css_alloc() local
2708 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
2709 if (!cs) in cpuset_css_alloc()
2710 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
2712 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
2713 kfree(cs); in cpuset_css_alloc()
2714 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
2717 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
2718 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
2719 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
2720 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
2721 cs->relax_domain_level = -1; in cpuset_css_alloc()
2723 return &cs->css; in cpuset_css_alloc()
2728 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
2729 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
2739 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
2741 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
2743 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
2749 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
2750 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2751 cs->use_parent_ecpus = true; in cpuset_css_online()
2752 parent->child_ecpus_count++; in cpuset_css_online()
2756 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) in cpuset_css_online()
2762 * historical reasons - the flag may be specified during mount. in cpuset_css_online()
2765 * refuse to clone the configuration - thereby refusing the task to in cpuset_css_online()
2769 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive in cpuset_css_online()
2782 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2783 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2784 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2785 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2806 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2811 if (is_partition_root(cs)) in cpuset_css_offline()
2812 update_prstate(cs, 0); in cpuset_css_offline()
2815 is_sched_load_balance(cs)) in cpuset_css_offline()
2816 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2818 if (cs->use_parent_ecpus) { in cpuset_css_offline()
2819 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
2821 cs->use_parent_ecpus = false; in cpuset_css_offline()
2822 parent->child_ecpus_count--; in cpuset_css_offline()
2826 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2834 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2836 free_cpuset(cs); in cpuset_css_free()
2867 set_cpus_allowed_ptr(task, current->cpus_ptr); in cpuset_fork()
2868 task->mems_allowed = current->mems_allowed; in cpuset_fork()
2889 * cpuset_init - initialize cpusets at system boot
2909 top_cpuset.relax_domain_level = -1; in cpuset_init()
2921 * cpuset to its next-highest non-empty parent.
2923 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
2928 * Find its next-highest non-empty parent, (top cpuset in remove_tasks_in_empty_cpuset()
2931 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
2932 while (cpumask_empty(parent->cpus_allowed) || in remove_tasks_in_empty_cpuset()
2933 nodes_empty(parent->mems_allowed)) in remove_tasks_in_empty_cpuset()
2936 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
2938 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
2944 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
2951 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
2952 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
2953 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
2954 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
2961 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
2962 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
2963 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
2964 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
2966 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
2967 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
2977 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
2983 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
2988 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
2990 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
2993 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
2994 cs->effective_mems = *new_mems; in hotplug_update_tasks()
2998 update_tasks_cpumask(cs); in hotplug_update_tasks()
3000 update_tasks_nodemask(cs); in hotplug_update_tasks()
3011 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3012 * @cs: cpuset in interest
3015 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3016 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3019 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3027 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3035 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3040 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3041 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3042 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3044 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3049 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3051 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3059 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || in cpuset_hotplug_update_tasks()
3060 (parent->partition_root_state == PRS_ERROR))) { in cpuset_hotplug_update_tasks()
3061 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3062 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3063 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3064 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3073 if ((parent->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3075 update_parent_subparts_cpumask(cs, partcmd_disable, in cpuset_hotplug_update_tasks()
3077 cs->partition_root_state = PRS_ERROR; in cpuset_hotplug_update_tasks()
3088 ((cs->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3089 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && in cpuset_hotplug_update_tasks()
3090 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) in cpuset_hotplug_update_tasks()
3094 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3095 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3098 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3101 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3108 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3116 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3188 struct cpuset *cs; in cpuset_hotplug_workfn() local
3192 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3193 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3197 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3200 css_put(&cs->css); in cpuset_hotplug_workfn()
3247 * cpuset_init_smp - initialize cpus_allowed
3267 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3268 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3272 * attached to the specified @tsk. Guaranteed to return some non-empty
3289 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3293 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3294 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3304 task_cs(tsk)->cpus_allowed : cpu_possible_mask); in cpuset_cpus_allowed_fallback()
3308 * We own tsk->cpus_allowed, nobody can change it under us. in cpuset_cpus_allowed_fallback()
3310 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
3312 * the wrong tsk->cpus_allowed. However, both cases imply the in cpuset_cpus_allowed_fallback()
3313 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() in cpuset_cpus_allowed_fallback()
3317 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary in cpuset_cpus_allowed_fallback()
3328 nodes_setall(current->mems_allowed); in cpuset_init_current_mems_allowed()
3332 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
3333 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3336 * attached to the specified @tsk. Guaranteed to return some non-empty
3356 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3359 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3363 return nodes_intersects(*nodemask, current->mems_allowed); in cpuset_nodemask_valid_mems_allowed()
3367 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3372 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3374 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3375 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3376 return cs; in nearest_hardwall_ancestor()
3380 * cpuset_node_allowed - Can we allocate on a memory node?
3413 * in_interrupt - any node ok (current task context irrelevant)
3414 * GFP_ATOMIC - any node ok
3415 * tsk_is_oom_victim - any node ok
3416 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
3417 * GFP_USER - only nodes in current tasks mems allowed ok.
3421 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3427 if (node_isset(node, current->mems_allowed)) in __cpuset_node_allowed()
3438 if (current->flags & PF_EXITING) /* Let dying task have memory */ in __cpuset_node_allowed()
3445 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3446 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
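
Condensing the rules above into the order the checks are made (a restatement for readability, not a substitute for the function itself):

	/*
	 *	if (in_interrupt())				allow;
	 *	if (node_isset(node, current->mems_allowed))	allow;
	 *	if (tsk_is_oom_victim(current))			allow;
	 *	if (gfp_mask & __GFP_HARDWALL)			deny;	(GFP_USER)
	 *	if (current->flags & PF_EXITING)		allow;
	 *	otherwise (GFP_KERNEL) walk up to the nearest mem_exclusive
	 *	or mem_hardwall ancestor and test its mems_allowed, as the
	 *	nearest_hardwall_ancestor() call above does.
	 */
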
3454 * cpuset_mem_spread_node() - On which node to begin search for a file page
3455 * cpuset_slab_spread_node() - On which node to begin search for a slab page
3470 * only set nodes in task->mems_allowed that are online. So it
3482 return *rotor = next_node_in(*rotor, current->mems_allowed); in cpuset_spread_node()
3487 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) in cpuset_mem_spread_node()
3488 current->cpuset_mem_spread_rotor = in cpuset_mem_spread_node()
3489 node_random(&current->mems_allowed); in cpuset_mem_spread_node()
3491 return cpuset_spread_node(&current->cpuset_mem_spread_rotor); in cpuset_mem_spread_node()
3496 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) in cpuset_slab_spread_node()
3497 current->cpuset_slab_spread_rotor = in cpuset_slab_spread_node()
3498 node_random(&current->mems_allowed); in cpuset_slab_spread_node()
3500 return cpuset_spread_node(&current->cpuset_slab_spread_rotor); in cpuset_slab_spread_node()
3506 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3519 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); in cpuset_mems_allowed_intersects()
3523 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3534 cgrp = task_cs(current)->css.cgroup; in cpuset_print_current_mems_allowed()
3538 nodemask_pr_args(&current->mems_allowed)); in cpuset_print_current_mems_allowed()
3545 * this flag is enabled by writing "1" to the special
3552 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3563 * Display to user space in the per-cpuset read-only file
3572 fmeter_markevent(&task_cs(current)->fmeter); in __cpuset_memory_pressure_bump()
3579 * - Print tasks cpuset path into seq_file.
3580 * - Used for /proc/<pid>/cpuset.
3581 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3582 * doesn't really matter if tsk->cpuset changes after we read it,
3593 retval = -ENOMEM; in proc_cpuset_show()
3599 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, in proc_cpuset_show()
3600 current->nsproxy->cgroup_ns); in proc_cpuset_show()
3603 retval = -ENAMETOOLONG; in proc_cpuset_show()
3620 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()
3622 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()