Lines Matching full:cs

203 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
205 return css_cs(cs->css.parent); in parent_cs()
221 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
223 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
226 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
228 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
231 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
233 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
236 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
238 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
241 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
243 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
246 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
248 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
251 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
253 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
256 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
258 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
261 static inline int is_partition_root(const struct cpuset *cs) in is_partition_root() argument
263 return cs->partition_root_state > 0; in is_partition_root()
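All of the predicates above share one shape: each cpuset option is a single bit in cs->flags, read with test_bit(). The matching write side is not part of the matched lines, so the following is a sketch reconstructed from context: update_flag() (shown further down) flips the same bits on a trial copy before the change is committed.

	if (turning_on)
		set_bit(bit, &trialcs->flags);
	else
		clear_bit(bit, &trialcs->flags);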
384 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
386 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { in guarantee_online_cpus()
387 cs = parent_cs(cs); in guarantee_online_cpus()
388 if (unlikely(!cs)) { in guarantee_online_cpus()
400 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
414 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
416 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
417 cs = parent_cs(cs); in guarantee_online_mems()
418 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
426 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
429 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
434 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
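Only the two condition lines of cpuset_update_task_spread_flag() match here. A plausible full shape of the helper, sketched from those lines (the task_set_*/task_clear_* helper names are assumptions, not quoted from this listing): each spread bit is propagated from the cpuset to the task, and cleared on the task when the cpuset does not have it set.

	static void cpuset_update_task_spread_flag(struct cpuset *cs,
						   struct task_struct *tsk)
	{
		if (is_spread_page(cs))
			task_set_spread_page(tsk);
		else
			task_clear_spread_page(tsk);

		if (is_spread_slab(cs))
			task_set_spread_slab(tsk);
		else
			task_clear_spread_slab(tsk);
	}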
458 * @cs: the cpuset that has cpumasks to be allocated.
464 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
468 if (cs) { in alloc_cpumasks()
469 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
470 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
471 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
498 * @cs: the cpuset whose cpumasks are to be freed.
501 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
503 if (cs) { in free_cpumasks()
504 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
505 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
506 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
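alloc_cpumasks() and free_cpumasks() do double duty: with a non-NULL @cs they manage the cpuset's own three masks, with a non-NULL @tmp they manage scratch masks in a struct tmpmasks. A hedged usage sketch for the scratch case (error handling condensed):

	struct tmpmasks tmp;

	/* scratch masks for a cpumask update; released on every path */
	if (alloc_cpumasks(NULL, &tmp))
		return -ENOMEM;

	/* ... compute new/added/deleted CPUs in tmp ... */

	free_cpumasks(NULL, &tmp);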
517 * @cs: the cpuset that the trial cpuset duplicates
519 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
523 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
532 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
533 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
539 * @cs: the cpuset to be freed
541 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
543 free_cpumasks(cs, NULL); in free_cpuset()
544 kfree(cs); in free_cpuset()
914 static void update_tasks_root_domain(struct cpuset *cs) in update_tasks_root_domain() argument
919 css_task_iter_start(&cs->css, 0, &it); in update_tasks_root_domain()
929 struct cpuset *cs = NULL; in rebuild_root_domains() local
944 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_root_domains()
946 if (cpumask_empty(cs->effective_cpus)) { in rebuild_root_domains()
951 css_get(&cs->css); in rebuild_root_domains()
955 update_tasks_root_domain(cs); in rebuild_root_domains()
958 css_put(&cs->css); in rebuild_root_domains()
1029 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1031 * Iterate through each task of @cs updating its cpus_allowed to the
1035 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1040 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1042 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
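update_tasks_cpumask() is one of several helpers that walk every task attached to the cpuset's css. The matched lines show the start of the walk and the per-task action; the complete iteration pattern, with the css_task_iter_next()/css_task_iter_end() calls filled in from the standard cgroup iterator API, looks like:

	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
	css_task_iter_end(&it);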
1049 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1058 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1063 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1066 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1286 * @cs: the cpuset to consider
1296 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) in update_cpumasks_hier() argument
1303 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1336 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1340 if ((cp != cs) && cp->partition_root_state) { in update_cpumasks_hier()
1446 * @cs: Current cpuset
1449 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1462 if (sibling == cs) in update_sibling_cpumasks()
1474 * @cs: the cpuset to consider
1478 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1485 if (cs == &top_cpuset) in update_cpumask()
1507 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1510 retval = validate_change(cs, trialcs); in update_cpumask()
1524 if (cs->partition_root_state) { in update_cpumask()
1528 if (update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1534 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1539 if (cs->nr_subparts_cpus) { in update_cpumask()
1540 cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1541 cs->cpus_allowed); in update_cpumask()
1542 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1546 update_cpumasks_hier(cs, &tmp); in update_cpumask()
1548 if (cs->partition_root_state) { in update_cpumask()
1549 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1556 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1641 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1643 * Iterate through each task of @cs updating its mems_allowed to the
1647 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1653 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1655 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1667 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1678 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1680 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1682 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1690 * cs->old_mems_allowed. in update_tasks_nodemask()
1692 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1700 * @cs: the cpuset to consider
1710 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1716 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1762 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1766 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1775 if (cs == &top_cpuset) { in update_nodemask()
1800 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1804 retval = validate_change(cs, trialcs); in update_nodemask()
1809 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1813 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1829 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1836 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1837 cs->relax_domain_level = val; in update_relax_domain_level()
1838 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1839 is_sched_load_balance(cs)) in update_relax_domain_level()
1848 * @cs: the cpuset in which each task's spread flags need to be changed
1850 * Iterate through each task of @cs updating its spread flags. As this
1854 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1859 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
1861 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1868 * cs: the cpuset to update
1874 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1882 trialcs = alloc_trial_cpuset(cs); in update_flag()
1891 err = validate_change(cs, trialcs); in update_flag()
1895 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1898 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1899 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1902 cs->flags = trialcs->flags; in update_flag()
1909 update_tasks_flags(cs); in update_flag()
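update_flag() shows the duplicate/validate/commit pattern that the cpumask and nodemask setters also follow: mutate a trial copy, run validate_change() against the hierarchy rules, commit under callback_lock, then push the result to tasks. A condensed sketch of that shape (locking and error paths are abbreviated, so treat the details as assumptions):

	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs)
		return -ENOMEM;

	/* ... modify trialcs->flags / cpus_allowed / mems_allowed ... */

	err = validate_change(cs, trialcs);
	if (!err) {
		spin_lock_irq(&callback_lock);
		cs->flags = trialcs->flags;	/* commit the validated change */
		spin_unlock_irq(&callback_lock);

		update_tasks_flags(cs);		/* propagate to attached tasks */
	}

	free_cpuset(trialcs);
	return err;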
1917 * cs: the cpuset to update
1922 static int update_prstate(struct cpuset *cs, int val) in update_prstate() argument
1925 struct cpuset *parent = parent_cs(cs); in update_prstate()
1930 if (val == cs->partition_root_state) in update_prstate()
1937 if (val && cs->partition_root_state) in update_prstate()
1944 if (!cs->partition_root_state) { in update_prstate()
1950 if (cpumask_empty(cs->cpus_allowed)) in update_prstate()
1953 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
1957 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
1960 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1963 cs->partition_root_state = PRS_ENABLED; in update_prstate()
1969 if (cs->partition_root_state == PRS_ERROR) { in update_prstate()
1970 cs->partition_root_state = 0; in update_prstate()
1971 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1976 err = update_parent_subparts_cpumask(cs, partcmd_disable, in update_prstate()
1981 cs->partition_root_state = 0; in update_prstate()
1984 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1995 update_sibling_cpumasks(parent, cs, &tmp); in update_prstate()
2110 struct cpuset *cs; in cpuset_can_attach() local
2116 cs = css_cs(css); in cpuset_can_attach()
2123 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2127 ret = task_can_attach(task, cs->cpus_allowed); in cpuset_can_attach()
2139 cs->attach_in_progress++; in cpuset_can_attach()
2171 struct cpuset *cs; in cpuset_attach() local
2175 cs = css_cs(css); in cpuset_attach()
2180 if (cs == &top_cpuset) in cpuset_attach()
2183 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
2185 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2195 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2202 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2217 if (is_memory_migrate(cs)) in cpuset_attach()
2225 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2227 cs->attach_in_progress--; in cpuset_attach()
2228 if (!cs->attach_in_progress) in cpuset_attach()
2258 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2264 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2271 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2274 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2277 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2280 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2283 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2289 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2292 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2307 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2313 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2318 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2336 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2343 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2348 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2357 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2361 css_get(&cs->css); in cpuset_write_resmask()
2367 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2370 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2378 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2381 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2393 css_put(&cs->css); in cpuset_write_resmask()
2408 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2416 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2419 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2422 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2425 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2428 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2440 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2444 return is_cpu_exclusive(cs); in cpuset_read_u64()
2446 return is_mem_exclusive(cs); in cpuset_read_u64()
2448 return is_mem_hardwall(cs); in cpuset_read_u64()
2450 return is_sched_load_balance(cs); in cpuset_read_u64()
2452 return is_memory_migrate(cs); in cpuset_read_u64()
2456 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2458 return is_spread_page(cs); in cpuset_read_u64()
2460 return is_spread_slab(cs); in cpuset_read_u64()
2471 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2475 return cs->relax_domain_level; in cpuset_read_s64()
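The read/write handlers above are dispatched per file: each legacy cpuset control file is described by a struct cftype entry whose .private field selects the case inside cpuset_read_u64()/cpuset_write_u64(). A hedged sketch of one such entry (field values illustrative):

	static struct cftype legacy_files[] = {
		{
			.name = "cpu_exclusive",
			.read_u64 = cpuset_read_u64,
			.write_u64 = cpuset_write_u64,
			.private = FILE_CPU_EXCLUSIVE,
		},
		{ }	/* terminator */
	};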
2486 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2488 switch (cs->partition_root_state) { in sched_partition_show()
2505 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2521 css_get(&cs->css); in sched_partition_write()
2524 if (!is_cpuset_online(cs)) in sched_partition_write()
2527 retval = update_prstate(cs, val); in sched_partition_write()
2531 css_put(&cs->css); in sched_partition_write()
2703 struct cpuset *cs; in cpuset_css_alloc() local
2708 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
2709 if (!cs) in cpuset_css_alloc()
2712 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
2713 kfree(cs); in cpuset_css_alloc()
2717 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
2718 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
2719 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
2720 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
2721 cs->relax_domain_level = -1; in cpuset_css_alloc()
2723 return &cs->css; in cpuset_css_alloc()
2728 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
2729 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
2739 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
2741 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
2743 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
2749 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
2750 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2751 cs->use_parent_ecpus = true; in cpuset_css_online()
2782 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2783 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2784 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2785 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2806 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2811 if (is_partition_root(cs)) in cpuset_css_offline()
2812 update_prstate(cs, 0); in cpuset_css_offline()
2815 is_sched_load_balance(cs)) in cpuset_css_offline()
2816 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2818 if (cs->use_parent_ecpus) { in cpuset_css_offline()
2819 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
2821 cs->use_parent_ecpus = false; in cpuset_css_offline()
2826 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2834 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2836 free_cpuset(cs); in cpuset_css_free()
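cpuset_css_alloc/online/offline/free form the css lifecycle for this controller; together with the attach hooks seen earlier, they are plugged into the cgroup core through a struct cgroup_subsys. A sketch of that wiring, showing only the callbacks that appear in this listing:

	struct cgroup_subsys cpuset_cgrp_subsys = {
		.css_alloc	= cpuset_css_alloc,
		.css_online	= cpuset_css_online,
		.css_offline	= cpuset_css_offline,
		.css_free	= cpuset_css_free,
		.can_attach	= cpuset_can_attach,
		.attach		= cpuset_attach,
	};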
2923 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
2931 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
2936 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
2938 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
2944 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
2951 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
2952 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
2953 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
2954 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
2961 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
2962 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
2963 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
2964 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
2966 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
2967 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
2977 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
2983 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
2988 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
2990 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
2993 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
2994 cs->effective_mems = *new_mems; in hotplug_update_tasks()
2998 update_tasks_cpumask(cs); in hotplug_update_tasks()
3000 update_tasks_nodemask(cs); in hotplug_update_tasks()
3012 * @cs: cpuset of interest
3015 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3016 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3019 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3027 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3035 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3040 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3041 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3042 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3044 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3049 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3051 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3059 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || in cpuset_hotplug_update_tasks()
3061 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3062 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3063 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3064 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3075 update_parent_subparts_cpumask(cs, partcmd_disable, in cpuset_hotplug_update_tasks()
3077 cs->partition_root_state = PRS_ERROR; in cpuset_hotplug_update_tasks()
3088 ((cs->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3090 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) in cpuset_hotplug_update_tasks()
3094 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3095 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3098 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3101 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3188 struct cpuset *cs; in cpuset_hotplug_workfn() local
3192 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3193 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3197 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3200 css_put(&cs->css); in cpuset_hotplug_workfn()
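cpuset_hotplug_workfn() has to sleep while fixing up each descendant, so it cannot hold the RCU read lock across the per-cpuset work. The matched lines show the tryget/put pair; the full bracketing below fills in the rcu_read_lock()/rcu_read_unlock() placement from the standard pattern, so treat it as a sketch:

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
		if (cs == &top_cpuset || !css_tryget_online(&cs->css))
			continue;
		rcu_read_unlock();

		cpuset_hotplug_update_tasks(cs, ptmp);

		rcu_read_lock();
		css_put(&cs->css);
	}
	rcu_read_unlock();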
3310 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
3372 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3374 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3375 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3376 return cs; in nearest_hardwall_ancestor()
3421 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3445 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3446 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()