Lines Matching full:cs

150 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
152 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
157 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
159 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
162 static inline int is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
164 return cs->partition_root_state > 0; in is_partition_valid()
167 static inline int is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
169 return cs->partition_root_state < 0; in is_partition_invalid()
175 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
177 if (cs->partition_root_state > 0) in make_partition_invalid()
178 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
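
The matches above show how partition_root_state encodes validity in its sign: positive values are valid partition roots, negative values are invalid ones, and zero is a plain member, so make_partition_invalid() can invalidate a root while remembering its old type simply by negating it. A minimal self-contained model of that encoding follows; the PRS_* names mirror the ones used in cpuset.c, but the snippet is an illustration, not kernel code.

#include <assert.h>

/* Model of the partition_root_state sign encoding (names as in cpuset.c). */
enum prs_state {
	PRS_MEMBER		= 0,	/* not a partition root */
	PRS_ROOT		= 1,	/* valid partition root */
	PRS_ISOLATED		= 2,	/* valid isolated partition root */
	PRS_INVALID_ROOT	= -1,	/* invalid partition root */
	PRS_INVALID_ISOLATED	= -2,	/* invalid isolated partition root */
};

struct cpuset_model { int partition_root_state; };

static int is_partition_valid(const struct cpuset_model *cs)
{
	return cs->partition_root_state > 0;
}

static int is_partition_invalid(const struct cpuset_model *cs)
{
	return cs->partition_root_state < 0;
}

/* Negating the state invalidates the root but keeps its old type. */
static void make_partition_invalid(struct cpuset_model *cs)
{
	if (cs->partition_root_state > 0)
		cs->partition_root_state = -cs->partition_root_state;
}

int main(void)
{
	struct cpuset_model cs = { .partition_root_state = PRS_ISOLATED };

	assert(is_partition_valid(&cs));
	make_partition_invalid(&cs);
	assert(cs.partition_root_state == PRS_INVALID_ISOLATED);
	assert(is_partition_invalid(&cs));
	return 0;
}
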
184 static inline void notify_partition_change(struct cpuset *cs, int old_prs) in notify_partition_change() argument
186 if (old_prs == cs->partition_root_state) in notify_partition_change()
188 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
191 if (is_partition_valid(cs)) in notify_partition_change()
192 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
277 * decrease cs->attach_in_progress.
278 * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
280 static inline void dec_attach_in_progress_locked(struct cpuset *cs) in dec_attach_in_progress_locked() argument
284 cs->attach_in_progress--; in dec_attach_in_progress_locked()
285 if (!cs->attach_in_progress) in dec_attach_in_progress_locked()
289 static inline void dec_attach_in_progress(struct cpuset *cs) in dec_attach_in_progress() argument
292 dec_attach_in_progress_locked(cs); in dec_attach_in_progress()
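
The comment at lines 277-278 spells out the protocol: every attach in flight bumps attach_in_progress, and the last decrement wakes cpuset_attach_wq so waiters such as the hotplug path (see line 3789 below) can proceed. A rough userspace analogue of that decrement-and-wake shape, using pthreads in place of the kernel's cpuset_mutex and wait queue (a sketch, not the kernel code):

#include <pthread.h>

/* Userspace sketch of the attach_in_progress protocol. */
static pthread_mutex_t cpuset_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cpuset_attach_wq = PTHREAD_COND_INITIALIZER;
static int attach_in_progress;

/* Caller already holds cpuset_mutex, like dec_attach_in_progress_locked(). */
static void dec_attach_in_progress_locked(void)
{
	attach_in_progress--;
	if (!attach_in_progress)
		pthread_cond_broadcast(&cpuset_attach_wq);
}

/* Takes the lock itself, like dec_attach_in_progress(). */
static void dec_attach_in_progress(void)
{
	pthread_mutex_lock(&cpuset_mutex);
	dec_attach_in_progress_locked();
	pthread_mutex_unlock(&cpuset_mutex);
}

/* What a waiter such as the hotplug path does (cf. line 3789). */
static void wait_for_attach_drain(void)
{
	pthread_mutex_lock(&cpuset_mutex);
	while (attach_in_progress)
		pthread_cond_wait(&cpuset_attach_wq, &cpuset_mutex);
	pthread_mutex_unlock(&cpuset_mutex);
}
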
318 * @cs: partition root to be checked
322 * It is assumed that @cs is a valid partition root. @excluded_child should
325 static inline bool partition_is_populated(struct cpuset *cs, in partition_is_populated() argument
331 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
333 if (!excluded_child && !cs->nr_subparts) in partition_is_populated()
334 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
337 cpuset_for_each_child(child, css, cs) { in partition_is_populated()
366 struct cpuset *cs; in guarantee_online_cpus() local
372 cs = task_cs(tsk); in guarantee_online_cpus()
374 while (!cpumask_intersects(cs->effective_cpus, pmask)) in guarantee_online_cpus()
375 cs = parent_cs(cs); in guarantee_online_cpus()
377 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
392 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
394 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
395 cs = parent_cs(cs); in guarantee_online_mems()
396 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
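
Both guarantee_online_* helpers rely on the same invariant: walking up through parent_cs() eventually reaches top_cpuset, whose effective masks always intersect the online set, so the loops terminate. A self-contained toy model of that ancestor walk over plain bitmasks (not kernel code):

#include <assert.h>
#include <stdint.h>

/* Toy model of the guarantee_online_cpus() ancestor walk. */
struct cs_node {
	struct cs_node *parent;	/* NULL only for the root (top_cpuset) */
	uint64_t effective;	/* stand-in for effective_cpus */
};

static uint64_t guarantee_online(const struct cs_node *cs, uint64_t online)
{
	/*
	 * Walk up until some ancestor's effective mask intersects the
	 * online mask; the root is set up so this always terminates.
	 */
	while (!(cs->effective & online))
		cs = cs->parent;
	return cs->effective & online;
}

int main(void)
{
	struct cs_node top   = { .parent = NULL, .effective = 0xff };
	struct cs_node mid   = { .parent = &top, .effective = 0x30 };
	struct cs_node child = { .parent = &mid, .effective = 0x08 };

	/* CPU 3 (bit 0x08) went offline: fall back to the parent's mask. */
	assert(guarantee_online(&child, 0xf7) == 0x30);
	return 0;
}
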
401 * @cs: the cpuset that has cpumasks to be allocated.
407 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
411 if (cs) { in alloc_cpumasks()
412 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
413 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
414 pmask3 = &cs->effective_xcpus; in alloc_cpumasks()
415 pmask4 = &cs->exclusive_cpus; in alloc_cpumasks()
449 * @cs: the cpuset that has cpumasks to be freed.
452 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
454 if (cs) { in free_cpumasks()
455 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
456 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
457 free_cpumask_var(cs->effective_xcpus); in free_cpumasks()
458 free_cpumask_var(cs->exclusive_cpus); in free_cpumasks()
469 * @cs: the cpuset that the trial cpuset duplicates
471 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
475 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
484 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
485 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
486 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus); in alloc_trial_cpuset()
487 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus); in alloc_trial_cpuset()
493 * @cs: the cpuset to be freed
495 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
497 free_cpumasks(cs, NULL); in free_cpuset()
498 kfree(cs); in free_cpuset()
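
alloc_cpumasks() hands out four masks per cpuset (cpus_allowed, effective_cpus, effective_xcpus, exclusive_cpus), alloc_trial_cpuset() pairs a kmemdup of the struct with fresh masks copied from the original, and free_cpuset() releases everything. A userspace model of the usual allocate-or-unwind shape behind this; mask types, the allocator, and the error labels are stand-ins, and the real function can also fill a struct tmpmasks instead of a cpuset:

#include <stdlib.h>

/* Model of the alloc_cpumasks()/free_cpumasks() lifecycle. */
struct cs_masks {
	unsigned long *cpus_allowed;
	unsigned long *effective_cpus;
	unsigned long *effective_xcpus;
	unsigned long *exclusive_cpus;
};

static int alloc_cpumasks_model(struct cs_masks *cs, size_t words)
{
	if (!(cs->cpus_allowed = calloc(words, sizeof(unsigned long))))
		return -1;
	if (!(cs->effective_cpus = calloc(words, sizeof(unsigned long))))
		goto free_cpus;
	if (!(cs->effective_xcpus = calloc(words, sizeof(unsigned long))))
		goto free_effective;
	if (!(cs->exclusive_cpus = calloc(words, sizeof(unsigned long))))
		goto free_xcpus;
	return 0;

free_xcpus:
	free(cs->effective_xcpus);
free_effective:
	free(cs->effective_cpus);
free_cpus:
	free(cs->cpus_allowed);
	return -1;
}

static void free_cpumasks_model(struct cs_masks *cs)
{
	free(cs->cpus_allowed);
	free(cs->effective_cpus);
	free(cs->effective_xcpus);
	free(cs->exclusive_cpus);
}

int main(void)
{
	struct cs_masks cs;

	if (alloc_cpumasks_model(&cs, 4) == 0)
		free_cpumasks_model(&cs);
	return 0;
}
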
502 static inline struct cpumask *user_xcpus(struct cpuset *cs) in user_xcpus() argument
504 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed in user_xcpus()
505 : cs->exclusive_cpus; in user_xcpus()
508 static inline bool xcpus_empty(struct cpuset *cs) in xcpus_empty() argument
510 return cpumask_empty(cs->cpus_allowed) && in xcpus_empty()
511 cpumask_empty(cs->exclusive_cpus); in xcpus_empty()
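
user_xcpus() implements a simple fallback: if the user never set cpuset.cpus.exclusive, the exclusive-CPU request defaults to cpus_allowed. The same logic modeled over plain bitmasks (not kernel code):

#include <assert.h>
#include <stdint.h>

/* Bitmask model of user_xcpus()/xcpus_empty(). */
struct cs_masks {
	uint64_t cpus_allowed;
	uint64_t exclusive_cpus;
};

/* Fall back to cpus_allowed when no exclusive CPUs were requested. */
static uint64_t user_xcpus(const struct cs_masks *cs)
{
	return cs->exclusive_cpus ? cs->exclusive_cpus : cs->cpus_allowed;
}

static int xcpus_empty(const struct cs_masks *cs)
{
	return !cs->cpus_allowed && !cs->exclusive_cpus;
}

int main(void)
{
	struct cs_masks cs = { .cpus_allowed = 0x0c, .exclusive_cpus = 0 };

	assert(user_xcpus(&cs) == 0x0c);	/* fallback */
	cs.exclusive_cpus = 0x04;
	assert(user_xcpus(&cs) == 0x04);	/* explicit request wins */
	assert(!xcpus_empty(&cs));
	return 0;
}
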
946 static void dl_update_tasks_root_domain(struct cpuset *cs) in dl_update_tasks_root_domain() argument
951 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
954 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
964 struct cpuset *cs = NULL; in dl_rebuild_rd_accounting() local
982 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in dl_rebuild_rd_accounting()
984 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
989 css_get(&cs->css); in dl_rebuild_rd_accounting()
993 dl_update_tasks_root_domain(cs); in dl_rebuild_rd_accounting()
996 css_put(&cs->css); in dl_rebuild_rd_accounting()
1017 struct cpuset *cs; in rebuild_sched_domains_locked() local
1044 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1045 if (!is_partition_valid(cs)) { in rebuild_sched_domains_locked()
1049 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1093 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1096 * Iterate through each task of @cs updating its cpus_allowed to the
1107 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) in cpuset_update_tasks_cpumask() argument
1111 bool top_cs = cs == &top_cpuset; in cpuset_update_tasks_cpumask()
1113 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_cpumask()
1127 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); in cpuset_update_tasks_cpumask()
1137 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1143 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1145 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1159 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1167 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs) in update_partition_exclusive_flag() argument
1171 if (exclusive && !is_cpu_exclusive(cs)) { in update_partition_exclusive_flag()
1172 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1)) in update_partition_exclusive_flag()
1174 } else if (!exclusive && is_cpu_exclusive(cs)) { in update_partition_exclusive_flag()
1176 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_partition_exclusive_flag()
1188 static void update_partition_sd_lb(struct cpuset *cs, int old_prs) in update_partition_sd_lb() argument
1190 int new_prs = cs->partition_root_state; in update_partition_sd_lb()
1195 * If cs is not a valid partition root, the load balance state in update_partition_sd_lb()
1201 new_lb = is_sched_load_balance(parent_cs(cs)); in update_partition_sd_lb()
1203 if (new_lb != !!is_sched_load_balance(cs)) { in update_partition_sd_lb()
1206 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1208 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1218 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs, in tasks_nocpu_error() argument
1222 * A populated partition (cs or parent) can't have empty effective_cpus in tasks_nocpu_error()
1225 partition_is_populated(parent, cs)) || in tasks_nocpu_error()
1227 partition_is_populated(cs, NULL)); in tasks_nocpu_error()
1230 static void reset_partition_data(struct cpuset *cs) in reset_partition_data() argument
1232 struct cpuset *parent = parent_cs(cs); in reset_partition_data()
1239 cs->nr_subparts = 0; in reset_partition_data()
1240 if (cpumask_empty(cs->exclusive_cpus)) { in reset_partition_data()
1241 cpumask_clear(cs->effective_xcpus); in reset_partition_data()
1242 if (is_cpu_exclusive(cs)) in reset_partition_data()
1243 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); in reset_partition_data()
1245 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed)) in reset_partition_data()
1246 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in reset_partition_data()
1354 * @cs: cpuset
1362 * the given cs is a trial one.
1364 static int compute_effective_exclusive_cpumask(struct cpuset *cs, in compute_effective_exclusive_cpumask() argument
1369 struct cpuset *parent = parent_cs(cs); in compute_effective_exclusive_cpumask()
1374 xcpus = cs->effective_xcpus; in compute_effective_exclusive_cpumask()
1376 cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus); in compute_effective_exclusive_cpumask()
1379 if (!cpumask_empty(cs->exclusive_cpus)) in compute_effective_exclusive_cpumask()
1382 cs = real_cs; in compute_effective_exclusive_cpumask()
1390 if (sibling == cs) in compute_effective_exclusive_cpumask()
1409 static inline bool is_remote_partition(struct cpuset *cs) in is_remote_partition() argument
1411 return !list_empty(&cs->remote_sibling); in is_remote_partition()
1414 static inline bool is_local_partition(struct cpuset *cs) in is_local_partition() argument
1416 return is_partition_valid(cs) && !is_remote_partition(cs); in is_local_partition()
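
is_remote_partition() tests list membership directly: a cpuset is a remote partition iff its remote_sibling node is linked into the global remote_children list (line 1456), and remote_partition_disable() uses list_del_init() at line 1488 precisely so that list_empty() becomes true again afterwards. A self-contained model of that list_head idiom:

#include <assert.h>

/* Minimal list_head model of the remote_sibling membership idiom. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Unlink and re-initialize, so list_empty(n) is true afterwards. */
static void list_del_init(struct list_head *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head remote_children, remote_sibling;

	INIT_LIST_HEAD(&remote_children);
	INIT_LIST_HEAD(&remote_sibling);
	assert(list_empty(&remote_sibling));	/* not a remote partition */

	list_add(&remote_sibling, &remote_children);	/* enable */
	assert(!list_empty(&remote_sibling));	/* is_remote_partition() */

	list_del_init(&remote_sibling);		/* disable */
	assert(list_empty(&remote_sibling));	/* member again */
	return 0;
}
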
1421 * @cs: the cpuset to update
1429 static int remote_partition_enable(struct cpuset *cs, int new_prs, in remote_partition_enable() argument
1448 compute_effective_exclusive_cpumask(cs, tmp->new_cpus, NULL); in remote_partition_enable()
1456 list_add(&cs->remote_sibling, &remote_children); in remote_partition_enable()
1457 cpumask_copy(cs->effective_xcpus, tmp->new_cpus); in remote_partition_enable()
1461 cs->prs_err = 0; in remote_partition_enable()
1473 * @cs: the cpuset to update
1480 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) in remote_partition_disable() argument
1484 WARN_ON_ONCE(!is_remote_partition(cs)); in remote_partition_disable()
1485 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); in remote_partition_disable()
1488 list_del_init(&cs->remote_sibling); in remote_partition_disable()
1489 isolcpus_updated = partition_xcpus_del(cs->partition_root_state, in remote_partition_disable()
1490 NULL, cs->effective_xcpus); in remote_partition_disable()
1491 if (cs->prs_err) in remote_partition_disable()
1492 cs->partition_root_state = -cs->partition_root_state; in remote_partition_disable()
1494 cs->partition_root_state = PRS_MEMBER; in remote_partition_disable()
1497 compute_effective_exclusive_cpumask(cs, NULL, NULL); in remote_partition_disable()
1498 reset_partition_data(cs); in remote_partition_disable()
1512 * @cs: the cpuset to be updated
1520 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus, in remote_cpus_update() argument
1524 int prs = cs->partition_root_state; in remote_cpus_update()
1527 if (WARN_ON_ONCE(!is_remote_partition(cs))) in remote_cpus_update()
1530 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); in remote_cpus_update()
1533 cs->prs_err = PERR_CPUSEMPTY; in remote_cpus_update()
1537 adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus); in remote_cpus_update()
1538 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus); in remote_cpus_update()
1547 cs->prs_err = PERR_ACCESS; in remote_cpus_update()
1550 cs->prs_err = PERR_NOCPUS; in remote_cpus_update()
1551 if (cs->prs_err) in remote_cpus_update()
1562 * update_sibling_cpumasks() below may iterate back to the same cs. in remote_cpus_update()
1564 cpumask_copy(cs->effective_xcpus, excpus); in remote_cpus_update()
1566 cpumask_copy(cs->exclusive_cpus, xcpus); in remote_cpus_update()
1580 remote_partition_disable(cs, tmp); in remote_cpus_update()
1605 * @cs: The cpuset that requests change in partition root state
1640 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, in update_parent_effective_cpumask() argument
1644 struct cpuset *parent = parent_cs(cs); in update_parent_effective_cpumask()
1651 struct cpumask *xcpus = user_xcpus(cs); in update_parent_effective_cpumask()
1655 WARN_ON_ONCE(is_remote_partition(cs)); in update_parent_effective_cpumask()
1662 old_prs = new_prs = cs->partition_root_state; in update_parent_effective_cpumask()
1690 if (!newmask && xcpus_empty(cs)) in update_parent_effective_cpumask()
1693 nocpu = tasks_nocpu_error(parent, cs, xcpus); in update_parent_effective_cpumask()
1702 if (compute_effective_exclusive_cpumask(cs, xcpus, NULL)) in update_parent_effective_cpumask()
1703 WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus)); in update_parent_effective_cpumask()
1733 if (is_partition_valid(cs)) { in update_parent_effective_cpumask()
1734 cpumask_copy(tmp->addmask, cs->effective_xcpus); in update_parent_effective_cpumask()
1748 /* Check newmask again, whether cpus are available for parent/cs */ in update_parent_effective_cpumask()
1749 nocpu |= tasks_nocpu_error(parent, cs, newmask); in update_parent_effective_cpumask()
1808 if (is_partition_valid(cs)) in update_parent_effective_cpumask()
1811 } else if (is_partition_invalid(cs) && in update_parent_effective_cpumask()
1823 if (child == cs) in update_parent_effective_cpumask()
1825 if (!cpusets_are_exclusive(cs, child)) { in update_parent_effective_cpumask()
1841 WRITE_ONCE(cs->prs_err, part_error); in update_parent_effective_cpumask()
1848 switch (cs->partition_root_state) { in update_parent_effective_cpumask()
1873 * CPU lists in cs haven't been updated yet. So defer it to later. in update_parent_effective_cpumask()
1876 int err = update_partition_exclusive_flag(cs, new_prs); in update_parent_effective_cpumask()
1891 cs->partition_root_state = new_prs; in update_parent_effective_cpumask()
1893 cs->nr_subparts = 0; in update_parent_effective_cpumask()
1896 * Adding to parent's effective_cpus means deleting CPUs from cs in update_parent_effective_cpumask()
1914 update_partition_exclusive_flag(cs, new_prs); in update_parent_effective_cpumask()
1918 update_sibling_cpumasks(parent, cs, tmp); in update_parent_effective_cpumask()
1927 update_partition_sd_lb(cs, old_prs); in update_parent_effective_cpumask()
1929 notify_partition_change(cs, old_prs); in update_parent_effective_cpumask()
1935 * @cs: partition root cpuset
1949 static void compute_partition_effective_cpumask(struct cpuset *cs, in compute_partition_effective_cpumask() argument
1954 bool populated = partition_is_populated(cs, NULL); in compute_partition_effective_cpumask()
1964 compute_effective_exclusive_cpumask(cs, new_ecpus, NULL); in compute_partition_effective_cpumask()
1968 cpuset_for_each_child(child, css, cs) { in compute_partition_effective_cpumask()
1979 cs->effective_xcpus)) in compute_partition_effective_cpumask()
1993 cs->nr_subparts--; in compute_partition_effective_cpumask()
2007 * @cs: the cpuset to consider
2018 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, in update_cpumasks_hier() argument
2027 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
2035 * For child remote partition root (!= cs), we need to call in update_cpumasks_hier()
2042 if (remote && (cp != cs)) { in update_cpumasks_hier()
2102 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
2106 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
2144 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) in update_cpumasks_hier()
2201 * @cs: Current cpuset
2204 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
2226 if (sibling == cs) in update_sibling_cpumasks()
2254 * @cs: the cpuset to consider
2258 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
2263 struct cpuset *parent = parent_cs(cs); in update_cpumask()
2266 int old_prs = cs->partition_root_state; in update_cpumask()
2269 if (cs == &top_cpuset) in update_cpumask()
2298 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs)) in update_cpumask()
2299 compute_effective_exclusive_cpumask(trialcs, NULL, cs); in update_cpumask()
2303 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
2310 if (is_partition_valid(cs) && in update_cpumask()
2313 cs->prs_err = PERR_INVCPUS; in update_cpumask()
2316 cs->prs_err = PERR_HKEEPING; in update_cpumask()
2317 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { in update_cpumask()
2319 cs->prs_err = PERR_NOCPUS; in update_cpumask()
2327 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2329 retval = validate_change(cs, trialcs); in update_cpumask()
2361 if (is_partition_valid(cs) || in update_cpumask()
2362 (is_partition_invalid(cs) && !invalidate)) { in update_cpumask()
2365 if (cpumask_empty(xcpus) && is_partition_invalid(cs)) in update_cpumask()
2371 if (is_remote_partition(cs)) in update_cpumask()
2372 remote_cpus_update(cs, NULL, xcpus, &tmp); in update_cpumask()
2374 update_parent_effective_cpumask(cs, partcmd_invalidate, in update_cpumask()
2377 update_parent_effective_cpumask(cs, partcmd_update, in update_cpumask()
2382 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
2383 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2384 if ((old_prs > 0) && !is_partition_valid(cs)) in update_cpumask()
2385 reset_partition_data(cs); in update_cpumask()
2389 update_cpumasks_hier(cs, &tmp, force); in update_cpumask()
2392 if (cs->partition_root_state) in update_cpumask()
2393 update_partition_sd_lb(cs, old_prs); in update_cpumask()
2401 * @cs: the cpuset to consider
2405 * The tasks' cpumask will be updated if cs is a valid partition root.
2407 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_exclusive_cpumask() argument
2412 struct cpuset *parent = parent_cs(cs); in update_exclusive_cpumask()
2415 int old_prs = cs->partition_root_state; in update_exclusive_cpumask()
2427 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) in update_exclusive_cpumask()
2436 if (compute_effective_exclusive_cpumask(trialcs, NULL, cs)) in update_exclusive_cpumask()
2444 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2446 retval = validate_change(cs, trialcs); in update_exclusive_cpumask()
2456 cs->prs_err = PERR_INVCPUS; in update_exclusive_cpumask()
2459 cs->prs_err = PERR_HKEEPING; in update_exclusive_cpumask()
2460 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { in update_exclusive_cpumask()
2462 cs->prs_err = PERR_NOCPUS; in update_exclusive_cpumask()
2465 if (is_remote_partition(cs)) { in update_exclusive_cpumask()
2467 remote_partition_disable(cs, &tmp); in update_exclusive_cpumask()
2469 remote_cpus_update(cs, trialcs->exclusive_cpus, in update_exclusive_cpumask()
2472 update_parent_effective_cpumask(cs, partcmd_invalidate, in update_exclusive_cpumask()
2475 update_parent_effective_cpumask(cs, partcmd_update, in update_exclusive_cpumask()
2480 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus); in update_exclusive_cpumask()
2481 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2482 if ((old_prs > 0) && !is_partition_valid(cs)) in update_exclusive_cpumask()
2483 reset_partition_data(cs); in update_exclusive_cpumask()
2491 if (is_partition_valid(cs) || force) in update_exclusive_cpumask()
2492 update_cpumasks_hier(cs, &tmp, force); in update_exclusive_cpumask()
2495 if (cs->partition_root_state) in update_exclusive_cpumask()
2496 update_partition_sd_lb(cs, old_prs); in update_exclusive_cpumask()
2587 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2589 * Iterate through each task of @cs updating its mems_allowed to the
2593 void cpuset_update_tasks_nodemask(struct cpuset *cs) in cpuset_update_tasks_nodemask() argument
2599 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in cpuset_update_tasks_nodemask()
2601 guarantee_online_mems(cs, &newmems); in cpuset_update_tasks_nodemask()
2613 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_nodemask()
2624 migrate = is_memory_migrate(cs); in cpuset_update_tasks_nodemask()
2626 mpol_rebind_mm(mm, &cs->mems_allowed); in cpuset_update_tasks_nodemask()
2628 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in cpuset_update_tasks_nodemask()
2636 * cs->old_mems_allowed. in cpuset_update_tasks_nodemask()
2638 cs->old_mems_allowed = newmems; in cpuset_update_tasks_nodemask()
2646 * @cs: the cpuset to consider
2656 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
2662 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
2708 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2712 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
2721 if (cs == &top_cpuset) { in update_nodemask()
2746 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2750 retval = validate_change(cs, trialcs); in update_nodemask()
2757 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2761 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2780 * cs: the cpuset to update
2786 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in cpuset_update_flag() argument
2794 trialcs = alloc_trial_cpuset(cs); in cpuset_update_flag()
2803 err = validate_change(cs, trialcs); in cpuset_update_flag()
2807 balance_flag_changed = (is_sched_load_balance(cs) != in cpuset_update_flag()
2810 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in cpuset_update_flag()
2811 || (is_spread_page(cs) != is_spread_page(trialcs))); in cpuset_update_flag()
2814 cs->flags = trialcs->flags; in cpuset_update_flag()
2825 cpuset1_update_tasks_flags(cs); in cpuset_update_flag()
2833 * @cs: the cpuset to update
2839 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2841 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2842 struct cpuset *parent = parent_cs(cs); in update_prstate()
2858 err = update_partition_exclusive_flag(cs, new_prs); in update_prstate()
2866 if (xcpus_empty(cs)) { in update_prstate()
2879 cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) { in update_prstate()
2892 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask); in update_prstate()
2894 err = remote_partition_enable(cs, new_prs, &tmpmask); in update_prstate()
2907 if (is_remote_partition(cs)) in update_prstate()
2908 remote_partition_disable(cs, &tmpmask); in update_prstate()
2910 update_parent_effective_cpumask(cs, partcmd_disable, in update_prstate()
2925 update_partition_exclusive_flag(cs, new_prs); in update_prstate()
2929 cs->partition_root_state = new_prs; in update_prstate()
2930 WRITE_ONCE(cs->prs_err, err); in update_prstate()
2931 if (!is_partition_valid(cs)) in update_prstate()
2932 reset_partition_data(cs); in update_prstate()
2934 isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus); in update_prstate()
2939 update_cpumasks_hier(cs, &tmpmask, !new_prs); in update_prstate()
2943 && cpumask_empty(cs->effective_xcpus)); in update_prstate()
2946 update_partition_sd_lb(cs, old_prs); in update_prstate()
2948 notify_partition_change(cs, old_prs); in update_prstate()
2963 static int cpuset_can_attach_check(struct cpuset *cs) in cpuset_can_attach_check() argument
2965 if (cpumask_empty(cs->effective_cpus) || in cpuset_can_attach_check()
2966 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) in cpuset_can_attach_check()
2971 static void reset_migrate_dl_data(struct cpuset *cs) in reset_migrate_dl_data() argument
2973 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
2974 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
2981 struct cpuset *cs, *oldcs; in cpuset_can_attach() local
2989 cs = css_cs(css); in cpuset_can_attach()
2994 ret = cpuset_can_attach_check(cs); in cpuset_can_attach()
2998 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); in cpuset_can_attach()
2999 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_can_attach()
3018 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
3019 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
3023 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
3026 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
3027 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
3030 reset_migrate_dl_data(cs); in cpuset_can_attach()
3035 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
3037 reset_migrate_dl_data(cs); in cpuset_can_attach()
3047 cs->attach_in_progress++; in cpuset_can_attach()
3056 struct cpuset *cs; in cpuset_cancel_attach() local
3059 cs = css_cs(css); in cpuset_cancel_attach()
3062 dec_attach_in_progress_locked(cs); in cpuset_cancel_attach()
3064 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
3065 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
3067 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
3068 reset_migrate_dl_data(cs); in cpuset_cancel_attach()
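
The fragments from cpuset_can_attach() and cpuset_cancel_attach() show a reserve-then-commit-or-rollback handshake for deadline bandwidth: can_attach sums task->dl.dl_bw over every DL task being attached, reserves the total with one dl_bw_alloc() call on a destination CPU, cpuset_attach() later commits by moving the nr_deadline_tasks counters, and cancel_attach rolls back with dl_bw_free(). A toy standalone model of that shape (the bandwidth numbers are invented for illustration):

#include <assert.h>

/* Toy reserve/rollback model of the DL-bandwidth handshake. */
static long available_bw = 100;

static int dl_bw_alloc_model(long bw)
{
	if (bw > available_bw)
		return -1;		/* admission failure */
	available_bw -= bw;		/* reserve up front */
	return 0;
}

static void dl_bw_free_model(long bw)
{
	available_bw += bw;		/* rollback on cancel */
}

int main(void)
{
	long sum_migrate_dl_bw = 0;

	/* can_attach: sum the bandwidth of every DL task in the set. */
	sum_migrate_dl_bw += 30;
	sum_migrate_dl_bw += 20;
	assert(dl_bw_alloc_model(sum_migrate_dl_bw) == 0);

	/* cancel_attach: return the whole reservation. */
	dl_bw_free_model(sum_migrate_dl_bw);
	assert(available_bw == 100);
	return 0;
}
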
3082 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) in cpuset_attach_task() argument
3086 if (cs != &top_cpuset) in cpuset_attach_task()
3098 cpuset1_update_task_spread_flags(cs, task); in cpuset_attach_task()
3106 struct cpuset *cs; in cpuset_attach() local
3111 cs = css_cs(css); in cpuset_attach()
3115 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
3117 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
3126 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3130 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
3133 cpuset_attach_task(cs, task); in cpuset_attach()
3141 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3142 if (!is_memory_migrate(cs) && !mems_updated) in cpuset_attach()
3159 if (is_memory_migrate(cs)) in cpuset_attach()
3168 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
3170 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
3171 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
3172 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
3173 reset_migrate_dl_data(cs); in cpuset_attach()
3176 dec_attach_in_progress_locked(cs); in cpuset_attach()
3187 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
3194 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
3197 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
3205 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
3208 retval = update_exclusive_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
3211 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
3238 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
3246 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
3249 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
3252 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
3255 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
3258 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus)); in cpuset_common_seq_show()
3261 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus)); in cpuset_common_seq_show()
3279 struct cpuset *cs = css_cs(seq_css(seq)); in cpuset_partition_show() local
3282 switch (cs->partition_root_state) { in cpuset_partition_show()
3298 err = perr_strings[READ_ONCE(cs->prs_err)]; in cpuset_partition_show()
3311 struct cpuset *cs = css_cs(of_css(of)); in cpuset_partition_write() local
3326 css_get(&cs->css); in cpuset_partition_write()
3329 if (is_cpuset_online(cs)) in cpuset_partition_write()
3330 retval = update_prstate(cs, val); in cpuset_partition_write()
3333 css_put(&cs->css); in cpuset_partition_write()
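
These handlers back the cgroup v2 control files: cpuset_write_resmask() dispatches writes to cpuset.cpus, cpuset.cpus.exclusive and cpuset.mems, while cpuset_partition_write() parses "member"/"root"/"isolated" and calls update_prstate(). A hedged userspace example driving that interface; the cgroup path /sys/fs/cgroup/test is an assumption (the directory must already exist on a mounted cgroup v2 hierarchy):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a string to one cpuset control file; returns 0 on success. */
static int cg_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	/* Assumed path: an existing cgroup v2 directory named "test". */
	const char *base = "/sys/fs/cgroup/test";
	char path[256];

	snprintf(path, sizeof(path), "%s/cpuset.cpus", base);
	cg_write(path, "2-3");		/* handled by update_cpumask() */

	snprintf(path, sizeof(path), "%s/cpuset.cpus.partition", base);
	cg_write(path, "root");		/* handled by update_prstate() */
	return 0;
}
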
3427 struct cpuset *cs; in cpuset_css_alloc() local
3432 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
3433 if (!cs) in cpuset_css_alloc()
3436 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
3437 kfree(cs); in cpuset_css_alloc()
3441 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3442 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3443 cs->relax_domain_level = -1; in cpuset_css_alloc()
3444 INIT_LIST_HEAD(&cs->remote_sibling); in cpuset_css_alloc()
3448 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3450 return &cs->css; in cpuset_css_alloc()
3455 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
3456 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
3466 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3468 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3470 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3475 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_online()
3481 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3482 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3512 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3513 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3514 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3515 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3536 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
3541 if (!cpuset_v2() && is_sched_load_balance(cs)) in cpuset_css_offline()
3542 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
3545 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3553 struct cpuset *cs = css_cs(css); in cpuset_css_killed() local
3559 if (is_partition_valid(cs)) in cpuset_css_killed()
3560 update_prstate(cs, PRS_MEMBER); in cpuset_css_killed()
3569 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
3571 free_cpuset(cs); in cpuset_css_free()
3599 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_can_fork() local
3604 same_cs = (cs == task_cs(current)); in cpuset_can_fork()
3614 ret = cpuset_can_attach_check(cs); in cpuset_can_fork()
3630 cs->attach_in_progress++; in cpuset_can_fork()
3638 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_cancel_fork() local
3642 same_cs = (cs == task_cs(current)); in cpuset_cancel_fork()
3648 dec_attach_in_progress(cs); in cpuset_cancel_fork()
3658 struct cpuset *cs; in cpuset_fork() local
3662 cs = task_cs(task); in cpuset_fork()
3663 same_cs = (cs == task_cs(current)); in cpuset_fork()
3667 if (cs == &top_cpuset) in cpuset_fork()
3677 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_fork()
3678 cpuset_attach_task(cs, task); in cpuset_fork()
3680 dec_attach_in_progress_locked(cs); in cpuset_fork()
3744 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3749 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) in hotplug_update_tasks()
3750 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3752 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3755 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3756 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3760 cpuset_update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks()
3762 cpuset_update_tasks_nodemask(cs); in hotplug_update_tasks()
3772 * @cs: cpuset in interest
3775 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3776 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3779 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3789 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3797 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3802 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3803 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3804 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3806 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3813 remote = is_remote_partition(cs); in cpuset_hotplug_update_tasks()
3814 if (remote || (is_partition_valid(cs) && is_partition_valid(parent))) in cpuset_hotplug_update_tasks()
3815 compute_partition_effective_cpumask(cs, &new_cpus); in cpuset_hotplug_update_tasks()
3818 partition_is_populated(cs, NULL)) { in cpuset_hotplug_update_tasks()
3819 cs->prs_err = PERR_HOTPLUG; in cpuset_hotplug_update_tasks()
3820 remote_partition_disable(cs, tmp); in cpuset_hotplug_update_tasks()
3821 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3832 if (is_local_partition(cs) && (!is_partition_valid(parent) || in cpuset_hotplug_update_tasks()
3833 tasks_nocpu_error(parent, cs, &new_cpus))) in cpuset_hotplug_update_tasks()
3839 else if (is_partition_valid(parent) && is_partition_invalid(cs)) in cpuset_hotplug_update_tasks()
3843 update_parent_effective_cpumask(cs, partcmd, NULL, tmp); in cpuset_hotplug_update_tasks()
3844 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) { in cpuset_hotplug_update_tasks()
3845 compute_partition_effective_cpumask(cs, &new_cpus); in cpuset_hotplug_update_tasks()
3851 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3852 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3860 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3863 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3955 struct cpuset *cs; in cpuset_handle_hotplug() local
3959 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_handle_hotplug()
3960 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_handle_hotplug()
3964 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_handle_hotplug()
3967 css_put(&cs->css); in cpuset_handle_hotplug()
4038 struct cpuset *cs; in cpuset_cpus_allowed() local
4043 cs = task_cs(tsk); in cpuset_cpus_allowed()
4044 if (cs != &top_cpuset) in cpuset_cpus_allowed()
4051 if ((cs == &top_cpuset) || cpumask_empty(pmask)) { in cpuset_cpus_allowed()
4098 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
4161 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
4163 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
4164 cs = parent_cs(cs); in nearest_hardwall_ancestor()
4165 return cs; in nearest_hardwall_ancestor()
4210 struct cpuset *cs; /* current cpuset ancestors */ in cpuset_node_allowed() local
4234 cs = nearest_hardwall_ancestor(task_cs(current)); in cpuset_node_allowed()
4235 allowed = node_isset(node, cs->mems_allowed); in cpuset_node_allowed()