
Searched refs:cpus_allowed (Results 1 – 13 of 13) sorted by relevance

/linux/kernel/sched/
ext_idle.c 115 static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags) in pick_idle_cpu_in_node() argument
121 cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed); in pick_idle_cpu_in_node()
129 cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed); in pick_idle_cpu_in_node()
150 static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags) in pick_idle_cpu_from_online_nodes() argument
181 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags); in pick_idle_cpu_from_online_nodes()
191 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags) in pick_idle_cpu_from_online_nodes() argument
200 static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags) in scx_pick_idle_cpu() argument
209 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags); in scx_pick_idle_cpu()
224 return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags); in scx_pick_idle_cpu()
452 const struct cpumask *cpus_allowed, u64 flags) in scx_select_cpu_dfl() argument
[all …]
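
Note: the ext_idle.c hits above show the two-level search sched_ext performs when picking an idle CPU inside a node: first a CPU whose whole SMT core is idle, then any idle CPU, always AND-ed with cpus_allowed. A minimal sketch of that ordering, reusing idle_cpumask() and SCX_PICK_IDLE_CORE from the hits (the function itself is illustrative, not the kernel's exact code):

static s32 pick_idle_cpu_sketch(const struct cpumask *cpus_allowed, int node,
				u64 flags)
{
	s32 cpu;

	if (flags & SCX_PICK_IDLE_CORE) {
		/* Prefer a CPU whose whole SMT core is idle. */
		cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt,
						 cpus_allowed);
		if (cpu < nr_cpu_ids)
			return cpu;
	}

	/* Fall back to any allowed idle CPU in this node. */
	cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
	return cpu < nr_cpu_ids ? cpu : -EBUSY;
}
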
syscalls.c 1150 cpumask_var_t cpus_allowed, new_mask; in __sched_setaffinity() local
1152 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) in __sched_setaffinity()
1160 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
1161 cpumask_and(new_mask, ctx->new_mask, cpus_allowed); in __sched_setaffinity()
1174 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
1175 if (!cpumask_subset(new_mask, cpus_allowed)) { in __sched_setaffinity()
1180 cpumask_copy(new_mask, cpus_allowed); in __sched_setaffinity()
1195 cpumask_copy(new_mask, cpus_allowed); in __sched_setaffinity()
1204 free_cpumask_var(cpus_allowed); in __sched_setaffinity()
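
Note: __sched_setaffinity() above intersects the caller's requested mask with the task's cpuset (cpuset_cpus_allowed()) before applying it, and falls back to the cpuset mask if a concurrent cpuset change invalidates the result. The same clamping is visible from userspace through sched_setaffinity(2); a minimal self-contained sketch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t want, got;

	CPU_ZERO(&want);
	CPU_SET(0, &want);
	CPU_SET(1, &want);

	/* The kernel ANDs this with the task's cpuset before applying it. */
	if (sched_setaffinity(0, sizeof(want), &want))
		perror("sched_setaffinity");

	sched_getaffinity(0, sizeof(got), &got);
	printf("effective CPU count: %d\n", CPU_COUNT(&got));
	return 0;
}
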
ext_idle.h 19 const struct cpumask *cpus_allowed, u64 flags);
/linux/arch/mips/kernel/
mips-mt-fpaff.c 69 cpumask_var_t cpus_allowed, new_mask, effective_mask; in mipsmt_sys_sched_setaffinity() local
94 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { in mipsmt_sys_sched_setaffinity()
132 cpuset_cpus_allowed(p, cpus_allowed); in mipsmt_sys_sched_setaffinity()
133 if (!cpumask_subset(effective_mask, cpus_allowed)) { in mipsmt_sys_sched_setaffinity()
139 cpumask_copy(new_mask, cpus_allowed); in mipsmt_sys_sched_setaffinity()
148 free_cpumask_var(cpus_allowed); in mipsmt_sys_sched_setaffinity()
/linux/tools/sched_ext/include/scx/
compat.bpf.h 223 #define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) \ argument
225 scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) : \
226 scx_bpf_pick_idle_cpu(cpus_allowed, flags))
228 #define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) \ argument
230 scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) : \
231 scx_bpf_pick_any_cpu(cpus_allowed, flags))
common.bpf.h 64 const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
99 s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
100 s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
101 s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
102 s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
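
Note: the scx headers above expose the idle-pick kfuncs to BPF schedulers, with __weak *_node variants plus compat macros so one object runs on kernels with or without NUMA-aware picking. A sketch of a select_cpu callback wired through the compat macro (the callback name and the literal node are illustrative; p->cpus_ptr is the task's allowed mask):

s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	s32 cpu;

	/* Search only the task's allowed CPUs, preferring idle cores. */
	cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, 0 /* node */,
						  SCX_PICK_IDLE_CORE);
	if (cpu >= 0)
		return cpu;

	/* Nothing idle: stay where the task last ran. */
	return prev_cpu;
}
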
/linux/kernel/cgroup/
cpuset-v1.c 157 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
264 while (cpumask_empty(parent->cpus_allowed) || in remove_tasks_in_empty_cpuset()
292 cpumask_copy(cs->cpus_allowed, new_cpus); in cpuset1_hotplug_update_tasks()
302 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in cpuset1_hotplug_update_tasks()
307 is_empty = cpumask_empty(cs->cpus_allowed) || in cpuset1_hotplug_update_tasks()
341 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && in is_cpuset_subset()
cpuset.c 518 &trial->cpus_allowed, in dup_or_alloc_cpuset()
531 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in dup_or_alloc_cpuset()
546 free_cpumask_var(cs->cpus_allowed); in free_cpuset()
556 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed in user_xcpus()
562 return cpumask_empty(cs->cpus_allowed) && in xcpus_empty()
604 if (!cpumask_empty(cs1->cpus_allowed) && in cpus_excl_conflict()
605 cpumask_subset(cs1->cpus_allowed, cs2->exclusive_cpus)) in cpus_excl_conflict()
608 if (!cpumask_empty(cs2->cpus_allowed) && in cpus_excl_conflict()
609 cpumask_subset(cs2->cpus_allowed, cs1->exclusive_cpus)) in cpus_excl_conflict()
667 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
[all …]
cpuset-internal.h 100 cpumask_var_t cpus_allowed; member
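
Note: the cpuset hits show the invariants kept around cpus_allowed: changes are staged on a trial copy (dup_or_alloc_cpuset()), a child must stay nested inside its parent (is_cpuset_subset()), and exclusive sets must not swallow a sibling's CPUs (cpus_excl_conflict()). A condensed sketch of the v1-style nesting check, using the field names from cpuset-internal.h (the helper itself is illustrative):

static int validate_new_cpus_sketch(struct cpuset *cs, struct cpuset *parent,
				    const struct cpumask *new_cpus)
{
	/* A cpuset's CPUs must always nest inside its parent's. */
	if (!cpumask_subset(new_cpus, parent->cpus_allowed))
		return -EINVAL;

	cpumask_copy(cs->cpus_allowed, new_cpus);
	return 0;
}
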
/linux/tools/testing/selftests/ublk/
test_generic_12.sh 42 --cpus_allowed=0 > /dev/null 2>&1
/linux/arch/powerpc/platforms/cell/spufs/
spufs.h 111 cpumask_t cpus_allowed; member
sched.c 131 cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); in __spu_update_sched_info()
160 if (cpumask_intersects(mask, &ctx->cpus_allowed)) in __node_allowed()
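
Note: the spufs hits cache the submitting thread's affinity in the context (cpumask_copy() from current->cpus_ptr) and later gate SPU node placement on it. A sketch of that gate, assuming cpumask_of_node() as the source of the per-node CPU mask:

static int node_allowed_sketch(struct spu_context *ctx, int node)
{
	/* Admit the node only if it has a CPU the creator may run on. */
	return cpumask_intersects(cpumask_of_node(node), &ctx->cpus_allowed);
}
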
/linux/Documentation/admin-guide/cgroup-v1/
cpusets.rst 61 schedule a task on a CPU that is not allowed in its cpus_allowed
161 displaying the task's cpus_allowed (on which CPUs it may be scheduled)
497 (makes sure that all the CPUs in the cpus_allowed of that cpuset are
680 their cpus_allowed to allow all online CPUs. When memory hotplug
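
Note: the cpusets.rst hits mention that a task's cpus_allowed is visible from userspace; /proc/<pid>/status carries it as Cpus_allowed (hex mask) and Cpus_allowed_list (range list). A small self-contained reader:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	/* Matches both Cpus_allowed: and Cpus_allowed_list:. */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Cpus_allowed", 12))
			fputs(line, stdout);
	fclose(f);
	return 0;
}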