Lines Matching full:cpu

5  * Built-in idle CPU tracking policy.
14 /* Enable/disable built-in idle CPU selection policy */
34 cpumask_var_t cpu; member
60  * Returns the NUMA node ID associated with @cpu, or NUMA_NO_NODE if
63 static int scx_cpu_node_if_enabled(int cpu) in scx_cpu_node_if_enabled() argument
68 return cpu_to_node(cpu); in scx_cpu_node_if_enabled()
71 bool scx_idle_test_and_clear_cpu(int cpu) in scx_idle_test_and_clear_cpu() argument
73 int node = scx_cpu_node_if_enabled(cpu); in scx_idle_test_and_clear_cpu()
74 struct cpumask *idle_cpus = idle_cpumask(node)->cpu; in scx_idle_test_and_clear_cpu()
78 * SMT mask should be cleared whether we can claim @cpu or not. The SMT in scx_idle_test_and_clear_cpu()
83 const struct cpumask *smt = cpu_smt_mask(cpu); in scx_idle_test_and_clear_cpu()
87 * If offline, @cpu is not its own sibling and in scx_idle_test_and_clear_cpu()
89 * @cpu is never cleared from the idle SMT mask. Ensure that in scx_idle_test_and_clear_cpu()
90 * @cpu is eventually cleared. in scx_idle_test_and_clear_cpu()
98 else if (cpumask_test_cpu(cpu, idle_smts)) in scx_idle_test_and_clear_cpu()
99 __cpumask_clear_cpu(cpu, idle_smts); in scx_idle_test_and_clear_cpu()
103 return cpumask_test_and_clear_cpu(cpu, idle_cpus); in scx_idle_test_and_clear_cpu()
107 * Pick an idle CPU in a specific NUMA node.
111 int cpu; in pick_idle_cpu_in_node() local
115 cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed); in pick_idle_cpu_in_node()
116 if (cpu < nr_cpu_ids) in pick_idle_cpu_in_node()
123 cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed); in pick_idle_cpu_in_node()
124 if (cpu >= nr_cpu_ids) in pick_idle_cpu_in_node()
128 if (scx_idle_test_and_clear_cpu(cpu)) in pick_idle_cpu_in_node()
129 return cpu; in pick_idle_cpu_in_node()
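
The matched lines above outline a two-pass search: prefer a CPU from a fully idle SMT core, then fall back to any idle CPU in the node, claiming the pick via scx_idle_test_and_clear_cpu(). A reconstruction of that loop, with the parts elided by the search (the retry label, the sched_smt_active() guard and the SCX_PICK_IDLE_CORE early exit) filled in as assumptions, could look like:

static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
{
    int cpu;

retry:
    if (sched_smt_active()) {
        /* Prefer a CPU whose whole SMT core is idle. */
        cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
        if (cpu < nr_cpu_ids)
            goto found;

        /* Caller only wants a fully idle core. */
        if (flags & SCX_PICK_IDLE_CORE)
            return -EBUSY;
    }

    /* Otherwise accept any idle CPU in the node. */
    cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
    if (cpu >= nr_cpu_ids)
        return -EBUSY;

found:
    /* Claim the CPU; retry if someone else raced us and claimed it first. */
    if (scx_idle_test_and_clear_cpu(cpu))
        return cpu;
    goto retry;
}
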
136 * CPU across all available nodes.
141 * Search for an idle CPU across all nodes, excluding @node.
146 s32 cpu = -EBUSY; in pick_idle_cpu_from_online_nodes() local
165 * SCX_OPS_BUILTIN_IDLE_PER_NODE and it's requesting an idle CPU in pick_idle_cpu_from_online_nodes()
174 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags); in pick_idle_cpu_from_online_nodes()
175 if (cpu >= 0) in pick_idle_cpu_from_online_nodes()
180 return cpu; in pick_idle_cpu_from_online_nodes()
184 * Find an idle CPU in the system, starting from @node.
188 s32 cpu; in scx_pick_idle_cpu() local
195 cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags); in scx_pick_idle_cpu()
196 if (cpu >= 0) in scx_pick_idle_cpu()
197 return cpu; in scx_pick_idle_cpu()
214  * Return the number of CPUs in the same LLC domain as @cpu (or zero if the LLC
217 static unsigned int llc_weight(s32 cpu) in llc_weight() argument
221 sd = rcu_dereference(per_cpu(sd_llc, cpu)); in llc_weight()
229 * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
232 static struct cpumask *llc_span(s32 cpu) in llc_span() argument
236 sd = rcu_dereference(per_cpu(sd_llc, cpu)); in llc_span()
244  * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
247 static unsigned int numa_weight(s32 cpu) in numa_weight() argument
252 sd = rcu_dereference(per_cpu(sd_numa, cpu)); in numa_weight()
263 * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
266 static struct cpumask *numa_span(s32 cpu) in numa_span() argument
271 sd = rcu_dereference(per_cpu(sd_numa, cpu)); in numa_span()
287 int cpu; in llc_numa_mismatch() local
294 * topologies, CPU hotplugging or virtualized environments can result in llc_numa_mismatch()
307 * In this case, if we only check the first online CPU (cpu0), we might in llc_numa_mismatch()
312 for_each_online_cpu(cpu) in llc_numa_mismatch()
313 if (llc_weight(cpu) != numa_weight(cpu)) in llc_numa_mismatch()
323 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
327 * CPU belongs to a single LLC domain, and that each LLC domain is entirely
334 s32 cpu = cpumask_first(cpu_online_mask); in scx_idle_update_selcpu_topology() local
339 * single LLC domain, the idle CPU selection logic can choose any in scx_idle_update_selcpu_topology()
340 * online CPU without bias. in scx_idle_update_selcpu_topology()
343 * online CPU to determine whether a single LLC domain includes all in scx_idle_update_selcpu_topology()
347 nr_cpus = llc_weight(cpu); in scx_idle_update_selcpu_topology()
352 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu)); in scx_idle_update_selcpu_topology()
362 * for an idle CPU in the same domain twice is redundant. in scx_idle_update_selcpu_topology()
369 nr_cpus = numa_weight(cpu); in scx_idle_update_selcpu_topology()
374 cpumask_pr_args(numa_span(cpu)), nr_cpus); in scx_idle_update_selcpu_topology()
395 * Built-in CPU idle selection policy:
401 * 2. Reuse the same CPU:
402 * - prefer the last used CPU to take advantage of cached data (L1, L2) and
405 * 3. Pick a CPU within the same LLC (Last-Level Cache):
406 * - if the above conditions aren't met, pick a CPU that shares the same LLC
409 * 4. Pick a CPU within the same NUMA node, if enabled:
410 * - choose a CPU from the same NUMA node to reduce memory access latency.
412 * 5. Pick any idle CPU usable by the task.
422 * Return the picked CPU if idle, or a negative value otherwise.
424 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
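
Step 1 of the policy (prioritizing fully idle cores) has no full-word `cpu` match and is therefore not listed, but it explains why the implementation runs the same ladder twice: once restricted to wholly idle SMT cores, once accepting any idle sibling. A condensed paraphrase of that ladder, with WAKE_SYNC handling, cpumask setup and locking elided, and llc_cpus/numa_cpus assumed to be the LLC/NUMA spans intersected with p->cpus_ptr (an illustrative sketch, not the in-tree body):

static s32 select_cpu_ladder_sketch(struct task_struct *p, s32 prev_cpu, int node,
                                    const struct cpumask *llc_cpus,
                                    const struct cpumask *numa_cpus, u64 flags)
{
    s32 cpu;

    if (sched_smt_active()) {
        /* 2. Reuse @prev_cpu if its whole SMT core is idle. */
        if (cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
            scx_idle_test_and_clear_cpu(prev_cpu))
            return prev_cpu;

        /* 3-5. A fully idle core in the same LLC, same NUMA node, then anywhere. */
        if (llc_cpus &&
            (cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE)) >= 0)
            return cpu;
        if (numa_cpus &&
            (cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE)) >= 0)
            return cpu;
        if ((cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE)) >= 0)
            return cpu;
    }

    /* Same ladder again, accepting any idle sibling instead of a whole core. */
    if (scx_idle_test_and_clear_cpu(prev_cpu))
        return prev_cpu;
    if (llc_cpus && (cpu = pick_idle_cpu_in_node(llc_cpus, node, 0)) >= 0)
        return cpu;
    if (numa_cpus && (cpu = pick_idle_cpu_in_node(numa_cpus, node, 0)) >= 0)
        return cpu;
    return scx_pick_idle_cpu(p->cpus_ptr, node, flags);
}
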
432 s32 cpu; in scx_select_cpu_dfl() local
444 * updating a cpumask every time we need to select an idle CPU (which in scx_select_cpu_dfl()
447 * CPU affinity), the task will simply use the flat scheduling domain in scx_select_cpu_dfl()
459 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU. in scx_select_cpu_dfl()
465 * If the waker's CPU is cache affine and prev_cpu is idle, in scx_select_cpu_dfl()
468 cpu = smp_processor_id(); in scx_select_cpu_dfl()
469 if (cpus_share_cache(cpu, prev_cpu) && in scx_select_cpu_dfl()
471 cpu = prev_cpu; in scx_select_cpu_dfl()
488 waker_node = cpu_to_node(cpu); in scx_select_cpu_dfl()
490 cpu_rq(cpu)->scx.local_dsq.nr == 0 && in scx_select_cpu_dfl()
492 !cpumask_empty(idle_cpumask(waker_node)->cpu)) { in scx_select_cpu_dfl()
493 if (cpumask_test_cpu(cpu, p->cpus_ptr)) in scx_select_cpu_dfl()
499  * If the CPU has SMT, any wholly idle CPU is likely a better pick than in scx_select_cpu_dfl()
508 cpu = prev_cpu; in scx_select_cpu_dfl()
516 cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE); in scx_select_cpu_dfl()
517 if (cpu >= 0) in scx_select_cpu_dfl()
525 cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE); in scx_select_cpu_dfl()
526 if (cpu >= 0) in scx_select_cpu_dfl()
533 * If the node-aware idle CPU selection policy is enabled in scx_select_cpu_dfl()
538 cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE); in scx_select_cpu_dfl()
539 if (cpu >= 0) in scx_select_cpu_dfl()
547 cpu = -EBUSY; in scx_select_cpu_dfl()
556 cpu = prev_cpu; in scx_select_cpu_dfl()
561 * Search for any idle CPU in the same LLC domain. in scx_select_cpu_dfl()
564 cpu = pick_idle_cpu_in_node(llc_cpus, node, 0); in scx_select_cpu_dfl()
565 if (cpu >= 0) in scx_select_cpu_dfl()
570 * Search for any idle CPU in the same NUMA node. in scx_select_cpu_dfl()
573 cpu = pick_idle_cpu_in_node(numa_cpus, node, 0); in scx_select_cpu_dfl()
574 if (cpu >= 0) in scx_select_cpu_dfl()
579 * Search for any idle CPU usable by the task. in scx_select_cpu_dfl()
581 * If the node-aware idle CPU selection policy is enabled in scx_select_cpu_dfl()
586 cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags); in scx_select_cpu_dfl()
591 return cpu; in scx_select_cpu_dfl()
602 BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL)); in scx_idle_init_masks()
615 BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->cpu, GFP_KERNEL, node)); in scx_idle_init_masks()
620 static void update_builtin_idle(int cpu, bool idle) in update_builtin_idle() argument
622 int node = scx_cpu_node_if_enabled(cpu); in update_builtin_idle()
623 struct cpumask *idle_cpus = idle_cpumask(node)->cpu; in update_builtin_idle()
625 assign_cpu(cpu, idle_cpus, idle); in update_builtin_idle()
629 const struct cpumask *smt = cpu_smt_mask(cpu); in update_builtin_idle()
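
Only the `cpu` matches of update_builtin_idle() are listed; the SMT half of the update, sketched under the assumption that idle_cpumask(node)->smt tracks "all siblings idle" (the in-tree details may differ), plausibly reads:

    if (sched_smt_active()) {
        const struct cpumask *smt = cpu_smt_mask(cpu);
        struct cpumask *idle_smts = idle_cpumask(node)->smt;

        if (idle) {
            /*
             * Mark the core as idle only once every sibling is idle;
             * a racy check is acceptable since it only costs an
             * optimization, not correctness.
             */
            if (cpumask_subset(smt, idle_cpus))
                cpumask_or(idle_smts, idle_smts, smt);
        } else {
            cpumask_andnot(idle_smts, idle_smts, smt);
        }
    }
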
648 * Update the idle state of a CPU to @idle.
655 * This distinction is necessary, because an idle CPU can be "reserved" and
657 * busy even if no tasks are dispatched. In this case, the CPU may return
665 int cpu = cpu_of(rq); in __scx_update_idle() local
695 update_builtin_idle(cpu, idle); in __scx_update_idle()
707 cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask); in reset_idle_masks()
715 cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask); in reset_idle_masks()
786 * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
787 * trigger an error if @cpu is invalid
788  * @cpu: target CPU
790 __bpf_kfunc int scx_bpf_cpu_node(s32 cpu) in scx_bpf_cpu_node() argument
793 if (!ops_cpu_valid(cpu, NULL)) in scx_bpf_cpu_node()
796 return cpu_to_node(cpu); in scx_bpf_cpu_node()
804 * @p: task_struct to select a CPU for
805 * @prev_cpu: CPU @p was on previously
807 * @is_idle: out parameter indicating whether the returned CPU is idle
809 * Can only be called from ops.select_cpu() if the built-in CPU selection is
813 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
820 s32 cpu; in scx_bpf_select_cpu_dfl() local
832 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0); in scx_bpf_select_cpu_dfl()
833 if (cpu >= 0) { in scx_bpf_select_cpu_dfl()
835 return cpu; in scx_bpf_select_cpu_dfl()
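
As a usage sketch: a BPF scheduler's ops.select_cpu() typically delegates to this kfunc and dispatches straight to the local DSQ when an idle CPU was claimed. This assumes the usual scx BPF tooling headers and a kernel providing scx_bpf_dsq_insert(); the ops name is illustrative:

#include <scx/common.bpf.h>

s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
                   s32 prev_cpu, u64 wake_flags)
{
    bool is_idle = false;
    s32 cpu;

    /* Let the built-in policy pick; *is_idle reports whether it was claimed idle. */
    cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
    if (is_idle)
        /* Idle CPU claimed: queue @p directly on its local DSQ. */
        scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

    return cpu;
}
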
846 * idle-tracking per-CPU cpumask of a target NUMA node.
860 return idle_cpumask(node)->cpu; in scx_bpf_get_idle_cpumask_node()
868 * per-CPU cpumask.
884 return idle_cpumask(NUMA_NO_NODE)->cpu; in scx_bpf_get_idle_cpumask()
910 return idle_cpumask(node)->cpu; in scx_bpf_get_idle_smtmask_node()
938 return idle_cpumask(NUMA_NO_NODE)->cpu; in scx_bpf_get_idle_smtmask()
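
These kfuncs hand out references to the idle-tracking CPU and SMT masks, which must be released with scx_bpf_put_idle_cpumask(). A small sketch of the get/test/put pattern from BPF, assuming the usual scx headers (the helper name is illustrative):

/* Check whether @cpu is currently in the global idle-tracking cpumask. */
static bool cpu_marked_idle(s32 cpu)
{
    const struct cpumask *idle = scx_bpf_get_idle_cpumask();
    bool ret = bpf_cpumask_test_cpu(cpu, idle);

    scx_bpf_put_idle_cpumask(idle);
    return ret;
}
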
960 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
961  * @cpu: cpu to test and clear idle for
963 * Returns %true if @cpu was idle and its idle state was successfully cleared.
969 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) in scx_bpf_test_and_clear_cpu_idle() argument
974 if (ops_cpu_valid(cpu, NULL)) in scx_bpf_test_and_clear_cpu_idle()
975 return scx_idle_test_and_clear_cpu(cpu); in scx_bpf_test_and_clear_cpu_idle()
981 * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from @node
986 * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
988 * Returns the picked idle cpu number on success, or -%EBUSY if no matching
989 * cpu was found.
1010 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
1014 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
1015  * number on success, or -%EBUSY if no matching cpu was found. in scx_bpf_pick_idle_cpu()
1017 * Idle CPU tracking may race against CPU scheduling state transitions. For
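
A common pattern in a custom ops.select_cpu() or enqueue path is to try to claim @prev_cpu first and only then widen the search; no release step is needed because the pick kfuncs claim the CPU atomically. A sketch assuming the usual scx BPF headers (helper name illustrative; the SCX_PICK_IDLE_CORE pass is optional):

static s32 pick_cpu_sketch(struct task_struct *p, s32 prev_cpu)
{
    s32 cpu;

    /* Fast path: @prev_cpu is still idle, claim it. */
    if (scx_bpf_test_and_clear_cpu_idle(prev_cpu))
        return prev_cpu;

    /* Prefer a fully idle SMT core anywhere in the allowed set. */
    cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
    if (cpu >= 0)
        return cpu;

    /* Settle for any idle CPU; returns -EBUSY if none. */
    return scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
}
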
1046 * scx_bpf_pick_any_cpu_node - Pick and claim an idle cpu if available
1047 * or pick any CPU from @node
1052 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
1053 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
1060 * the CPU idle state).
1064 * CPU.
1069 s32 cpu; in scx_bpf_pick_any_cpu_node() local
1075 cpu = scx_pick_idle_cpu(cpus_allowed, node, flags); in scx_bpf_pick_any_cpu_node()
1076 if (cpu >= 0) in scx_bpf_pick_any_cpu_node()
1077 return cpu; in scx_bpf_pick_any_cpu_node()
1080 cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed); in scx_bpf_pick_any_cpu_node()
1082 cpu = cpumask_any_distribute(cpus_allowed); in scx_bpf_pick_any_cpu_node()
1083 if (cpu < nr_cpu_ids) in scx_bpf_pick_any_cpu_node()
1084 return cpu; in scx_bpf_pick_any_cpu_node()
1090 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
1094 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
1095 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
1101 * CPU.
1109 s32 cpu; in scx_bpf_pick_any_cpu() local
1117 cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags); in scx_bpf_pick_any_cpu()
1118 if (cpu >= 0) in scx_bpf_pick_any_cpu()
1119 return cpu; in scx_bpf_pick_any_cpu()
1122 cpu = cpumask_any_distribute(cpus_allowed); in scx_bpf_pick_any_cpu()
1123 if (cpu < nr_cpu_ids) in scx_bpf_pick_any_cpu()
1124 return cpu; in scx_bpf_pick_any_cpu()
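
When a target CPU is needed unconditionally, e.g. to aim a task at a per-CPU local DSQ from ops.enqueue(), the _any_ variants are the natural fit. A sketch assuming the usual scx BPF headers (ops name illustrative; kicking the chosen CPU is left out):

void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
    /* Idle CPU if one is available, otherwise any allowed CPU. */
    s32 cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);

    if (cpu >= 0)
        scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, enq_flags);
    else
        /* Empty allowed mask: fall back to the global DSQ. */
        scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
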