xref: /linux/kernel/sched/ext_idle.c (revision 5bdb4078e1efba9650c03753616866192d680718)
// SPDX-License-Identifier: GPL-2.0
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Built-in idle CPU tracking policy.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 * Copyright (c) 2024 Andrea Righi <arighi@nvidia.com>
 */
#include "ext_idle.h"

/* Enable/disable built-in idle CPU selection policy */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);

/* Enable/disable per-node idle cpumasks */
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);

/* Enable/disable LLC aware optimizations */
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);

/* Enable/disable NUMA aware optimizations */
static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);

/*
 * cpumasks to track idle CPUs within each NUMA node.
 *
 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is not enabled, a single global cpumask
 * is used to track all the idle CPUs in the system.
 */
struct scx_idle_cpus {
	cpumask_var_t cpu;
	cpumask_var_t smt;
};

/*
 * Global host-wide idle cpumasks (used when SCX_OPS_BUILTIN_IDLE_PER_NODE
 * is not enabled).
 */
static struct scx_idle_cpus scx_idle_global_masks;

/*
 * Per-node idle cpumasks.
 */
static struct scx_idle_cpus **scx_idle_node_masks;

/*
 * Local per-CPU cpumasks (used to generate temporary idle cpumasks).
 */
static DEFINE_PER_CPU(cpumask_var_t, local_idle_cpumask);
static DEFINE_PER_CPU(cpumask_var_t, local_llc_idle_cpumask);
static DEFINE_PER_CPU(cpumask_var_t, local_numa_idle_cpumask);

/*
 * Return the idle masks associated with a target @node.
 *
 * NUMA_NO_NODE identifies the global idle cpumask.
 */
static struct scx_idle_cpus *idle_cpumask(int node)
{
	return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];
}

/*
 * Returns the NUMA node ID associated with a @cpu, or NUMA_NO_NODE if
 * per-node idle cpumasks are disabled.
 */
static int scx_cpu_node_if_enabled(int cpu)
{
	if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
		return NUMA_NO_NODE;

	return cpu_to_node(cpu);
}

static bool scx_idle_test_and_clear_cpu(int cpu)
{
	int node = scx_cpu_node_if_enabled(cpu);
	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

#ifdef CONFIG_SCHED_SMT
	/*
	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
	 * cluster is not wholly idle either way. This also prevents
	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
	 */
	if (sched_smt_active()) {
		const struct cpumask *smt = cpu_smt_mask(cpu);
		struct cpumask *idle_smts = idle_cpumask(node)->smt;

		/*
		 * If offline, @cpu is not its own sibling and
		 * scx_pick_idle_cpu() can get caught in an infinite loop as
		 * @cpu is never cleared from the idle SMT mask. Ensure that
		 * @cpu is eventually cleared.
		 *
		 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
		 * reduce memory writes, which may help alleviate cache
		 * coherence pressure.
		 */
		if (cpumask_intersects(smt, idle_smts))
			cpumask_andnot(idle_smts, idle_smts, smt);
		else if (cpumask_test_cpu(cpu, idle_smts))
			__cpumask_clear_cpu(cpu, idle_smts);
	}
#endif

	return cpumask_test_and_clear_cpu(cpu, idle_cpus);
}
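
/*
 * Example (illustrative sketch, not part of the upstream file): the
 * test-and-clear above implements claim semantics. If two racing wakeups
 * target the same idle CPU, only one caller sees the idle bit set and
 * clears it; the loser must keep searching:
 *
 *	if (scx_idle_test_and_clear_cpu(cpu))
 *		return cpu;		// we claimed @cpu
 *
 *	// somebody else claimed @cpu first; pick another CPU
 */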

/*
 * Pick an idle CPU in a specific NUMA node.
 */
static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	int cpu;

retry:
	if (sched_smt_active()) {
		cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
		if (cpu < nr_cpu_ids)
			goto found;

		if (flags & SCX_PICK_IDLE_CORE)
			return -EBUSY;
	}

	cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
	if (cpu >= nr_cpu_ids)
		return -EBUSY;

found:
	if (scx_idle_test_and_clear_cpu(cpu))
		return cpu;
	else
		goto retry;
}

#ifdef CONFIG_NUMA
/*
 * Tracks nodes that have not yet been visited when searching for an idle
 * CPU across all available nodes.
 */
static DEFINE_PER_CPU(nodemask_t, per_cpu_unvisited);

/*
 * Search for an idle CPU across all nodes, excluding @node.
 */
static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	nodemask_t *unvisited;
	s32 cpu = -EBUSY;

	preempt_disable();
	unvisited = this_cpu_ptr(&per_cpu_unvisited);

	/*
	 * Restrict the search to the online nodes (excluding the current
	 * node that has been visited already).
	 */
	nodes_copy(*unvisited, node_states[N_ONLINE]);
	node_clear(node, *unvisited);

	/*
	 * Traverse all nodes in order of increasing distance, starting
	 * from @node.
	 *
	 * This loop is O(N^2), with N being the number of NUMA nodes,
	 * which might be quite expensive in large NUMA systems. However,
	 * this complexity comes into play only when a scheduler enables
	 * SCX_OPS_BUILTIN_IDLE_PER_NODE and it's requesting an idle CPU
	 * without specifying a target NUMA node, so it shouldn't be a
	 * bottleneck in most cases.
	 *
	 * As a future optimization we may want to cache the list of nodes
	 * in a per-node array, instead of actually traversing them every
	 * time.
	 */
	for_each_node_numadist(node, *unvisited) {
		cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
		if (cpu >= 0)
			break;
	}
	preempt_enable();

	return cpu;
}
#else
static inline s32
pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	return -EBUSY;
}
#endif

/*
 * Find an idle CPU in the system, starting from @node.
 */
static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
{
	s32 cpu;

	/*
	 * Always search in the starting node first (this is an
	 * optimization that can save some cycles even when the search is
	 * not limited to a single node).
	 */
	cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
	if (cpu >= 0)
		return cpu;

	/*
	 * Stop the search if we are using only a single global cpumask
	 * (NUMA_NO_NODE) or if the search is restricted to the first node
	 * only.
	 */
	if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)
		return -EBUSY;

	/*
	 * Extend the search to the other online nodes.
	 */
	return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags);
}
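
/*
 * Example (illustrative sketch): restricting the search to a single node.
 * 'p' and 'prev_cpu' are assumed caller-side variables:
 *
 *	int node = scx_cpu_node_if_enabled(prev_cpu);
 *	s32 cpu = scx_pick_idle_cpu(p->cpus_ptr, node, SCX_PICK_IDLE_IN_NODE);
 *
 *	// cpu >= 0: claimed an idle CPU in @node
 *	// cpu == -EBUSY: no idle CPU there, and the flag prevented
 *	// extending the search to other nodes
 */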

/*
 * Return the number of CPUs in the same LLC domain of @cpu (or zero if the
 * LLC domain is not defined).
 */
static unsigned int llc_weight(s32 cpu)
{
	struct sched_domain *sd;

	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	if (!sd)
		return 0;

	return sd->span_weight;
}

/*
 * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
 * domain is not defined).
 */
static struct cpumask *llc_span(s32 cpu)
{
	struct sched_domain *sd;

	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	if (!sd)
		return NULL;

	return sched_domain_span(sd);
}

/*
 * Return the number of CPUs in the same NUMA domain of @cpu (or zero if the
 * NUMA domain is not defined).
 */
static unsigned int numa_weight(s32 cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	sd = rcu_dereference(per_cpu(sd_numa, cpu));
	if (!sd)
		return 0;
	sg = sd->groups;
	if (!sg)
		return 0;

	return sg->group_weight;
}

/*
 * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
 * domain is not defined).
 */
static struct cpumask *numa_span(s32 cpu)
{
	struct sched_domain *sd;
	struct sched_group *sg;

	sd = rcu_dereference(per_cpu(sd_numa, cpu));
	if (!sd)
		return NULL;
	sg = sd->groups;
	if (!sg)
		return NULL;

	return sched_group_span(sg);
}

/*
 * Return true if the LLC domains do not perfectly overlap with the NUMA
 * domains, false otherwise.
 */
static bool llc_numa_mismatch(void)
{
	int cpu;

	/*
	 * We need to scan all online CPUs to verify whether their scheduling
	 * domains overlap.
	 *
	 * While it is rare to encounter architectures with asymmetric NUMA
	 * topologies, CPU hotplugging or virtualized environments can result
	 * in asymmetric configurations.
	 *
	 * For example:
	 *
	 *  NUMA 0:
	 *    - LLC 0: cpu0..cpu7
	 *    - LLC 1: cpu8..cpu15 [offline]
	 *
	 *  NUMA 1:
	 *    - LLC 0: cpu16..cpu23
	 *    - LLC 1: cpu24..cpu31
	 *
	 * In this case, if we only checked the first online CPU (cpu0), we
	 * might incorrectly conclude that the LLC and NUMA domains fully
	 * overlap, while NUMA 1 actually contains two distinct LLC domains.
	 */
	for_each_online_cpu(cpu)
		if (llc_weight(cpu) != numa_weight(cpu))
			return true;

	return false;
}

/*
 * Initialize topology-aware scheduling.
 *
 * Detect if the system has multiple LLC or multiple NUMA domains and enable
 * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
 * selection policy.
 *
 * Assumption: the kernel's internal topology representation guarantees that
 * each CPU belongs to a single LLC domain, and that each LLC domain is
 * entirely contained within a single NUMA node.
 */
void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
{
	bool enable_llc = false, enable_numa = false;
	unsigned int nr_cpus;
	s32 cpu = cpumask_first(cpu_online_mask);

	/*
	 * Enable LLC domain optimization only when there are multiple LLC
	 * domains among the online CPUs. If all online CPUs are part of a
	 * single LLC domain, the idle CPU selection logic can choose any
	 * online CPU without bias.
	 *
	 * Note that it is sufficient to check the LLC domain of the first
	 * online CPU to determine whether a single LLC domain includes all
	 * CPUs.
	 */
	rcu_read_lock();
	nr_cpus = llc_weight(cpu);
	if (nr_cpus > 0) {
		if (nr_cpus < num_online_cpus())
			enable_llc = true;
		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
	}

	/*
	 * Enable NUMA optimization only when there are multiple NUMA domains
	 * among the online CPUs and the NUMA domains don't perfectly overlap
	 * with the LLC domains.
	 *
	 * If all CPUs belong to the same NUMA node and the same LLC domain,
	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
	 * for an idle CPU in the same domain twice is redundant.
	 *
	 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
	 * optimization, as we would naturally select idle CPUs within
	 * specific NUMA nodes by querying the corresponding per-node cpumask.
	 */
	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
		nr_cpus = numa_weight(cpu);
		if (nr_cpus > 0) {
			if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
				enable_numa = true;
			pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
				 cpumask_pr_args(numa_span(cpu)), nr_cpus);
		}
	}
	rcu_read_unlock();

	pr_debug("sched_ext: LLC idle selection %s\n",
		 str_enabled_disabled(enable_llc));
	pr_debug("sched_ext: NUMA idle selection %s\n",
		 str_enabled_disabled(enable_numa));

	if (enable_llc)
		static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
	else
		static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
	if (enable_numa)
		static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
	else
		static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
}
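
/*
 * Worked example (hypothetical topology): on a machine with 2 NUMA nodes,
 * 16 CPUs per node and 8 CPUs per LLC, the first online CPU sees
 * llc_weight() == 8 < 32 online CPUs, so the LLC optimization is enabled;
 * numa_weight() == 16 < 32 and the LLC/NUMA spans differ, so the NUMA
 * optimization is enabled too, unless SCX_OPS_BUILTIN_IDLE_PER_NODE is
 * set, in which case the per-node cpumasks already provide NUMA locality.
 */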

/*
 * Return true if @p can run on all possible CPUs, false otherwise.
 */
static inline bool task_affinity_all(const struct task_struct *p)
{
	return p->nr_cpus_allowed >= num_possible_cpus();
}

/*
 * Built-in CPU idle selection policy:
 *
 * 1. Prioritize full-idle cores:
 *   - always prioritize CPUs from fully idle cores (both logical CPUs are
 *     idle) to avoid interference caused by SMT.
 *
 * 2. Reuse the same CPU:
 *   - prefer the last used CPU to take advantage of cached data (L1, L2) and
 *     branch prediction optimizations.
 *
 * 3. Prefer @prev_cpu's SMT sibling:
 *   - if @prev_cpu is busy and no fully idle core is available, try to
 *     place the task on an idle SMT sibling of @prev_cpu; keeping the
 *     task on the same core makes migration cheaper, preserves L1 cache
 *     locality and reduces wakeup latency.
 *
 * 4. Pick a CPU within the same LLC (Last-Level Cache):
 *   - if the above conditions aren't met, pick a CPU that shares the same
 *     LLC, if the LLC domain is a subset of @cpus_allowed, to maintain
 *     cache locality.
 *
 * 5. Pick a CPU within the same NUMA node, if enabled:
 *   - choose a CPU from the same NUMA node, if the node cpumask is a
 *     subset of @cpus_allowed, to reduce memory access latency.
 *
 * 6. Pick any idle CPU within the @cpus_allowed domain.
 *
 * Steps 4 and 5 are performed only if the system has, respectively,
 * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
 * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
 *
 * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
 * begin in @prev_cpu's node and proceed to other nodes in order of
 * increasing distance.
 *
 * Return the picked CPU if idle, or a negative value otherwise.
 *
 * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
 * we never call ops.select_cpu() for them, see select_task_rq().
 */
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
		       const struct cpumask *cpus_allowed, u64 flags)
{
	const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
	const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
	int node = scx_cpu_node_if_enabled(prev_cpu);
	bool is_prev_allowed;
	s32 cpu;

	preempt_disable();

	/*
	 * Check whether @prev_cpu is still within the allowed set. If not,
	 * we can still try selecting a nearby CPU.
	 */
	is_prev_allowed = cpumask_test_cpu(prev_cpu, allowed);

	/*
	 * Determine the subset of CPUs usable by @p within @cpus_allowed.
	 */
	if (allowed != p->cpus_ptr) {
		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_idle_cpumask);

		if (task_affinity_all(p)) {
			allowed = cpus_allowed;
		} else if (cpumask_and(local_cpus, cpus_allowed, p->cpus_ptr)) {
			allowed = local_cpus;
		} else {
			cpu = -EBUSY;
			goto out_enable;
		}
	}

	/*
	 * The RCU read lock is necessary to protect llc_cpus.
	 */
	rcu_read_lock();

	/*
	 * Determine the subset of CPUs that the task can use in its
	 * current LLC and node.
	 *
	 * If the task can run on all CPUs, use the node and LLC cpumasks
	 * directly.
	 */
	if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa)) {
		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_numa_idle_cpumask);
		const struct cpumask *cpus = numa_span(prev_cpu);

		if (allowed == p->cpus_ptr && task_affinity_all(p))
			numa_cpus = cpus;
		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
			numa_cpus = local_cpus;
	}

	if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_llc_idle_cpumask);
		const struct cpumask *cpus = llc_span(prev_cpu);

		if (allowed == p->cpus_ptr && task_affinity_all(p))
			llc_cpus = cpus;
		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
			llc_cpus = local_cpus;
	}

	/*
	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
	 */
	if (wake_flags & SCX_WAKE_SYNC) {
		int waker_node;

		/*
		 * If the waker's CPU is cache affine and prev_cpu is idle,
		 * then avoid a migration.
		 */
		cpu = smp_processor_id();
		if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}

		/*
		 * If the waker's local DSQ is empty, and the system is under
		 * utilized, try to wake up @p to the local DSQ of the waker.
		 *
		 * Checking only for an empty local DSQ is insufficient as it
		 * could give the wakee an unfair advantage when the system is
		 * oversaturated.
		 *
		 * Checking only for the presence of idle CPUs is also
		 * insufficient as the local DSQ of the waker could have tasks
		 * piled up on it even if there is an idle core elsewhere on
		 * the system.
		 */
		waker_node = scx_cpu_node_if_enabled(cpu);
		if (!(current->flags & PF_EXITING) &&
		    cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
			if (cpumask_test_cpu(cpu, allowed))
				goto out_unlock;
		}
	}

	/*
	 * If CPU has SMT, any wholly idle CPU is likely a better pick than
	 * partially idle @prev_cpu.
	 */
	if (sched_smt_active()) {
		/*
		 * Keep using @prev_cpu if it's part of a fully idle core.
		 */
		if (is_prev_allowed &&
		    cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
		    scx_idle_test_and_clear_cpu(prev_cpu)) {
			cpu = prev_cpu;
			goto out_unlock;
		}

		/*
		 * Search for any fully idle core in the same LLC domain.
		 */
		if (llc_cpus) {
			cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
			if (cpu >= 0)
				goto out_unlock;
		}

		/*
		 * Search for any fully idle core in the same NUMA node.
		 */
		if (numa_cpus) {
			cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
			if (cpu >= 0)
				goto out_unlock;
		}

		/*
		 * Search for any full-idle core usable by the task.
		 *
		 * If the node-aware idle CPU selection policy is enabled
		 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always
		 * begin in prev_cpu's node and proceed to other nodes in
		 * order of increasing distance.
		 */
		cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
		if (cpu >= 0)
			goto out_unlock;

		/*
		 * Give up if we're strictly looking for a full-idle SMT
		 * core.
		 */
		if (flags & SCX_PICK_IDLE_CORE) {
			cpu = -EBUSY;
			goto out_unlock;
		}
	}

	/*
	 * Use @prev_cpu if it's idle.
	 */
	if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
		cpu = prev_cpu;
		goto out_unlock;
	}

#ifdef CONFIG_SCHED_SMT
	/*
	 * Use @prev_cpu's sibling if it's idle.
	 */
	if (sched_smt_active()) {
		for_each_cpu_and(cpu, cpu_smt_mask(prev_cpu), allowed) {
			if (cpu == prev_cpu)
				continue;
			if (scx_idle_test_and_clear_cpu(cpu))
				goto out_unlock;
		}
	}
#endif

	/*
	 * Search for any idle CPU in the same LLC domain.
	 */
	if (llc_cpus) {
		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
		if (cpu >= 0)
			goto out_unlock;
	}

	/*
	 * Search for any idle CPU in the same NUMA node.
	 */
	if (numa_cpus) {
		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
		if (cpu >= 0)
			goto out_unlock;
	}

	/*
	 * Search for any idle CPU usable by the task.
	 *
	 * If the node-aware idle CPU selection policy is enabled
	 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always begin
	 * in prev_cpu's node and proceed to other nodes in order of
	 * increasing distance.
	 */
	cpu = scx_pick_idle_cpu(allowed, node, flags);

out_unlock:
	rcu_read_unlock();
out_enable:
	preempt_enable();

	return cpu;
}
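
/*
 * Example (sketch of how this is consumed; the actual call site lives in
 * ext.c): when a scheduler doesn't implement ops.select_cpu(), the core
 * falls back to this policy, roughly:
 *
 *	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
 *	if (cpu >= 0) {
 *		// an idle CPU was claimed: direct-dispatch @p to it
 *	} else {
 *		cpu = prev_cpu;	// no idle CPU, keep @p where it was
 *	}
 */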

/*
 * Initialize global and per-node idle cpumasks.
 */
void scx_idle_init_masks(void)
{
	int i;

	/* Allocate global idle cpumasks */
	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));

	/* Allocate per-node idle cpumasks (use nr_node_ids for non-contiguous NUMA nodes) */
	scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks, nr_node_ids);
	BUG_ON(!scx_idle_node_masks);

	for_each_node(i) {
		scx_idle_node_masks[i] = kzalloc_node(sizeof(**scx_idle_node_masks),
						      GFP_KERNEL, i);
		BUG_ON(!scx_idle_node_masks[i]);

		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i));
		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt, GFP_KERNEL, i));
	}

	/* Allocate local per-cpu idle cpumasks */
	for_each_possible_cpu(i) {
		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_idle_cpumask, i),
					       GFP_KERNEL, cpu_to_node(i)));
		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_llc_idle_cpumask, i),
					       GFP_KERNEL, cpu_to_node(i)));
		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_numa_idle_cpumask, i),
					       GFP_KERNEL, cpu_to_node(i)));
	}
}

static void update_builtin_idle(int cpu, bool idle)
{
	int node = scx_cpu_node_if_enabled(cpu);
	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;

	assign_cpu(cpu, idle_cpus, idle);

#ifdef CONFIG_SCHED_SMT
	if (sched_smt_active()) {
		const struct cpumask *smt = cpu_smt_mask(cpu);
		struct cpumask *idle_smts = idle_cpumask(node)->smt;

		if (idle) {
			/*
			 * idle_smt handling is racy but that's fine as it's
			 * only an optimization and it's self-correcting.
			 */
			if (!cpumask_subset(smt, idle_cpus))
				return;
			cpumask_or(idle_smts, idle_smts, smt);
		} else {
			cpumask_andnot(idle_smts, idle_smts, smt);
		}
	}
#endif
}

/*
 * Update the idle state of a CPU to @idle.
 *
 * If @do_notify is true, ops.update_idle() is invoked to notify the scx
 * scheduler of an actual idle state transition (idle to busy or vice
 * versa). If @do_notify is false, only the idle state in the idle masks is
 * refreshed without invoking ops.update_idle().
 *
 * This distinction is necessary, because an idle CPU can be "reserved" and
 * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
 * busy even if no tasks are dispatched. In this case, the CPU may return
 * to idle without a true state transition. Refreshing the idle masks
 * without invoking ops.update_idle() ensures accurate idle state tracking
 * while avoiding unnecessary updates and maintaining balanced state
 * transitions.
 */
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
	struct scx_sched *sch = scx_root;
	int cpu = cpu_of(rq);

	lockdep_assert_rq_held(rq);

	/*
	 * Update the idle masks:
	 * - for real idle transitions (do_notify == true)
	 * - for idle-to-idle transitions (indicated by the previous task
	 *   being the idle thread, managed by pick_task_idle())
	 *
	 * Skip updating idle masks if the previous task is not the idle
	 * thread, since set_next_task_idle() has already handled it when
	 * transitioning from a task to the idle thread (calling this
	 * function with do_notify == true).
	 *
	 * In this way we can avoid updating the idle masks twice,
	 * unnecessarily.
	 */
	if (static_branch_likely(&scx_builtin_idle_enabled))
		if (do_notify || is_idle_task(rq->curr))
			update_builtin_idle(cpu, idle);

	/*
	 * Trigger ops.update_idle() only when transitioning from a task to
	 * the idle thread and vice versa.
	 *
	 * Idle transitions are indicated by do_notify being set to true,
	 * managed by put_prev_task_idle()/set_next_task_idle().
	 *
	 * This must come after the builtin idle update so that BPF schedulers
	 * can create interlocking between ops.update_idle() and ops.enqueue():
	 * either enqueue() sees the idle bit or update_idle() sees the task
	 * that enqueue() queued.
	 */
	if (SCX_HAS_OP(sch, update_idle) && do_notify &&
	    !scx_bypassing(sch, cpu_of(rq)))
		SCX_CALL_OP(sch, update_idle, rq, cpu_of(rq), idle);
}
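
/*
 * Example (hypothetical BPF scheduler side, not part of this file): a
 * minimal ops.update_idle() mirroring the built-in tracking into a
 * scheduler-private mask. 'my_idle_mask' is an assumed BPF-side
 * struct bpf_cpumask, and BPF_STRUCT_OPS comes from the scx BPF headers:
 *
 *	void BPF_STRUCT_OPS(myops_update_idle, s32 cpu, bool idle)
 *	{
 *		if (idle)
 *			bpf_cpumask_set_cpu(cpu, my_idle_mask);
 *		else
 *			bpf_cpumask_clear_cpu(cpu, my_idle_mask);
 *	}
 *
 * Note that implementing ops.update_idle() disables the built-in idle
 * tracking unless SCX_OPS_KEEP_BUILTIN_IDLE is also set.
 */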

static void reset_idle_masks(struct sched_ext_ops *ops)
{
	int node;

	/*
	 * Consider all online cpus idle. Should converge to the actual state
	 * quickly.
	 */
	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);
		return;
	}

	for_each_node(node) {
		const struct cpumask *node_mask = cpumask_of_node(node);

		cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
		cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
	}
}

void scx_idle_enable(struct sched_ext_ops *ops)
{
	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
		static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
	else
		static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);

	if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
		static_branch_enable_cpuslocked(&scx_builtin_idle_per_node);
	else
		static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);

	reset_idle_masks(ops);
}

void scx_idle_disable(void)
{
	static_branch_disable(&scx_builtin_idle_enabled);
	static_branch_disable(&scx_builtin_idle_per_node);
}

/********************************************************************************
 * Helpers that can be called from the BPF scheduler.
 */

static int validate_node(struct scx_sched *sch, int node)
{
	if (!static_branch_likely(&scx_builtin_idle_per_node)) {
		scx_error(sch, "per-node idle tracking is disabled");
		return -EOPNOTSUPP;
	}

	/* Return no entry for NUMA_NO_NODE (not a critical scx error) */
	if (node == NUMA_NO_NODE)
		return -ENOENT;

	/* Make sure node is in a valid range */
	if (node < 0 || node >= nr_node_ids) {
		scx_error(sch, "invalid node %d", node);
		return -EINVAL;
	}

	/* Make sure the node is part of the set of possible nodes */
	if (!node_possible(node)) {
		scx_error(sch, "unavailable node %d", node);
		return -EINVAL;
	}

	return node;
}

__bpf_kfunc_start_defs();

static bool check_builtin_idle_enabled(struct scx_sched *sch)
{
	if (static_branch_likely(&scx_builtin_idle_enabled))
		return true;

	scx_error(sch, "built-in idle tracking is disabled");
	return false;
}

/*
 * Determine whether @p is a migration-disabled task in the context of BPF
 * code.
 *
 * We can't simply check whether @p->migration_disabled is set in a
 * sched_ext callback, because the BPF prolog (__bpf_prog_enter) may disable
 * migration for the current task while running BPF code.
 *
 * Since the BPF prolog calls migrate_disable() only when CONFIG_PREEMPT_RCU
 * is enabled (via rcu_read_lock_dont_migrate()), migration_disabled == 1 for
 * the current task is ambiguous only in that case: it could be from the BPF
 * prolog rather than a real migrate_disable() call.
 *
 * Without CONFIG_PREEMPT_RCU, the BPF prolog never calls migrate_disable(),
 * so migration_disabled == 1 always means the task is truly
 * migration-disabled.
 *
 * Therefore, when migration_disabled == 1 and CONFIG_PREEMPT_RCU is enabled,
 * check whether @p is the current task or not: if it is, then migration was
 * not disabled before entering the callback, otherwise migration was disabled.
 *
 * Returns true if @p is migration-disabled, false otherwise.
 */
static bool is_bpf_migration_disabled(const struct task_struct *p)
{
	if (p->migration_disabled == 1) {
		if (IS_ENABLED(CONFIG_PREEMPT_RCU))
			return p != current;
		return true;
	}
	return p->migration_disabled;
}

static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
				 s32 prev_cpu, u64 wake_flags,
				 const struct cpumask *allowed, u64 flags)
{
	unsigned long irq_flags;
	bool we_locked = false;
	s32 cpu;

	if (!ops_cpu_valid(sch, prev_cpu, NULL))
		return -EINVAL;

	if (!check_builtin_idle_enabled(sch))
		return -EBUSY;

	/*
	 * Accessing p->cpus_ptr / p->nr_cpus_allowed needs either @p's rq
	 * lock or @p's pi_lock. Three cases:
	 *
	 *  - inside ops.select_cpu(): try_to_wake_up() holds @p's pi_lock.
	 *  - other rq-locked SCX op: scx_locked_rq() points at the held rq.
	 *  - truly unlocked (UNLOCKED ops, SYSCALL, non-SCX struct_ops):
	 *    nothing held, take pi_lock ourselves.
	 */
	if (this_rq()->scx.in_select_cpu) {
		lockdep_assert_held(&p->pi_lock);
	} else if (!scx_locked_rq()) {
		raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
		we_locked = true;
	}

	/*
	 * This may also be called from ops.enqueue(), so we need to handle
	 * per-CPU tasks as well. For these tasks, we can skip all idle CPU
	 * selection optimizations and simply check whether the previously
	 * used CPU is idle and within the allowed cpumask.
	 */
	if (p->nr_cpus_allowed == 1 || is_bpf_migration_disabled(p)) {
		if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
		    scx_idle_test_and_clear_cpu(prev_cpu))
			cpu = prev_cpu;
		else
			cpu = -EBUSY;
	} else {
		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
					 allowed ?: p->cpus_ptr, flags);
	}

	if (we_locked)
		raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);

	return cpu;
}

/**
 * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
 *		      trigger an error if @cpu is invalid
 * @cpu: target CPU
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 */
__bpf_kfunc s32 scx_bpf_cpu_node(s32 cpu, const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch) || !ops_cpu_valid(sch, cpu, NULL))
		return NUMA_NO_NODE;
	return cpu_to_node(cpu);
}

/**
 * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
 * @p: task_struct to select a CPU for
 * @prev_cpu: CPU @p was on previously
 * @wake_flags: %SCX_WAKE_* flags
 * @is_idle: out parameter indicating whether the returned CPU is idle
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
 * context such as a BPF test_run() call, as long as built-in CPU selection
 * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
 * is set.
 *
 * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
 * currently idle and thus a good candidate for direct dispatching.
 */
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
				       u64 wake_flags, bool *is_idle,
				       const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;
	s32 cpu;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return -ENODEV;

	cpu = select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags, NULL, 0);
	if (cpu >= 0) {
		*is_idle = true;
		return cpu;
	}
	*is_idle = false;
	return prev_cpu;
}
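
/*
 * Example (hypothetical BPF scheduler side): delegate to the default
 * policy from ops.select_cpu() and direct-dispatch when an idle CPU was
 * claimed. 'myops_select_cpu' is an assumed name; SCX_DSQ_LOCAL and
 * SCX_SLICE_DFL come from the sched_ext headers, and the @aux argument is
 * implicit and never passed from BPF:
 *
 *	s32 BPF_STRUCT_OPS(myops_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */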

struct scx_bpf_select_cpu_and_args {
	/* @p and @cpus_allowed can't be packed together as KF_RCU is not transitive */
	s32			prev_cpu;
	u64			wake_flags;
	u64			flags;
};

/**
 * __scx_bpf_select_cpu_and - Arg-wrapped CPU selection with cpumask
 * @p: task_struct to select a CPU for
 * @cpus_allowed: cpumask of allowed CPUs
 * @args: struct containing the rest of the arguments
 *       @args->prev_cpu: CPU @p was on previously
 *       @args->wake_flags: %SCX_WAKE_* flags
 *       @args->flags: %SCX_PICK_IDLE_* flags
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Wrapper kfunc that takes arguments via struct to work around BPF's
 * 5-argument limit. BPF programs should use scx_bpf_select_cpu_and() which is
 * provided as an inline wrapper in common.bpf.h.
 *
 * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
 * context such as a BPF test_run() call, as long as built-in CPU selection
 * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
 * is set.
 *
 * @p, @args->prev_cpu and @args->wake_flags match ops.select_cpu().
 *
 * Returns the selected idle CPU, which will be automatically awakened upon
 * returning from ops.select_cpu() and can be used for direct dispatch, or
 * a negative value if no idle CPU is available.
 */
__bpf_kfunc s32
__scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
			 struct scx_bpf_select_cpu_and_args *args,
			 const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return -ENODEV;

	return select_cpu_from_kfunc(sch, p, args->prev_cpu, args->wake_flags,
				     cpus_allowed, args->flags);
}
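
/*
 * Example (sketch, assuming the wrapper's shape): the inline
 * scx_bpf_select_cpu_and() wrapper in common.bpf.h is expected to pack
 * the scalar arguments roughly like:
 *
 *	struct scx_bpf_select_cpu_and_args args = {
 *		.prev_cpu	= prev_cpu,
 *		.wake_flags	= wake_flags,
 *		.flags		= flags,
 *	};
 *
 *	return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
 */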

/*
 * COMPAT: Will be removed in v6.22.
 */
__bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
				       const struct cpumask *cpus_allowed, u64 flags)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return -ENODEV;

#ifdef CONFIG_EXT_SUB_SCHED
	/*
	 * Disallow if any sub-scheds are attached. There is no way to tell
	 * which scheduler called us, so just error out @p's scheduler.
	 */
	if (unlikely(!list_empty(&sch->children))) {
		scx_error(scx_task_sched(p), "__scx_bpf_select_cpu_and() must be used");
		return -EINVAL;
	}
#endif

	return select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags,
				     cpus_allowed, flags);
}

/**
 * scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the
 * idle-tracking per-CPU cpumask of a target NUMA node.
 * @node: target NUMA node
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Returns an empty cpumask if idle tracking is not enabled, if @node is
 * not valid, or if running on a UP kernel. In this case the actual error
 * will be reported to the BPF scheduler via scx_error().
 */
__bpf_kfunc const struct cpumask *
scx_bpf_get_idle_cpumask_node(s32 node, const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return cpu_none_mask;

	node = validate_node(sch, node);
	if (node < 0)
		return cpu_none_mask;

	return idle_cpumask(node)->cpu;
}

/**
 * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
 * per-CPU cpumask.
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Returns an empty mask if idle tracking is not enabled, or if running on
 * a UP kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return cpu_none_mask;

	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
		scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
		return cpu_none_mask;
	}

	if (!check_builtin_idle_enabled(sch))
		return cpu_none_mask;

	return idle_cpumask(NUMA_NO_NODE)->cpu;
}
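
/*
 * Example (hypothetical BPF scheduler side): the acquired kptr must be
 * released with scx_bpf_put_idle_cpumask() once the caller is done:
 *
 *	const struct cpumask *idle = scx_bpf_get_idle_cpumask();
 *	bool was_idle = bpf_cpumask_test_cpu(cpu, idle);
 *
 *	scx_bpf_put_idle_cpumask(idle);
 */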

/**
 * scx_bpf_get_idle_smtmask_node - Get a referenced kptr to the
 * idle-tracking, per-physical-core cpumask of a target NUMA node. Can be
 * used to determine if an entire physical core is free.
 * @node: target NUMA node
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Returns an empty cpumask if idle tracking is not enabled, if @node is
 * not valid, or if running on a UP kernel. In this case the actual error
 * will be reported to the BPF scheduler via scx_error().
 */
__bpf_kfunc const struct cpumask *
scx_bpf_get_idle_smtmask_node(s32 node, const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return cpu_none_mask;

	node = validate_node(sch, node);
	if (node < 0)
		return cpu_none_mask;

	if (sched_smt_active())
		return idle_cpumask(node)->smt;
	else
		return idle_cpumask(node)->cpu;
}

/**
 * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
 * per-physical-core cpumask. Can be used to determine if an entire physical
 * core is free.
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Returns an empty mask if idle tracking is not enabled, or if running on
 * a UP kernel.
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return cpu_none_mask;

	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
		scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
		return cpu_none_mask;
	}

	if (!check_builtin_idle_enabled(sch))
		return cpu_none_mask;

	if (sched_smt_active())
		return idle_cpumask(NUMA_NO_NODE)->smt;
	else
		return idle_cpumask(NUMA_NO_NODE)->cpu;
}

/**
 * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
 * either the per-CPU or the SMT idle-tracking cpumask.
 * @idle_mask: &cpumask to use
 */
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
	/*
	 * Empty function body because we aren't actually acquiring or releasing
	 * a reference to a global idle cpumask, which is read-only in the
	 * caller and is never released. The acquire / release semantics here
	 * are just used to make the cpumask a trusted pointer in the caller.
	 */
}

/**
 * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
 * @cpu: cpu to test and clear idle for
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Returns %true if @cpu was idle and its idle state was successfully cleared,
 * %false otherwise.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 */
__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu, const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return false;

	if (!check_builtin_idle_enabled(sch))
		return false;

	if (!ops_cpu_valid(sch, cpu, NULL))
		return false;

	return scx_idle_test_and_clear_cpu(cpu);
}
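
/*
 * Example (hypothetical BPF scheduler side): opportunistically keep a
 * task on its previous CPU from ops.enqueue() when that CPU is still
 * idle. scx_bpf_task_cpu(), SCX_DSQ_LOCAL_ON and SCX_SLICE_DFL come from
 * the sched_ext headers:
 *
 *	s32 prev_cpu = scx_bpf_task_cpu(p);
 *
 *	if (scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | prev_cpu,
 *				   SCX_SLICE_DFL, 0);
 *		return;
 *	}
 */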

/**
 * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from @node
 * @cpus_allowed: Allowed cpumask
 * @node: target NUMA node
 * @flags: %SCX_PICK_IDLE_* flags
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
 *
 * Returns the picked idle cpu number on success, or -%EBUSY if no matching
 * cpu was found.
 *
 * The search starts from @node and proceeds to other online NUMA nodes in
 * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
 * in which case the search is limited to the target @node).
 *
 * Always returns an error if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set, or if
 * %SCX_OPS_BUILTIN_IDLE_PER_NODE is not set.
 */
__bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
					   s32 node, u64 flags,
					   const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return -ENODEV;

	node = validate_node(sch, node);
	if (node < 0)
		return node;

	return scx_pick_idle_cpu(cpus_allowed, node, flags);
}

/**
 * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_* flags
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
 * number on success, or -%EBUSY if no matching cpu was found.
 *
 * Idle CPU tracking may race against CPU scheduling state transitions. For
 * example, this function may return -%EBUSY as CPUs are transitioning into the
 * idle state. If the caller then assumes that there will be dispatch events on
 * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
 * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
 * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
 * event in the near future.
 *
 * Unavailable if ops.update_idle() is implemented and
 * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
 *
 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set; use
 * scx_bpf_pick_idle_cpu_node() instead.
 */
__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
				      u64 flags, const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return -ENODEV;

	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
		scx_error(sch, "per-node idle tracking is enabled");
		return -EBUSY;
	}

	if (!check_builtin_idle_enabled(sch))
		return -EBUSY;

	return scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
}
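
/*
 * Example (hypothetical BPF scheduler side): the fallback pattern the
 * comment above recommends. Even if the idle pick races with CPUs going
 * idle, kicking a busy CPU guarantees a future dispatch event:
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	if (cpu < 0) {
 *		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 *		if (cpu >= 0)
 *			scx_bpf_kick_cpu(cpu, 0);
 *	}
 */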

/**
 * scx_bpf_pick_any_cpu_node - Pick and claim an idle cpu if available
 *			       or pick any CPU from @node
 * @cpus_allowed: Allowed cpumask
 * @node: target NUMA node
 * @flags: %SCX_PICK_IDLE_* flags
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed
 * is empty.
 *
 * The search starts from @node and proceeds to other online NUMA nodes in
 * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
 * in which case the search is limited to the target @node, regardless of
 * the CPU idle state).
 *
 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
 * set, this function can't tell which CPUs are idle and will always pick any
 * CPU.
 */
__bpf_kfunc s32 scx_bpf_pick_any_cpu_node(const struct cpumask *cpus_allowed,
					  s32 node, u64 flags,
					  const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;
	s32 cpu;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return -ENODEV;

	node = validate_node(sch, node);
	if (node < 0)
		return node;

	cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
	if (cpu >= 0)
		return cpu;

	if (flags & SCX_PICK_IDLE_IN_NODE)
		cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
	else
		cpu = cpumask_any_distribute(cpus_allowed);
	if (cpu < nr_cpu_ids)
		return cpu;
	else
		return -EBUSY;
}

/**
 * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
 * @cpus_allowed: Allowed cpumask
 * @flags: %SCX_PICK_IDLE_* flags
 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
 *
 * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
 * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
 * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed
 * is empty.
 *
 * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
 * set, this function can't tell which CPUs are idle and will always pick any
 * CPU.
 *
 * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set; use
 * scx_bpf_pick_any_cpu_node() instead.
 */
__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
				     u64 flags, const struct bpf_prog_aux *aux)
{
	struct scx_sched *sch;
	s32 cpu;

	guard(rcu)();

	sch = scx_prog_sched(aux);
	if (unlikely(!sch))
		return -ENODEV;

	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
		scx_error(sch, "per-node idle tracking is enabled");
		return -EBUSY;
	}

	if (static_branch_likely(&scx_builtin_idle_enabled)) {
		cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
		if (cpu >= 0)
			return cpu;
	}

	cpu = cpumask_any_distribute(cpus_allowed);
	if (cpu < nr_cpu_ids)
		return cpu;
	else
		return -EBUSY;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_idle)
BTF_ID_FLAGS(func, scx_bpf_cpu_node, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask_node, KF_IMPLICIT_ARGS | KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_IMPLICIT_ARGS | KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask_node, KF_IMPLICIT_ARGS | KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_IMPLICIT_ARGS | KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_IMPLICIT_ARGS | KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_IMPLICIT_ARGS | KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_IMPLICIT_ARGS | KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_IMPLICIT_ARGS | KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_idle)

static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
	.owner			= THIS_MODULE,
	.set			= &scx_kfunc_ids_idle,
};

/*
 * The select_cpu kfuncs internally call task_rq_lock() when invoked from an
 * rq-unlocked context, and thus cannot be safely called from arbitrary tracing
 * contexts where @p's pi_lock state is unknown. Keep them out of
 * BPF_PROG_TYPE_TRACING by registering them in their own set which is exposed
 * only to STRUCT_OPS and SYSCALL programs.
 *
 * These kfuncs are also members of scx_kfunc_ids_unlocked (see ext.c) because
 * they're callable from unlocked contexts in addition to ops.select_cpu() and
 * ops.enqueue().
 */
BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
BTF_ID_FLAGS(func, __scx_bpf_select_cpu_and, KF_IMPLICIT_ARGS | KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_IMPLICIT_ARGS | KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)

static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
	.owner			= THIS_MODULE,
	.set			= &scx_kfunc_ids_select_cpu,
	.filter			= scx_kfunc_context_filter,
};

int scx_idle_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_select_cpu) ||
	      register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_select_cpu);

	return ret;
}