1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Built-in idle CPU tracking policy.
6  *
7  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
8  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
9  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
10  * Copyright (c) 2024 Andrea Righi <arighi@nvidia.com>
11  */
12 #include "ext_idle.h"
13 
14 /* Enable/disable built-in idle CPU selection policy */
15 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
16 
17 /* Enable/disable per-node idle cpumasks */
18 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
19 
20 #ifdef CONFIG_SMP
21 /* Enable/disable LLC aware optimizations */
22 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
23 
24 /* Enable/disable NUMA aware optimizations */
25 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
26 
27 /*
28  * cpumasks to track idle CPUs within each NUMA node.
29  *
30  * If SCX_OPS_BUILTIN_IDLE_PER_NODE is not enabled, a single global cpumask
31  * is used to track all the idle CPUs in the system.
32  */
33 struct scx_idle_cpus {
34 	cpumask_var_t cpu;
35 	cpumask_var_t smt;
36 };
37 
38 /*
39  * Global host-wide idle cpumasks (used when SCX_OPS_BUILTIN_IDLE_PER_NODE
40  * is not enabled).
41  */
42 static struct scx_idle_cpus scx_idle_global_masks;
43 
44 /*
45  * Per-node idle cpumasks.
46  */
47 static struct scx_idle_cpus **scx_idle_node_masks;
48 
49 /*
50  * Local per-CPU cpumasks (used to generate temporary idle cpumasks).
51  */
52 static DEFINE_PER_CPU(cpumask_var_t, local_idle_cpumask);
53 static DEFINE_PER_CPU(cpumask_var_t, local_llc_idle_cpumask);
54 static DEFINE_PER_CPU(cpumask_var_t, local_numa_idle_cpumask);
55 
56 /*
57  * Return the idle masks associated with a target @node.
58  *
59  * NUMA_NO_NODE identifies the global idle cpumask.
60  */
61 static struct scx_idle_cpus *idle_cpumask(int node)
62 {
63 	return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];
64 }
65 
66 /*
67  * Returns the NUMA node ID associated with @cpu, or NUMA_NO_NODE if
68  * per-node idle cpumasks are disabled.
69  */
70 static int scx_cpu_node_if_enabled(int cpu)
71 {
72 	if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
73 		return NUMA_NO_NODE;
74 
75 	return cpu_to_node(cpu);
76 }
77 
78 bool scx_idle_test_and_clear_cpu(int cpu)
79 {
80 	int node = scx_cpu_node_if_enabled(cpu);
81 	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
82 
83 #ifdef CONFIG_SCHED_SMT
84 	/*
85 	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
86 	 * cluster is not wholly idle either way. This also prevents
87 	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
88 	 */
89 	if (sched_smt_active()) {
90 		const struct cpumask *smt = cpu_smt_mask(cpu);
91 		struct cpumask *idle_smts = idle_cpumask(node)->smt;
92 
93 		/*
94 		 * If offline, @cpu is not its own sibling and
95 		 * scx_pick_idle_cpu() can get caught in an infinite loop as
96 		 * @cpu is never cleared from the idle SMT mask. Ensure that
97 		 * @cpu is eventually cleared.
98 		 *
99 		 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
100 		 * reduce memory writes, which may help alleviate cache
101 		 * coherence pressure.
102 		 */
103 		if (cpumask_intersects(smt, idle_smts))
104 			cpumask_andnot(idle_smts, idle_smts, smt);
105 		else if (cpumask_test_cpu(cpu, idle_smts))
106 			__cpumask_clear_cpu(cpu, idle_smts);
107 	}
108 #endif
109 
110 	return cpumask_test_and_clear_cpu(cpu, idle_cpus);
111 }
112 
113 /*
114  * Pick an idle CPU in a specific NUMA node.
115  */
116 static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
117 {
118 	int cpu;
119 
120 retry:
121 	if (sched_smt_active()) {
122 		cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
123 		if (cpu < nr_cpu_ids)
124 			goto found;
125 
126 		if (flags & SCX_PICK_IDLE_CORE)
127 			return -EBUSY;
128 	}
129 
130 	cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
131 	if (cpu >= nr_cpu_ids)
132 		return -EBUSY;
133 
134 found:
135 	if (scx_idle_test_and_clear_cpu(cpu))
136 		return cpu;
137 	else
138 		goto retry;
139 }
140 
141 #ifdef CONFIG_NUMA
142 /*
143  * Tracks nodes that have not yet been visited when searching for an idle
144  * CPU across all available nodes.
145  */
146 static DEFINE_PER_CPU(nodemask_t, per_cpu_unvisited);
147 
148 /*
149  * Search for an idle CPU across all nodes, excluding @node.
150  */
151 static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
152 {
153 	nodemask_t *unvisited;
154 	s32 cpu = -EBUSY;
155 
156 	preempt_disable();
157 	unvisited = this_cpu_ptr(&per_cpu_unvisited);
158 
159 	/*
160 	 * Restrict the search to the online nodes (excluding the current
161 	 * node that has been visited already).
162 	 */
163 	nodes_copy(*unvisited, node_states[N_ONLINE]);
164 	node_clear(node, *unvisited);
165 
166 	/*
167 	 * Traverse all nodes in order of increasing distance, starting
168 	 * from @node.
169 	 *
170 	 * This loop is O(N^2), with N being the number of NUMA nodes,
171 	 * which might be quite expensive on large NUMA systems. However,
172 	 * this complexity comes into play only when a scheduler enables
173 	 * SCX_OPS_BUILTIN_IDLE_PER_NODE and requests an idle CPU without
174 	 * specifying a target NUMA node, so it shouldn't be a bottleneck
175 	 * in most cases.
176 	 *
177 	 * As a future optimization we may want to cache the list of nodes
178 	 * in a per-node array, instead of actually traversing them every
179 	 * time.
180 	 */
181 	for_each_node_numadist(node, *unvisited) {
182 		cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
183 		if (cpu >= 0)
184 			break;
185 	}
186 	preempt_enable();
187 
188 	return cpu;
189 }
190 #else
191 static inline s32
192 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
193 {
194 	return -EBUSY;
195 }
196 #endif
197 
198 /*
199  * Find an idle CPU in the system, starting from @node.
200  */
201 s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
202 {
203 	s32 cpu;
204 
205 	/*
206 	 * Always search in the starting node first (this is an
207 	 * optimization that can save some cycles even when the search is
208 	 * not limited to a single node).
209 	 */
210 	cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
211 	if (cpu >= 0)
212 		return cpu;
213 
214 	/*
215 	 * Stop the search if we are using only a single global cpumask
216 	 * (NUMA_NO_NODE) or if the search is restricted to the first node
217 	 * only.
218 	 */
219 	if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)
220 		return -EBUSY;
221 
222 	/*
223 	 * Extend the search to the other online nodes.
224 	 */
225 	return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags);
226 }
227 
228 /*
229  * Return the number of CPUs in @cpu's LLC domain (or zero if the LLC
230  * domain is not defined).
231  */
232 static unsigned int llc_weight(s32 cpu)
233 {
234 	struct sched_domain *sd;
235 
236 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
237 	if (!sd)
238 		return 0;
239 
240 	return sd->span_weight;
241 }
242 
243 /*
244  * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
245  * domain is not defined).
246  */
247 static struct cpumask *llc_span(s32 cpu)
248 {
249 	struct sched_domain *sd;
250 
251 	sd = rcu_dereference(per_cpu(sd_llc, cpu));
252 	if (!sd)
253 		return NULL;
254 
255 	return sched_domain_span(sd);
256 }
257 
258 /*
259  * Return the number of CPUs in @cpu's NUMA domain (or zero if the
260  * NUMA domain is not defined).
261  */
262 static unsigned int numa_weight(s32 cpu)
263 {
264 	struct sched_domain *sd;
265 	struct sched_group *sg;
266 
267 	sd = rcu_dereference(per_cpu(sd_numa, cpu));
268 	if (!sd)
269 		return 0;
270 	sg = sd->groups;
271 	if (!sg)
272 		return 0;
273 
274 	return sg->group_weight;
275 }
276 
277 /*
278  * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
279  * domain is not defined).
280  */
281 static struct cpumask *numa_span(s32 cpu)
282 {
283 	struct sched_domain *sd;
284 	struct sched_group *sg;
285 
286 	sd = rcu_dereference(per_cpu(sd_numa, cpu));
287 	if (!sd)
288 		return NULL;
289 	sg = sd->groups;
290 	if (!sg)
291 		return NULL;
292 
293 	return sched_group_span(sg);
294 }
295 
296 /*
297  * Return true if the LLC domains do not perfectly overlap with the NUMA
298  * domains, false otherwise.
299  */
300 static bool llc_numa_mismatch(void)
301 {
302 	int cpu;
303 
304 	/*
305 	 * We need to scan all online CPUs to verify whether their scheduling
306 	 * domains overlap.
307 	 *
308 	 * While it is rare to encounter architectures with asymmetric NUMA
309 	 * topologies, CPU hotplugging or virtualized environments can result
310 	 * in asymmetric configurations.
311 	 *
312 	 * For example:
313 	 *
314 	 *  NUMA 0:
315 	 *    - LLC 0: cpu0..cpu7
316 	 *    - LLC 1: cpu8..cpu15 [offline]
317 	 *
318 	 *  NUMA 1:
319 	 *    - LLC 0: cpu16..cpu23
320 	 *    - LLC 1: cpu24..cpu31
321 	 *
322 	 * In this case, if we only check the first online CPU (cpu0), we might
323 	 * incorrectly assume that the LLC and NUMA domains fully overlap,
324 	 * while in fact NUMA node 1 spans two distinct
325 	 * LLC domains.
326 	 */
327 	for_each_online_cpu(cpu)
328 		if (llc_weight(cpu) != numa_weight(cpu))
329 			return true;
330 
331 	return false;
332 }
333 
334 /*
335  * Initialize topology-aware scheduling.
336  *
337  * Detect if the system has multiple LLC or multiple NUMA domains and enable
338  * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
339  * selection policy.
340  *
341  * Assumption: the kernel's internal topology representation assumes that each
342  * CPU belongs to a single LLC domain, and that each LLC domain is entirely
343  * contained within a single NUMA node.
344  */
345 void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
346 {
347 	bool enable_llc = false, enable_numa = false;
348 	unsigned int nr_cpus;
349 	s32 cpu = cpumask_first(cpu_online_mask);
350 
351 	/*
352 	 * Enable LLC domain optimization only when there are multiple LLC
353 	 * domains among the online CPUs. If all online CPUs are part of a
354 	 * single LLC domain, the idle CPU selection logic can choose any
355 	 * online CPU without bias.
356 	 *
357 	 * Note that it is sufficient to check the LLC domain of the first
358 	 * online CPU to determine whether a single LLC domain includes all
359 	 * CPUs.
360 	 */
361 	rcu_read_lock();
362 	nr_cpus = llc_weight(cpu);
363 	if (nr_cpus > 0) {
364 		if (nr_cpus < num_online_cpus())
365 			enable_llc = true;
366 		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
367 			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
368 	}
369 
370 	/*
371 	 * Enable NUMA optimization only when there are multiple NUMA domains
372 	 * among the online CPUs and the NUMA domains don't perfectly overlap
373 	 * with the LLC domains.
374 	 *
375 	 * If all CPUs belong to the same NUMA node and the same LLC domain,
376 	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
377 	 * for an idle CPU in the same domain twice is redundant.
378 	 *
379 	 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
380 	 * optimization, as idle CPUs are naturally selected within specific
381 	 * NUMA nodes by querying the corresponding per-node cpumasks.
382 	 */
383 	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
384 		nr_cpus = numa_weight(cpu);
385 		if (nr_cpus > 0) {
386 			if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
387 				enable_numa = true;
388 			pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
389 				 cpumask_pr_args(numa_span(cpu)), nr_cpus);
390 		}
391 	}
392 	rcu_read_unlock();
393 
394 	pr_debug("sched_ext: LLC idle selection %s\n",
395 		 str_enabled_disabled(enable_llc));
396 	pr_debug("sched_ext: NUMA idle selection %s\n",
397 		 str_enabled_disabled(enable_numa));
398 
399 	if (enable_llc)
400 		static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
401 	else
402 		static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
403 	if (enable_numa)
404 		static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
405 	else
406 		static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
407 }
408 
409 /*
410  * Return true if @p can run on all possible CPUs, false otherwise.
411  */
412 static inline bool task_affinity_all(const struct task_struct *p)
413 {
414 	return p->nr_cpus_allowed >= num_possible_cpus();
415 }
416 
417 /*
418  * Built-in CPU idle selection policy:
419  *
420  * 1. Prioritize full-idle cores:
421  *   - always prioritize CPUs from fully idle cores (both logical CPUs are
422  *     idle) to avoid interference caused by SMT.
423  *
424  * 2. Reuse the same CPU:
425  *   - prefer the last used CPU to take advantage of cached data (L1, L2) and
426  *     branch prediction optimizations.
427  *
428  * 3. Pick a CPU within the same LLC (Last-Level Cache):
429  *   - if the above conditions aren't met, pick a CPU that shares the same
430  *     LLC, if the LLC domain is a subset of @cpus_allowed, to maintain
431  *     cache locality.
432  *
433  * 4. Pick a CPU within the same NUMA node, if enabled:
434  *   - choose a CPU from the same NUMA node, if the node cpumask is a
435  *     subset of @cpus_allowed, to reduce memory access latency.
436  *
437  * 5. Pick any idle CPU within the @cpus_allowed domain.
438  *
439  * Steps 3 and 4 are performed only if the system has, respectively,
440  * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
441  * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
442  *
443  * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
444  * begin in @prev_cpu's node and proceed to other nodes in order of
445  * increasing distance.
446  *
447  * Return the picked CPU if idle, or a negative value otherwise.
448  *
449  * NOTE: tasks that can only run on 1 CPU are excluded from this logic, because
450  * we never call ops.select_cpu() for them, see select_task_rq().
451  */
452 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
453 		       const struct cpumask *cpus_allowed, u64 flags)
454 {
455 	const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
456 	const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
457 	int node = scx_cpu_node_if_enabled(prev_cpu);
458 	bool is_prev_allowed;
459 	s32 cpu;
460 
461 	preempt_disable();
462 
463 	/*
464 	 * Check whether @prev_cpu is still within the allowed set. If not,
465 	 * we can still try selecting a nearby CPU.
466 	 */
467 	is_prev_allowed = cpumask_test_cpu(prev_cpu, allowed);
468 
469 	/*
470 	 * Determine the subset of CPUs usable by @p within @cpus_allowed.
471 	 */
472 	if (allowed != p->cpus_ptr) {
473 		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_idle_cpumask);
474 
475 		if (task_affinity_all(p)) {
476 			allowed = cpus_allowed;
477 		} else if (cpumask_and(local_cpus, cpus_allowed, p->cpus_ptr)) {
478 			allowed = local_cpus;
479 		} else {
480 			cpu = -EBUSY;
481 			goto out_enable;
482 		}
483 	}
484 
485 	/*
486 	 * RCU protects the sched domains (sd_llc/sd_numa) dereferenced below.
487 	 */
488 	rcu_read_lock();
489 
490 	/*
491 	 * Determine the subset of CPUs that the task can use in its
492 	 * current LLC and node.
493 	 *
494 	 * If the task can run on all CPUs, use the node and LLC cpumasks
495 	 * directly.
496 	 */
497 	if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa)) {
498 		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_numa_idle_cpumask);
499 		const struct cpumask *cpus = numa_span(prev_cpu);
500 
501 		if (allowed == p->cpus_ptr && task_affinity_all(p))
502 			numa_cpus = cpus;
503 		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
504 			numa_cpus = local_cpus;
505 	}
506 
507 	if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
508 		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_llc_idle_cpumask);
509 		const struct cpumask *cpus = llc_span(prev_cpu);
510 
511 		if (allowed == p->cpus_ptr && task_affinity_all(p))
512 			llc_cpus = cpus;
513 		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
514 			llc_cpus = local_cpus;
515 	}
516 
517 	/*
518 	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
519 	 */
520 	if (wake_flags & SCX_WAKE_SYNC) {
521 		int waker_node;
522 
523 		/*
524 		 * If the waker's CPU is cache affine and prev_cpu is idle,
525 		 * then avoid a migration.
526 		 */
527 		cpu = smp_processor_id();
528 		if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
529 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
530 			cpu = prev_cpu;
531 			goto out_unlock;
532 		}
533 
534 		/*
535 		 * If the waker's local DSQ is empty and the system is
536 		 * underutilized, try to wake up @p on the local DSQ of the waker.
537 		 *
538 		 * Checking only for an empty local DSQ is insufficient as it
539 		 * could give the wakee an unfair advantage when the system is
540 		 * oversaturated.
541 		 *
542 		 * Checking only for the presence of idle CPUs is also
543 		 * insufficient as the local DSQ of the waker could have tasks
544 		 * piled up on it even if there is an idle core elsewhere on
545 		 * the system.
546 		 */
547 		waker_node = cpu_to_node(cpu);
548 		if (!(current->flags & PF_EXITING) &&
549 		    cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
550 		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
551 		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
552 			if (cpumask_test_cpu(cpu, allowed))
553 				goto out_unlock;
554 		}
555 	}
556 
557 	/*
558 	 * If CPU has SMT, any wholly idle CPU is likely a better pick than
559 	 * partially idle @prev_cpu.
560 	 */
561 	if (sched_smt_active()) {
562 		/*
563 		 * Keep using @prev_cpu if it's part of a fully idle core.
564 		 */
565 		if (is_prev_allowed &&
566 		    cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
567 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
568 			cpu = prev_cpu;
569 			goto out_unlock;
570 		}
571 
572 		/*
573 		 * Search for any fully idle core in the same LLC domain.
574 		 */
575 		if (llc_cpus) {
576 			cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
577 			if (cpu >= 0)
578 				goto out_unlock;
579 		}
580 
581 		/*
582 		 * Search for any fully idle core in the same NUMA node.
583 		 */
584 		if (numa_cpus) {
585 			cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
586 			if (cpu >= 0)
587 				goto out_unlock;
588 		}
589 
590 		/*
591 		 * Search for any full-idle core usable by the task.
592 		 *
593 		 * If the node-aware idle CPU selection policy is enabled
594 		 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always
595 		 * begin in prev_cpu's node and proceed to other nodes in
596 		 * order of increasing distance.
597 		 */
598 		cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
599 		if (cpu >= 0)
600 			goto out_unlock;
601 
602 		/*
603 		 * Give up if we're strictly looking for a full-idle SMT
604 		 * core.
605 		 */
606 		if (flags & SCX_PICK_IDLE_CORE) {
607 			cpu = -EBUSY;
608 			goto out_unlock;
609 		}
610 	}
611 
612 	/*
613 	 * Use @prev_cpu if it's idle.
614 	 */
615 	if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
616 		cpu = prev_cpu;
617 		goto out_unlock;
618 	}
619 
620 	/*
621 	 * Search for any idle CPU in the same LLC domain.
622 	 */
623 	if (llc_cpus) {
624 		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
625 		if (cpu >= 0)
626 			goto out_unlock;
627 	}
628 
629 	/*
630 	 * Search for any idle CPU in the same NUMA node.
631 	 */
632 	if (numa_cpus) {
633 		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
634 		if (cpu >= 0)
635 			goto out_unlock;
636 	}
637 
638 	/*
639 	 * Search for any idle CPU usable by the task.
640 	 *
641 	 * If the node-aware idle CPU selection policy is enabled
642 	 * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always begin
643 	 * in prev_cpu's node and proceed to other nodes in order of
644 	 * increasing distance.
645 	 */
646 	cpu = scx_pick_idle_cpu(allowed, node, flags);
647 
648 out_unlock:
649 	rcu_read_unlock();
650 out_enable:
651 	preempt_enable();
652 
653 	return cpu;
654 }
655 
656 /*
657  * Initialize global and per-node idle cpumasks.
658  */
659 void scx_idle_init_masks(void)
660 {
661 	int i;
662 
663 	/* Allocate global idle cpumasks */
664 	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
665 	BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));
666 
667 	/* Allocate per-node idle cpumasks */
668 	scx_idle_node_masks = kcalloc(num_possible_nodes(),
669 				      sizeof(*scx_idle_node_masks), GFP_KERNEL);
670 	BUG_ON(!scx_idle_node_masks);
671 
672 	for_each_node(i) {
673 		scx_idle_node_masks[i] = kzalloc_node(sizeof(**scx_idle_node_masks),
674 							 GFP_KERNEL, i);
675 		BUG_ON(!scx_idle_node_masks[i]);
676 
677 		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->cpu, GFP_KERNEL, i));
678 		BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt, GFP_KERNEL, i));
679 	}
680 
681 	/* Allocate local per-cpu idle cpumasks */
682 	for_each_possible_cpu(i) {
683 		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_idle_cpumask, i),
684 					       GFP_KERNEL, cpu_to_node(i)));
685 		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_llc_idle_cpumask, i),
686 					       GFP_KERNEL, cpu_to_node(i)));
687 		BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_numa_idle_cpumask, i),
688 					       GFP_KERNEL, cpu_to_node(i)));
689 	}
690 }
691 
692 static void update_builtin_idle(int cpu, bool idle)
693 {
694 	int node = scx_cpu_node_if_enabled(cpu);
695 	struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
696 
697 	assign_cpu(cpu, idle_cpus, idle);
698 
699 #ifdef CONFIG_SCHED_SMT
700 	if (sched_smt_active()) {
701 		const struct cpumask *smt = cpu_smt_mask(cpu);
702 		struct cpumask *idle_smts = idle_cpumask(node)->smt;
703 
704 		if (idle) {
705 			/*
706 			 * idle_smt handling is racy but that's fine as it's
707 			 * only for optimization and self-correcting.
708 			 */
709 			if (!cpumask_subset(smt, idle_cpus))
710 				return;
711 			cpumask_or(idle_smts, idle_smts, smt);
712 		} else {
713 			cpumask_andnot(idle_smts, idle_smts, smt);
714 		}
715 	}
716 #endif
717 }
718 
719 /*
720  * Update the idle state of a CPU to @idle.
721  *
722  * If @do_notify is true, ops.update_idle() is invoked to notify the scx
723  * scheduler of an actual idle state transition (idle to busy or vice
724  * versa). If @do_notify is false, only the idle state in the idle masks is
725  * refreshed without invoking ops.update_idle().
726  *
727  * This distinction is necessary, because an idle CPU can be "reserved" and
728  * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
729  * busy even if no tasks are dispatched. In this case, the CPU may return
730  * to idle without a true state transition. Refreshing the idle masks
731  * without invoking ops.update_idle() ensures accurate idle state tracking
732  * while avoiding unnecessary updates and maintaining balanced state
733  * transitions.
734  */
735 void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
736 {
737 	struct scx_sched *sch = scx_root;
738 	int cpu = cpu_of(rq);
739 
740 	lockdep_assert_rq_held(rq);
741 
742 	/*
743 	 * Update the idle masks:
744 	 * - for real idle transitions (do_notify == true)
745 	 * - for idle-to-idle transitions (indicated by the previous task
746 	 *   being the idle thread, managed by pick_task_idle())
747 	 *
748 	 * Skip updating idle masks if the previous task is not the idle
749 	 * thread, since set_next_task_idle() has already handled it when
750 	 * transitioning from a task to the idle thread (calling this
751 	 * function with do_notify == true).
752 	 *
753 	 * In this way we can avoid updating the idle masks twice,
754 	 * unnecessarily.
755 	 */
756 	if (static_branch_likely(&scx_builtin_idle_enabled))
757 		if (do_notify || is_idle_task(rq->curr))
758 			update_builtin_idle(cpu, idle);
759 
760 	/*
761 	 * Trigger ops.update_idle() only when transitioning from a task to
762 	 * the idle thread and vice versa.
763 	 *
764 	 * Idle transitions are indicated by do_notify being set to true,
765 	 * managed by put_prev_task_idle()/set_next_task_idle().
766 	 *
767 	 * This must come after builtin idle update so that BPF schedulers can
768 	 * create interlocking between ops.update_idle() and ops.enqueue() -
769 	 * either enqueue() sees the idle bit or update_idle() sees the task
770 	 * that enqueue() queued.
771 	 */
772 	if (SCX_HAS_OP(sch, update_idle) && do_notify && !scx_rq_bypassing(rq))
773 		SCX_CALL_OP(sch, SCX_KF_REST, update_idle, rq, cpu_of(rq), idle);
774 }
775 
776 static void reset_idle_masks(struct sched_ext_ops *ops)
777 {
778 	int node;
779 
780 	/*
781 	 * Consider all online cpus idle. Should converge to the actual state
782 	 * quickly.
783 	 */
784 	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
785 		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
786 		cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);
787 		return;
788 	}
789 
790 	for_each_node(node) {
791 		const struct cpumask *node_mask = cpumask_of_node(node);
792 
793 		cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
794 		cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
795 	}
796 }
797 #endif	/* CONFIG_SMP */
798 
799 void scx_idle_enable(struct sched_ext_ops *ops)
800 {
801 	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
802 		static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
803 	else
804 		static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
805 
806 	if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
807 		static_branch_enable_cpuslocked(&scx_builtin_idle_per_node);
808 	else
809 		static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
810 
811 #ifdef CONFIG_SMP
812 	reset_idle_masks(ops);
813 #endif
814 }
815 
816 void scx_idle_disable(void)
817 {
818 	static_branch_disable(&scx_builtin_idle_enabled);
819 	static_branch_disable(&scx_builtin_idle_per_node);
820 }
821 
822 /********************************************************************************
823  * Helpers that can be called from the BPF scheduler.
824  */
825 
826 static int validate_node(int node)
827 {
828 	if (!static_branch_likely(&scx_builtin_idle_per_node)) {
829 		scx_kf_error("per-node idle tracking is disabled");
830 		return -EOPNOTSUPP;
831 	}
832 
833 	/* Return no entry for NUMA_NO_NODE (not a critical scx error) */
834 	if (node == NUMA_NO_NODE)
835 		return -ENOENT;
836 
837 	/* Make sure node is in a valid range */
838 	if (node < 0 || node >= nr_node_ids) {
839 		scx_kf_error("invalid node %d", node);
840 		return -EINVAL;
841 	}
842 
843 	/* Make sure the node is part of the set of possible nodes */
844 	if (!node_possible(node)) {
845 		scx_kf_error("unavailable node %d", node);
846 		return -EINVAL;
847 	}
848 
849 	return node;
850 }
851 
852 __bpf_kfunc_start_defs();
853 
854 static bool check_builtin_idle_enabled(void)
855 {
856 	if (static_branch_likely(&scx_builtin_idle_enabled))
857 		return true;
858 
859 	scx_kf_error("built-in idle tracking is disabled");
860 	return false;
861 }
862 
863 s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
864 			  const struct cpumask *allowed, u64 flags)
865 {
866 	struct rq *rq;
867 	struct rq_flags rf;
868 	s32 cpu;
869 
870 	if (!kf_cpu_valid(prev_cpu, NULL))
871 		return -EINVAL;
872 
873 	if (!check_builtin_idle_enabled())
874 		return -EBUSY;
875 
876 	/*
877 	 * If called from an unlocked context, acquire the task's rq lock,
878 	 * so that we can safely access p->cpus_ptr and p->nr_cpus_allowed.
879 	 *
880 	 * Otherwise, only allow using this kfunc from ops.select_cpu()
881 	 * and ops.enqueue().
882 	 */
883 	if (scx_kf_allowed_if_unlocked()) {
884 		rq = task_rq_lock(p, &rf);
885 	} else {
886 		if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
887 			return -EPERM;
888 		rq = scx_locked_rq();
889 	}
890 
891 	/*
892 	 * Validate locking correctness to access p->cpus_ptr and
893 	 * p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
894 	 * otherwise, assert that p->pi_lock is held.
895 	 */
896 	if (!rq)
897 		lockdep_assert_held(&p->pi_lock);
898 
899 #ifdef CONFIG_SMP
900 	/*
901 	 * This may also be called from ops.enqueue(), so we need to handle
902 	 * per-CPU tasks as well. For these tasks, we can skip all idle CPU
903 	 * selection optimizations and simply check whether the previously
904 	 * used CPU is idle and within the allowed cpumask.
905 	 */
906 	if (p->nr_cpus_allowed == 1) {
907 		if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
908 		    scx_idle_test_and_clear_cpu(prev_cpu))
909 			cpu = prev_cpu;
910 		else
911 			cpu = -EBUSY;
912 	} else {
913 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
914 					 allowed ?: p->cpus_ptr, flags);
915 	}
916 #else
917 	cpu = -EBUSY;
918 #endif
919 	if (scx_kf_allowed_if_unlocked())
920 		task_rq_unlock(rq, p, &rf);
921 
922 	return cpu;
923 }
924 
925 /**
926  * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
927  *		      trigger an error if @cpu is invalid
928  * @cpu: target CPU
929  */
930 __bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
931 {
932 #ifdef CONFIG_NUMA
933 	if (!kf_cpu_valid(cpu, NULL))
934 		return NUMA_NO_NODE;
935 
936 	return cpu_to_node(cpu);
937 #else
938 	return 0;
939 #endif
940 }
941 
942 /**
943  * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
944  * @p: task_struct to select a CPU for
945  * @prev_cpu: CPU @p was on previously
946  * @wake_flags: %SCX_WAKE_* flags
947  * @is_idle: out parameter indicating whether the returned CPU is idle
948  *
949  * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
950  * context such as a BPF test_run() call, as long as built-in CPU selection
951  * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
952  * is set.
953  *
954  * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
955  * currently idle and thus a good candidate for direct dispatching.
956  */
957 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
958 				       u64 wake_flags, bool *is_idle)
959 {
960 	s32 cpu;
961 
962 	cpu = select_cpu_from_kfunc(p, prev_cpu, wake_flags, NULL, 0);
963 	if (cpu >= 0) {
964 		*is_idle = true;
965 		return cpu;
966 	}
967 	*is_idle = false;
968 
969 	return prev_cpu;
970 }
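
/*
 * Example: a minimal ops.select_cpu() implementation in a BPF scheduler that
 * defers to this kfunc and directly dispatches when an idle CPU was found.
 * This is an illustrative sketch only; it assumes the BPF-side helpers
 * (BPF_STRUCT_OPS, scx_bpf_dsq_insert(), SCX_DSQ_LOCAL, SCX_SLICE_DFL)
 * provided by the in-tree sched_ext BPF headers.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *
 *		return cpu;
 *	}
 */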
971 
972 /**
973  * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p,
974  *			    prioritizing those in @cpus_allowed
975  * @p: task_struct to select a CPU for
976  * @prev_cpu: CPU @p was on previously
977  * @wake_flags: %SCX_WAKE_* flags
978  * @cpus_allowed: cpumask of allowed CPUs
979  * @flags: %SCX_PICK_IDLE* flags
980  *
981  * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
982  * context such as a BPF test_run() call, as long as built-in CPU selection
983  * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
984  * is set.
985  *
986  * @p, @prev_cpu and @wake_flags match ops.select_cpu().
987  *
988  * Returns the selected idle CPU, which will be automatically awakened upon
989  * returning from ops.select_cpu() and can be used for direct dispatch, or
990  * a negative value if no idle CPU is available.
991  */
992 __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
993 				       const struct cpumask *cpus_allowed, u64 flags)
994 {
995 	return select_cpu_from_kfunc(p, prev_cpu, wake_flags, cpus_allowed, flags);
996 }
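
/*
 * Example: like the above, but restricting the pick through the
 * @cpus_allowed argument. An illustrative sketch assuming the same BPF-side
 * helpers; here the task's own affinity mask is passed explicitly, but any
 * trusted cpumask maintained by the scheduler could be used instead.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
 *					     p->cpus_ptr, 0);
 *		if (cpu >= 0) {
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *
 *		return prev_cpu;
 *	}
 */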
997 
998 /**
999  * scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the
1000  * idle-tracking per-CPU cpumask of a target NUMA node.
1001  * @node: target NUMA node
1002  *
1003  * Returns an empty cpumask if idle tracking is not enabled, if @node is
1004  * not valid, or if running on a UP kernel. In this case the actual error will
1005  * be reported to the BPF scheduler via scx_error().
1006  */
1007 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
1008 {
1009 	node = validate_node(node);
1010 	if (node < 0)
1011 		return cpu_none_mask;
1012 
1013 #ifdef CONFIG_SMP
1014 	return idle_cpumask(node)->cpu;
1015 #else
1016 	return cpu_none_mask;
1017 #endif
1018 }
1019 
1020 /**
1021  * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
1022  * per-CPU cpumask.
1023  *
1024  * Returns an empty mask if idle tracking is not enabled, or if running on a
1025  * UP kernel.
1026  */
1027 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
1028 {
1029 	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
1030 		scx_kf_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
1031 		return cpu_none_mask;
1032 	}
1033 
1034 	if (!check_builtin_idle_enabled())
1035 		return cpu_none_mask;
1036 
1037 #ifdef CONFIG_SMP
1038 	return idle_cpumask(NUMA_NO_NODE)->cpu;
1039 #else
1040 	return cpu_none_mask;
1041 #endif
1042 }
1043 
1044 /**
1045  * scx_bpf_get_idle_smtmask_node - Get a referenced kptr to the
1046  * idle-tracking, per-physical-core cpumask of a target NUMA node. Can be
1047  * used to determine if an entire physical core is free.
1048  * @node: target NUMA node
1049  *
1050  * Returns an empty cpumask if idle tracking is not enabled, if @node is
1051  * not valid, or if running on a UP kernel. In this case the actual error will
1052  * be reported to the BPF scheduler via scx_error().
1053  */
1054 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
1055 {
1056 	node = validate_node(node);
1057 	if (node < 0)
1058 		return cpu_none_mask;
1059 
1060 #ifdef CONFIG_SMP
1061 	if (sched_smt_active())
1062 		return idle_cpumask(node)->smt;
1063 	else
1064 		return idle_cpumask(node)->cpu;
1065 #else
1066 	return cpu_none_mask;
1067 #endif
1068 }
1069 
1070 /**
1071  * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
1072  * per-physical-core cpumask. Can be used to determine if an entire physical
1073  * core is free.
1074  *
1075  * Returns an empty mask if idle tracking is not enabled, or if running on a
1076  * UP kernel.
1077  */
1078 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
1079 {
1080 	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
1081 		scx_kf_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
1082 		return cpu_none_mask;
1083 	}
1084 
1085 	if (!check_builtin_idle_enabled())
1086 		return cpu_none_mask;
1087 
1088 #ifdef CONFIG_SMP
1089 	if (sched_smt_active())
1090 		return idle_cpumask(NUMA_NO_NODE)->smt;
1091 	else
1092 		return idle_cpumask(NUMA_NO_NODE)->cpu;
1093 #else
1094 	return cpu_none_mask;
1095 #endif
1096 }
1097 
1098 /**
1099  * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
1100  * either the per-CPU or SMT idle-tracking cpumask.
1101  * @idle_mask: &cpumask to use
1102  */
1103 __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
1104 {
1105 	/*
1106 	 * Empty function body because we aren't actually acquiring or releasing
1107 	 * a reference to a global idle cpumask, which is read-only in the
1108 	 * caller and is never released. The acquire / release semantics here
1109 	 * are just used to make the cpumask a trusted pointer in the caller.
1110 	 */
1111 }
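
/*
 * Example: peeking at the global idle cpumask from a BPF scheduler. An
 * illustrative sketch assuming the bpf_cpumask_empty() kfunc from the
 * kernel's BPF cpumask support; the mask must be released with
 * scx_bpf_put_idle_cpumask() to keep the acquire/release pairing that the
 * verifier expects.
 *
 *	const struct cpumask *idle = scx_bpf_get_idle_cpumask();
 *	bool any_idle = !bpf_cpumask_empty(idle);
 *
 *	scx_bpf_put_idle_cpumask(idle);
 */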
1112 
1113 /**
1114  * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
1115  * @cpu: cpu to test and clear idle for
1116  *
1117  * Returns %true if @cpu was idle and its idle state was successfully cleared.
1118  * %false otherwise.
1119  *
1120  * Unavailable if ops.update_idle() is implemented and
1121  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
1122  */
1123 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
1124 {
1125 	if (!check_builtin_idle_enabled())
1126 		return false;
1127 
1128 	if (kf_cpu_valid(cpu, NULL))
1129 		return scx_idle_test_and_clear_cpu(cpu);
1130 	else
1131 		return false;
1132 }
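
/*
 * Example: claiming the task's previous CPU from ops.enqueue() and falling
 * back to the global DSQ otherwise. An illustrative sketch assuming
 * scx_bpf_task_cpu(), scx_bpf_dsq_insert(), scx_bpf_kick_cpu() and the
 * SCX_DSQ_* and SCX_KICK_* constants from the same kernel; @enq_flags is the
 * ops.enqueue() argument.
 *
 *	s32 prev = scx_bpf_task_cpu(p);
 *
 *	if (scx_bpf_test_and_clear_cpu_idle(prev)) {
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | prev, SCX_SLICE_DFL,
 *				   enq_flags);
 *		scx_bpf_kick_cpu(prev, SCX_KICK_IDLE);
 *	} else {
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */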
1133 
1134 /**
1135  * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from @node
1136  * @cpus_allowed: Allowed cpumask
1137  * @node: target NUMA node
1138  * @flags: %SCX_PICK_IDLE_* flags
1139  *
1140  * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
1141  *
1142  * Returns the picked idle cpu number on success, or -%EBUSY if no matching
1143  * cpu was found.
1144  *
1145  * The search starts from @node and proceeds to other online NUMA nodes in
1146  * order of increasing distance (unless SCX_PICK_IDLE_IN_NODE is specified,
1147  * in which case the search is limited to the target @node).
1148  *
1149  * Always returns an error if ops.update_idle() is implemented and
1150  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set, or if
1151  * %SCX_OPS_BUILTIN_IDLE_PER_NODE is not set.
1152  */
1153 __bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
1154 					   int node, u64 flags)
1155 {
1156 	node = validate_node(node);
1157 	if (node < 0)
1158 		return node;
1159 
1160 	return scx_pick_idle_cpu(cpus_allowed, node, flags);
1161 }
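
/*
 * Example: keeping a task on its current NUMA node. An illustrative sketch
 * that assumes %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled and uses
 * scx_bpf_cpu_node() and scx_bpf_task_cpu() from this kernel: try an idle
 * CPU strictly within the node, then settle for any allowed CPU in it.
 *
 *	int node = scx_bpf_cpu_node(scx_bpf_task_cpu(p));
 *	s32 cpu;
 *
 *	cpu = scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node,
 *					 SCX_PICK_IDLE_IN_NODE);
 *	if (cpu < 0)
 *		cpu = scx_bpf_pick_any_cpu_node(p->cpus_ptr, node,
 *						SCX_PICK_IDLE_IN_NODE);
 */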
1162 
1163 /**
1164  * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
1165  * @cpus_allowed: Allowed cpumask
1166  * @flags: %SCX_PICK_IDLE_CPU_* flags
1167  *
1168  * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
1169  * number on success. -%EBUSY if no matching cpu was found.
1170  *
1171  * Idle CPU tracking may race against CPU scheduling state transitions. For
1172  * example, this function may return -%EBUSY as CPUs are transitioning into the
1173  * idle state. If the caller then assumes that there will be dispatch events on
1174  * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
1175  * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
1176  * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
1177  * event in the near future.
1178  *
1179  * Unavailable if ops.update_idle() is implemented and
1180  * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
1181  *
1182  * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
1183  * scx_bpf_pick_idle_cpu_node() instead.
1184  */
1185 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
1186 				      u64 flags)
1187 {
1188 	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
1189 		scx_kf_error("per-node idle tracking is enabled");
1190 		return -EBUSY;
1191 	}
1192 
1193 	if (!check_builtin_idle_enabled())
1194 		return -EBUSY;
1195 
1196 	return scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
1197 }
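
/*
 * Example: pairing an -EBUSY result with scx_bpf_pick_any_cpu() and
 * scx_bpf_kick_cpu(), as suggested above, so that a queued task is still
 * guaranteed a dispatch event in the near future. An illustrative sketch
 * from an ops.enqueue() path, assuming the dispatch and kick kfuncs from
 * the same kernel; @enq_flags is the ops.enqueue() argument.
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	if (cpu < 0)
 *		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
 *	if (cpu >= 0) {
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL,
 *				   enq_flags);
 *		scx_bpf_kick_cpu(cpu, 0);
 *	}
 */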
1198 
1199 /**
1200  * scx_bpf_pick_any_cpu_node - Pick and claim an idle cpu if available
1201  *			       or pick any CPU from @node
1202  * @cpus_allowed: Allowed cpumask
1203  * @node: target NUMA node
1204  * @flags: %SCX_PICK_IDLE_CPU_* flags
1205  *
1206  * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
1207  * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
1208  * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
1209  * empty.
1210  *
1211  * The search starts from @node and proceeds to other online NUMA nodes in
1212  * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
1213  * in which case the search is limited to the target @node, regardless of
1214  * the CPU idle state).
1215  *
1216  * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
1217  * set, this function can't tell which CPUs are idle and will always pick any
1218  * CPU.
1219  */
1220 __bpf_kfunc s32 scx_bpf_pick_any_cpu_node(const struct cpumask *cpus_allowed,
1221 					  int node, u64 flags)
1222 {
1223 	s32 cpu;
1224 
1225 	node = validate_node(node);
1226 	if (node < 0)
1227 		return node;
1228 
1229 	cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
1230 	if (cpu >= 0)
1231 		return cpu;
1232 
1233 	if (flags & SCX_PICK_IDLE_IN_NODE)
1234 		cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
1235 	else
1236 		cpu = cpumask_any_distribute(cpus_allowed);
1237 	if (cpu < nr_cpu_ids)
1238 		return cpu;
1239 	else
1240 		return -EBUSY;
1241 }
1242 
1243 /**
1244  * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
1245  * @cpus_allowed: Allowed cpumask
1246  * @flags: %SCX_PICK_IDLE_CPU_* flags
1247  *
1248  * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
1249  * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
1250  * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
1251  * empty.
1252  *
1253  * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
1254  * set, this function can't tell which CPUs are idle and will always pick any
1255  * CPU.
1256  *
1257  * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
1258  * scx_bpf_pick_any_cpu_node() instead.
1259  */
1260 __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
1261 				     u64 flags)
1262 {
1263 	s32 cpu;
1264 
1265 	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
1266 		scx_kf_error("per-node idle tracking is enabled");
1267 		return -EBUSY;
1268 	}
1269 
1270 	if (static_branch_likely(&scx_builtin_idle_enabled)) {
1271 		cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
1272 		if (cpu >= 0)
1273 			return cpu;
1274 	}
1275 
1276 	cpu = cpumask_any_distribute(cpus_allowed);
1277 	if (cpu < nr_cpu_ids)
1278 		return cpu;
1279 	else
1280 		return -EBUSY;
1281 }
1282 
1283 __bpf_kfunc_end_defs();
1284 
1285 BTF_KFUNCS_START(scx_kfunc_ids_idle)
1286 BTF_ID_FLAGS(func, scx_bpf_cpu_node)
1287 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask_node, KF_ACQUIRE)
1288 BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
1289 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask_node, KF_ACQUIRE)
1290 BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
1291 BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
1292 BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
1293 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
1294 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
1295 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
1296 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
1297 BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
1298 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
1299 BTF_KFUNCS_END(scx_kfunc_ids_idle)
1300 
1301 static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
1302 	.owner			= THIS_MODULE,
1303 	.set			= &scx_kfunc_ids_idle,
1304 };
1305 
1306 int scx_idle_init(void)
1307 {
1308 	int ret;
1309 
1310 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
1311 	      register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
1312 	      register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle);
1313 
1314 	return ret;
1315 }
1316