/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
 */
#define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
#define SCX_MOFF_IDX(moff)	((moff) / sizeof(void (*)(void)))

enum scx_consts {
	SCX_DSP_DFL_MAX_BATCH		= 32,
	SCX_DSP_MAX_LOOPS		= 32,
	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,

	SCX_EXIT_BT_LEN			= 64,
	SCX_EXIT_MSG_LEN		= 1024,
	SCX_EXIT_DUMP_DFL_LEN		= 32768,

	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,

	/*
	 * Iterating all tasks may take a while. Periodically drop
	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
	 */
	SCX_TASK_ITER_BATCH		= 32,

	SCX_BYPASS_HOST_NTH		= 2,

	SCX_BYPASS_LB_DFL_INTV_US	= 500 * USEC_PER_MSEC,
	SCX_BYPASS_LB_DONOR_PCT		= 125,
	SCX_BYPASS_LB_MIN_DELTA_DIV	= 4,
	SCX_BYPASS_LB_BATCH		= 256,

	SCX_REENQ_LOCAL_MAX_REPEAT	= 256,

	SCX_SUB_MAX_DEPTH		= 4,
};

enum scx_exit_kind {
	SCX_EXIT_NONE,
	SCX_EXIT_DONE,

	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
	SCX_EXIT_PARENT,	/* parent exiting */

	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
};

/*
 * An exit code can be specified when exiting with scx_bpf_exit() or scx_exit(),
 * corresponding to exit_kind UNREG_BPF and UNREG_KERN respectively. The codes
 * are 64-bit values with the following format:
 *
 *   Bits: [63  ..  48 47   ..  32 31 .. 0]
 *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
 *
 *   SYS ACT: System-defined exit actions
 *   SYS RSN: System-defined exit reasons
 *   USR    : User-defined exit codes and reasons
 *
 * Using the above, users may communicate intention and context by ORing system
 * actions and/or system reasons with a user-defined exit code.
 */
enum scx_exit_code {
	/* Reasons */
	SCX_ECODE_RSN_HOTPLUG		= 1LLU << 32,
	SCX_ECODE_RSN_CGROUP_OFFLINE	= 2LLU << 32,

	/* Actions */
	SCX_ECODE_ACT_RESTART		= 1LLU << 48,
};
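
/*
 * Illustrative sketch (not part of the kernel API): a BPF scheduler that wants
 * to be restarted after a hotplug-triggered exit could combine the codes above
 * with its own value in the low 32 bits, e.g.:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | MY_ECODE,
 *		     "cpu %d went offline", cpu);
 *
 * where MY_ECODE is a hypothetical scheduler-defined user code. The loader can
 * then inspect the exit code and decide whether to re-attach the scheduler.
 */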

enum scx_exit_flags {
	/*
	 * ops.exit() may be called even if the loading failed before ops.init()
	 * finishes successfully. This is because ops.exit() allows rich exit
	 * info communication. The following flag indicates whether ops.init()
	 * finished successfully.
	 */
	SCX_EFLAG_INITIALIZED		= 1LLU << 0,
};

/*
 * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
 * being disabled.
 */
struct scx_exit_info {
	/* %SCX_EXIT_* - broad category of the exit reason */
	enum scx_exit_kind	kind;

	/* exit code if gracefully exiting */
	s64			exit_code;

	/* %SCX_EFLAG_* */
	u64			flags;

	/* textual representation of the above */
	const char		*reason;

	/* backtrace if exiting due to an error */
	unsigned long		*bt;
	u32			bt_len;

	/* informational message */
	char			*msg;

	/* debug dump */
	char			*dump;
};

/* sched_ext_ops.flags */
enum scx_ops_flags {
	/*
	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
	 */
	SCX_OPS_KEEP_BUILTIN_IDLE	= 1LLU << 0,

	/*
	 * By default, if there are no other tasks to run on the CPU, the ext core
	 * keeps running the current task even after its slice expires. If this
	 * flag is specified, such tasks are passed to ops.enqueue() with
	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
	 */
	SCX_OPS_ENQ_LAST		= 1LLU << 1,

	/*
	 * An exiting task may schedule after PF_EXITING is set. In such cases,
	 * bpf_task_from_pid() may not be able to find the task and if the BPF
	 * scheduler depends on pid lookup for dispatching, the task will be
	 * lost leading to various issues including RCU grace period stalls.
	 *
	 * To mask this problem, by default, unhashed tasks are automatically
	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
	 * depend on pid lookups and wants to handle these tasks directly, the
	 * following flag can be used.
	 */
	SCX_OPS_ENQ_EXITING		= 1LLU << 2,

	/*
	 * If set, only tasks with policy set to SCHED_EXT are attached to
	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
	 */
	SCX_OPS_SWITCH_PARTIAL		= 1LLU << 3,

	/*
	 * A migration disabled task can only execute on its current CPU. By
	 * default, such tasks are automatically put on the CPU's local DSQ with
	 * the default slice on enqueue. If this ops flag is set, they also go
	 * through ops.enqueue().
	 *
	 * A migration disabled task never invokes ops.select_cpu() as it can
	 * only select the current CPU. Also, p->cpus_ptr will only contain its
	 * current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
	 * and thus may disagree with cpumask_weight(p->cpus_ptr).
	 */
	SCX_OPS_ENQ_MIGRATION_DISABLED	= 1LLU << 4,

	/*
	 * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
	 * ops.enqueue() on the ops.select_cpu() selected or the wakee's
	 * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
	 * transfers. When this optimization is enabled, ops.select_cpu() is
	 * skipped in some cases (when racing against the wakee switching out).
	 * As the BPF scheduler may depend on ops.select_cpu() being invoked
	 * during wakeups, queued wakeup is disabled by default.
	 *
	 * If this ops flag is set, queued wakeup optimization is enabled and
	 * the BPF scheduler must be able to handle ops.enqueue() invoked on the
	 * wakee's CPU without preceding ops.select_cpu() even for tasks which
	 * may be executed on multiple CPUs.
	 */
	SCX_OPS_ALLOW_QUEUED_WAKEUP	= 1LLU << 5,

	/*
	 * If set, enable per-node idle cpumasks. If clear, use a single global
	 * flat idle cpumask.
	 */
	SCX_OPS_BUILTIN_IDLE_PER_NODE	= 1LLU << 6,

	/*
	 * If set, %SCX_ENQ_IMMED is assumed to be set on all local DSQ
	 * enqueues.
	 */
	SCX_OPS_ALWAYS_ENQ_IMMED	= 1LLU << 7,

	SCX_OPS_ALL_FLAGS		= SCX_OPS_KEEP_BUILTIN_IDLE |
					  SCX_OPS_ENQ_LAST |
					  SCX_OPS_ENQ_EXITING |
					  SCX_OPS_ENQ_MIGRATION_DISABLED |
					  SCX_OPS_ALLOW_QUEUED_WAKEUP |
					  SCX_OPS_SWITCH_PARTIAL |
					  SCX_OPS_BUILTIN_IDLE_PER_NODE |
					  SCX_OPS_ALWAYS_ENQ_IMMED,

	/* high 8 bits are internal, don't include in SCX_OPS_ALL_FLAGS */
	__SCX_OPS_INTERNAL_MASK		= 0xffLLU << 56,

	SCX_OPS_HAS_CPU_PREEMPT		= 1LLU << 56,
};
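
/*
 * A minimal sketch of how a BPF scheduler might combine the flags above in its
 * sched_ext_ops definition on the BPF side, assuming the usual struct_ops
 * loading conventions; names other than the flags themselves are illustrative:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.flags	= SCX_OPS_ENQ_LAST | SCX_OPS_SWITCH_PARTIAL,
 *		.name	= "example",
 *	};
 */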

/* argument container for ops.init_task() */
struct scx_init_task_args {
	/*
	 * Set if ops.init_task() is being invoked on the fork path, as opposed
	 * to the scheduler transition path.
	 */
	bool			fork;
#ifdef CONFIG_EXT_GROUP_SCHED
	/* the cgroup the task is joining */
	struct cgroup		*cgroup;
#endif
};

/* argument container for ops.exit_task() */
struct scx_exit_task_args {
	/* Whether the task exited before running on sched_ext. */
	bool cancelled;
};

/* argument container for ops.cgroup_init() */
struct scx_cgroup_init_args {
	/* the weight of the cgroup [1..10000] */
	u32			weight;

	/* bandwidth control parameters from cpu.max and cpu.max.burst */
	u64			bw_period_us;
	u64			bw_quota_us;
	u64			bw_burst_us;
};

enum scx_cpu_preempt_reason {
	/* next task is being scheduled by &sched_class_rt */
	SCX_CPU_PREEMPT_RT,
	/* next task is being scheduled by &sched_class_dl */
	SCX_CPU_PREEMPT_DL,
	/* next task is being scheduled by &sched_class_stop */
	SCX_CPU_PREEMPT_STOP,
	/* unknown reason for SCX being preempted */
	SCX_CPU_PREEMPT_UNKNOWN,
};

/*
 * Argument container for ops.cpu_acquire(). Currently empty, but may be
 * expanded in the future.
 */
struct scx_cpu_acquire_args {};

/* argument container for ops.cpu_release() */
struct scx_cpu_release_args {
	/* the reason the CPU was preempted */
	enum scx_cpu_preempt_reason reason;

	/* the task that's going to be scheduled on the CPU */
	struct task_struct	*task;
};

/* informational context provided to dump operations */
struct scx_dump_ctx {
	enum scx_exit_kind	kind;
	s64			exit_code;
	const char		*reason;
	u64			at_ns;
	u64			at_jiffies;
};

/* argument container for ops.sub_attach() */
struct scx_sub_attach_args {
	struct sched_ext_ops	*ops;
	char			*cgroup_path;
};

/* argument container for ops.sub_detach() */
struct scx_sub_detach_args {
	struct sched_ext_ops	*ops;
	char			*cgroup_path;
};

/**
 * struct sched_ext_ops - Operation table for BPF scheduler implementation
 *
 * A BPF scheduler can implement an arbitrary scheduling policy by
 * implementing and loading operations in this table. Note that a userland
 * scheduling policy can also be implemented using the BPF scheduler
 * as a shim layer.
 */
struct sched_ext_ops {
	/**
	 * @select_cpu: Pick the target CPU for a task which is being woken up
	 * @p: task being woken up
	 * @prev_cpu: the cpu @p was on before sleeping
	 * @wake_flags: SCX_WAKE_*
	 *
	 * Decision made here isn't final. @p may be moved to any CPU while it
	 * is getting dispatched for execution later. However, as @p is not on
	 * the rq at this point, getting the eventual execution CPU right here
	 * saves a small bit of overhead down the line.
	 *
	 * If an idle CPU is returned, the CPU is kicked and will try to
	 * dispatch. While an explicit custom mechanism can be added,
	 * select_cpu() serves as the default way to wake up idle CPUs.
	 *
	 * @p may be inserted into a DSQ directly by calling
	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
	 * of the CPU returned by this operation.
	 *
	 * Note that select_cpu() is never called for tasks that can only run
	 * on a single CPU or tasks with migration disabled, as they don't have
	 * the option to select a different CPU. See select_task_rq() for
	 * details.
	 */
	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
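
	/*
	 * A minimal sketch of an ops.select_cpu() implementation on the BPF
	 * side, assuming the built-in idle tracking is in use; names other
	 * than the kfuncs and constants are illustrative:
	 *
	 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle = false;
	 *		s32 cpu;
	 *
	 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *					     &is_idle);
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL,
	 *					   SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}
	 */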

	/**
	 * @enqueue: Enqueue a task on the BPF scheduler
	 * @p: task being enqueued
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * @p is ready to run. Insert directly into a DSQ by calling
	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
	 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
	 * the task will stall.
	 *
	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
	 * skipped.
	 */
	void (*enqueue)(struct task_struct *p, u64 enq_flags);

	/**
	 * @dequeue: Remove a task from the BPF scheduler
	 * @p: task being dequeued
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * Remove @p from the BPF scheduler. This is usually called to isolate
	 * the task while updating its scheduling properties (e.g. priority).
	 *
	 * The ext core keeps track of whether the BPF side owns a given task or
	 * not and can gracefully ignore spurious dispatches from BPF side,
	 * which makes it safe to not implement this method. However, depending
	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
	 * scheduling position not being updated across a priority change.
	 */
	void (*dequeue)(struct task_struct *p, u64 deq_flags);

	/**
	 * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs
	 * @cpu: CPU to dispatch tasks for
	 * @prev: previous task being switched out
	 *
	 * Called when a CPU's local dsq is empty. The operation should dispatch
	 * one or more tasks from the BPF scheduler into the DSQs using
	 * scx_bpf_dsq_insert() and/or move from user DSQs into the local DSQ
	 * using scx_bpf_dsq_move_to_local().
	 *
	 * The maximum number of times scx_bpf_dsq_insert() can be called
	 * without an intervening scx_bpf_dsq_move_to_local() is specified by
	 * ops.dispatch_max_batch. See the comments on top of the two functions
	 * for more details.
	 *
	 * When not %NULL, @prev is an SCX task with its slice depleted. If
	 * @prev is still runnable, as indicated by %SCX_TASK_QUEUED being set in
	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
	 * ops.dispatch() returns. To keep executing @prev, return without
	 * dispatching or moving any tasks. Also see %SCX_OPS_ENQ_LAST.
	 */
	void (*dispatch)(s32 cpu, struct task_struct *prev);
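
	/*
	 * A minimal sketch of an ops.dispatch() implementation, assuming the
	 * scheduler queues everything on a single shared DSQ. EXAMPLE_DSQ is
	 * illustrative and would have been created with scx_bpf_create_dsq()
	 * from ops.init():
	 *
	 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu,
	 *			    struct task_struct *prev)
	 *	{
	 *		scx_bpf_dsq_move_to_local(EXAMPLE_DSQ);
	 *	}
	 */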

	/**
	 * @tick: Periodic tick
	 * @p: task running currently
	 *
	 * This operation is called every 1/HZ seconds on CPUs which are
	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
	 * immediate dispatch cycle on the CPU.
	 */
	void (*tick)(struct task_struct *p);

	/**
	 * @runnable: A task is becoming runnable on its associated CPU
	 * @p: task becoming runnable
	 * @enq_flags: %SCX_ENQ_*
	 *
	 * This and the following three functions can be used to track a task's
	 * execution state transitions. A task becomes ->runnable() on a CPU,
	 * and then goes through one or more ->running() and ->stopping() pairs
	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
	 * done running on the CPU.
	 *
	 * @p is becoming runnable on the CPU because it's
	 *
	 * - waking up (%SCX_ENQ_WAKEUP)
	 * - being moved from another CPU
	 * - being restored after temporarily taken off the queue for an
	 *   attribute change.
	 *
	 * This and ->enqueue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be followed by ->enqueue()
	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
	 * task may be ->enqueue()'d without being preceded by this operation
	 * e.g. after exhausting its slice.
	 */
	void (*runnable)(struct task_struct *p, u64 enq_flags);

	/**
	 * @running: A task is starting to run on its associated CPU
	 * @p: task starting to run
	 *
	 * Note that this callback may be called from a CPU other than the
	 * one the task is going to run on. This can happen when a task
	 * property is changed (e.g., affinity), since scx_next_task_scx(),
	 * which triggers this callback, may run on a CPU different from
	 * the task's assigned CPU.
	 *
	 * Therefore, always use scx_bpf_task_cpu(@p) to determine the
	 * target CPU the task is going to use.
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 */
	void (*running)(struct task_struct *p);

	/**
	 * @stopping: A task is stopping execution
	 * @p: task stopping to run
	 * @runnable: is task @p still runnable?
	 *
	 * Note that this callback may be called from a CPU other than the
	 * one the task was running on. This can happen when a task
	 * property is changed (e.g., affinity), since dequeue_task_scx(),
	 * which triggers this callback, may run on a CPU different from
	 * the task's assigned CPU.
	 *
	 * Therefore, always use scx_bpf_task_cpu(@p) to retrieve the CPU
	 * the task was running on.
	 *
	 * See ->runnable() for explanation on the task state notifiers. If
	 * !@runnable, ->quiescent() will be invoked after this operation
	 * returns.
	 */
	void (*stopping)(struct task_struct *p, bool runnable);

	/**
	 * @quiescent: A task is becoming not runnable on its associated CPU
	 * @p: task becoming not runnable
	 * @deq_flags: %SCX_DEQ_*
	 *
	 * See ->runnable() for explanation on the task state notifiers.
	 *
	 * @p is becoming quiescent on the CPU because it's
	 *
	 * - sleeping (%SCX_DEQ_SLEEP)
	 * - being moved to another CPU
	 * - being temporarily taken off the queue for an attribute change
	 *   (%SCX_DEQ_SAVE)
	 *
	 * This and ->dequeue() are related but not coupled. This operation
	 * notifies @p's state transition and may not be preceded by ->dequeue()
	 * e.g. when @p is being dispatched to a remote CPU.
	 */
	void (*quiescent)(struct task_struct *p, u64 deq_flags);

	/**
	 * @yield: Yield CPU
	 * @from: yielding task
	 * @to: optional yield target task
	 *
	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
	 * The BPF scheduler should ensure that other available tasks are
	 * dispatched before the yielding task. Return value is ignored in this
	 * case.
	 *
	 * If @to is not-NULL, @from wants to yield the CPU to @to. If the bpf
	 * scheduler can implement the request, return %true; otherwise, %false.
	 */
	bool (*yield)(struct task_struct *from, struct task_struct *to);

	/**
	 * @core_sched_before: Task ordering for core-sched
	 * @a: task A
	 * @b: task B
	 *
	 * Used by core-sched to determine the ordering between two tasks. See
	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
	 * core-sched.
	 *
	 * Both @a and @b are runnable and may or may not currently be queued on
	 * the BPF scheduler. Should return %true if @a should run before @b.
	 * %false if there's no required ordering or @b should run before @a.
	 *
	 * If not specified, the default is ordering them according to when they
	 * became runnable.
	 */
	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);

	/**
	 * @set_weight: Set task weight
	 * @p: task to set weight for
	 * @weight: new weight [1..10000]
	 *
	 * Update @p's weight to @weight.
	 */
	void (*set_weight)(struct task_struct *p, u32 weight);

	/**
	 * @set_cpumask: Set CPU affinity
	 * @p: task to set CPU affinity for
	 * @cpumask: cpumask of cpus that @p can run on
	 *
	 * Update @p's CPU affinity to @cpumask.
	 */
	void (*set_cpumask)(struct task_struct *p,
			    const struct cpumask *cpumask);

	/**
	 * @update_idle: Update the idle state of a CPU
	 * @cpu: CPU to update the idle state for
	 * @idle: whether entering or exiting the idle state
	 *
	 * This operation is called when @cpu enters or leaves the idle
	 * state. By default, implementing this operation disables the built-in
	 * idle CPU tracking and the following helpers become unavailable:
	 *
	 * - scx_bpf_select_cpu_dfl()
	 * - scx_bpf_select_cpu_and()
	 * - scx_bpf_test_and_clear_cpu_idle()
	 * - scx_bpf_pick_idle_cpu()
	 *
	 * The user also must implement ops.select_cpu() as the default
	 * implementation relies on scx_bpf_select_cpu_dfl().
	 *
	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
	 * tracking.
	 */
	void (*update_idle)(s32 cpu, bool idle);

	/**
	 * @cpu_acquire: A CPU is becoming available to the BPF scheduler
	 * @cpu: The CPU being acquired by the BPF scheduler.
	 * @args: Acquire arguments, see the struct definition.
	 *
	 * A CPU that was previously released from the BPF scheduler is now once
	 * again under its control.
	 */
	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);

	/**
	 * @cpu_release: A CPU is taken away from the BPF scheduler
	 * @cpu: The CPU being released by the BPF scheduler.
	 * @args: Release arguments, see the struct definition.
	 *
	 * The specified CPU is no longer under the control of the BPF
	 * scheduler. This could be because it was preempted by a higher
	 * priority sched_class, though there may be other reasons as well. The
	 * caller should consult @args->reason to determine the cause.
	 */
	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);

	/**
	 * @init_task: Initialize a task to run in a BPF scheduler
	 * @p: task to initialize for BPF scheduling
	 * @args: init arguments, see the struct definition
	 *
	 * Either we're loading a BPF scheduler or a new task is being forked.
	 * Initialize @p for BPF scheduling. This operation may block and can
	 * be used for allocations, and is called exactly once for a task.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During a fork, it
	 * will abort that specific fork.
	 */
	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);

	/**
	 * @exit_task: Exit a previously-running task from the system
	 * @p: task to exit
	 * @args: exit arguments, see the struct definition
	 *
	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
	 * necessary cleanup for @p.
	 */
	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);

	/**
	 * @enable: Enable BPF scheduling for a task
	 * @p: task to enable BPF scheduling for
	 *
	 * Enable @p for BPF scheduling. enable() is called on @p any time it
	 * enters SCX, and is always paired with a matching disable().
	 */
	void (*enable)(struct task_struct *p);

	/**
	 * @disable: Disable BPF scheduling for a task
	 * @p: task to disable BPF scheduling for
	 *
	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
	 * Disable BPF scheduling for @p. A disable() call is always matched
	 * with a prior enable() call.
	 */
	void (*disable)(struct task_struct *p);

	/**
	 * @dump: Dump BPF scheduler state on error
	 * @ctx: debug dump context
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
	 */
	void (*dump)(struct scx_dump_ctx *ctx);

	/**
	 * @dump_cpu: Dump BPF scheduler state for a CPU on error
	 * @ctx: debug dump context
	 * @cpu: CPU to generate debug dump for
	 * @idle: @cpu is currently idle without any runnable tasks
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @cpu. If @idle is %true and this operation doesn't produce any
	 * output, @cpu is skipped for dump.
	 */
	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);

	/**
	 * @dump_task: Dump BPF scheduler state for a runnable task on error
	 * @ctx: debug dump context
	 * @p: runnable task to generate debug dump for
	 *
	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
	 * @p.
	 */
	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);

#ifdef CONFIG_EXT_GROUP_SCHED
	/**
	 * @cgroup_init: Initialize a cgroup
	 * @cgrp: cgroup being initialized
	 * @args: init arguments, see the struct definition
	 *
	 * Either the BPF scheduler is being loaded or @cgrp created, initialize
	 * @cgrp for sched_ext. This operation may block.
	 *
	 * Return 0 for success, -errno for failure. An error return while
	 * loading will abort loading of the BPF scheduler. During cgroup
	 * creation, it will abort the specific cgroup creation.
	 */
	s32 (*cgroup_init)(struct cgroup *cgrp,
			   struct scx_cgroup_init_args *args);

	/**
	 * @cgroup_exit: Exit a cgroup
	 * @cgrp: cgroup being exited
	 *
	 * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit
	 * @cgrp for sched_ext. This operation may block.
	 */
	void (*cgroup_exit)(struct cgroup *cgrp);

	/**
	 * @cgroup_prep_move: Prepare a task to be moved to a different cgroup
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Prepare @p for move from cgroup @from to @to. This operation may
	 * block and can be used for allocations.
	 *
	 * Return 0 for success, -errno for failure. An error return aborts the
	 * migration.
	 */
	s32 (*cgroup_prep_move)(struct task_struct *p,
				struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_move: Commit cgroup move
	 * @p: task being moved
	 * @from: cgroup @p is being moved from
	 * @to: cgroup @p is being moved to
	 *
	 * Commit the move. @p is dequeued during this operation.
	 */
	void (*cgroup_move)(struct task_struct *p,
			    struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_cancel_move: Cancel cgroup move
	 * @p: task whose cgroup move is being canceled
	 * @from: cgroup @p was being moved from
	 * @to: cgroup @p was being moved to
	 *
	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
	 * Undo the preparation.
	 */
	void (*cgroup_cancel_move)(struct task_struct *p,
				   struct cgroup *from, struct cgroup *to);

	/**
	 * @cgroup_set_weight: A cgroup's weight is being changed
	 * @cgrp: cgroup whose weight is being updated
	 * @weight: new weight [1..10000]
	 *
	 * Update @cgrp's weight to @weight.
	 */
	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);

	/**
	 * @cgroup_set_bandwidth: A cgroup's bandwidth is being changed
	 * @cgrp: cgroup whose bandwidth is being updated
	 * @period_us: bandwidth control period
	 * @quota_us: bandwidth control quota
	 * @burst_us: bandwidth control burst
	 *
	 * Update @cgrp's bandwidth control parameters. This is from the cpu.max
	 * cgroup interface.
	 *
	 * @quota_us / @period_us determines the CPU bandwidth @cgrp is entitled
	 * to. For example, if @period_us is 1_000_000 and @quota_us is
	 * 2_500_000, @cgrp is entitled to 2.5 CPUs. @burst_us can be
	 * interpreted in the same fashion and specifies how much @cgrp can
	 * burst temporarily. The specific control mechanism and thus the
	 * interpretation of @period_us and burstiness is up to the BPF
	 * scheduler.
	 */
	void (*cgroup_set_bandwidth)(struct cgroup *cgrp,
				     u64 period_us, u64 quota_us, u64 burst_us);

	/**
	 * @cgroup_set_idle: A cgroup's idle state is being changed
	 * @cgrp: cgroup whose idle state is being updated
	 * @idle: whether the cgroup is entering or exiting idle state
	 *
	 * Update @cgrp's idle state to @idle. This callback is invoked when
	 * a cgroup transitions between idle and non-idle states, allowing the
	 * BPF scheduler to adjust its behavior accordingly.
	 */
	void (*cgroup_set_idle)(struct cgroup *cgrp, bool idle);

#endif	/* CONFIG_EXT_GROUP_SCHED */

	/**
	 * @sub_attach: Attach a sub-scheduler
	 * @args: argument container, see the struct definition
	 *
	 * Return 0 to accept the sub-scheduler. -errno to reject.
	 */
	s32 (*sub_attach)(struct scx_sub_attach_args *args);

	/**
	 * @sub_detach: Detach a sub-scheduler
	 * @args: argument container, see the struct definition
	 */
	void (*sub_detach)(struct scx_sub_detach_args *args);

	/*
	 * All online ops must come before ops.cpu_online().
	 */

	/**
	 * @cpu_online: A CPU became online
	 * @cpu: CPU which just came up
	 *
	 * @cpu just came online. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
	 */
	void (*cpu_online)(s32 cpu);

	/**
	 * @cpu_offline: A CPU is going offline
	 * @cpu: CPU which is going offline
	 *
	 * @cpu is going offline. @cpu will not call ops.enqueue() or
	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
	 */
	void (*cpu_offline)(s32 cpu);

	/*
	 * All CPU hotplug ops must come before ops.init().
	 */

	/**
	 * @init: Initialize the BPF scheduler
	 */
	s32 (*init)(void);

	/**
	 * @exit: Clean up after the BPF scheduler
	 * @info: Exit info
	 *
	 * ops.exit() is also called on ops.init() failure, which is a bit
	 * unusual. This is to allow rich reporting through @info on how
	 * ops.init() failed.
	 */
	void (*exit)(struct scx_exit_info *info);

	/*
	 * Data fields must come after all ops fields.
	 */

	/**
	 * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch
	 */
	u32 dispatch_max_batch;

	/**
	 * @flags: %SCX_OPS_* flags
	 */
	u64 flags;

	/**
	 * @timeout_ms: The maximum amount of time, in milliseconds, that a
	 * runnable task should be able to wait before being scheduled. The
	 * maximum timeout may not exceed the default timeout of 30 seconds.
	 *
	 * Defaults to the maximum allowed timeout value of 30 seconds.
	 */
	u32 timeout_ms;

	/**
	 * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default
	 * value of 32768 is used.
	 */
	u32 exit_dump_len;

	/**
	 * @hotplug_seq: A sequence number that may be set by the scheduler to
	 * detect when a hotplug event has occurred during the loading process.
	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
	 * load if the sequence number does not match @scx_hotplug_seq on the
	 * enable path.
	 */
	u64 hotplug_seq;

	/**
	 * @sub_cgroup_id: When >1, attach the scheduler as a sub-scheduler on the
	 * specified cgroup.
	 */
	u64 sub_cgroup_id;

	/**
	 * @name: BPF scheduler's name
	 *
	 * Must be a non-zero valid BPF object name including only isalnum(),
	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
	 * BPF scheduler is enabled.
	 */
	char name[SCX_OPS_NAME_LEN];

	/* internal use only, must be NULL */
	void __rcu *priv;
};

enum scx_opi {
	SCX_OPI_BEGIN			= 0,
	SCX_OPI_NORMAL_BEGIN		= 0,
	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
	SCX_OPI_END			= SCX_OP_IDX(init),
};

/*
 * Collection of event counters. Event types are placed in descending order.
 */
struct scx_event_stats {
	/*
	 * If ops.select_cpu() returns a CPU which can't be used by the task,
	 * the core scheduler code silently picks a fallback CPU.
	 */
	s64		SCX_EV_SELECT_CPU_FALLBACK;

	/*
	 * When dispatching to a local DSQ, the CPU may have gone offline in
	 * the meantime. In this case, the task is bounced to the global DSQ.
	 */
	s64		SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;

	/*
	 * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
	 * continued to run because there were no other tasks on the CPU.
	 */
	s64		SCX_EV_DISPATCH_KEEP_LAST;

	/*
	 * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
	 * is dispatched to a local DSQ when exiting.
	 */
	s64		SCX_EV_ENQ_SKIP_EXITING;

	/*
	 * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
	 * migration disabled task skips ops.enqueue() and is dispatched to its
	 * local DSQ.
	 */
	s64		SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;

	/*
	 * The number of times a task, enqueued on a local DSQ with
	 * SCX_ENQ_IMMED, was re-enqueued because the CPU was not available for
	 * immediate execution.
	 */
	s64		SCX_EV_REENQ_IMMED;

	/*
	 * The number of times a reenq of local DSQ caused another reenq of
	 * local DSQ. This can happen when %SCX_ENQ_IMMED races against a higher
	 * priority class task even if the BPF scheduler always satisfies the
	 * prerequisites for %SCX_ENQ_IMMED at the time of enqueue. However,
	 * that scenario is very unlikely and this count going up regularly
	 * indicates that the BPF scheduler is handling %SCX_ENQ_REENQ
	 * incorrectly causing recursive reenqueues.
	 */
	s64		SCX_EV_REENQ_LOCAL_REPEAT;

	/*
	 * Total number of times a task's time slice was refilled with the
	 * default value (SCX_SLICE_DFL).
	 */
	s64		SCX_EV_REFILL_SLICE_DFL;

	/*
	 * The total duration of bypass modes in nanoseconds.
	 */
	s64		SCX_EV_BYPASS_DURATION;

	/*
	 * The number of tasks dispatched in the bypassing mode.
	 */
	s64		SCX_EV_BYPASS_DISPATCH;

	/*
	 * The number of times the bypassing mode has been activated.
	 */
	s64		SCX_EV_BYPASS_ACTIVATE;

	/*
	 * The number of times the scheduler attempted to insert a task that it
	 * doesn't own into a DSQ. Such attempts are ignored.
	 *
	 * As BPF schedulers are allowed to ignore dequeues, it's difficult to
	 * tell whether such an attempt is from a scheduler malfunction or an
	 * ignored dequeue around sub-sched enabling. If this count keeps going
	 * up regardless of sub-sched enabling, it likely indicates a bug in the
	 * scheduler.
	 */
	s64		SCX_EV_INSERT_NOT_OWNED;

	/*
	 * The number of times tasks from bypassing descendants are scheduled
	 * from sub_bypass_dsq's.
	 */
	s64		SCX_EV_SUB_BYPASS_DISPATCH;
};

struct scx_sched;

enum scx_sched_pcpu_flags {
	SCX_SCHED_PCPU_BYPASSING	= 1LLU << 0,
};

/* dispatch buf */
struct scx_dsp_buf_ent {
	struct task_struct	*task;
	unsigned long		qseq;
	u64			dsq_id;
	u64			enq_flags;
};

struct scx_dsp_ctx {
	struct rq		*rq;
	u32			cursor;
	u32			nr_tasks;
	struct scx_dsp_buf_ent	buf[];
};

struct scx_deferred_reenq_local {
	struct list_head	node;
	u64			flags;
	u64			seq;
	u32			cnt;
};

struct scx_sched_pcpu {
	struct scx_sched	*sch;
	u64			flags;	/* protected by rq lock */

	/*
	 * The event counters are in a per-CPU variable to minimize the
	 * accounting overhead. A system-wide view on the event counter is
	 * constructed when requested by scx_bpf_events().
	 */
	struct scx_event_stats	event_stats;

	struct scx_deferred_reenq_local	deferred_reenq_local;
	struct scx_dispatch_q	bypass_dsq;
#ifdef CONFIG_EXT_SUB_SCHED
	u32			bypass_host_seq;
#endif

	/* must be the last entry - contains flex array */
	struct scx_dsp_ctx	dsp_ctx;
};

struct scx_sched_pnode {
	struct scx_dispatch_q	global_dsq;
};

struct scx_sched {
	struct sched_ext_ops	ops;
	DECLARE_BITMAP(has_op, SCX_OPI_END);

	/*
	 * Dispatch queues.
	 *
	 * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability.
	 * This is to avoid live-locking in bypass mode where all tasks are
	 * dispatched to %SCX_DSQ_GLOBAL and all CPUs consume from it. If
	 * per-node split isn't sufficient, it can be further split.
	 */
	struct rhashtable	dsq_hash;
	struct scx_sched_pnode	**pnode;
	struct scx_sched_pcpu __percpu *pcpu;

	u64			slice_dfl;
	u64			bypass_timestamp;
	s32			bypass_depth;

	/* bypass dispatch path enable state, see bypass_dsp_enabled() */
	unsigned long		bypass_dsp_claim;
	atomic_t		bypass_dsp_enable_depth;

	bool			aborting;
	bool			dump_disabled;	/* protected by scx_dump_lock */
	u32			dsp_max_batch;
	s32			level;

	/*
	 * Updates to the following warned bitfields can race causing RMW issues
	 * but it doesn't really matter.
	 */
	bool			warned_zero_slice:1;
	bool			warned_deprecated_rq:1;
	bool			warned_unassoc_progs:1;

	struct list_head	all;

#ifdef CONFIG_EXT_SUB_SCHED
	struct rhash_head	hash_node;

	struct list_head	children;
	struct list_head	sibling;
	struct cgroup		*cgrp;
	char			*cgrp_path;
	struct kset		*sub_kset;

	bool			sub_attached;
#endif	/* CONFIG_EXT_SUB_SCHED */

	/*
	 * The maximum amount of time in jiffies that a task may be runnable
	 * without being scheduled on a CPU. If this timeout is exceeded, it
	 * will trigger scx_error().
	 */
	unsigned long		watchdog_timeout;

	atomic_t		exit_kind;
	struct scx_exit_info	*exit_info;

	struct kobject		kobj;

	struct kthread_worker	*helper;
	struct irq_work		disable_irq_work;
	struct kthread_work	disable_work;
	struct timer_list	bypass_lb_timer;
	struct rcu_work		rcu_work;

	/* all ancestors including self */
	struct scx_sched	*ancestors[];
};

enum scx_wake_flags {
	/* expose select WF_* flags as enums */
	SCX_WAKE_FORK		= WF_FORK,
	SCX_WAKE_TTWU		= WF_TTWU,
	SCX_WAKE_SYNC		= WF_SYNC,
};

enum scx_enq_flags {
	/* expose select ENQUEUE_* flags as enums */
	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,

	/* high 32bits are SCX specific */

	/*
	 * Set the following to trigger preemption when calling
	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
	 * current task is cleared to zero and the CPU is kicked into the
	 * scheduling path. Implies %SCX_ENQ_HEAD.
	 */
	SCX_ENQ_PREEMPT		= 1LLU << 32,

	/*
	 * Only allowed on local DSQs. Guarantees that the task either gets
	 * on the CPU immediately and stays on it, or gets reenqueued back
	 * to the BPF scheduler. It will never linger on a local DSQ or be
	 * silently put back after preemption.
	 *
	 * The protection persists until the next fresh enqueue - it
	 * survives SAVE/RESTORE cycles, slice extensions and preemption.
	 * If the task can't stay on the CPU for any reason, it gets
	 * reenqueued back to the BPF scheduler.
	 *
	 * Exiting and migration-disabled tasks bypass ops.enqueue() and
	 * are placed directly on a local DSQ without IMMED protection
	 * unless %SCX_OPS_ENQ_EXITING and %SCX_OPS_ENQ_MIGRATION_DISABLED
	 * are set respectively.
	 */
	SCX_ENQ_IMMED		= 1LLU << 33,

	/*
	 * The task being enqueued was previously enqueued on a DSQ, but was
	 * removed and is being re-enqueued. See SCX_TASK_REENQ_* flags to find
	 * out why a given task is being reenqueued.
	 */
	SCX_ENQ_REENQ		= 1LLU << 40,

	/*
	 * The task being enqueued is the only task available for the cpu. By
	 * default, ext core keeps executing such tasks but when
	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
	 * %SCX_ENQ_LAST flag set.
	 *
	 * The BPF scheduler is responsible for triggering a follow-up
	 * scheduling event. Otherwise, execution may stall.
	 */
	SCX_ENQ_LAST		= 1LLU << 41,

	/* high 8 bits are internal */
	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,

	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
	SCX_ENQ_NESTED		= 1LLU << 58,
	SCX_ENQ_GDSQ_FALLBACK	= 1LLU << 59,	/* fell back to global DSQ */
};
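
/*
 * A minimal sketch of using the enqueue flags above: inserting a task at the
 * head of a remote CPU's local DSQ and preempting whatever is running there
 * (the choice of target CPU is illustrative):
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target_cpu, SCX_SLICE_DFL,
 *			   SCX_ENQ_PREEMPT);
 */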

enum scx_deq_flags {
	/* expose select DEQUEUE_* flags as enums */
	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,

	/* high 32bits are SCX specific */

	/*
	 * The generic core-sched layer decided to execute the task even though
	 * it hasn't been dispatched yet. Dequeue from the BPF side.
	 */
	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,

	/*
	 * The task is being dequeued due to a property change (e.g.,
	 * sched_setaffinity(), sched_setscheduler(), set_user_nice(),
	 * etc.).
	 */
	SCX_DEQ_SCHED_CHANGE	= 1LLU << 33,
};

enum scx_reenq_flags {
	/* low 16bits determine which tasks should be reenqueued */
	SCX_REENQ_ANY		= 1LLU << 0,	/* all tasks */

	__SCX_REENQ_FILTER_MASK	= 0xffffLLU,

	__SCX_REENQ_USER_MASK	= SCX_REENQ_ANY,

	/* bits 32-35 used by task_should_reenq() */
	SCX_REENQ_TSR_RQ_OPEN	= 1LLU << 32,
	SCX_REENQ_TSR_NOT_FIRST	= 1LLU << 33,

	__SCX_REENQ_TSR_MASK	= 0xfLLU << 32,
};

enum scx_pick_idle_cpu_flags {
	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
	SCX_PICK_IDLE_IN_NODE	= 1LLU << 1,	/* pick a CPU in the same target NUMA node */
};
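
/*
 * A minimal sketch of using the flags above with scx_bpf_pick_idle_cpu(),
 * preferring a fully idle SMT core within the task's allowed CPUs and falling
 * back to any idle CPU:
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
 *
 *	if (cpu < 0)
 *		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 */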

enum scx_kick_flags {
	/*
	 * Kick the target CPU if idle. Guarantees that the target CPU goes
	 * through at least one full scheduling cycle before going idle. If the
	 * target CPU can be determined to be currently not idle and going to go
	 * through a scheduling cycle before going idle, noop.
	 */
	SCX_KICK_IDLE		= 1LLU << 0,

	/*
	 * Preempt the current task and execute the dispatch path. If the
	 * current task of the target CPU is an SCX task, its ->scx.slice is
	 * cleared to zero before the scheduling path is invoked so that the
	 * task expires and the dispatch path is invoked.
	 */
	SCX_KICK_PREEMPT	= 1LLU << 1,

	/*
	 * The scx_bpf_kick_cpu() call will return after the current SCX task of
	 * the target CPU switches out. This can be used to implement e.g. core
	 * scheduling. This has no effect if the current task on the target CPU
	 * is not on SCX.
	 */
	SCX_KICK_WAIT		= 1LLU << 2,
};
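
/*
 * A minimal sketch of using the kick flags above: after inserting a task into
 * a remote CPU's local DSQ, force that CPU through the scheduling path so the
 * task is picked up promptly (target_cpu is illustrative):
 *
 *	scx_bpf_kick_cpu(target_cpu, SCX_KICK_PREEMPT);
 */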

enum scx_tg_flags {
	SCX_TG_ONLINE		= 1U << 0,
	SCX_TG_INITED		= 1U << 1,
};

enum scx_enable_state {
	SCX_ENABLING,
	SCX_ENABLED,
	SCX_DISABLING,
	SCX_DISABLED,
};

static const char *scx_enable_state_str[] = {
	[SCX_ENABLING]		= "enabling",
	[SCX_ENABLED]		= "enabled",
	[SCX_DISABLING]		= "disabling",
	[SCX_DISABLED]		= "disabled",
};

/*
 * Task Ownership State Machine (sched_ext_entity->ops_state)
 *
 * The sched_ext core uses this state machine to track task ownership
 * between the SCX core and the BPF scheduler. This allows the BPF
 * scheduler to dispatch tasks without strict ordering requirements, while
 * the SCX core safely rejects invalid dispatches.
 *
 * State Transitions
 *
 *   .------------> NONE (owned by SCX core)
 *   |                |        ^
 *   |        enqueue |        | direct dispatch
 *   |                v        |
 *   |            QUEUEING ----'
 *   |                |
 *   |        enqueue |
 *   |      completes |
 *   |                v
 *   |             QUEUED (owned by BPF scheduler)
 *   |                |
 *   |       dispatch |
 *   |                |
 *   |                v
 *   |           DISPATCHING
 *   |                |
 *   |       dispatch |
 *   |      completes |
 *   `----------------'
 *
 * State Descriptions
 *
 * - %SCX_OPSS_NONE:
 *   Task is owned by the SCX core. It's either on a run queue, running,
 *   or being manipulated by the core scheduler. The BPF scheduler has no
 *   claim on this task.
 *
 * - %SCX_OPSS_QUEUEING:
 *   Transitional state while transferring a task from the SCX core to
 *   the BPF scheduler. The task's rq lock is held during this state.
 *   Since QUEUEING is both entered and exited under the rq lock, dequeue
 *   can never observe this state (it would be a BUG). When finishing a
 *   dispatch, if the task is still in %SCX_OPSS_QUEUEING the completion
 *   path busy-waits for it to leave this state (via wait_ops_state())
 *   before retrying.
 *
 * - %SCX_OPSS_QUEUED:
 *   Task is owned by the BPF scheduler. It's on a DSQ (dispatch queue)
 *   and the BPF scheduler is responsible for dispatching it. A QSEQ
 *   (queue sequence number) is embedded in this state to detect
 *   dispatch/dequeue races: if a task is dequeued and re-enqueued, the
 *   QSEQ changes and any in-flight dispatch operations targeting the old
 *   QSEQ are safely ignored.
 *
 * - %SCX_OPSS_DISPATCHING:
 *   Transitional state while transferring a task from the BPF scheduler
 *   back to the SCX core. This state indicates the BPF scheduler has
 *   selected the task for execution. When dequeue needs to take the task
 *   off a DSQ and it is still in %SCX_OPSS_DISPATCHING, the dequeue path
 *   busy-waits for it to leave this state (via wait_ops_state()) before
 *   proceeding. Exits to %SCX_OPSS_NONE when dispatch completes.
 *
 * Memory Ordering
 *
 * Transitions out of %SCX_OPSS_QUEUEING and %SCX_OPSS_DISPATCHING into
 * %SCX_OPSS_NONE or %SCX_OPSS_QUEUED must use atomic_long_set_release()
 * and waiters must use atomic_long_read_acquire(). This ensures proper
 * synchronization between concurrent operations.
 *
 * Cross-CPU Task Migration
 *
 * When moving a task in the %SCX_OPSS_DISPATCHING state, we can't simply
 * grab the target CPU's rq lock because a concurrent dequeue might be
 * waiting on %SCX_OPSS_DISPATCHING while holding the source rq lock
 * (deadlock).
 *
 * The sched_ext core uses a "lock dancing" protocol coordinated by
 * p->scx.holding_cpu. When moving a task to a different rq:
 *
 * 1. Verify task can be moved (CPU affinity, migration_disabled, etc.)
 * 2. Set p->scx.holding_cpu to the current CPU
 * 3. Set task state to %SCX_OPSS_NONE; dequeue waits while DISPATCHING
 *    is set, so clearing DISPATCHING first prevents the circular wait
 *    (safe to lock the rq we need)
 * 4. Unlock the current CPU's rq
 * 5. Lock src_rq (where the task currently lives)
 * 6. Verify p->scx.holding_cpu == current CPU, if not, dequeue won the
 *    race (dequeue clears holding_cpu to -1 when it takes the task), in
 *    this case migration is aborted
 * 7. If src_rq == dst_rq: clear holding_cpu and enqueue directly
 *    into dst_rq's local DSQ (no lock swap needed)
 * 8. Otherwise: call move_remote_task_to_local_dsq(), which releases
 *    src_rq, locks dst_rq, and performs the deactivate/activate
 *    migration cycle (dst_rq is held on return)
 * 9. Unlock dst_rq and re-lock the current CPU's rq to restore
 *    the lock state expected by the caller
 *
 * If any verification fails, abort the migration.
 *
 * This state tracking allows the BPF scheduler to try to dispatch any task
 * at any time regardless of its state. The SCX core can safely
 * reject/ignore invalid dispatches, simplifying the BPF scheduler
 * implementation.
 */
enum scx_ops_state {
	SCX_OPSS_NONE,		/* owned by the SCX core */
	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */

	/*
	 * QSEQ brands each QUEUED instance so that, when dispatch races
	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
	 * on the task being dispatched.
	 *
	 * As some 32bit archs can't do 64bit store_release/load_acquire,
	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
	 * and runs with IRQ disabled. 30 bits should be sufficient.
	 */
	SCX_OPSS_QSEQ_SHIFT	= 2,
};

/* Use macros to ensure that the type is unsigned long for the masks */
#define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
#define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
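
/*
 * A minimal sketch of how the masks above are meant to be used when decoding
 * p->scx.ops_state, with acquire ordering per the Memory Ordering comment
 * above (variable names are illustrative):
 *
 *	unsigned long opss = atomic_long_read_acquire(&p->scx.ops_state);
 *	enum scx_ops_state state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 */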

extern struct scx_sched __rcu *scx_root;
DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);

int scx_kfunc_context_filter(const struct bpf_prog *prog, u32 kfunc_id);

/*
 * Return the rq currently locked from an scx callback, or NULL if no rq is
 * locked.
 */
static inline struct rq *scx_locked_rq(void)
{
	return __this_cpu_read(scx_locked_rq_state);
}

static inline bool scx_bypassing(struct scx_sched *sch, s32 cpu)
{
	return unlikely(per_cpu_ptr(sch->pcpu, cpu)->flags &
			SCX_SCHED_PCPU_BYPASSING);
}

#ifdef CONFIG_EXT_SUB_SCHED
/**
 * scx_task_sched - Find scx_sched scheduling a task
 * @p: task of interest
 *
 * Return @p's scheduler instance. Must be called with @p's pi_lock or rq lock
 * held.
 */
static inline struct scx_sched *scx_task_sched(const struct task_struct *p)
{
	return rcu_dereference_protected(p->scx.sched,
					 lockdep_is_held(&p->pi_lock) ||
					 lockdep_is_held(__rq_lockp(task_rq(p))));
}

/**
 * scx_task_sched_rcu - Find scx_sched scheduling a task
 * @p: task of interest
 *
 * Return @p's scheduler instance. The returned scx_sched is RCU protected.
 */
static inline struct scx_sched *scx_task_sched_rcu(const struct task_struct *p)
{
	return rcu_dereference_all(p->scx.sched);
}

/**
 * scx_task_on_sched - Is a task on the specified sched?
 * @sch: sched to test against
 * @p: task of interest
 *
 * Returns %true if @p is on @sch, %false otherwise.
 */
static inline bool scx_task_on_sched(struct scx_sched *sch,
				     const struct task_struct *p)
{
	return rcu_access_pointer(p->scx.sched) == sch;
}

/**
 * scx_prog_sched - Find scx_sched associated with a BPF prog
 * @aux: aux passed in from BPF to a kfunc
 *
 * To be called from kfuncs. Return the scheduler instance associated with the
 * BPF program given the implicit kfunc argument aux. The returned scx_sched is
 * RCU protected.
 */
static inline struct scx_sched *scx_prog_sched(const struct bpf_prog_aux *aux)
{
	struct sched_ext_ops *ops;
	struct scx_sched *root;

	ops = bpf_prog_get_assoc_struct_ops(aux);
	if (likely(ops))
		return rcu_dereference_all(ops->priv);

	root = rcu_dereference_all(scx_root);
	if (root) {
		/*
		 * COMPAT-v6.19: Schedulers built before sub-sched support was
		 * introduced may have unassociated non-struct_ops programs.
		 */
		if (!root->ops.sub_attach)
			return root;

		if (!root->warned_unassoc_progs) {
			printk_deferred(KERN_WARNING "sched_ext: Unassociated program %s (id %d)\n",
					aux->name, aux->id);
			root->warned_unassoc_progs = true;
		}
	}

	return NULL;
}
#else	/* CONFIG_EXT_SUB_SCHED */
static inline struct scx_sched *scx_task_sched(const struct task_struct *p)
{
	return rcu_dereference_protected(scx_root,
					 lockdep_is_held(&p->pi_lock) ||
					 lockdep_is_held(__rq_lockp(task_rq(p))));
}

static inline struct scx_sched *scx_task_sched_rcu(const struct task_struct *p)
{
	return rcu_dereference_all(scx_root);
}

static inline bool scx_task_on_sched(struct scx_sched *sch,
				     const struct task_struct *p)
{
	return true;
}

static inline struct scx_sched *scx_prog_sched(const struct bpf_prog_aux *aux)
{
	return rcu_dereference_all(scx_root);
}
#endif	/* CONFIG_EXT_SUB_SCHED */