Lines matching refs: sch
1174 static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
1177 static __printf(4, 5) void scx_exit(struct scx_sched *sch,
1184 scx_vexit(sch, kind, exit_code, fmt, args);
1191 struct scx_sched *sch;
1195 sch = rcu_dereference(scx_root);
1196 if (sch) {
1198 scx_vexit(sch, kind, exit_code, fmt, args);
1204 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
1207 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op)
1237 struct scx_sched *sch = scx_root;
1239 return sch->global_dsqs[cpu_to_node(task_cpu(p))];
1242 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
1244 return rhashtable_lookup_fast(&sch->dsq_hash, &dsq_id, dsq_hash_params);
1291 #define SCX_CALL_OP(sch, mask, op, rq, args...) \
1297 (sch)->ops.op(args); \
1300 (sch)->ops.op(args); \
1306 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \
1308 __typeof__((sch)->ops.op(args)) __ret; \
1314 __ret = (sch)->ops.op(args); \
1317 __ret = (sch)->ops.op(args); \
1335 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \
1339 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
1343 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \
1345 __typeof__((sch)->ops.op(task, ##args)) __ret; \
1348 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
1353 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \
1355 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
1359 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
1660 * @sch: scx_sched to account events for
1666 #define scx_add_event(sch, name, cnt) do { \
1667 this_cpu_add((sch)->event_stats_cpu->name, (cnt)); \
1673 * @sch: scx_sched to account events for
1679 #define __scx_add_event(sch, name, cnt) do { \
1680 __this_cpu_add((sch)->event_stats_cpu->name, (cnt)); \
1705 static void scx_read_events(struct scx_sched *sch,
1750 * @sch: scx_sched to abort on error
1758 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
1763 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
1773 * The same as ops_cpu_valid() but @sch is implicit.
1787 * @sch: scx_sched to error out on error
1798 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
1803 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
1951 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1963 scx_error(sch, "attempting to dispatch to a destroyed dsq");
1980 scx_error(sch, "cannot use vtime ordering for built-in DSQs");
1994 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
2016 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
2132 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
2144 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
2153 dsq = find_user_dsq(sch, dsq_id);
2156 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]",
2194 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
2199 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
2240 dispatch_enqueue(sch, dsq, p,
2259 struct scx_sched *sch = scx_root;
2278 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
2286 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
2288 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
2293 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
2295 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
2299 if (unlikely(!SCX_HAS_OP(sch, enqueue)))
2312 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
2326 direct_dispatch(sch, p, enq_flags);
2338 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
2344 dispatch_enqueue(sch, find_global_dsq(p), p, enq_flags);
2377 struct scx_sched *sch = scx_root;
2407 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
2408 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
2419 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
2424 struct scx_sched *sch = scx_root;
2443 if (SCX_HAS_OP(sch, dequeue))
2444 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
2473 struct scx_sched *sch = scx_root;
2494 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
2496 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
2499 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
2500 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);
2517 struct scx_sched *sch = scx_root;
2520 if (SCX_HAS_OP(sch, yield))
2521 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
2528 struct scx_sched *sch = scx_root;
2531 if (SCX_HAS_OP(sch, yield))
2532 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
2612 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
2634 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2647 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2728 * @sch: scx_sched being operated on
2742 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
2756 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2790 dispatch_enqueue(sch, dst_dsq, p, enq_flags);
2825 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
2858 if (task_can_run_on_remote_rq(sch, p, rq, false)) {
2869 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
2873 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
2878 * @sch: scx_sched being operated on
2891 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
2906 dispatch_enqueue(sch, dst_dsq, p,
2912 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2913 dispatch_enqueue(sch, find_global_dsq(p), p,
2951 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
2991 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
3045 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p);
3048 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
3050 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
3053 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
3061 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
3071 struct scx_sched *sch = scx_root;
3081 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
3089 if (SCX_HAS_OP(sch, cpu_acquire))
3090 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq,
3118 if (consume_global_dsq(sch, rq))
3121 if (unlikely(!SCX_HAS_OP(sch, dispatch)) ||
3137 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq,
3140 flush_dispatch_buf(sch, rq);
3148 if (consume_global_dsq(sch, rq))
3172 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) {
3174 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
3234 struct scx_sched *sch = scx_root;
3239 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
3241 dispatch_to_local_dsq(sch, rq, dsq, p,
3248 struct scx_sched *sch = scx_root;
3262 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
3263 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p);
3304 struct scx_sched *sch = scx_root;
3313 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
3335 if (SCX_HAS_OP(sch, cpu_release)) {
3341 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq,
3351 struct scx_sched *sch = scx_root;
3355 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3356 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true);
3368 dispatch_enqueue(sch, &rq->scx.local_dsq, p,
3380 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
3455 struct scx_sched *sch = scx_root;
3457 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) {
3460 sch->warned_zero_slice = true;
3491 struct scx_sched *sch = scx_root;
3498 if (SCX_HAS_OP(sch, core_sched_before) &&
3500 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before,
3511 struct scx_sched *sch = scx_root;
3528 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) {
3536 cpu = SCX_CALL_OP_TASK_RET(sch,
3542 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
3559 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
3572 struct scx_sched *sch = scx_root;
3584 if (SCX_HAS_OP(sch, set_cpumask))
3585 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL,
3591 struct scx_sched *sch = scx_root;
3601 if (unlikely(!sch))
3605 scx_idle_update_selcpu_topology(&sch->ops);
3607 if (online && SCX_HAS_OP(sch, cpu_online))
3608 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);
3609 else if (!online && SCX_HAS_OP(sch, cpu_offline))
3610 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu);
3612 scx_exit(sch, SCX_EXIT_UNREG_KERN,
3641 struct scx_sched *sch;
3647 sch = rcu_dereference_bh(scx_root);
3648 if (unlikely(!sch))
3658 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3688 struct scx_sched *sch;
3694 sch = rcu_dereference_bh(scx_root);
3695 if (unlikely(!sch))
3703 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3713 struct scx_sched *sch = scx_root;
3724 } else if (SCX_HAS_OP(sch, tick)) {
3725 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr);
3792 struct scx_sched *sch = scx_root;
3797 if (SCX_HAS_OP(sch, init_task)) {
3803 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL,
3806 ret = ops_sanitize_err(sch, "init_task", ret);
3834 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
3845 struct scx_sched *sch = scx_root;
3862 if (SCX_HAS_OP(sch, enable))
3863 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p);
3866 if (SCX_HAS_OP(sch, set_weight))
3867 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
3873 struct scx_sched *sch = scx_root;
3879 if (SCX_HAS_OP(sch, disable))
3880 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
3886 struct scx_sched *sch = scx_root;
3909 if (SCX_HAS_OP(sch, exit_task))
3910 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
4016 struct scx_sched *sch = scx_root;
4021 if (SCX_HAS_OP(sch, set_weight))
4022 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
4032 struct scx_sched *sch = scx_root;
4040 if (SCX_HAS_OP(sch, set_cpumask))
4041 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq,
4099 struct scx_sched *sch = scx_root;
4107 if (SCX_HAS_OP(sch, cgroup_init)) {
4114 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init,
4117 ret = ops_sanitize_err(sch, "cgroup_init", ret);
4131 struct scx_sched *sch = scx_root;
4137 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
4139 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
4148 struct scx_sched *sch = scx_root;
4173 if (SCX_HAS_OP(sch, cgroup_prep_move)) {
4174 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED,
4188 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4190 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
4196 return ops_sanitize_err(sch, "cgroup_prep_move", ret);
4201 struct scx_sched *sch = scx_root;
4210 if (SCX_HAS_OP(sch, cgroup_move) &&
4212 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL,
4225 struct scx_sched *sch = scx_root;
4233 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4235 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
4245 struct scx_sched *sch = scx_root;
4249 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
4251 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
4267 struct scx_sched *sch = scx_root;
4271 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) &&
4275 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL,
4370 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
4377 dsq = find_user_dsq(sch, dsq_id);
4384 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4389 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
4410 static void scx_cgroup_exit(struct scx_sched *sch)
4430 if (!sch->ops.cgroup_exit)
4437 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
4446 static int scx_cgroup_init(struct scx_sched *sch)
4471 if (!sch->ops.cgroup_init) {
4480 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
4484 scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
4501 static void scx_cgroup_exit(struct scx_sched *sch) {}
4502 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
4569 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
4574 kthread_stop(sch->helper->task);
4575 free_percpu(sch->event_stats_cpu);
4578 kfree(sch->global_dsqs[node]);
4579 kfree(sch->global_dsqs);
4581 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
4586 destroy_dsq(sch, dsq->id);
4592 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4593 free_exit_info(sch->exit_info);
4594 kfree(sch);
4599 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4601 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
4602 queue_rcu_work(system_unbound_wq, &sch->rcu_work);
4619 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4623 scx_read_events(sch, &events);
4689 struct scx_sched *sch;
4693 sch = rcu_dereference(scx_root);
4694 if (unlikely(!sch)) {
4708 scx_error(sch, "RCU CPU stall detected!");
4726 struct scx_sched *sch;
4730 sch = rcu_dereference(scx_root);
4731 if (unlikely(!sch))
4755 scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s);
4801 struct scx_sched *sch;
4806 sch = rcu_dereference_bh(scx_root);
4814 if (sch)
4815 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
4821 if (sch)
4822 scx_add_event(sch, SCX_EV_BYPASS_DURATION,
4942 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
4943 struct scx_exit_info *ei = sch->exit_info;
4948 kind = atomic_read(&sch->exit_kind);
4953 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
4968 sch->exit_info->msg);
4990 scx_cgroup_exit(sch);
5035 bitmap_zero(sch->has_op, SCX_OPI_END);
5041 sch->ops.name, ei->reason);
5044 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
5050 sch->ops.name, ei->reason);
5053 if (sch->ops.exit)
5054 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
5071 kobject_del(&sch->kobj);
5087 struct scx_sched *sch;
5093 sch = rcu_dereference(scx_root);
5094 if (sch) {
5095 atomic_try_cmpxchg(&sch->exit_kind, &none, kind);
5096 kthread_queue_work(sch->helper, &sch->disable_work);
5216 struct scx_sched *sch = scx_root;
5239 if (SCX_HAS_OP(sch, dump_task)) {
5241 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p);
5258 struct scx_sched *sch = scx_root;
5287 if (SCX_HAS_OP(sch, dump)) {
5289 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx);
5310 if (idle && !SCX_HAS_OP(sch, dump_cpu))
5344 if (SCX_HAS_OP(sch, dump_cpu)) {
5346 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
5381 scx_read_events(sch, &events);
5401 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work);
5402 struct scx_exit_info *ei = sch->exit_info;
5405 scx_dump_state(ei, sch->ops.exit_dump_len);
5407 kthread_queue_work(sch->helper, &sch->disable_work);
5410 static void scx_vexit(struct scx_sched *sch,
5414 struct scx_exit_info *ei = sch->exit_info;
5417 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
5434 irq_work_queue(&sch->error_irq_work);
5439 struct scx_sched *sch;
5442 sch = kzalloc(sizeof(*sch), GFP_KERNEL);
5443 if (!sch)
5446 sch->exit_info = alloc_exit_info(ops->exit_dump_len);
5447 if (!sch->exit_info) {
5452 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
5456 sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]),
5458 if (!sch->global_dsqs) {
5473 sch->global_dsqs[node] = dsq;
5476 sch->event_stats_cpu = alloc_percpu(struct scx_event_stats);
5477 if (!sch->event_stats_cpu)
5480 sch->helper = kthread_run_worker(0, "sched_ext_helper");
5481 if (!sch->helper)
5483 sched_set_fifo(sch->helper->task);
5485 atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
5486 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
5487 kthread_init_work(&sch->disable_work, scx_disable_workfn);
5488 sch->ops = *ops;
5489 ops->priv = sch;
5491 sch->kobj.kset = scx_kset;
5492 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
5496 return sch;
5499 kthread_stop(sch->helper->task);
5501 free_percpu(sch->event_stats_cpu);
5504 kfree(sch->global_dsqs[node]);
5505 kfree(sch->global_dsqs);
5507 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
5509 free_exit_info(sch->exit_info);
5511 kfree(sch);
5515 static void check_hotplug_seq(struct scx_sched *sch,
5528 scx_exit(sch, SCX_EXIT_UNREG_KERN,
5536 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
5543 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5553 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
5565 struct scx_sched *sch;
5584 sch = scx_alloc_and_add_sched(ops);
5585 if (IS_ERR(sch)) {
5586 ret = PTR_ERR(sch);
5612 rcu_assign_pointer(scx_root, sch);
5616 if (sch->ops.init) {
5617 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL);
5619 ret = ops_sanitize_err(sch, "init", ret);
5621 scx_error(sch, "ops.init() failed (%d)", ret);
5628 set_bit(i, sch->has_op);
5630 check_hotplug_seq(sch, ops);
5635 ret = validate_ops(sch, ops);
5669 set_bit(i, sch->has_op);
5671 if (sch->ops.cpu_acquire || sch->ops.cpu_release)
5672 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
5697 ret = scx_cgroup_init(sch);
5718 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
5771 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
5779 sch->ops.name, scx_switched_all() ? "" : " (partial)");
5780 kobject_uevent(&sch->kobj, KOBJ_ADD);
5804 * completion. sch's base reference will be put by bpf_scx_unreg().
5806 scx_error(sch, "scx_enable() failed (%d)", ret);
5807 kthread_flush_work(&sch->disable_work);
5944 struct scx_sched *sch = ops->priv;
5947 kthread_flush_work(&sch->disable_work);
5948 kobject_put(&sch->kobj);
6220 struct scx_sched *sch = scx_root;
6236 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
6248 log_lvl, sch->ops.name, scx_enable_state_str[state], all,
6475 struct scx_sched *sch = scx_root;
6528 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p);
6541 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
6608 struct scx_sched *sch = scx_root;
6615 flush_dispatch_buf(sch, dspc->rq);
6617 dsq = find_user_dsq(sch, dsq_id);
6619 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
6623 if (consume_dispatch_q(sch, dspc->rq, dsq)) {
6832 struct scx_sched *sch;
6850 sch = rcu_dereference(scx_root);
6851 if (sch)
6852 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
6952 struct scx_sched *sch;
6958 sch = rcu_dereference_sched(scx_root);
6959 if (unlikely(!sch)) {
6970 if (ops_cpu_valid(sch, cpu, NULL)) {
6975 dsq = find_user_dsq(sch, dsq_id);
6998 struct scx_sched *sch;
7001 sch = rcu_dereference(scx_root);
7002 if (sch)
7003 destroy_dsq(sch, dsq_id);
7021 struct scx_sched *sch;
7034 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held());
7035 if (unlikely(!sch))
7041 kit->dsq = find_user_dsq(sch, dsq_id);
7519 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
7527 e_cpu = per_cpu_ptr(sch->event_stats_cpu, cpu);
7548 struct scx_sched *sch;
7552 sch = rcu_dereference(scx_root);
7553 if (sch)
7554 scx_read_events(sch, &e_sys);
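
Taken together, the matches above repeat one calling convention: code obtains a struct scx_sched *sch (either as a parameter or via rcu_dereference(scx_root)), bails out if it is NULL, and invokes an ops callback only when SCX_HAS_OP(sch, op) reports that the loaded scheduler implements it, wrapping the call in one of the SCX_CALL_OP*() macros. The snippet below is a minimal, self-contained userspace sketch of that guard-then-call pattern; every name in it (demo_sched, DEMO_HAS_OP, DEMO_CALL_OP, ...) is a hypothetical stand-in, not the kernel's own types or macros.

```c
/*
 * Standalone sketch (userspace C, hypothetical names) of the pattern shown
 * by the matches above: check a possibly-NULL scheduler pointer, then call
 * an optional ops callback only if its bit is set in a has_op bitmap.
 * Simplified stand-ins only -- not the kernel's definitions.
 */
#include <stdio.h>

struct demo_ops {
	void (*enqueue)(int task_id, unsigned long flags);
	void (*dequeue)(int task_id);
};

enum { DEMO_OP_ENQUEUE, DEMO_OP_DEQUEUE, DEMO_OP_MAX };

struct demo_sched {
	struct demo_ops ops;
	unsigned long   has_op;		/* one bit per optional callback */
};

/* Analogue of SCX_HAS_OP(sch, op): only call ops the scheduler provides. */
#define DEMO_HAS_OP(sch, idx)		((sch)->has_op & (1UL << (idx)))
/* Analogue of SCX_CALL_OP*(): the kernel versions also track call context. */
#define DEMO_CALL_OP(sch, member, ...)	((sch)->ops.member(__VA_ARGS__))

static void demo_enqueue(int task_id, unsigned long flags)
{
	printf("enqueue task %d (flags=%#lx)\n", task_id, flags);
}

int main(void)
{
	struct demo_sched sched = {
		.ops    = { .enqueue = demo_enqueue },
		.has_op = 1UL << DEMO_OP_ENQUEUE,
	};
	/* In the kernel this pointer comes from rcu_dereference(scx_root)
	 * and may be NULL, hence the guard before every use. */
	struct demo_sched *sch = &sched;

	if (sch && DEMO_HAS_OP(sch, DEMO_OP_ENQUEUE))
		DEMO_CALL_OP(sch, enqueue, 42, 0x1UL);

	if (sch && DEMO_HAS_OP(sch, DEMO_OP_DEQUEUE))	/* bit clear: skipped */
		DEMO_CALL_OP(sch, dequeue, 42);

	return 0;
}
```

In the listing itself, the corresponding bitmap is sch->has_op: it is cleared with bitmap_zero() at line 5035 and populated with set_bit() at lines 5628 and 5669 during enable, which is what makes the SCX_HAS_OP() checks at the call sites meaningful.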