/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT
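
/*
 * Tracks whether the loaded BPF scheduler allows queued (remote) wakeups.
 * Consulted by scx_allow_ttwu_queue() below.
 */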
DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
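
/*
 * Hooks called from the core scheduler and the fork path.  scx_fork() may
 * fail; scx_cancel_fork() undoes scx_pre_fork()/scx_fork() when a later
 * step of fork fails.
 */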
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(int policy);
void init_sched_ext_class(void);
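
/*
 * Performance target set for @cpu by the BPF scheduler; 0 when SCX is
 * disabled.
 */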
static inline u32 scx_cpuperf_target(s32 cpu)
{
	if (scx_enabled())
		return cpu_rq(cpu)->scx.cpuperf_target;
	else
		return 0;
}
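
/* True if @p is currently scheduled by the ext (BPF) sched class. */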
static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}
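
/*
 * ttwu may use the queued wakeup path unless SCX is enabled, the BPF
 * scheduler hasn't allowed queued wakeups, and @p is on the ext class.
 */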
static inline bool scx_allow_ttwu_queue(const struct task_struct *p)
{
	return !scx_enabled() ||
		static_branch_likely(&scx_ops_allow_queued_wakeup) ||
		p->sched_class != &ext_sched_class;
}
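
/*
 * Core-scheduling priority comparison between two SCX tasks; @in_fi
 * presumably carries the forced-idle state, mirroring the equivalent
 * hooks of the other sched classes.
 */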
#ifdef CONFIG_SCHED_CORE
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi);
#endif

#else	/* CONFIG_SCHED_CLASS_EXT */

static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline void scx_rq_activate(struct rq *rq) {}
static inline void scx_rq_deactivate(struct rq *rq) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline bool scx_allow_ttwu_queue(const struct task_struct *p) { return true; }
static inline void init_sched_ext_class(void) {}

#endif	/* CONFIG_SCHED_CLASS_EXT */
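
/*
 * Propagates CPU idle state changes to the BPF scheduler; @do_notify
 * presumably gates whether the scheduler's idle callback is invoked.
 */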
#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);

static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle, do_notify);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
#endif
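
/*
 * cgroup hooks mirroring the cpu controller callbacks.  scx_tg_online()
 * and scx_cgroup_can_attach() may fail; a failed attach is rolled back
 * via scx_cgroup_cancel_attach().
 */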
#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_cgroup_move_task(struct task_struct *p);
void scx_cgroup_finish_attach(void);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
#else	/* CONFIG_EXT_GROUP_SCHED */
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_cgroup_move_task(struct task_struct *p) {}
static inline void scx_cgroup_finish_attach(void) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
#endif	/* CONFIG_EXT_GROUP_SCHED */
#endif	/* CONFIG_CGROUP_SCHED */