/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMPAT_BPF_H
#define __SCX_COMPAT_BPF_H

#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	__type __ret = 0;							\
	if (bpf_core_enum_value_exists(__type, __ent))				\
		__ret = __ent;							\
	__ret;									\
})
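/*
 * For example, an enum member that the running kernel may lack can be read as
 * below. A minimal sketch; SCX_OPS_SWITCH_PARTIAL stands in for any member
 * present in the vmlinux.h used for the build:
 *
 *	u64 partial = __COMPAT_ENUM_OR_ZERO(enum scx_ops_flags,
 *					    SCX_OPS_SWITCH_PARTIAL);
 *
 * @partial is the member's value on kernels that define it and 0 otherwise.
 */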

/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
#define __COMPAT_scx_bpf_task_cgroup(p)						\
	(bpf_ksym_exists(scx_bpf_task_cgroup) ?					\
	 scx_bpf_task_cgroup((p)) : NULL)
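/*
 * A minimal usage sketch. On kernels without cgroup support the macro
 * evaluates to NULL, so check before use; the acquired cgroup must be
 * released:
 *
 *	struct cgroup *cgrp = __COMPAT_scx_bpf_task_cgroup(p);
 *
 *	if (cgrp) {
 *		... e.g. key per-cgroup state by cgrp->kn->id ...
 *		bpf_cgroup_release(cgrp);
 *	}
 */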

/*
 * v6.13: The verb `dispatch` was too overloaded and confusing. The kfuncs are
 * renamed to unload the verb.
 *
 * A build error is triggered if the old names are used. Binaries built with
 * the new names work on both new and old kernels. The compat macros will be
 * removed with the v6.15 release.
 *
 * scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
 * 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
 * Preserve the __COMPAT macros until v6.15.
 */
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;

#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags)				\
	(bpf_ksym_exists(scx_bpf_dsq_insert) ?					\
	 scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) :		\
	 scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))

#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ?				\
	 scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
	 scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))

#define scx_bpf_dsq_move_to_local(dsq_id)					\
	(bpf_ksym_exists(scx_bpf_dsq_move_to_local) ?				\
	 scx_bpf_dsq_move_to_local((dsq_id)) :					\
	 scx_bpf_consume___compat((dsq_id)))
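/*
 * A typical global-FIFO pair built on the wrappers above; a sketch assuming
 * SHARED_DSQ was created with scx_bpf_create_dsq() in ops.init(), with
 * illustrative callback names:
 *
 *	void BPF_STRUCT_OPS(sched_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 *	void BPF_STRUCT_OPS(sched_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */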

#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice)			\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ?				\
	 scx_bpf_dsq_move_set_slice((it__iter), (slice)) :			\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ?	\
	  scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) :	\
	  (void)0))

#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime)			\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ?				\
	 scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) :			\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ?	\
	  scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) :	\
	  (void)0))

#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_move) ?					\
	 scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) :		\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ?			\
	  scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))

#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_move_vtime) ?				\
	 scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) :	\
	 (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ?		\
	  scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))
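/*
 * The move wrappers pair with the DSQ iterator. A sketch that moves the first
 * task from an illustrative SRC_DSQ to the local DSQ, assuming the
 * BPF_FOR_EACH_ITER helper from common.bpf.h to name the implicit iterator:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SRC_DSQ, 0) {
 *		__COMPAT_scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER,
 *						    SCX_SLICE_DFL);
 *		if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
 *					      SCX_DSQ_LOCAL, 0))
 *			break;
 *	}
 */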

#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags)				\
	_Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")

#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags)		\
	_Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")

#define scx_bpf_consume(dsq_id) ({						\
	_Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
	false;									\
})

#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice)			\
	_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")

#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime)			\
	_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")

#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({		\
	_Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
	false;									\
})

#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({	\
	_Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
	false;									\
})

#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice)		\
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")

#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime)		\
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")

#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({	\
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
	false;									\
})

#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
	false;									\
})

/**
 * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
 * @enq_flags: enqueue flags from ops.enqueue()
 *
 * Tests the flag in a backward-compatible way. This __COMPAT helper will be
 * preserved until v6.16.
 *
 * Return: %true if SCX_ENQ_CPU_SELECTED is turned on in @enq_flags
 */
static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
{
#ifdef HAVE_SCX_ENQ_CPU_SELECTED
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum value SCX_ENQ_CPU_SELECTED exists.
	 */

	/*
	 * Temporarily suspend the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 * This avoids 'SCX_ENQ_CPU_SELECTED' being rewritten to
	 * '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED' is defined in
	 * 'scripts/gen_enums.py'.
	 */
#pragma push_macro("SCX_ENQ_CPU_SELECTED")
#undef SCX_ENQ_CPU_SELECTED
	u64 flag;

	/*
	 * When the kernel does not have SCX_ENQ_CPU_SELECTED,
	 * select_task_rq_scx() is never skipped, so treat this case as if the
	 * CPU has already been selected.
	 */
	if (!bpf_core_enum_value_exists(enum scx_enq_flags,
					SCX_ENQ_CPU_SELECTED))
		return true;

	flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
	return enq_flags & flag;

	/*
	 * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 */
#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
#else
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum value SCX_ENQ_CPU_SELECTED does NOT
	 * exist.
	 */
	return true;
#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
}
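/*
 * A usage sketch from ops.enqueue() with an illustrative callback name; on
 * kernels that predate SCX_ENQ_CPU_SELECTED the helper conservatively
 * returns true:
 *
 *	void BPF_STRUCT_OPS(sched_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		if (!__COMPAT_is_enq_cpu_selected(enq_flags)) {
 *			... ops.select_cpu() was skipped, pick a target CPU here ...
 *		}
 *		...
 *	}
 */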

#define scx_bpf_now()								\
	(bpf_ksym_exists(scx_bpf_now) ?						\
	 scx_bpf_now() :							\
	 bpf_ktime_get_ns())
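/*
 * For example, a vtime stamp that works on both new and old kernels could
 * read:
 *
 *	p->scx.dsq_vtime = scx_bpf_now();
 */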

/*
 * v6.15: Introduce event counters.
 *
 * Preserve the following macro until v6.17.
 */
#define __COMPAT_scx_bpf_events(events, size)					\
	(bpf_ksym_exists(scx_bpf_events) ?					\
	 scx_bpf_events(events, size) : ({}))
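/*
 * A read sketch, assuming the v6.15 struct scx_event_stats layout from
 * vmlinux.h; on older kernels the macro is a no-op and the struct is left
 * untouched, so zero-initialize it first:
 *
 *	struct scx_event_stats events = {};
 *
 *	__COMPAT_scx_bpf_events(&events, sizeof(events));
 *	bpf_printk("enq_slice_dfl=%lld", events.SCX_EV_ENQ_SLICE_DFL);
 */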

/*
 * v6.15: Introduce NUMA-aware kfuncs that operate on per-node idle
 * cpumasks.
 *
 * Preserve the following __COMPAT_scx_*_node macros until v6.17.
 */
#define __COMPAT_scx_bpf_nr_node_ids()						\
	(bpf_ksym_exists(scx_bpf_nr_node_ids) ?					\
	 scx_bpf_nr_node_ids() : 1U)

#define __COMPAT_scx_bpf_cpu_node(cpu)						\
	(bpf_ksym_exists(scx_bpf_cpu_node) ?					\
	 scx_bpf_cpu_node(cpu) : 0)

#define __COMPAT_scx_bpf_get_idle_cpumask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ?			\
	 scx_bpf_get_idle_cpumask_node(node) :					\
	 scx_bpf_get_idle_cpumask())

#define __COMPAT_scx_bpf_get_idle_smtmask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ?			\
	 scx_bpf_get_idle_smtmask_node(node) :					\
	 scx_bpf_get_idle_smtmask())

#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ?				\
	 scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) :		\
	 scx_bpf_pick_idle_cpu(cpus_allowed, flags))

#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ?				\
	 scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) :			\
	 scx_bpf_pick_any_cpu(cpus_allowed, flags))
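/*
 * A NUMA-aware idle-CPU pick that degrades to a global pick on kernels
 * without the per-node kfuncs; a sketch for use in ops.select_cpu():
 *
 *	s32 node = __COMPAT_scx_bpf_cpu_node(prev_cpu);
 *	s32 cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node, 0);
 *
 *	if (cpu < 0)
 *		cpu = prev_cpu;
 */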

/*
 * Define sched_ext_ops. This may be expanded to define multiple variants for
 * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
 */
#define SCX_OPS_DEFINE(__name, ...)						\
	SEC(".struct_ops.link")							\
	struct sched_ext_ops __name = {						\
		__VA_ARGS__,							\
	};
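/*
 * E.g. the enqueue/dispatch callbacks sketched earlier would be wired up as
 * below; the scheduler and callback names are illustrative:
 *
 *	SCX_OPS_DEFINE(minimal_ops,
 *		       .enqueue		= (void *)sched_enqueue,
 *		       .dispatch	= (void *)sched_dispatch,
 *		       .name		= "minimal");
 */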

#endif /* __SCX_COMPAT_BPF_H */