1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
4 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
5 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
6 */
7 #ifndef __SCX_COMPAT_H
8 #define __SCX_COMPAT_H
9
10 #include <bpf/btf.h>
11 #include <bpf/libbpf.h>
12 #include <fcntl.h>
13 #include <stdlib.h>
14 #include <unistd.h>
15
/*
 * Cached vmlinux BTF handle, lazily initialized by
 * __COMPAT_load_vmlinux_btf(). Declared weak so every translation unit that
 * includes this header shares one definition instead of colliding at link.
 */
struct btf *__COMPAT_vmlinux_btf __attribute__((weak));
17
__COMPAT_load_vmlinux_btf(void)18 static inline void __COMPAT_load_vmlinux_btf(void)
19 {
20 if (!__COMPAT_vmlinux_btf) {
21 __COMPAT_vmlinux_btf = btf__load_vmlinux_btf();
22 SCX_BUG_ON(!__COMPAT_vmlinux_btf, "btf__load_vmlinux_btf()");
23 }
24 }
25
/*
 * Look up enumerator @name inside enum type @type in vmlinux BTF.
 *
 * Returns true and stores the enumerator's value in *@v on success; returns
 * false if either the type or the enumerator doesn't exist in the running
 * kernel. Handles both 32bit (BTF_KIND_ENUM) and 64bit (BTF_KIND_ENUM64)
 * enums.
 */
static inline bool __COMPAT_read_enum(const char *type, const char *name, u64 *v)
{
	const struct btf_type *t;
	const char *ent_name;
	int idx, nr_ents;
	s32 tid;

	__COMPAT_load_vmlinux_btf();

	tid = btf__find_by_name(__COMPAT_vmlinux_btf, type);
	if (tid < 0)
		return false;

	t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
	SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);

	nr_ents = BTF_INFO_VLEN(t->info);

	if (btf_is_enum(t)) {
		struct btf_enum *ent = btf_enum(t);

		for (idx = 0; idx < nr_ents; idx++) {
			ent_name = btf__name_by_offset(__COMPAT_vmlinux_btf,
						       ent[idx].name_off);
			SCX_BUG_ON(!ent_name, "btf__name_by_offset()");
			if (strcmp(ent_name, name) == 0) {
				*v = ent[idx].val;
				return true;
			}
		}
	} else if (btf_is_enum64(t)) {
		struct btf_enum64 *ent = btf_enum64(t);

		for (idx = 0; idx < nr_ents; idx++) {
			ent_name = btf__name_by_offset(__COMPAT_vmlinux_btf,
						       ent[idx].name_off);
			SCX_BUG_ON(!ent_name, "btf__name_by_offset()");
			if (strcmp(ent_name, name) == 0) {
				*v = btf_enum64_value(&ent[idx]);
				return true;
			}
		}
	}

	return false;
}
68
/*
 * Evaluate to the value of enumerator @__ent in kernel enum @__type, or 0 if
 * the running kernel's BTF doesn't define it. Lets schedulers reference
 * flags that may not exist on older kernels.
 */
#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	u64 __val = 0;								\
	__COMPAT_read_enum(__type, __ent, &__val);				\
	__val;									\
})
75
__COMPAT_has_ksym(const char * ksym)76 static inline bool __COMPAT_has_ksym(const char *ksym)
77 {
78 __COMPAT_load_vmlinux_btf();
79 return btf__find_by_name(__COMPAT_vmlinux_btf, ksym) >= 0;
80 }
81
__COMPAT_struct_has_field(const char * type,const char * field)82 static inline bool __COMPAT_struct_has_field(const char *type, const char *field)
83 {
84 const struct btf_type *t;
85 const struct btf_member *m;
86 const char *n;
87 s32 tid;
88 int i;
89
90 __COMPAT_load_vmlinux_btf();
91 tid = btf__find_by_name_kind(__COMPAT_vmlinux_btf, type, BTF_KIND_STRUCT);
92 if (tid < 0)
93 return false;
94
95 t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
96 SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);
97
98 m = btf_members(t);
99
100 for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
101 n = btf__name_by_offset(__COMPAT_vmlinux_btf, m[i].name_off);
102 SCX_BUG_ON(!n, "btf__name_by_offset()");
103 if (!strcmp(n, field))
104 return true;
105 }
106
107 return false;
108 }
109
/*
 * Resolve a scx_ops_flags enumerator from the running kernel's BTF,
 * evaluating to 0 when the kernel doesn't define it. Each flag below thus
 * degrades gracefully on older kernels.
 */
#define SCX_OPS_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_ops_flags", #name)

#define SCX_OPS_KEEP_BUILTIN_IDLE SCX_OPS_FLAG(SCX_OPS_KEEP_BUILTIN_IDLE)
#define SCX_OPS_ENQ_LAST SCX_OPS_FLAG(SCX_OPS_ENQ_LAST)
#define SCX_OPS_ENQ_EXITING SCX_OPS_FLAG(SCX_OPS_ENQ_EXITING)
#define SCX_OPS_SWITCH_PARTIAL SCX_OPS_FLAG(SCX_OPS_SWITCH_PARTIAL)
#define SCX_OPS_ENQ_MIGRATION_DISABLED SCX_OPS_FLAG(SCX_OPS_ENQ_MIGRATION_DISABLED)
#define SCX_OPS_ALLOW_QUEUED_WAKEUP SCX_OPS_FLAG(SCX_OPS_ALLOW_QUEUED_WAKEUP)
#define SCX_OPS_BUILTIN_IDLE_PER_NODE SCX_OPS_FLAG(SCX_OPS_BUILTIN_IDLE_PER_NODE)
#define SCX_OPS_ALWAYS_ENQ_IMMED SCX_OPS_FLAG(SCX_OPS_ALWAYS_ENQ_IMMED)

/* Same pattern for scx_pick_idle_cpu() flags. */
#define SCX_PICK_IDLE_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_pick_idle_cpu_flags", #name)

#define SCX_PICK_IDLE_CORE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_CORE)
#define SCX_PICK_IDLE_IN_NODE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_IN_NODE)
125
/*
 * Read the current hotplug sequence number from
 * /sys/kernel/sched_ext/hotplug_seq.
 *
 * Returns -ENOENT if the file can't be opened (e.g. sched_ext unavailable);
 * aborts via SCX_BUG_ON() on read or parse failures.
 *
 * Fixes vs. original: %zd is the correct conversion for ssize_t (%ld is only
 * coincidentally right on LP64); strtol() matches the signed long return type
 * where strtoul() would silently wrap negative input; the fd is closed before
 * the fatal length check so it can't be leaked.
 */
static inline long scx_hotplug_seq(void)
{
	int fd;
	char buf[32];
	char *endptr;
	ssize_t len;
	long val;

	fd = open("/sys/kernel/sched_ext/hotplug_seq", O_RDONLY);
	if (fd < 0)
		return -ENOENT;

	len = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	SCX_BUG_ON(len <= 0, "read failed (%zd)", len);
	buf[len] = 0;

	/* expect a decimal number optionally followed by a newline */
	errno = 0;
	val = strtol(buf, &endptr, 10);
	SCX_BUG_ON(errno == ERANGE || endptr == buf ||
		   (*endptr != '\n' && *endptr != '\0'), "invalid num hotplug events: %ld", val);

	return val;
}
150
151 /*
152 * struct sched_ext_ops can change over time. If compat.bpf.h::SCX_OPS_DEFINE()
153 * is used to define ops and compat.h::SCX_OPS_LOAD/ATTACH() are used to load
154 * and attach it, backward compatibility is automatically maintained where
155 * reasonable.
156 *
157 * ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is
158 * the current minimum required kernel version.
159 *
160 * COMPAT:
161 * - v6.17: ops.cgroup_set_bandwidth()
162 * - v6.19: ops.cgroup_set_idle()
163 * - v7.1: ops.sub_attach(), ops.sub_detach(), ops.sub_cgroup_id
164 */
/*
 * If the skeleton implements optional operation @__op but the running
 * kernel's sched_ext_ops doesn't have the field, warn and clear the op so the
 * struct_ops map still loads. Factors out the repeated per-op compat stanza.
 */
#define __SCX_OPS_DROP_IF_MISSING(__skel, __ops_name, __op)			\
	do {									\
		if ((__skel)->struct_ops.__ops_name->__op &&			\
		    !__COMPAT_struct_has_field("sched_ext_ops", #__op)) {	\
			fprintf(stderr, "WARNING: kernel doesn't support ops." #__op "()\n"); \
			(__skel)->struct_ops.__ops_name->__op = NULL;		\
		}								\
	} while (0)

/*
 * Open the BPF skeleton, record the current hotplug sequence and initialize
 * kernel enums, then drop any optional ops the running kernel doesn't
 * support. Evaluates to the opened skeleton pointer; aborts on hard failures
 * (kernel too old, open failure).
 */
#define SCX_OPS_OPEN(__ops_name, __scx_name) ({					\
	struct __scx_name *__skel;						\
										\
	SCX_BUG_ON(!__COMPAT_struct_has_field("sched_ext_ops", "dump"),		\
		   "sched_ext_ops.dump() missing, kernel too old?");		\
										\
	__skel = __scx_name##__open();						\
	SCX_BUG_ON(!__skel, "Could not open " #__scx_name);			\
	__skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq();		\
	SCX_ENUM_INIT(__skel);							\
	__SCX_OPS_DROP_IF_MISSING(__skel, __ops_name, cgroup_set_bandwidth);	\
	__SCX_OPS_DROP_IF_MISSING(__skel, __ops_name, cgroup_set_idle);		\
	__SCX_OPS_DROP_IF_MISSING(__skel, __ops_name, sub_attach);		\
	__SCX_OPS_DROP_IF_MISSING(__skel, __ops_name, sub_detach);		\
	/* sub_cgroup_id is a plain value, not a callback - zero it instead */	\
	if (__skel->struct_ops.__ops_name->sub_cgroup_id > 0 &&			\
	    !__COMPAT_struct_has_field("sched_ext_ops", "sub_cgroup_id")) {	\
		fprintf(stderr, "WARNING: kernel doesn't support ops.sub_cgroup_id\n"); \
		__skel->struct_ops.__ops_name->sub_cgroup_id = 0;		\
	}									\
	__skel;									\
})
202
203 /*
204 * Associate non-struct_ops BPF programs with the scheduler's struct_ops map so
205 * that scx_prog_sched() can determine which scheduler a BPF program belongs
206 * to. Requires libbpf >= 1.7.
207 */
#if LIBBPF_MAJOR_VERSION > 1 || \
	(LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 7)
/*
 * Associate @prog with the struct_ops @map named @ops_name. Failure is
 * reported but not fatal - the scheduler still works, only per-scheduler
 * program attribution is lost.
 */
static inline void __scx_ops_assoc_prog(struct bpf_program *prog,
					struct bpf_map *map,
					const char *ops_name)
{
	s32 err = bpf_program__assoc_struct_ops(prog, map, NULL);

	if (err)
		fprintf(stderr,
			"ERROR: Failed to associate %s with %s: %d\n",
			bpf_program__name(prog), ops_name, err);
}
#else
/* libbpf too old for program association - no-op stub. */
static inline void __scx_ops_assoc_prog(struct bpf_program *prog,
					struct bpf_map *map,
					const char *ops_name)
{
	/* suppress -Wunused-parameter on the fallback path */
	(void)prog;
	(void)map;
	(void)ops_name;
}
#endif
227
/*
 * Load the skeleton, then associate every non-struct_ops BPF program with the
 * scheduler's struct_ops map so scx_prog_sched() can attribute them.
 * UEI_SET_SIZE() sizes the exit-info buffer before load. Aborts on load
 * failure; struct_ops programs are skipped since they attach via the map.
 */
#define SCX_OPS_LOAD(__skel, __ops_name, __scx_name, __uei_name) ({		\
	struct bpf_program *__prog;						\
	UEI_SET_SIZE(__skel, __ops_name, __uei_name);				\
	SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel");	\
	bpf_object__for_each_program(__prog, (__skel)->obj) {			\
		if (bpf_program__type(__prog) == BPF_PROG_TYPE_STRUCT_OPS)	\
			continue;						\
		__scx_ops_assoc_prog(__prog, (__skel)->maps.__ops_name,		\
				     #__ops_name);				\
	}									\
})
239
240 /*
241 * New versions of bpftool now emit additional link placeholders for BPF maps,
242 * and set up BPF skeleton in such a way that libbpf will auto-attach BPF maps
243 * automatically, assuming libbpf is recent enough (v1.5+). Old libbpf will do
244 * nothing with those links and won't attempt to auto-attach maps.
245 *
246 * To maintain compatibility with older libbpf while avoiding trying to attach
247 * twice, disable the autoattach feature on newer libbpf.
248 */
#if LIBBPF_MAJOR_VERSION > 1 || \
	(LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 5)
/* libbpf v1.5+ would auto-attach the map; disable so we attach exactly once */
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name) \
	bpf_map__set_autoattach((__skel)->maps.__ops_name, false)
#else
/* Older libbpf never auto-attaches maps - nothing to disable. */
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name) do {} while (0)
#endif
256
/*
 * Attach the skeleton's programs and then the struct_ops map itself,
 * evaluating to the resulting struct_ops bpf_link. Map auto-attach is
 * disabled first so libbpf v1.5+ doesn't attach the map a second time.
 * Aborts on any attach failure.
 */
#define SCX_OPS_ATTACH(__skel, __ops_name, __scx_name) ({			\
	struct bpf_link *__link;						\
	__SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name);			\
	SCX_BUG_ON(__scx_name##__attach((__skel)), "Failed to attach skel");	\
	__link = bpf_map__attach_struct_ops((__skel)->maps.__ops_name);		\
	SCX_BUG_ON(!__link, "Failed to attach struct_ops");			\
	__link;									\
})
265
266 #endif /* __SCX_COMPAT_H */
267