// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"

int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;
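/* unkn_idx is a plain (non-const) global, so its runtime value is unknown
 * to the verifier and arr[unkn_idx] can't be proven in-bounds. By contrast,
 * call_dead_subprog is const volatile: libbpf places it in .rodata, the
 * verifier sees it as a known constant (false), and it can eliminate the
 * guarded call to global_dead() as dead code.
 */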

__noinline long global_bad(void)
{
	return arr[unkn_idx]; /* BOOM */
}

__noinline long global_good(void)
{
	return arr[0];
}

__noinline long global_calls_bad(void)
{
	return global_good() + global_bad() /* does BOOM indirectly */;
}

__noinline long global_calls_good_only(void)
{
	return global_good();
}

__noinline long global_dead(void)
{
	return arr[0] * 2;
}

SEC("?raw_tp")
__success __log_level(2)
/* main prog is validated completely first */
__msg("('global_calls_good_only') is global and assumed valid.")
/* eventually global_good() is transitively validated as well */
__msg("Validating global_good() func")
__msg("('global_good') is safe for any args that match its prototype")
int chained_global_func_calls_success(void)
{
	int sum = 0;

	if (call_dead_subprog)
		sum += global_dead();
	return global_calls_good_only() + sum;
}

SEC("?raw_tp")
__failure __log_level(2)
/* main prog validated successfully first */
__msg("('global_calls_bad') is global and assumed valid.")
/* eventually we validate global_bad() and fail */
__msg("Validating global_bad() func")
__msg("math between map_value pointer and register") /* BOOM */
int chained_global_func_calls_bad(void)
{
	return global_calls_bad();
}

/* Do an out-of-bounds access, forcing the verifier to fail verification
 * if this global func is ever called.
 */
__noinline int global_unsupp(const int *mem)
{
	if (!mem)
		return 0;
	return mem[100]; /* BOOM */
}

const volatile bool skip_unsupp_global = true;

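/* Because skip_unsupp_global is const volatile true, the verifier knows the
 * branch below is never taken; the call to global_unsupp() is dead code, so
 * global_unsupp() is never validated and the program loads successfully
 * despite the out-of-bounds access inside it.
 */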
SEC("?raw_tp")
__success
int guarded_unsupp_global_called(void)
{
	if (!skip_unsupp_global)
		return global_unsupp(NULL);
	return 0;
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("Func#1 ('global_unsupp') is global and assumed valid.")
__msg("Validating global_unsupp() func#1...")
__msg("value is outside of the allowed memory range")
int unguarded_unsupp_global_called(void)
{
	int x = 0;

	return global_unsupp(&x);
}

long stack[128];

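/* Pointer arguments to global subprogs are assumed nullable unless tagged
 * with __arg_nonnull, so the verifier flags the unchecked dereference below
 * ("invalid mem access 'mem_or_null'").
 */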
__weak int subprog_nullable_ptr_bad(int *p)
{
	return (*p) * 2; /* bad, missing null check */
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
	int x = 42;

	return subprog_nullable_ptr_bad(&x);
}

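/* __arg_nonnull (defined in bpf_helpers.h as a btf_decl_tag("arg:nonnull")
 * attribute) promises the verifier that callers always pass non-NULL
 * pointers, so no NULL checks are needed inside the subprog.
 */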
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
	return (*p1) * (*p2); /* good, no need for NULL checks */
}

int x = 47;

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
	int y = 74;

	return subprog_nonnull_ptr_good(&x, &y);
}

/* This global subprog can now be called from many types of entry progs, each
 * with a different context type.
 */
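/* __arg_ctx (a btf_decl_tag("arg:ctx") attribute from bpf_helpers.h) tells
 * the verifier to treat the argument as the program's context pointer
 * (PTR_TO_CTX), regardless of the declared pointer type.
 */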
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}

__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
{
	return 0;
}

__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
{
	return 0;
}

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?raw_tp.w")
__success __log_level(2)
int arg_tag_ctx_raw_tp_writable(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?tp_btf/sys_enter")
__success __log_level(2)
int arg_tag_ctx_raw_tp_btf(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

struct whatever { };

__weak int tp_whatever(struct whatever *ctx __arg_ctx)
{
	return 0;
}

SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + tp_whatever(ctx);
}

__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
{
	return 0;
}

__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       kprobe_subprog_pt_regs(ctx) +
	       kprobe_subprog_typedef(ctx);
}

__weak int perf_subprog_regs(
#if defined(bpf_target_riscv)
	struct user_regs_struct *ctx __arg_ctx
#elif defined(bpf_target_s390)
	/* the user_pt_regs typedef is an anonymous struct, so only `void *` works */
	void *ctx __arg_ctx
#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
	struct user_pt_regs *ctx __arg_ctx
#else
	struct pt_regs *ctx __arg_ctx
#endif
)
{
	return 0;
}

__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
{
	return 0;
}

SEC("?perf_event")
__success __log_level(2)
int arg_tag_ctx_perf(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       perf_subprog_regs(ctx) +
	       perf_subprog_typedef(ctx) +
	       perf_subprog_canonical(ctx);
}

__weak int iter_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
{
	return 0;
}

SEC("?iter/task")
__success __log_level(2)
int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
{
	return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
}

__weak int tracing_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
{
	return 0;
}

int acc;

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fentry)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fexit)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fmod_ret)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?struct_ops/test_1")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_struct_ops)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

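/* Register arg_tag_ctx_struct_ops as bpf_dummy_ops::test_1 so the
 * ?struct_ops program above has a concrete attach point.
 */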
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)arg_tag_ctx_struct_ops,
};

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall(void *ctx)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}

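/* The verifier recognizes struct bpf_dynptr pointer arguments of global
 * subprogs by BTF type name, so an initialized dynptr can be passed across
 * the subprog boundary and used with dynptr helpers/kfuncs inside.
 */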
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
	long *d, t, buf[1] = {};

	/* direct pointer into the dynptr's underlying data, NULL on failure */
	d = bpf_dynptr_data(dptr, 0, sizeof(long));
	if (!d)
		return 0;

	t = *d + 1;

	/* returns a pointer to the data, or to buf if a copy was needed */
	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
	if (!d)
		return t;

	t = *d + 2;

	return t;
}

SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
	struct bpf_dynptr dptr;

	bpf_dynptr_from_xdp(ctx, 0, &dptr);

	return subprog_dynptr(&dptr);
}

char _license[] SEC("license") = "GPL";