1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <vmlinux.h>
4 #include <bpf/bpf_helpers.h>
5 #include "bpf_misc.h"
6 #include "bpf_experimental.h"
7
8 /* From include/linux/filter.h */
9 #define MAX_BPF_STACK 512
10
11 #if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
12
/* Array map value: a bpf_timer (used by the async-callback tests below)
 * plus padding to enlarge the element.
 */
struct elem {
	struct bpf_timer t;
	char pad[256];
};
17
/* Single-entry array map holding one struct elem; the async-callback
 * tests look up entry 0 to obtain its embedded bpf_timer.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");
24
SEC("kprobe")
__description("Private stack, single prog")
__success
__arch_x86_64
/* x86-64: the private stack base is computed into r9 from a %gs-relative
 * address (presumably per-CPU storage — NOTE(review): confirm against the
 * JIT), and the store of 42 goes through r9 rather than rbp/rsp.
 */
__jited("	movabsq	$0x{{.*}}, %r9")
__jited("	addq	%gs:{{.*}}, %r9")
__jited("	movl	$0x2a, %edi")
__jited("	movq	%rdi, -0x100(%r9)")
__arch_arm64
/* arm64: the private stack base is materialized in x27 (offset by the
 * thread-pointer register TPIDR_EL0/EL1) and the store of 42 goes
 * through x27.
 */
__jited("	stp	x25, x27, [sp, {{.*}}]!")
__jited("	mov	x27, {{.*}}")
__jited("	movk	x27, {{.*}}, lsl #16")
__jited("	movk	x27, {{.*}}")
__jited("	mrs	x10, TPIDR_EL{{[0-1]}}")
__jited("	add	x27, x27, x10")
__jited("	add	x25, x27, {{.*}}")
__jited("	mov	x0, #0x2a")
__jited("	str	x0, [x27]")
__jited("...")
__jited("	ldp	x25, x27, [sp], {{.*}}")
/* Store 42 at stack depth 256; the jited patterns above assert the
 * access is redirected to the private stack.
 */
__naked void private_stack_single_prog(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 256) = r1;	\
	r0 = 0;				\
	exit;				\
"	::: __clobber_all);
}
54
SEC("raw_tp")
__description("No private stack")
__success
__arch_x86_64
/* Only 8 bytes of stack are used, so expect a plain stack adjustment
 * and none of the r9/x27 private-stack setup asserted elsewhere.
 */
__jited("	subq	$0x8, %rsp")
__arch_arm64
__jited("	mov	x25, sp")
__jited("	sub	sp, sp, #0x10")
__naked void no_private_stack_nested(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 8) = r1;		\
	r0 = 0;				\
	exit;				\
"	::: __clobber_all);
}
72
/* Static subprog that adds 32 bytes of stack depth; progs below call it
 * to push the cumulative (subtree) stack depth past a threshold.
 */
__used
__naked static void cumulative_stack_depth_subprog(void)
{
	asm volatile ("				\
	r1 = 41;				\
	*(u64 *)(r10 - 32) = r1;		\
	call %[bpf_get_smp_processor_id];	\
	exit;					\
"	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}
85
SEC("kprobe")
__description("Private stack, subtree > MAX_BPF_STACK")
__success
__arch_x86_64
/* private stack fp for the main prog */
__jited("	movabsq	$0x{{.*}}, %r9")
__jited("	addq	%gs:{{.*}}, %r9")
__jited("	movl	$0x2a, %edi")
__jited("	movq	%rdi, -0x200(%r9)")
/* r9 is saved/restored around the subprog call */
__jited("	pushq	%r9")
__jited("	callq	0x{{.*}}")
__jited("	popq	%r9")
__jited("	xorl	%eax, %eax")
__arch_arm64
__jited("	stp	x25, x27, [sp, {{.*}}]!")
__jited("	mov	x27, {{.*}}")
__jited("	movk	x27, {{.*}}, lsl #16")
__jited("	movk	x27, {{.*}}")
__jited("	mrs	x10, TPIDR_EL{{[0-1]}}")
__jited("	add	x27, x27, x10")
__jited("	add	x25, x27, {{.*}}")
__jited("	mov	x0, #0x2a")
__jited("	str	x0, [x27]")
__jited("	bl	{{.*}}")
__jited("...")
__jited("	ldp	x25, x27, [sp], {{.*}}")
/* Main prog uses the full MAX_BPF_STACK (512) and then calls a subprog
 * using 32 more, so the subtree stack depth exceeds MAX_BPF_STACK.
 */
__naked void private_stack_nested_1(void)
{
	asm volatile ("					\
	r1 = 42;					\
	*(u64 *)(r10 - %[max_bpf_stack]) = r1;		\
	call cumulative_stack_depth_subprog;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(max_bpf_stack, MAX_BPF_STACK)
	: __clobber_all);
}
124
/* bpf_loop callback: uses 512 bytes of stack and calls the 32-byte
 * subprog, mirroring the deep-stack pattern of the non-callback tests.
 */
__naked __noinline __used
static unsigned long loop_callback(void)
{
	asm volatile ("				\
	call %[bpf_get_prandom_u32];		\
	r1 = 42;				\
	*(u64 *)(r10 - 512) = r1;		\
	call cumulative_stack_depth_subprog;	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_common);
}
139
SEC("raw_tp")
__description("Private stack, callback")
__success
__arch_x86_64
/* for func loop_callback */
__jited("func #1")
__jited("	endbr64")
__jited("	nopl	(%rax,%rax)")
__jited("	nopl	(%rax)")
__jited("	pushq	%rbp")
__jited("	movq	%rsp, %rbp")
__jited("	endbr64")
/* private stack fp (r9) is set up inside the callback and preserved
 * across both helper/subprog calls via push/pop.
 */
__jited("	movabsq	$0x{{.*}}, %r9")
__jited("	addq	%gs:{{.*}}, %r9")
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
__jited("	movl	$0x2a, %edi")
__jited("	movq	%rdi, -0x200(%r9)")
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
__arch_arm64
__jited("func #1")
__jited("...")
__jited("	stp	x25, x27, [sp, {{.*}}]!")
__jited("	mov	x27, {{.*}}")
__jited("	movk	x27, {{.*}}, lsl #16")
__jited("	movk	x27, {{.*}}")
__jited("	mrs	x10, TPIDR_EL{{[0-1]}}")
__jited("	add	x27, x27, x10")
__jited("	add	x25, x27, {{.*}}")
__jited("	bl	0x{{.*}}")
__jited("	add	x7, x0, #0x0")
__jited("	mov	x0, #0x2a")
__jited("	str	x0, [x27]")
__jited("	bl	0x{{.*}}")
__jited("	add	x7, x0, #0x0")
__jited("	mov	x7, #0x0")
__jited("	ldp	x25, x27, [sp], {{.*}}")
/* Run loop_callback once via bpf_loop; the jited assertions target the
 * callback (func #1), not this main prog.
 */
__naked void private_stack_callback(void)
{
	asm volatile ("			\
	r1 = 1;				\
	r2 = %[loop_callback];		\
	r3 = 0;				\
	r4 = 0;				\
	call %[bpf_loop];		\
	r0 = 0;				\
	exit;				\
"	:
	: __imm_ptr(loop_callback),
	  __imm(bpf_loop)
	: __clobber_common);
}
195
SEC("fentry/bpf_fentry_test9")
__description("Private stack, exception in main prog")
__success __retval(0)
__arch_x86_64
/* private-stack fp (r9) saved/restored around the (throwing) call */
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
__arch_arm64
__jited("	stp	x29, x30, [sp, #-0x10]!")
__jited("	mov	x29, sp")
__jited("	stp	xzr, x26, [sp, #-0x10]!")
__jited("	mov	x26, sp")
__jited("	stp	x19, x20, [sp, #-0x10]!")
__jited("	stp	x21, x22, [sp, #-0x10]!")
__jited("	stp	x23, x24, [sp, #-0x10]!")
__jited("	stp	x25, x26, [sp, #-0x10]!")
__jited("	stp	x27, x28, [sp, #-0x10]!")
__jited("	mov	x27, {{.*}}")
__jited("	movk	x27, {{.*}}, lsl #16")
__jited("	movk	x27, {{.*}}")
__jited("	mrs	x10, TPIDR_EL{{[0-1]}}")
__jited("	add	x27, x27, x10")
__jited("	add	x25, x27, {{.*}}")
__jited("	mov	x0, #0x2a")
__jited("	str	x0, [x27]")
__jited("	mov	x0, #0x0")
__jited("	bl	0x{{.*}}")
__jited("	add	x7, x0, #0x0")
__jited("	ldp	x27, x28, [sp], #0x10")
/* Use 512 bytes of stack, then throw from the main prog itself. */
int private_stack_exception_main_prog(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 512) = r1;	\
"	::: __clobber_common);

	bpf_throw(0);
	return 0;
}
235
/* Subprog that immediately throws a BPF exception. */
__used static int subprog_exception(void)
{
	bpf_throw(0);
	return 0;
}
241
SEC("fentry/bpf_fentry_test9")
__description("Private stack, exception in subprog")
__success __retval(0)
__arch_x86_64
/* the 512-byte store goes through the private-stack fp (r9), which is
 * saved/restored around the call into the throwing subprog.
 */
__jited("	movq	%rdi, -0x200(%r9)")
__jited("	pushq	%r9")
__jited("	callq")
__jited("	popq	%r9")
__arch_arm64
__jited("	stp	x27, x28, [sp, #-0x10]!")
__jited("	mov	x27, {{.*}}")
__jited("	movk	x27, {{.*}}, lsl #16")
__jited("	movk	x27, {{.*}}")
__jited("	mrs	x10, TPIDR_EL{{[0-1]}}")
__jited("	add	x27, x27, x10")
__jited("	add	x25, x27, {{.*}}")
__jited("	mov	x0, #0x2a")
__jited("	str	x0, [x27]")
__jited("	bl	0x{{.*}}")
__jited("	add	x7, x0, #0x0")
__jited("	ldp	x27, x28, [sp], #0x10")
/* Use 512 bytes of stack here, then throw from the called subprog. */
int private_stack_exception_sub_prog(void)
{
	asm volatile ("			\
	r1 = 42;			\
	*(u64 *)(r10 - 512) = r1;	\
	call subprog_exception;		\
"	::: __clobber_common);

	return 0;
}
273
274 int glob;
subprog2(int * val)275 __noinline static void subprog2(int *val)
276 {
277 glob += val[0] * 2;
278 }
279
subprog1(int * val)280 __noinline static void subprog1(int *val)
281 {
282 int tmp[64] = {};
283
284 tmp[0] = *val;
285 subprog2(tmp);
286 }
287
timer_cb1(void * map,int * key,struct bpf_timer * timer)288 __noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
289 {
290 subprog1(key);
291 return 0;
292 }
293
timer_cb2(void * map,int * key,struct bpf_timer * timer)294 __noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer)
295 {
296 return 0;
297 }
298
299 SEC("fentry/bpf_fentry_test9")
300 __description("Private stack, async callback, not nested")
301 __success __retval(0)
302 __arch_x86_64
303 __jited(" movabsq $0x{{.*}}, %r9")
304 __arch_arm64
305 __jited(" mrs x10, TPIDR_EL{{[0-1]}}")
306 __jited(" add x27, x27, x10")
307 __jited(" add x25, x27, {{.*}}")
private_stack_async_callback_1(void)308 int private_stack_async_callback_1(void)
309 {
310 struct bpf_timer *arr_timer;
311 int array_key = 0;
312
313 arr_timer = bpf_map_lookup_elem(&array, &array_key);
314 if (!arr_timer)
315 return 0;
316
317 bpf_timer_init(arr_timer, &array, 1);
318 bpf_timer_set_callback(arr_timer, timer_cb2);
319 bpf_timer_start(arr_timer, 0, 0);
320 subprog1(&array_key);
321 return 0;
322 }
323
324 SEC("fentry/bpf_fentry_test9")
325 __description("Private stack, async callback, potential nesting")
326 __success __retval(0)
327 __arch_x86_64
328 __jited(" subq $0x100, %rsp")
329 __arch_arm64
330 __jited(" sub sp, sp, #0x100")
private_stack_async_callback_2(void)331 int private_stack_async_callback_2(void)
332 {
333 struct bpf_timer *arr_timer;
334 int array_key = 0;
335
336 arr_timer = bpf_map_lookup_elem(&array, &array_key);
337 if (!arr_timer)
338 return 0;
339
340 bpf_timer_init(arr_timer, &array, 1);
341 bpf_timer_set_callback(arr_timer, timer_cb1);
342 bpf_timer_start(arr_timer, 0, 0);
343 subprog1(&array_key);
344 return 0;
345 }
346
347 #else
348
SEC("kprobe")
__description("private stack is not supported, use a dummy test")
__success
/* Placeholder so this test object still loads and passes on
 * architectures other than x86-64/arm64 (see the #if above).
 */
int dummy_test(void)
{
	return 0;
}
356
357 #endif
358
359 char _license[] SEC("license") = "GPL";
360