// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

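/* Zero a struct bpf_sock_tuple on the stack and call
 * sk = func(ctx, &tuple, sizeof(tuple), 0, 0); the socket pointer
 * (or NULL) is left in r0 for the caller to check.
 */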
#define BPF_SK_LOOKUP(func)				\
	/* struct bpf_sock_tuple tuple = {} */		\
	"r2 = 0;"					\
	"*(u32*)(r10 - 8) = r2;"			\
	"*(u64*)(r10 - 16) = r2;"			\
	"*(u64*)(r10 - 24) = r2;"			\
	"*(u64*)(r10 - 32) = r2;"			\
	"*(u64*)(r10 - 40) = r2;"			\
	"*(u64*)(r10 - 48) = r2;"			\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */\
	"r2 = r10;"					\
	"r2 += -48;"					\
	"r3 = %[sizeof_bpf_sock_tuple];"		\
	"r4 = 0;"					\
	"r5 = 0;"					\
	"call %[" #func "];"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);

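/* Program array for the tail-call tests; slot 1 lets
 * dummy_prog_loop1_socket tail-call back into itself.
 */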
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
	.values = {
		[0] = (void *)&dummy_prog_42_socket,
		[1] = (void *)&dummy_prog_loop1_socket,
		[2] = (void *)&dummy_prog_24_socket,
	},
};

SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
	asm volatile ("r0 = 42; exit;");
}

SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
	asm volatile ("r0 = 24; exit;");
}

SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
	asm volatile ("				\
	r3 = 1;					\
	r2 = %[map_prog1_socket] ll;		\
	call %[bpf_tail_call];			\
	r0 = 41;				\
	exit;					\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}

SEC("socket")
__description("unpriv: return pointer")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void unpriv_return_pointer(void)
{
	asm volatile ("				\
	r0 = r10;				\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: add const to pointer")
__success __success_unpriv __retval(0)
__naked void unpriv_add_const_to_pointer(void)
{
	asm volatile ("				\
	r1 += 8;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: add pointer to pointer")
__failure __msg("R1 pointer += pointer")
__failure_unpriv
__naked void unpriv_add_pointer_to_pointer(void)
{
	asm volatile ("				\
	r1 += r10;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: neg pointer")
__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic")
__retval(0)
__naked void unpriv_neg_pointer(void)
{
	asm volatile ("				\
	r1 = -r1;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: cmp pointer with const")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_const(void)
{
	asm volatile ("				\
	if r1 == 0 goto l0_%=;			\
l0_%=:	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: cmp pointer with pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_pointer(void)
{
	asm volatile ("				\
	if r1 == r10 goto l0_%=;		\
l0_%=:	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("tracepoint")
__description("unpriv: check that printk is disallowed")
__success
__naked void check_that_printk_is_disallowed(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r1 = r10;				\
	r1 += -8;				\
	r2 = 8;					\
	r3 = r1;				\
	call %[bpf_trace_printk];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_trace_printk)
	: __clobber_all);
}

SEC("socket")
__description("unpriv: pass pointer to helper function")
__success __failure_unpriv __msg_unpriv("R4 leaks addr")
__retval(0)
__naked void pass_pointer_to_helper_function(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_8b] ll;			\
	r3 = r2;				\
	r4 = r2;				\
	call %[bpf_map_update_elem];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("unpriv: indirectly pass pointer on stack to helper function")
__success __failure_unpriv
__msg_unpriv("invalid read from stack R2 off -8+0 size 8")
__retval(0)
__naked void on_stack_to_helper_function(void)
{
	asm volatile ("				\
	*(u64*)(r10 - 8) = r10;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_8b] ll;			\
	call %[bpf_map_lookup_elem];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("unpriv: mangle pointer on stack 1")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_1(void)
{
	asm volatile ("				\
	*(u64*)(r10 - 8) = r10;			\
	r0 = 0;					\
	*(u32*)(r10 - 8) = r0;			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: mangle pointer on stack 2")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_2(void)
{
	asm volatile ("				\
	*(u64*)(r10 - 8) = r10;			\
	r0 = 0;					\
	*(u8*)(r10 - 1) = r0;			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

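/* A 4-byte fill of an 8-byte spilled pointer has "invalid size" and is
 * rejected even for privileged programs.
 */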
SEC("socket")
__description("unpriv: read pointer from stack in small chunks")
__failure __msg("invalid size")
__failure_unpriv
__naked void from_stack_in_small_chunks(void)
{
	asm volatile ("				\
	*(u64*)(r10 - 8) = r10;			\
	r0 = *(u32*)(r10 - 8);			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: write pointer into ctx")
__failure __msg("invalid bpf_context access")
__failure_unpriv __msg_unpriv("R1 leaks addr")
__naked void unpriv_write_pointer_into_ctx(void)
{
	asm volatile ("				\
	*(u64*)(r1 + 0) = r1;			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: spill/fill of ctx")
__success __success_unpriv __retval(0)
__naked void unpriv_spill_fill_of_ctx(void)
{
	asm volatile ("				\
	r6 = r10;				\
	r6 += -8;				\
	*(u64*)(r6 + 0) = r1;			\
	r1 = *(u64*)(r6 + 0);			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("tc")
__description("unpriv: spill/fill of ctx 2")
__success __retval(0)
__naked void spill_fill_of_ctx_2(void)
{
	asm volatile ("				\
	r6 = r10;				\
	r6 += -8;				\
	*(u64*)(r6 + 0) = r1;			\
	r1 = *(u64*)(r6 + 0);			\
	call %[bpf_get_hash_recalc];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}

SEC("tc")
__description("unpriv: spill/fill of ctx 3")
__failure __msg("R1 type=fp expected=ctx")
__naked void spill_fill_of_ctx_3(void)
{
	asm volatile ("				\
	r6 = r10;				\
	r6 += -8;				\
	*(u64*)(r6 + 0) = r1;			\
	*(u64*)(r6 + 0) = r10;			\
	r1 = *(u64*)(r6 + 0);			\
	call %[bpf_get_hash_recalc];		\
	exit;					\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}

SEC("tc")
__description("unpriv: spill/fill of ctx 4")
__failure __msg("R1 type=scalar expected=ctx")
__naked void spill_fill_of_ctx_4(void)
{
	asm volatile ("				\
	r6 = r10;				\
	r6 += -8;				\
	*(u64*)(r6 + 0) = r1;			\
	r0 = 1;					\
	lock *(u64 *)(r10 - 8) += r0;		\
	r1 = *(u64*)(r6 + 0);			\
	call %[bpf_get_hash_recalc];		\
	exit;					\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}

SEC("tc")
__description("unpriv: spill/fill of different pointers stx")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_stx(void)
{
	asm volatile ("				\
	r3 = 42;				\
	r6 = r10;				\
	r6 += -8;				\
	if r1 == 0 goto l0_%=;			\
	r2 = r10;				\
	r2 += -16;				\
	*(u64*)(r6 + 0) = r2;			\
l0_%=:	if r1 != 0 goto l1_%=;			\
	*(u64*)(r6 + 0) = r1;			\
l1_%=:	r1 = *(u64*)(r6 + 0);			\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

/* Same as above, but use BPF_ST_MEM to save 42
 * instead of BPF_STX_MEM.
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers st")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_st(void)
{
	asm volatile ("				\
	r6 = r10;				\
	r6 += -8;				\
	if r1 == 0 goto l0_%=;			\
	r2 = r10;				\
	r2 += -16;				\
	*(u64*)(r6 + 0) = r2;			\
l0_%=:	if r1 != 0 goto l1_%=;			\
	*(u64*)(r6 + 0) = r1;			\
l1_%=:	r1 = *(u64*)(r6 + 0);			\
	.8byte %[st_mem];			\
	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_insn(st_mem,
		     BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42))
	: __clobber_all);
}

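/* Same spill/fill pattern as above, but mixing the ctx pointer with a
 * socket pointer acquired via bpf_sk_lookup_tcp().
 */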
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - ctx and sock")
__failure __msg("type=ctx expected=sock")
__naked void pointers_stx_ctx_and_sock(void)
{
	asm volatile ("				\
	r8 = r1;				\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;				\
	/* u64 foo; */				\
	/* void *target = &foo; */		\
	r6 = r10;				\
	r6 += -8;				\
	r1 = r8;				\
	/* if (skb == NULL) *target = sock; */	\
	if r1 == 0 goto l0_%=;			\
	*(u64*)(r6 + 0) = r2;			\
l0_%=:	/* else *target = skb; */		\
	if r1 != 0 goto l1_%=;			\
	*(u64*)(r6 + 0) = r1;			\
l1_%=:	/* struct __sk_buff *skb = *target; */	\
	r1 = *(u64*)(r6 + 0);			\
	/* skb->mark = 42; */			\
	r3 = 42;				\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;	\
	/* if (sk) bpf_sk_release(sk) */	\
	if r1 == 0 goto l2_%=;			\
	call %[bpf_sk_release];			\
l2_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("unpriv: spill/fill of different pointers stx - leak sock")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("Unreleased reference")
__naked void different_pointers_stx_leak_sock(void)
{
	asm volatile ("				\
	r8 = r1;				\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;				\
	/* u64 foo; */				\
	/* void *target = &foo; */		\
	r6 = r10;				\
	r6 += -8;				\
	r1 = r8;				\
	/* if (skb == NULL) *target = sock; */	\
	if r1 == 0 goto l0_%=;			\
	*(u64*)(r6 + 0) = r2;			\
l0_%=:	/* else *target = skb; */		\
	if r1 != 0 goto l1_%=;			\
	*(u64*)(r6 + 0) = r1;			\
l1_%=:	/* struct __sk_buff *skb = *target; */	\
	r1 = *(u64*)(r6 + 0);			\
	/* skb->mark = 42; */			\
	r3 = 42;				\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;	\
	exit;					\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)")
__failure __msg("same insn cannot be used with different pointers")
__naked void stx_sock_and_ctx_read(void)
{
	asm volatile ("				\
	r8 = r1;				\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;				\
	/* u64 foo; */				\
	/* void *target = &foo; */		\
	r6 = r10;				\
	r6 += -8;				\
	r1 = r8;				\
	/* if (skb) *target = skb */		\
	if r1 == 0 goto l0_%=;			\
	*(u64*)(r6 + 0) = r1;			\
l0_%=:	/* else *target = sock */		\
	if r1 != 0 goto l1_%=;			\
	*(u64*)(r6 + 0) = r2;			\
l1_%=:	/* struct bpf_sock *sk = *target; */	\
	r1 = *(u64*)(r6 + 0);			\
	/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;			\
	r3 = *(u32*)(r1 + %[bpf_sock_mark]);	\
	call %[bpf_sk_release];			\
l2_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("cannot write into sock")
__naked void stx_sock_and_ctx_write(void)
{
	asm volatile ("				\
	r8 = r1;				\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;				\
	/* u64 foo; */				\
	/* void *target = &foo; */		\
	r6 = r10;				\
	r6 += -8;				\
	r1 = r8;				\
	/* if (skb) *target = skb */		\
	if r1 == 0 goto l0_%=;			\
	*(u64*)(r6 + 0) = r1;			\
l0_%=:	/* else *target = sock */		\
	if r1 != 0 goto l1_%=;			\
	*(u64*)(r6 + 0) = r2;			\
l1_%=:	/* struct bpf_sock *sk = *target; */	\
	r1 = *(u64*)(r6 + 0);			\
	/* if (sk) sk->mark = 42; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;			\
	r3 = 42;				\
	*(u32*)(r1 + %[bpf_sock_mark]) = r3;	\
	call %[bpf_sk_release];			\
l2_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("socket")
__description("unpriv: write pointer into map elem value")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void pointer_into_map_elem_value(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_8b] ll;			\
	call %[bpf_map_lookup_elem];		\
	if r0 == 0 goto l0_%=;			\
	*(u64*)(r0 + 0) = r0;			\
l0_%=:	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

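/* w7 is a known constant zero after the 32-bit AND, so the branch below is
 * always taken and the load through r7 is dead code. The __xlated_unpriv
 * lines below check that, under Spectre v1 mitigation, the unprivileged
 * program still gets a nospec and sanitized dead code at that spot.
 */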
SEC("socket")
__description("alu32: mov u32 const")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
__xlated_unpriv("if r0 == 0x0 goto pc+2")
__xlated_unpriv("nospec") /* inserted to prevent `R7 invalid mem access 'scalar'` */
__xlated_unpriv("goto pc-1") /* sanitized dead code */
__xlated_unpriv("exit")
#endif
__naked void alu32_mov_u32_const(void)
{
	asm volatile ("				\
	w7 = 0;					\
	w7 &= 1;				\
	w0 = w7;				\
	if r0 == 0 goto l0_%=;			\
	r0 = *(u64*)(r7 + 0);			\
l0_%=:	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: partial copy of pointer")
__success __failure_unpriv __msg_unpriv("R10 partial copy")
__retval(0)
__naked void unpriv_partial_copy_of_pointer(void)
{
	asm volatile ("				\
	w1 = w10;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: pass pointer to tail_call")
__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper")
__retval(0)
__naked void pass_pointer_to_tail_call(void)
{
	asm volatile ("				\
	r3 = r1;				\
	r2 = %[map_prog1_socket] ll;		\
	call %[bpf_tail_call];			\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}

SEC("socket")
__description("unpriv: cmp map pointer with zero")
__success __success_unpriv
__retval(0)
__naked void cmp_map_pointer_with_zero(void)
{
	asm volatile ("				\
	r1 = %[map_hash_8b] ll;			\
	if r1 == 0 goto l0_%=;			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("unpriv: cmp map pointer with const")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison prohibited")
__retval(0)
__naked void cmp_map_pointer_with_const(void)
{
	asm volatile ("				\
	r1 = %[map_hash_8b] ll;			\
	if r1 == 0x0000beef goto l0_%=;		\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("unpriv: write into frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_write_into_frame_pointer(void)
{
	asm volatile ("				\
	r10 = r1;				\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: spill/fill frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_spill_fill_frame_pointer(void)
{
	asm volatile ("				\
	r6 = r10;				\
	r6 += -8;				\
	*(u64*)(r6 + 0) = r10;			\
	r10 = *(u64*)(r6 + 0);			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: cmp of frame pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_frame_pointer(void)
{
	asm volatile ("				\
	if r10 == 0 goto l0_%=;			\
l0_%=:	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: adding of fp, reg")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_reg(void)
{
	asm volatile ("				\
	r0 = 0;					\
	r1 = 0;					\
	r1 += r10;				\
	*(u64*)(r1 - 8) = r0;			\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: adding of fp, imm")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_imm(void)
{
	asm volatile ("				\
	r0 = 0;					\
	r1 = r10;				\
	r1 += 0;				\
	*(u64*)(r1 - 8) = r0;			\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("unpriv: cmp of stack pointer")
__success __failure_unpriv __msg_unpriv("R2 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_stack_pointer(void)
{
	asm volatile ("				\
	r2 = r10;				\
	r2 += -8;				\
	if r2 == 0 goto l0_%=;			\
l0_%=:	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

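/* Architecturally r6 always holds a readable stack slot pointer when the
 * u8 load below executes, but a mispredicted branch can leave the
 * attacker-controlled scalar from r9 in r6 instead. The leaked bit is then
 * encoded in which map element ends up cached.
 */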
SEC("socket")
__description("unpriv: Spectre v1 path-based type confusion of scalar as stack-ptr")
__success __success_unpriv __retval(0)
#ifdef SPEC_V1
__xlated_unpriv("if r0 != 0x1 goto pc+2")
/* This nospec prevents the exploit because it forces the mispredicted (not
 * taken) `if r0 != 0x0 goto l0_%=` to resolve before using r6 as a pointer.
 * This causes the CPU to realize that `r6 = r9` should have never executed. It
 * ensures that r6 always contains a readable stack slot ptr when the insn after
 * the nospec executes.
 */
__xlated_unpriv("nospec")
__xlated_unpriv("r9 = *(u8 *)(r6 +0)")
#endif
__naked void unpriv_spec_v1_type_confusion(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_8b] ll;			\
	call %[bpf_map_lookup_elem];		\
	if r0 == 0 goto l2_%=;			\
	/* r0: pointer to a map array entry */	\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_8b] ll;			\
	/* r1, r2: prepared call args */	\
	r6 = r10;				\
	r6 += -8;				\
	/* r6: pointer to readable stack slot */\
	r9 = 0xffffc900;			\
	r9 <<= 32;				\
	/* r9: scalar controlled by attacker */	\
	r0 = *(u64 *)(r0 + 0); /* cache miss */	\
	if r0 != 0x0 goto l0_%=;		\
	r6 = r9;				\
l0_%=:	if r0 != 0x1 goto l1_%=;		\
	r9 = *(u8 *)(r6 + 0);			\
l1_%=:	/* leak r9 */				\
	r9 &= 1;				\
	r9 <<= 9;				\
	*(u64*)(r10 - 8) = r9;			\
	call %[bpf_map_lookup_elem];		\
	if r0 == 0 goto l2_%=;			\
	/* leak secret into is_cached(map[0|512]): */\
	r0 = *(u64 *)(r0 + 0);			\
l2_%=:						\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

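/* ldimm64 occupies two insn slots. The __xlated_unpriv lines below check
 * that the Spectre v4 store barrier is still placed right after the stack
 * store and that the 64-bit immediate itself survives unchanged.
 */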
SEC("socket")
__description("unpriv: ldimm64 before Spectre v4 barrier")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V4
__xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */
__xlated_unpriv("*(u64 *)(r10 -8) = r1")
__xlated_unpriv("nospec")
#endif
__naked void unpriv_ldimm64_spectre_v4(void)
{
	asm volatile ("				\
	r1 = 0x2020200005642020 ll;		\
	*(u64 *)(r10 -8) = r1;			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

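/* r8 and r9 are map-lookup results that may or may not be NULL, so every
 * speculative path must be followed. On a mispredicted path the final
 * store can go through r2 = r0 while r0 still holds the scalar 0 rather
 * than a copy of the frame pointer, hence the expected nospec in front of
 * the store (Spectre v1) in addition to the one behind it (Spectre v4).
 */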
SEC("socket")
__description("unpriv: Spectre v1 and v4 barrier")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
#ifdef SPEC_V4
/* starts with r0 == r8 == r9 == 0 */
__xlated_unpriv("if r8 != 0x0 goto pc+1")
__xlated_unpriv("goto pc+2")
__xlated_unpriv("if r9 == 0x0 goto pc+4")
__xlated_unpriv("r2 = r0")
/* The following nospec is required to prevent the dangerous
 * `*(u64 *)(NOT_FP -64) = r1` if `if r9 == 0 goto pc+4` was mispredicted
 * because of Spectre v1. The test therefore ensures that the nospec inserted
 * for Spectre v4 does not stop the speculative path induced by Spectre v1
 * from being fully analyzed.
 */
__xlated_unpriv("nospec") /* Spectre v1 */
__xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */
__xlated_unpriv("nospec") /* Spectre v4 */
#endif
#endif
__naked void unpriv_spectre_v1_and_v4(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_8b] ll;			\
	call %[bpf_map_lookup_elem];		\
	r8 = r0;				\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_hash_8b] ll;			\
	call %[bpf_map_lookup_elem];		\
	r9 = r0;				\
	r0 = r10;				\
	r1 = 0;					\
	r2 = r10;				\
	if r8 != 0 goto l0_%=;			\
	if r9 != 0 goto l0_%=;			\
	r0 = 0;					\
l0_%=:	if r8 != 0 goto l1_%=;			\
	goto l2_%=;				\
l1_%=:	if r9 == 0 goto l3_%=;			\
	r2 = r0;				\
l2_%=:	*(u64 *)(r2 -64) = r1;			\
l3_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

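/* Same branch structure as above, but with r8 and r9 known to be zero the
 * dangerous instructions are provably dead and get sanitized to
 * "goto pc-1", while the nospec pair around the store remains.
 */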
SEC("socket")
__description("unpriv: Spectre v1 and v4 barrier (simple)")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
#ifdef SPEC_V4
__xlated_unpriv("if r8 != 0x0 goto pc+1")
__xlated_unpriv("goto pc+2")
__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
__xlated_unpriv("goto pc-1") /* r2 = r0 */
__xlated_unpriv("nospec")
__xlated_unpriv("*(u64 *)(r2 -64) = r1")
__xlated_unpriv("nospec")
#endif
#endif
__naked void unpriv_spectre_v1_and_v4_simple(void)
{
	asm volatile ("				\
	r8 = 0;					\
	r9 = 0;					\
	r0 = r10;				\
	r1 = 0;					\
	r2 = r10;				\
	if r8 != 0 goto l0_%=;			\
	if r9 != 0 goto l0_%=;			\
	r0 = 0;					\
l0_%=:	if r8 != 0 goto l1_%=;			\
	goto l2_%=;				\
l1_%=:	if r9 == 0 goto l3_%=;			\
	r2 = r0;				\
l2_%=:	*(u64 *)(r2 -64) = r1;			\
l3_%=:	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

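/* As above, with a two-insn ldimm64 inside the dead region: both halves
 * are expected to be sanitized to "goto pc-1" without splitting the
 * instruction pair.
 */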
SEC("socket")
__description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
#ifdef SPEC_V4
__xlated_unpriv("if r8 != 0x0 goto pc+1")
__xlated_unpriv("goto pc+4")
__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
__xlated_unpriv("goto pc-1") /* r2 = r0 */
__xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */
__xlated_unpriv("goto pc-1") /* second part of ldimm64 */
__xlated_unpriv("nospec")
__xlated_unpriv("*(u64 *)(r2 -64) = r1")
__xlated_unpriv("nospec")
#endif
#endif
__naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
{
	asm volatile ("				\
	r8 = 0;					\
	r9 = 0;					\
	r0 = r10;				\
	r1 = 0;					\
	r2 = r10;				\
	if r8 != 0 goto l0_%=;			\
	if r9 != 0 goto l0_%=;			\
	r0 = 0;					\
l0_%=:	if r8 != 0 goto l1_%=;			\
	goto l2_%=;				\
l1_%=:	if r9 == 0 goto l3_%=;			\
	r2 = r0;				\
	r1 = 0x2020200005642020 ll;		\
l2_%=:	*(u64 *)(r2 -64) = r1;			\
l3_%=:	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";