1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 Facebook */
3
4 #include <vmlinux.h>
5 #include <string.h>
6 #include <stdbool.h>
7 #include <bpf/bpf_helpers.h>
8 #include <bpf/bpf_tracing.h>
9 #include "bpf_misc.h"
10 #include "errno.h"
11
char _license[] SEC("license") = "GPL";

/* Shared with userspace: pid selects the test process that triggers the
 * programs; err and val report results back to the test harness.
 */
int pid, err, val;

/* Record layout written into the ringbuf for the callback tests. */
struct ringbuf_sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

/* Ringbuf backing the ringbuf-based dynptr tests. */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

/* Single-slot array used as local memory for bpf_dynptr_from_mem(). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} array_map SEC(".maps");
34
/* Write into a ringbuf-backed dynptr, read it back, and verify the
 * round trip is lossless. err != 0 signals failure to userspace.
 */
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_read_write(void *ctx)
{
	char write_data[64] = "hello there, world!!";
	char read_data[64] = {};
	struct bpf_dynptr ptr;
	int i;

	/* Only run for the designated test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);

	/* Write data into the dynptr */
	err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);

	/* Read the data that was written into the dynptr */
	err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	/* Ensure the data we read matches the data we wrote */
	for (i = 0; i < sizeof(read_data); i++) {
		if (read_data[i] != write_data[i]) {
			err = 1;
			break;
		}
	}

	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
65
/* Exercise bpf_dynptr_data() slices over a mem-backed dynptr: out-of-range
 * requests must return NULL, an in-range slice must be directly writable.
 * err encodes which step failed (1..5); 0 means success.
 */
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_data(void *ctx)
{
	__u32 key = 0, val = 235, *map_val;
	struct bpf_dynptr ptr;
	__u32 map_val_size;
	void *data;

	map_val_size = sizeof(*map_val);

	/* Only run for the designated test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	bpf_map_update_elem(&array_map, &key, &val, 0);

	map_val = bpf_map_lookup_elem(&array_map, &key);
	if (!map_val) {
		err = 1;
		return 0;
	}

	/* Wrap the map value in a dynptr. */
	bpf_dynptr_from_mem(map_val, map_val_size, 0, &ptr);

	/* Try getting a data slice that is out of range */
	data = bpf_dynptr_data(&ptr, map_val_size + 1, 1);
	if (data) {
		err = 2;
		return 0;
	}

	/* Try getting more bytes than available */
	data = bpf_dynptr_data(&ptr, 0, map_val_size + 1);
	if (data) {
		err = 3;
		return 0;
	}

	/* A full in-range slice must succeed. */
	data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
	if (!data) {
		err = 4;
		return 0;
	}

	/* The slice is writable; store through it, then read it back. */
	*(__u32 *)data = 999;

	err = bpf_probe_read_kernel(&val, sizeof(val), data);
	if (err)
		return 0;

	if (val != *(int *)data)
		err = 5;

	return 0;
}
120
ringbuf_callback(__u32 index,void * data)121 static int ringbuf_callback(__u32 index, void *data)
122 {
123 struct ringbuf_sample *sample;
124
125 struct bpf_dynptr *ptr = (struct bpf_dynptr *)data;
126
127 sample = bpf_dynptr_data(ptr, 0, sizeof(*sample));
128 if (!sample)
129 err = 2;
130 else
131 sample->pid += index;
132
133 return 0;
134 }
135
/* Reserve a dynamically-sized ringbuf dynptr and pass it to a bpf_loop()
 * callback; verify the callback's mutations are visible (10 + sum(0..9) = 55).
 */
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_ringbuf(void *ctx)
{
	struct bpf_dynptr ptr;
	struct ringbuf_sample *sample;

	/* Only run for the designated test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	val = 100;

	/* check that you can reserve a dynamic size reservation */
	err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	sample = err ? NULL : bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample) {
		err = 1;
		goto done;
	}

	sample->pid = 10;

	/* Can pass dynptr to callback functions */
	bpf_loop(10, ringbuf_callback, &ptr, 0);

	/* Callback adds indices 0..9 to pid: 10 + 45 = 55. */
	if (sample->pid != 55)
		err = 2;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
168
/* cgroup skb programs only get read-only skb dynptrs: a write must fail
 * with -EINVAL. Returns 1 (allow packet) in all cases; err reports status.
 */
SEC("?cgroup_skb/egress")
int test_skb_readonly(struct __sk_buff *skb)
{
	__u8 write_data[2] = {1, 2};
	struct bpf_dynptr ptr;
	int ret;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* since cgroup skbs are read only, writes should fail */
	ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
	if (ret != -EINVAL) {
		err = 2;
		return 1;
	}

	return 1;
}
190
/* bpf_dynptr_data() is not supported on skb dynptrs (data may be
 * non-linear); it must return NULL. Returns 1 (allow packet) always.
 */
SEC("?cgroup_skb/egress")
int test_dynptr_skb_data(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This should return NULL. Must use bpf_dynptr_slice API */
	data = bpf_dynptr_data(&ptr, 0, 1);
	if (data) {
		err = 2;
		return 1;
	}

	return 1;
}
211
212 SEC("tp/syscalls/sys_enter_nanosleep")
test_adjust(void * ctx)213 int test_adjust(void *ctx)
214 {
215 struct bpf_dynptr ptr;
216 __u32 bytes = 64;
217 __u32 off = 10;
218 __u32 trim = 15;
219
220 if (bpf_get_current_pid_tgid() >> 32 != pid)
221 return 0;
222
223 err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
224 if (err) {
225 err = 1;
226 goto done;
227 }
228
229 if (bpf_dynptr_size(&ptr) != bytes) {
230 err = 2;
231 goto done;
232 }
233
234 /* Advance the dynptr by off */
235 err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
236 if (err) {
237 err = 3;
238 goto done;
239 }
240
241 if (bpf_dynptr_size(&ptr) != bytes - off) {
242 err = 4;
243 goto done;
244 }
245
246 /* Trim the dynptr */
247 err = bpf_dynptr_adjust(&ptr, off, 15);
248 if (err) {
249 err = 5;
250 goto done;
251 }
252
253 /* Check that the size was adjusted correctly */
254 if (bpf_dynptr_size(&ptr) != trim - off) {
255 err = 6;
256 goto done;
257 }
258
259 done:
260 bpf_ringbuf_discard_dynptr(&ptr, 0);
261 return 0;
262 }
263
/* Verify bpf_dynptr_adjust() error cases (-EINVAL, -ERANGE) and that an
 * adjusted dynptr rejects oversized writes (-E2BIG) yet can still be
 * submitted to the ringbuf. err encodes the failing step (1..6).
 */
SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust_err(void *ctx)
{
	char write_data[45] = "hello there, world!!";
	struct bpf_dynptr ptr;
	__u32 size = 64;
	__u32 off = 20;

	/* Only run for the designated test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* Check that start can't be greater than end */
	if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
		err = 2;
		goto done;
	}

	/* Check that start can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
		err = 3;
		goto done;
	}

	/* Check that end can't be greater than size */
	if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
		err = 4;
		goto done;
	}

	/* Valid adjustment: leaves size - off = 44 bytes visible. */
	if (bpf_dynptr_adjust(&ptr, off, size)) {
		err = 5;
		goto done;
	}

	/* Check that you can't write more bytes than available into the dynptr
	 * after you've adjusted it
	 */
	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 6;
		goto done;
	}

	/* Check that even after adjusting, submitting/discarding
	 * a ringbuf dynptr works
	 */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
321
/* Adjust a dynptr down to zero size and verify that non-zero-length
 * reads/writes fail with -E2BIG while zero-length ones succeed.
 * err encodes the failing step (1..6); 0 means success.
 */
SEC("tp/syscalls/sys_enter_nanosleep")
int test_zero_size_dynptr(void *ctx)
{
	char write_data = 'x', read_data;
	struct bpf_dynptr ptr;
	__u32 size = 64;

	/* Only run for the designated test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
		err = 1;
		goto done;
	}

	/* After this, the dynptr has a size of 0 */
	if (bpf_dynptr_adjust(&ptr, size, size)) {
		err = 2;
		goto done;
	}

	/* Test that reading + writing non-zero bytes is not ok */
	if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
		err = 3;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
		err = 4;
		goto done;
	}

	/* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
	if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
		err = 5;
		goto done;
	}

	if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
		err = 6;
		goto done;
	}

	err = 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
371
/* Verify bpf_dynptr_is_null(): a failed reserve yields a null dynptr,
 * a successful one yields a non-null dynptr. err encodes the failing
 * step (1..4); 0 means success.
 */
SEC("tp/syscalls/sys_enter_nanosleep")
int test_dynptr_is_null(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u64 size = 4;

	/* Only run for the designated test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return 0;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
		err = 1;
		goto exit_early;
	}

	/* Test that the invalid dynptr is null */
	if (!bpf_dynptr_is_null(&ptr1)) {
		err = 2;
		goto exit_early;
	}

	/* Get a valid dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
		err = 3;
		goto exit;
	}

	/* Test that the valid dynptr is not null */
	if (bpf_dynptr_is_null(&ptr2)) {
		err = 4;
		goto exit;
	}

exit:
	bpf_ringbuf_discard_dynptr(&ptr2, 0);
exit_early:
	/* Discarding a null dynptr is a safe no-op. */
	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	return 0;
}
412
/* Verify bpf_dynptr_is_rdonly(): false for an invalid dynptr, true for a
 * cgroup-skb dynptr (read-only context), false for a ringbuf dynptr.
 * err encodes the failing step (1..6); 0 means success.
 */
SEC("cgroup_skb/egress")
int test_dynptr_is_rdonly(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	struct bpf_dynptr ptr3;

	/* Pass in invalid flags, get back an invalid dynptr */
	if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
		err = 1;
		return 0;
	}

	/* Test that an invalid dynptr is_rdonly returns false */
	if (bpf_dynptr_is_rdonly(&ptr1)) {
		err = 2;
		return 0;
	}

	/* Get a read-only dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
		err = 3;
		return 0;
	}

	/* Test that the dynptr is read-only */
	if (!bpf_dynptr_is_rdonly(&ptr2)) {
		err = 4;
		return 0;
	}

	/* Get a read-writeable dynptr */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
		err = 5;
		goto done;
	}

	/* Test that the dynptr is not read-only */
	if (bpf_dynptr_is_rdonly(&ptr3)) {
		err = 6;
		goto done;
	}

done:
	bpf_ringbuf_discard_dynptr(&ptr3, 0);
	return 0;
}
460
/* Verify bpf_dynptr_clone(): the clone inherits the adjusted view's size
 * and rdonly-ness, and later adjustments to the original do not affect
 * the clone. err encodes the failing step (1..6); 0 means success.
 */
SEC("cgroup_skb/egress")
int test_dynptr_clone(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u32 off = 2, size;

	/* Get a dynptr */
	if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
		err = 1;
		return 0;
	}

	/* Advance the view by off before cloning. */
	if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
		err = 2;
		return 0;
	}

	/* Clone the dynptr */
	if (bpf_dynptr_clone(&ptr1, &ptr2)) {
		err = 3;
		return 0;
	}

	size = bpf_dynptr_size(&ptr1);

	/* Check that the clone has the same size and rd-only */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 4;
		return 0;
	}

	if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
		err = 5;
		return 0;
	}

	/* Advance and trim the original dynptr */
	bpf_dynptr_adjust(&ptr1, 5, 5);

	/* Check that only original dynptr was affected, and the clone wasn't */
	if (bpf_dynptr_size(&ptr2) != size) {
		err = 6;
		return 0;
	}

	return 0;
}
509
/* bpf_dynptr_slice() with a NULL buffer may legitimately return NULL if
 * the skb data is non-linear; the program's return value (0 or 1) tells
 * userspace which case occurred.
 */
SEC("?cgroup_skb/egress")
int test_dynptr_skb_no_buff(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	data = bpf_dynptr_slice(&ptr, 0, NULL, 1);

	return !!data;
}
526
/* Verify that a slice obtained without a backing buffer can be passed to
 * bpf_strncmp() (i.e. the verifier accepts the slice as valid memory).
 * Returns 1 (allow packet) always.
 */
SEC("?cgroup_skb/egress")
int test_dynptr_skb_strcmp(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	char *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
	if (data) {
		bpf_strncmp(data, 10, "foo");
		return 1;
	}

	return 1;
}
547
/* skb dynptrs in tp_btf programs are read-only: a write must fail with
 * -EINVAL. err reports status; the return value is not meaningful here.
 */
SEC("tp_btf/kfree_skb")
int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location)
{
	__u8 write_data[2] = {1, 2};
	struct bpf_dynptr ptr;
	int ret;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* since tp_btf skbs are read only, writes should fail */
	ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
	if (ret != -EINVAL) {
		err = 2;
		return 1;
	}

	return 1;
}
569
bpf_memcmp(const char * a,const char * b,u32 size)570 static inline int bpf_memcmp(const char *a, const char *b, u32 size)
571 {
572 int i;
573
574 bpf_for(i, 0, size) {
575 if (a[i] != b[i])
576 return a[i] < b[i] ? -1 : 1;
577 }
578 return 0;
579 }
580
/* Verify bpf_dynptr_copy() between two contiguous (ringbuf-backed)
 * dynptrs, both with zero and non-zero src/dst offsets. err holds the
 * first failing step's return value; 0 means success.
 */
SEC("?tp/syscalls/sys_enter_nanosleep")
int test_dynptr_copy(void *ctx)
{
	char data[] = "hello there, world!!";
	char buf[32] = {'\0'};
	__u32 sz = sizeof(data);
	struct bpf_dynptr src, dst;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &src);
	bpf_ringbuf_reserve_dynptr(&ringbuf, sz, 0, &dst);

	/* Test basic case of copying contiguous memory backed dynptrs */
	err = bpf_dynptr_write(&src, 0, data, sz, 0);
	err = err ?: bpf_dynptr_copy(&dst, 0, &src, 0, sz);
	err = err ?: bpf_dynptr_read(buf, sz, &dst, 0, 0);
	err = err ?: bpf_memcmp(data, buf, sz);

	/* Test that offsets are handled correctly */
	err = err ?: bpf_dynptr_copy(&dst, 3, &src, 5, sz - 5);
	err = err ?: bpf_dynptr_read(buf, sz - 5, &dst, 3, 0);
	err = err ?: bpf_memcmp(data + 5, buf, sz - 5);

	bpf_ringbuf_discard_dynptr(&src, 0);
	bpf_ringbuf_discard_dynptr(&dst, 0);
	return 0;
}
607
/* Verify bpf_dynptr_copy() when one or both dynptrs are backed by
 * non-contiguous memory (xdp frags): buf -> xdp, xdp -> buf, and an
 * overlapping xdp -> xdp copy, plus an out-of-bounds copy that must
 * return -E2BIG. err reports the first failure; 0 means success.
 */
SEC("xdp")
int test_dynptr_copy_xdp(struct xdp_md *xdp)
{
	struct bpf_dynptr ptr_buf, ptr_xdp;
	char data[] = "qwertyuiopasdfghjkl";
	char buf[32] = {'\0'};
	__u32 len = sizeof(data);
	int i, chunks = 200;

	/* ptr_xdp is backed by non-contiguous memory */
	bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
	bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf);

	/* Destination dynptr is backed by non-contiguous memory */
	bpf_for(i, 0, chunks) {
		err = bpf_dynptr_write(&ptr_buf, i * len, data, len, 0);
		if (err)
			goto out;
	}

	err = bpf_dynptr_copy(&ptr_xdp, 0, &ptr_buf, 0, len * chunks);
	if (err)
		goto out;

	/* Read each chunk back from the xdp dynptr and verify it. */
	bpf_for(i, 0, chunks) {
		__builtin_memset(buf, 0, sizeof(buf));
		err = bpf_dynptr_read(&buf, len, &ptr_xdp, i * len, 0);
		if (err)
			goto out;
		if (bpf_memcmp(data, buf, len) != 0)
			goto out;
	}

	/* Source dynptr is backed by non-contiguous memory */
	__builtin_memset(buf, 0, sizeof(buf));
	bpf_for(i, 0, chunks) {
		/* Zero out ptr_buf so the copy below is observable. */
		err = bpf_dynptr_write(&ptr_buf, i * len, buf, len, 0);
		if (err)
			goto out;
	}

	err = bpf_dynptr_copy(&ptr_buf, 0, &ptr_xdp, 0, len * chunks);
	if (err)
		goto out;

	bpf_for(i, 0, chunks) {
		__builtin_memset(buf, 0, sizeof(buf));
		err = bpf_dynptr_read(&buf, len, &ptr_buf, i * len, 0);
		if (err)
			goto out;
		if (bpf_memcmp(data, buf, len) != 0)
			goto out;
	}

	/* Both source and destination dynptrs are backed by non-contiguous memory */
	err = bpf_dynptr_copy(&ptr_xdp, 2, &ptr_xdp, len, len * (chunks - 1));
	if (err)
		goto out;

	bpf_for(i, 0, chunks - 1) {
		__builtin_memset(buf, 0, sizeof(buf));
		err = bpf_dynptr_read(&buf, len, &ptr_xdp, 2 + i * len, 0);
		if (err)
			goto out;
		if (bpf_memcmp(data, buf, len) != 0)
			goto out;
	}

	/* A copy extending past the dynptr's end must fail with -E2BIG. */
	if (bpf_dynptr_copy(&ptr_xdp, 2000, &ptr_xdp, 0, len * chunks) != -E2BIG)
		err = 1;

out:
	bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
	return XDP_DROP;
}
683