1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <test_progs.h>
4 #include "bpf_iter_ipv6_route.skel.h"
5 #include "bpf_iter_netlink.skel.h"
6 #include "bpf_iter_bpf_map.skel.h"
7 #include "bpf_iter_task.skel.h"
8 #include "bpf_iter_task_stack.skel.h"
9 #include "bpf_iter_task_file.skel.h"
10 #include "bpf_iter_task_btf.skel.h"
11 #include "bpf_iter_tcp4.skel.h"
12 #include "bpf_iter_tcp6.skel.h"
13 #include "bpf_iter_udp4.skel.h"
14 #include "bpf_iter_udp6.skel.h"
15 #include "bpf_iter_test_kern1.skel.h"
16 #include "bpf_iter_test_kern2.skel.h"
17 #include "bpf_iter_test_kern3.skel.h"
18 #include "bpf_iter_test_kern4.skel.h"
19 #include "bpf_iter_bpf_hash_map.skel.h"
20 #include "bpf_iter_bpf_percpu_hash_map.skel.h"
21 #include "bpf_iter_bpf_array_map.skel.h"
22 #include "bpf_iter_bpf_percpu_array_map.skel.h"
23 #include "bpf_iter_bpf_sk_storage_map.skel.h"
24 #include "bpf_iter_test_kern5.skel.h"
25 #include "bpf_iter_test_kern6.skel.h"
26
/* Required by the CHECK() macro from test_progs.h (elapsed-time slot). */
static int duration;
28
/* bpf_iter_test_kern3 is expected to fail open_and_load (presumably
 * rejected by the kernel -- see progs/bpf_iter_test_kern3.c).  A non-NULL
 * skeleton therefore IS the test failure.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n")) {
		/* unexpected success: still release the skeleton */
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}
40
/* Attach @prog as a bpf_iter program, create an iterator fd and drain it.
 * Output contents are not validated; the test only requires that read()
 * reaches EOF without error.
 */
static void do_dummy_read(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* not check contents, but ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}
65
test_ipv6_route(void)66 static void test_ipv6_route(void)
67 {
68 struct bpf_iter_ipv6_route *skel;
69
70 skel = bpf_iter_ipv6_route__open_and_load();
71 if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
72 "skeleton open_and_load failed\n"))
73 return;
74
75 do_dummy_read(skel->progs.dump_ipv6_route);
76
77 bpf_iter_ipv6_route__destroy(skel);
78 }
79
test_netlink(void)80 static void test_netlink(void)
81 {
82 struct bpf_iter_netlink *skel;
83
84 skel = bpf_iter_netlink__open_and_load();
85 if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
86 "skeleton open_and_load failed\n"))
87 return;
88
89 do_dummy_read(skel->progs.dump_netlink);
90
91 bpf_iter_netlink__destroy(skel);
92 }
93
test_bpf_map(void)94 static void test_bpf_map(void)
95 {
96 struct bpf_iter_bpf_map *skel;
97
98 skel = bpf_iter_bpf_map__open_and_load();
99 if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
100 "skeleton open_and_load failed\n"))
101 return;
102
103 do_dummy_read(skel->progs.dump_bpf_map);
104
105 bpf_iter_bpf_map__destroy(skel);
106 }
107
test_task(void)108 static void test_task(void)
109 {
110 struct bpf_iter_task *skel;
111
112 skel = bpf_iter_task__open_and_load();
113 if (CHECK(!skel, "bpf_iter_task__open_and_load",
114 "skeleton open_and_load failed\n"))
115 return;
116
117 do_dummy_read(skel->progs.dump_task);
118
119 bpf_iter_task__destroy(skel);
120 }
121
test_task_stack(void)122 static void test_task_stack(void)
123 {
124 struct bpf_iter_task_stack *skel;
125
126 skel = bpf_iter_task_stack__open_and_load();
127 if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
128 "skeleton open_and_load failed\n"))
129 return;
130
131 do_dummy_read(skel->progs.dump_task_stack);
132
133 bpf_iter_task_stack__destroy(skel);
134 }
135
/* pthread start routine that terminates immediately, passing @arg through
 * as the thread's exit status (joined in test_task_file()).
 */
static void *do_nothing(void *arg)
{
	pthread_exit(arg);
}
140
/* Iterate task files for this process while an extra idle pthread exists.
 * bss->count presumably counts file visits attributed to a task other
 * than the thread-group leader and is expected to stay 0 -- confirm
 * against progs/bpf_iter_task_file.c.
 */
static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	/* restrict the BPF program to this process's tgid */
	skel->bss->tgid = getpid();

	if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
		  "pthread_create", "pthread_create failed\n"))
		goto done;

	do_dummy_read(skel->progs.dump_task_file);

	/* do_nothing() exits with NULL, so a non-NULL ret is also a failure */
	if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
		  "pthread_join", "pthread_join failed\n"))
		goto done;

	CHECK(skel->bss->count != 0, "check_count",
	      "invalid non pthread file visit count %d\n", skel->bss->count);

done:
	bpf_iter_task_file__destroy(skel);
}
170
/* Accumulation buffer for the BTF-printed task_struct dump read in
 * do_btf_read(); static zero-init keeps it NUL-terminated as long as the
 * dump fits.
 */
#define TASKBUFSZ 32768

static char taskbuf[TASKBUFSZ];
174
do_btf_read(struct bpf_iter_task_btf * skel)175 static int do_btf_read(struct bpf_iter_task_btf *skel)
176 {
177 struct bpf_program *prog = skel->progs.dump_task_struct;
178 struct bpf_iter_task_btf__bss *bss = skel->bss;
179 int iter_fd = -1, len = 0, bufleft = TASKBUFSZ;
180 struct bpf_link *link;
181 char *buf = taskbuf;
182 int ret = 0;
183
184 link = bpf_program__attach_iter(prog, NULL);
185 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
186 return ret;
187
188 iter_fd = bpf_iter_create(bpf_link__fd(link));
189 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
190 goto free_link;
191
192 do {
193 len = read(iter_fd, buf, bufleft);
194 if (len > 0) {
195 buf += len;
196 bufleft -= len;
197 }
198 } while (len > 0);
199
200 if (bss->skip) {
201 printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
202 ret = 1;
203 test__skip();
204 goto free_link;
205 }
206
207 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
208 goto free_link;
209
210 CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
211 "check for btf representation of task_struct in iter data",
212 "struct task_struct not found");
213 free_link:
214 if (iter_fd > 0)
215 close(iter_fd);
216 bpf_link__destroy(link);
217 return ret;
218 }
219
test_task_btf(void)220 static void test_task_btf(void)
221 {
222 struct bpf_iter_task_btf__bss *bss;
223 struct bpf_iter_task_btf *skel;
224 int ret;
225
226 skel = bpf_iter_task_btf__open_and_load();
227 if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
228 "skeleton open_and_load failed\n"))
229 return;
230
231 bss = skel->bss;
232
233 ret = do_btf_read(skel);
234 if (ret)
235 goto cleanup;
236
237 if (CHECK(bss->tasks == 0, "check if iterated over tasks",
238 "no task iteration, did BPF program run?\n"))
239 goto cleanup;
240
241 CHECK(bss->seq_err != 0, "check for unexpected err",
242 "bpf_seq_printf_btf returned %ld", bss->seq_err);
243
244 cleanup:
245 bpf_iter_task_btf__destroy(skel);
246 }
247
test_tcp4(void)248 static void test_tcp4(void)
249 {
250 struct bpf_iter_tcp4 *skel;
251
252 skel = bpf_iter_tcp4__open_and_load();
253 if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
254 "skeleton open_and_load failed\n"))
255 return;
256
257 do_dummy_read(skel->progs.dump_tcp4);
258
259 bpf_iter_tcp4__destroy(skel);
260 }
261
test_tcp6(void)262 static void test_tcp6(void)
263 {
264 struct bpf_iter_tcp6 *skel;
265
266 skel = bpf_iter_tcp6__open_and_load();
267 if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
268 "skeleton open_and_load failed\n"))
269 return;
270
271 do_dummy_read(skel->progs.dump_tcp6);
272
273 bpf_iter_tcp6__destroy(skel);
274 }
275
test_udp4(void)276 static void test_udp4(void)
277 {
278 struct bpf_iter_udp4 *skel;
279
280 skel = bpf_iter_udp4__open_and_load();
281 if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
282 "skeleton open_and_load failed\n"))
283 return;
284
285 do_dummy_read(skel->progs.dump_udp4);
286
287 bpf_iter_udp4__destroy(skel);
288 }
289
test_udp6(void)290 static void test_udp6(void)
291 {
292 struct bpf_iter_udp6 *skel;
293
294 skel = bpf_iter_udp6__open_and_load();
295 if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
296 "skeleton open_and_load failed\n"))
297 return;
298
299 do_dummy_read(skel->progs.dump_udp6);
300
301 bpf_iter_udp6__destroy(skel);
302 }
303
/* Read everything from @iter_fd -- one byte at a time when
 * @read_one_char is set, otherwise in whole-remaining-buffer chunks --
 * and strcmp() the accumulated data against @expected.  The expected
 * string must be less than 16 bytes so it fits the local buffer with a
 * NUL terminator.
 * Returns 0 on match; -1 on read error, buffer overflow, or mismatch.
 */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int err = -1, len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		/* the iterator produced more than buf can hold */
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	err = strcmp(buf, expected);
	if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
		  buf, expected))
		return -1;

	return 0;
}
329
/* Exercise an anonymous (fd-based) task iterator: attach through the
 * skeleton, create an iterator fd and verify it yields "abcd".
 * @read_one_char selects single-byte reads vs whole-buffer reads.
 */
static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	int iter_fd;

	skel = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	if (CHECK(bpf_iter_test_kern1__attach(skel),
		  "bpf_iter_test_kern1__attach", "skeleton attach failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.dump_task));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}
358
do_read(const char * path,const char * expected)359 static int do_read(const char *path, const char *expected)
360 {
361 int err, iter_fd;
362
363 iter_fd = open(path, O_RDONLY);
364 if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
365 path, strerror(errno)))
366 return -1;
367
368 err = do_read_with_fd(iter_fd, expected, false);
369 close(iter_fd);
370 return err;
371 }
372
/* Pin a task iterator to a bpffs path, verify `cat`-style reads return
 * "abcd", then live-update the pinned link to a second program and verify
 * the output changes to "ABCD".
 */
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* file based iterator seems working fine. Let us do a link update
	 * of the underlying link and `cat` the iterator again, its content
	 * should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
		  "skeleton open_and_load failed\n"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (CHECK(err, "update_prog", "update_prog failed\n"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}
425
/* Exercise seq_file buffer overflow/restart handling in bpf_seq_read().
 * Two single-entry array maps are created and the BPF program
 * bpf_seq_write()s print_len * 8 bytes for each, sized relative to the
 * kernel's 8-page seq buffer so the second map's output overflows and
 * forces a restart.
 *
 * @test_e2big_overflow: size each map's output past the buffer so the
 *                       read must fail with E2BIG.
 * @ret1: make the BPF program return 1 so only minimal output (8 bytes
 *        per map) is produced.
 */
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (CHECK(!skel, "bpf_iter_test_kern4__open",
		  "skeleton open failed\n"))
		return;

	/* create two maps: bpf program will only do bpf_seq_write
	 * for these two maps. The goal is one map output almost
	 * fills seq_file buffer and then the other will trigger
	 * overflow and needs restart.
	 */
	map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map1_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map2_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* bpf_seq_printf kernel buffer is 8 pages, so one map
	 * bpf_seq_write will mostly fill it, and the other map
	 * will partially fill and then trigger overflow and need
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (CHECK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
			  len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		/* NOTE(review): read() signals EAGAIN by returning -1 with
		 * errno == EAGAIN, not by returning -EAGAIN; this second arm
		 * looks dead -- confirm intent
		 */
		} while (len > 0 || len == -EAGAIN);

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (CHECK(total_read_len != expected_read_len, "read",
		  "total len %u, expected len %u\n", total_read_len,
		  expected_read_len))
		goto free_buf;

	/* map1 fills the buffer in one pass; map2 is visited twice because
	 * of the overflow restart
	 */
	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
		  "expected 1 actual %d\n", skel->bss->map1_accessed))
		goto free_buf;

	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
		  "expected 2 actual %d\n", skel->bss->map2_accessed))
		goto free_buf;

	/* both map2 visits must observe the same seq number */
	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
	      "map2_seqnum", "two different seqnum %lld %lld\n",
	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}
563
test_bpf_hash_map(void)564 static void test_bpf_hash_map(void)
565 {
566 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
567 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
568 struct bpf_iter_bpf_hash_map *skel;
569 int err, i, len, map_fd, iter_fd;
570 union bpf_iter_link_info linfo;
571 __u64 val, expected_val = 0;
572 struct bpf_link *link;
573 struct key_t {
574 int a;
575 int b;
576 int c;
577 } key;
578 char buf[64];
579
580 skel = bpf_iter_bpf_hash_map__open();
581 if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
582 "skeleton open failed\n"))
583 return;
584
585 skel->bss->in_test_mode = true;
586
587 err = bpf_iter_bpf_hash_map__load(skel);
588 if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
589 "skeleton load failed\n"))
590 goto out;
591
592 /* iterator with hashmap2 and hashmap3 should fail */
593 memset(&linfo, 0, sizeof(linfo));
594 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
595 opts.link_info = &linfo;
596 opts.link_info_len = sizeof(linfo);
597 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
598 if (CHECK(!IS_ERR(link), "attach_iter",
599 "attach_iter for hashmap2 unexpected succeeded\n"))
600 goto out;
601
602 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
603 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
604 if (CHECK(!IS_ERR(link), "attach_iter",
605 "attach_iter for hashmap3 unexpected succeeded\n"))
606 goto out;
607
608 /* hashmap1 should be good, update map values here */
609 map_fd = bpf_map__fd(skel->maps.hashmap1);
610 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
611 key.a = i + 1;
612 key.b = i + 2;
613 key.c = i + 3;
614 val = i + 4;
615 expected_key_a += key.a;
616 expected_key_b += key.b;
617 expected_key_c += key.c;
618 expected_val += val;
619
620 err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
621 if (CHECK(err, "map_update", "map_update failed\n"))
622 goto out;
623 }
624
625 linfo.map.map_fd = map_fd;
626 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
627 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
628 goto out;
629
630 iter_fd = bpf_iter_create(bpf_link__fd(link));
631 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
632 goto free_link;
633
634 /* do some tests */
635 while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
636 ;
637 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
638 goto close_iter;
639
640 /* test results */
641 if (CHECK(skel->bss->key_sum_a != expected_key_a,
642 "key_sum_a", "got %u expected %u\n",
643 skel->bss->key_sum_a, expected_key_a))
644 goto close_iter;
645 if (CHECK(skel->bss->key_sum_b != expected_key_b,
646 "key_sum_b", "got %u expected %u\n",
647 skel->bss->key_sum_b, expected_key_b))
648 goto close_iter;
649 if (CHECK(skel->bss->val_sum != expected_val,
650 "val_sum", "got %llu expected %llu\n",
651 skel->bss->val_sum, expected_val))
652 goto close_iter;
653
654 close_iter:
655 close(iter_fd);
656 free_link:
657 bpf_link__destroy(link);
658 out:
659 bpf_iter_bpf_hash_map__destroy(skel);
660 }
661
test_bpf_percpu_hash_map(void)662 static void test_bpf_percpu_hash_map(void)
663 {
664 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
665 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
666 struct bpf_iter_bpf_percpu_hash_map *skel;
667 int err, i, j, len, map_fd, iter_fd;
668 union bpf_iter_link_info linfo;
669 __u32 expected_val = 0;
670 struct bpf_link *link;
671 struct key_t {
672 int a;
673 int b;
674 int c;
675 } key;
676 char buf[64];
677 void *val;
678
679 val = malloc(8 * bpf_num_possible_cpus());
680
681 skel = bpf_iter_bpf_percpu_hash_map__open();
682 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
683 "skeleton open failed\n"))
684 return;
685
686 skel->rodata->num_cpus = bpf_num_possible_cpus();
687
688 err = bpf_iter_bpf_percpu_hash_map__load(skel);
689 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
690 "skeleton load failed\n"))
691 goto out;
692
693 /* update map values here */
694 map_fd = bpf_map__fd(skel->maps.hashmap1);
695 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
696 key.a = i + 1;
697 key.b = i + 2;
698 key.c = i + 3;
699 expected_key_a += key.a;
700 expected_key_b += key.b;
701 expected_key_c += key.c;
702
703 for (j = 0; j < bpf_num_possible_cpus(); j++) {
704 *(__u32 *)(val + j * 8) = i + j;
705 expected_val += i + j;
706 }
707
708 err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
709 if (CHECK(err, "map_update", "map_update failed\n"))
710 goto out;
711 }
712
713 memset(&linfo, 0, sizeof(linfo));
714 linfo.map.map_fd = map_fd;
715 opts.link_info = &linfo;
716 opts.link_info_len = sizeof(linfo);
717 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
718 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
719 goto out;
720
721 iter_fd = bpf_iter_create(bpf_link__fd(link));
722 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
723 goto free_link;
724
725 /* do some tests */
726 while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
727 ;
728 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
729 goto close_iter;
730
731 /* test results */
732 if (CHECK(skel->bss->key_sum_a != expected_key_a,
733 "key_sum_a", "got %u expected %u\n",
734 skel->bss->key_sum_a, expected_key_a))
735 goto close_iter;
736 if (CHECK(skel->bss->key_sum_b != expected_key_b,
737 "key_sum_b", "got %u expected %u\n",
738 skel->bss->key_sum_b, expected_key_b))
739 goto close_iter;
740 if (CHECK(skel->bss->val_sum != expected_val,
741 "val_sum", "got %u expected %u\n",
742 skel->bss->val_sum, expected_val))
743 goto close_iter;
744
745 close_iter:
746 close(iter_fd);
747 free_link:
748 bpf_link__destroy(link);
749 out:
750 bpf_iter_bpf_percpu_hash_map__destroy(skel);
751 }
752
test_bpf_array_map(void)753 static void test_bpf_array_map(void)
754 {
755 __u64 val, expected_val = 0, res_first_val, first_val = 0;
756 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
757 __u32 expected_key = 0, res_first_key;
758 struct bpf_iter_bpf_array_map *skel;
759 union bpf_iter_link_info linfo;
760 int err, i, map_fd, iter_fd;
761 struct bpf_link *link;
762 char buf[64] = {};
763 int len, start;
764
765 skel = bpf_iter_bpf_array_map__open_and_load();
766 if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
767 "skeleton open_and_load failed\n"))
768 return;
769
770 map_fd = bpf_map__fd(skel->maps.arraymap1);
771 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
772 val = i + 4;
773 expected_key += i;
774 expected_val += val;
775
776 if (i == 0)
777 first_val = val;
778
779 err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
780 if (CHECK(err, "map_update", "map_update failed\n"))
781 goto out;
782 }
783
784 memset(&linfo, 0, sizeof(linfo));
785 linfo.map.map_fd = map_fd;
786 opts.link_info = &linfo;
787 opts.link_info_len = sizeof(linfo);
788 link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
789 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
790 goto out;
791
792 iter_fd = bpf_iter_create(bpf_link__fd(link));
793 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
794 goto free_link;
795
796 /* do some tests */
797 start = 0;
798 while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
799 start += len;
800 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
801 goto close_iter;
802
803 /* test results */
804 res_first_key = *(__u32 *)buf;
805 res_first_val = *(__u64 *)(buf + sizeof(__u32));
806 if (CHECK(res_first_key != 0 || res_first_val != first_val,
807 "bpf_seq_write",
808 "seq_write failure: first key %u vs expected 0, "
809 " first value %llu vs expected %llu\n",
810 res_first_key, res_first_val, first_val))
811 goto close_iter;
812
813 if (CHECK(skel->bss->key_sum != expected_key,
814 "key_sum", "got %u expected %u\n",
815 skel->bss->key_sum, expected_key))
816 goto close_iter;
817 if (CHECK(skel->bss->val_sum != expected_val,
818 "val_sum", "got %llu expected %llu\n",
819 skel->bss->val_sum, expected_val))
820 goto close_iter;
821
822 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
823 err = bpf_map_lookup_elem(map_fd, &i, &val);
824 if (CHECK(err, "map_lookup", "map_lookup failed\n"))
825 goto out;
826 if (CHECK(i != val, "invalid_val",
827 "got value %llu expected %u\n", val, i))
828 goto out;
829 }
830
831 close_iter:
832 close(iter_fd);
833 free_link:
834 bpf_link__destroy(link);
835 out:
836 bpf_iter_bpf_array_map__destroy(skel);
837 }
838
test_bpf_percpu_array_map(void)839 static void test_bpf_percpu_array_map(void)
840 {
841 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
842 struct bpf_iter_bpf_percpu_array_map *skel;
843 __u32 expected_key = 0, expected_val = 0;
844 union bpf_iter_link_info linfo;
845 int err, i, j, map_fd, iter_fd;
846 struct bpf_link *link;
847 char buf[64];
848 void *val;
849 int len;
850
851 val = malloc(8 * bpf_num_possible_cpus());
852
853 skel = bpf_iter_bpf_percpu_array_map__open();
854 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
855 "skeleton open failed\n"))
856 return;
857
858 skel->rodata->num_cpus = bpf_num_possible_cpus();
859
860 err = bpf_iter_bpf_percpu_array_map__load(skel);
861 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
862 "skeleton load failed\n"))
863 goto out;
864
865 /* update map values here */
866 map_fd = bpf_map__fd(skel->maps.arraymap1);
867 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
868 expected_key += i;
869
870 for (j = 0; j < bpf_num_possible_cpus(); j++) {
871 *(__u32 *)(val + j * 8) = i + j;
872 expected_val += i + j;
873 }
874
875 err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
876 if (CHECK(err, "map_update", "map_update failed\n"))
877 goto out;
878 }
879
880 memset(&linfo, 0, sizeof(linfo));
881 linfo.map.map_fd = map_fd;
882 opts.link_info = &linfo;
883 opts.link_info_len = sizeof(linfo);
884 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
885 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
886 goto out;
887
888 iter_fd = bpf_iter_create(bpf_link__fd(link));
889 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
890 goto free_link;
891
892 /* do some tests */
893 while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
894 ;
895 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
896 goto close_iter;
897
898 /* test results */
899 if (CHECK(skel->bss->key_sum != expected_key,
900 "key_sum", "got %u expected %u\n",
901 skel->bss->key_sum, expected_key))
902 goto close_iter;
903 if (CHECK(skel->bss->val_sum != expected_val,
904 "val_sum", "got %u expected %u\n",
905 skel->bss->val_sum, expected_val))
906 goto close_iter;
907
908 close_iter:
909 close(iter_fd);
910 free_link:
911 bpf_link__destroy(link);
912 out:
913 bpf_iter_bpf_percpu_array_map__destroy(skel);
914 }
915
/* Verify iteration of a sk_storage map: create three IPv6 sockets with
 * storage values 1..3 and check the BPF program saw all of them and
 * summed their values correctly.
 */
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
			goto out;

		val = i + 1;
		expected_val += val;

		/* sk_storage is keyed by socket fd */
		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* drain the iterator; counters are accumulated by the BPF program */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
		  "ipv6_sk_count", "got %u expected %u\n",
		  skel->bss->ipv6_sk_count, num_sockets))
		goto close_iter;

	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}
988
/* bpf_iter_test_kern5 accesses its read-only buffer out of bounds;
 * attaching the iterator is expected to fail (presumably rejected at
 * attach time -- see progs/bpf_iter_test_kern5.c), so a successful
 * attach is the error case here.
 */
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
		/* unexpected success: still release the link */
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}
1011
/* bpf_iter_test_kern6 uses a negative buffer offset; open_and_load is
 * expected to fail, so a non-NULL skeleton is the error case here.
 */
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel = bpf_iter_test_kern6__open_and_load();

	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpected success\n"))
		bpf_iter_test_kern6__destroy(skel);
}
1021
/* Entry point: register each bpf_iter scenario as its own subtest so they
 * can be selected or skipped individually by the test_progs runner.
 */
void test_bpf_iter(void)
{
	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task"))
		test_task();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
}
1075