1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3 */
4 #define _GNU_SOURCE
5 #include "test_progs.h"
6 #include "testing_helpers.h"
7 #include "cgroup_helpers.h"
8 #include <argp.h>
9 #include <pthread.h>
10 #include <sched.h>
11 #include <signal.h>
12 #include <string.h>
13 #include <sys/sysinfo.h> /* get_nprocs */
14 #include <netinet/in.h>
15 #include <sys/select.h>
16 #include <sys/socket.h>
17 #include <sys/un.h>
18 #include <bpf/btf.h>
19 #include <time.h>
20 #include "json_writer.h"
21
22 #include "network_helpers.h"
23
24 /* backtrace() and backtrace_symbols_fd() are glibc specific,
25 * use header file when glibc is available and provide stub
26 * implementations when another libc implementation is used.
27 */
28 #ifdef __GLIBC__
29 #include <execinfo.h> /* backtrace */
30 #else
31 __weak int backtrace(void **buffer, int size)
32 {
33 return 0;
34 }
35
36 __weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
37 {
38 dprintf(fd, "<backtrace not supported>\n");
39 }
40 #endif /*__GLIBC__ */
41
42 int env_verbosity = 0;
43
44 static bool verbose(void)
45 {
46 return env.verbosity > VERBOSE_NONE;
47 }
48
49 static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
50 {
51 #ifdef __GLIBC__
52 if (verbose() && env.worker_id == -1) {
53 /* nothing to do, output to stdout by default */
54 return;
55 }
56
57 fflush(stdout);
58 fflush(stderr);
59
60 stdout = open_memstream(log_buf, log_cnt);
61 if (!stdout) {
62 stdout = env.stdout_saved;
63 perror("open_memstream");
64 return;
65 }
66
67 if (env.subtest_state)
68 env.subtest_state->stdout_saved = stdout;
69 else
70 env.test_state->stdout_saved = stdout;
71
72 stderr = stdout;
73 #endif
74 }
75
76 static void stdio_hijack(char **log_buf, size_t *log_cnt)
77 {
78 #ifdef __GLIBC__
79 if (verbose() && env.worker_id == -1) {
80 /* nothing to do, output to stdout by default */
81 return;
82 }
83
84 env.stdout_saved = stdout;
85 env.stderr_saved = stderr;
86
87 stdio_hijack_init(log_buf, log_cnt);
88 #endif
89 }
90
91 static pthread_mutex_t stdout_lock = PTHREAD_MUTEX_INITIALIZER;
92
93 static void stdio_restore(void)
94 {
95 #ifdef __GLIBC__
96 if (verbose() && env.worker_id == -1) {
97 /* nothing to do, output to stdout by default */
98 return;
99 }
100
101 fflush(stdout);
102
103 pthread_mutex_lock(&stdout_lock);
104
105 if (env.subtest_state) {
106 if (env.subtest_state->stdout_saved)
107 fclose(env.subtest_state->stdout_saved);
108 env.subtest_state->stdout_saved = NULL;
109 stdout = env.test_state->stdout_saved;
110 stderr = env.test_state->stdout_saved;
111 } else {
112 if (env.test_state->stdout_saved)
113 fclose(env.test_state->stdout_saved);
114 env.test_state->stdout_saved = NULL;
115 stdout = env.stdout_saved;
116 stderr = env.stderr_saved;
117 }
118
119 pthread_mutex_unlock(&stdout_lock);
120 #endif
121 }
122
123 static int traffic_monitor_print_fn(const char *format, va_list args)
124 {
125 pthread_mutex_lock(&stdout_lock);
126 vfprintf(stdout, format, args);
127 pthread_mutex_unlock(&stdout_lock);
128
129 return 0;
130 }
131
132 /* Adapted from perf/util/string.c */
133 static bool glob_match(const char *str, const char *pat)
134 {
135 while (*str && *pat && *pat != '*') {
136 if (*str != *pat)
137 return false;
138 str++;
139 pat++;
140 }
141 /* Check wild card */
142 if (*pat == '*') {
143 while (*pat == '*')
144 pat++;
145 if (!*pat) /* Tail wild card matches all */
146 return true;
147 while (*str)
148 if (glob_match(str++, pat))
149 return true;
150 }
151 return !*str && !*pat;
152 }
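/* Illustrative behavior of the matcher above (example comment, not part of
 * the original file). Only '*' is special; everything else, including '?',
 * must match literally:
 *
 *	glob_match("send_signal_sched_switch", "send_signal*")  -> true
 *	glob_match("send_signal_sched_switch", "send_signal")   -> false
 *	glob_match("bpf_cookie", "*cookie")                      -> true
 */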
153
154 #define EXIT_NO_TEST 2
155 #define EXIT_ERR_SETUP_INFRA 3
156
157 /* defined in test_progs.h */
158 struct test_env env = {};
159
160 struct prog_test_def {
161 const char *test_name;
162 int test_num;
163 void (*run_test)(void);
164 void (*run_serial_test)(void);
165 bool should_run;
166 bool need_cgroup_cleanup;
167 bool should_tmon;
168 };
169
170 /* Override C runtime library's usleep() implementation to ensure nanosleep()
171 * is always called. usleep() is frequently used in selftests to
172 * trigger kprobes and tracepoints.
173 */
174 int usleep(useconds_t usec)
175 {
176 struct timespec ts = {
177 .tv_sec = usec / 1000000,
178 .tv_nsec = (usec % 1000000) * 1000,
179 };
180
181 return syscall(__NR_nanosleep, &ts, NULL);
182 }
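/* Illustrative use of the override above (sketch only; the skeleton name is
 * a placeholder): a test that attaches a kprobe to the nanosleep syscall can
 * simply call usleep() as its trigger and does not depend on how libc
 * implements the delay:
 *
 *	skel = example_kprobe__open_and_load();
 *	...
 *	usleep(1);	// always enters the kernel via __NR_nanosleep
 */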
183
184 /* Watchdog timer is started by watchdog_start() and stopped by watchdog_stop().
185 * If the timer is active for longer than env.secs_till_notify,
186 * it prints the name of the current test to stderr.
187 * If the timer is active for longer than env.secs_till_kill,
188 * it kills the thread executing the test by sending it a SIGSEGV signal.
189 */
190 static void watchdog_timer_func(union sigval sigval)
191 {
192 struct itimerspec timeout = {};
193 char test_name[256];
194 int err;
195
196 if (env.subtest_state)
197 snprintf(test_name, sizeof(test_name), "%s/%s",
198 env.test->test_name, env.subtest_state->name);
199 else
200 snprintf(test_name, sizeof(test_name), "%s",
201 env.test->test_name);
202
203 switch (env.watchdog_state) {
204 case WD_NOTIFY:
205 fprintf(env.stderr_saved, "WATCHDOG: test case %s executes for %d seconds...\n",
206 test_name, env.secs_till_notify);
207 timeout.it_value.tv_sec = env.secs_till_kill - env.secs_till_notify;
208 env.watchdog_state = WD_KILL;
209 err = timer_settime(env.watchdog, 0, &timeout, NULL);
210 if (err)
211 fprintf(env.stderr_saved, "Failed to arm watchdog timer\n");
212 break;
213 case WD_KILL:
214 fprintf(env.stderr_saved,
215 "WATCHDOG: test case %s executes for %d seconds, terminating with SIGSEGV\n",
216 test_name, env.secs_till_kill);
217 pthread_kill(env.main_thread, SIGSEGV);
218 break;
219 }
220 }
221
222 static void watchdog_start(void)
223 {
224 struct itimerspec timeout = {};
225 int err;
226
227 if (env.secs_till_kill == 0)
228 return;
229 if (env.secs_till_notify > 0) {
230 env.watchdog_state = WD_NOTIFY;
231 timeout.it_value.tv_sec = env.secs_till_notify;
232 } else {
233 env.watchdog_state = WD_KILL;
234 timeout.it_value.tv_sec = env.secs_till_kill;
235 }
236 err = timer_settime(env.watchdog, 0, &timeout, NULL);
237 if (err)
238 fprintf(env.stderr_saved, "Failed to start watchdog timer\n");
239 }
240
241 static void watchdog_stop(void)
242 {
243 struct itimerspec timeout = {};
244 int err;
245
246 env.watchdog_state = WD_NOTIFY;
247 err = timer_settime(env.watchdog, 0, &timeout, NULL);
248 if (err)
249 fprintf(env.stderr_saved, "Failed to stop watchdog timer\n");
250 }
251
252 static void watchdog_init(void)
253 {
254 struct sigevent watchdog_sev = {
255 .sigev_notify = SIGEV_THREAD,
256 .sigev_notify_function = watchdog_timer_func,
257 };
258 int err;
259
260 env.main_thread = pthread_self();
261 err = timer_create(CLOCK_MONOTONIC, &watchdog_sev, &env.watchdog);
262 if (err)
263 fprintf(stderr, "Failed to initialize watchdog timer\n");
264 }
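/* Worked example of the two-stage watchdog (using the defaults set in
 * main(): secs_till_notify = 10, secs_till_kill = 120). watchdog_start()
 * arms a 10 s timer in WD_NOTIFY state; if it fires, the test name is
 * printed and the timer is re-armed for the remaining 120 - 10 = 110 s in
 * WD_KILL state; if that fires as well, the main thread receives SIGSEGV
 * and crash_handler() dumps the test log and a backtrace.
 */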
265
266 static bool should_run(struct test_selector *sel, int num, const char *name)
267 {
268 int i;
269
270 for (i = 0; i < sel->blacklist.cnt; i++) {
271 if (glob_match(name, sel->blacklist.tests[i].name) &&
272 !sel->blacklist.tests[i].subtest_cnt)
273 return false;
274 }
275
276 for (i = 0; i < sel->whitelist.cnt; i++) {
277 if (glob_match(name, sel->whitelist.tests[i].name))
278 return true;
279 }
280
281 if (!sel->whitelist.cnt && !sel->num_set)
282 return true;
283
284 return num < sel->num_set_len && sel->num_set[num];
285 }
286
287 static bool match_subtest(struct test_filter_set *filter,
288 const char *test_name,
289 const char *subtest_name)
290 {
291 int i, j;
292
293 for (i = 0; i < filter->cnt; i++) {
294 if (glob_match(test_name, filter->tests[i].name)) {
295 if (!filter->tests[i].subtest_cnt)
296 return true;
297
298 for (j = 0; j < filter->tests[i].subtest_cnt; j++) {
299 if (glob_match(subtest_name,
300 filter->tests[i].subtests[j]))
301 return true;
302 }
303 }
304 }
305
306 return false;
307 }
308
309 static bool should_run_subtest(struct test_selector *sel,
310 struct test_selector *subtest_sel,
311 int subtest_num,
312 const char *test_name,
313 const char *subtest_name)
314 {
315 if (match_subtest(&sel->blacklist, test_name, subtest_name))
316 return false;
317
318 if (match_subtest(&sel->whitelist, test_name, subtest_name))
319 return true;
320
321 if (!sel->whitelist.cnt && !subtest_sel->num_set)
322 return true;
323
324 return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
325 }
326
327 static bool should_tmon(struct test_selector *sel, const char *name)
328 {
329 int i;
330
331 for (i = 0; i < sel->whitelist.cnt; i++) {
332 if (glob_match(name, sel->whitelist.tests[i].name) &&
333 !sel->whitelist.tests[i].subtest_cnt)
334 return true;
335 }
336
337 return false;
338 }
339
340 static char *test_result(bool failed, bool skipped)
341 {
342 return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
343 }
344
345 #define TEST_NUM_WIDTH 7
346
347 static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
348 {
349 int skipped_cnt = test_state->skip_cnt;
350 int subtests_cnt = test_state->subtest_num;
351
352 fprintf(env.stdout_saved, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
353 if (test_state->error_cnt)
354 fprintf(env.stdout_saved, "FAIL");
355 else if (!skipped_cnt)
356 fprintf(env.stdout_saved, "OK");
357 else if (skipped_cnt == subtests_cnt || !subtests_cnt)
358 fprintf(env.stdout_saved, "SKIP");
359 else
360 fprintf(env.stdout_saved, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
361
362 fprintf(env.stdout_saved, "\n");
363 }
364
365 static void print_test_log(char *log_buf, size_t log_cnt)
366 {
367 log_buf[log_cnt] = '\0';
368 fprintf(env.stdout_saved, "%s", log_buf);
369 if (log_buf[log_cnt - 1] != '\n')
370 fprintf(env.stdout_saved, "\n");
371 }
372
373 static void print_subtest_name(int test_num, int subtest_num,
374 const char *test_name, char *subtest_name,
375 char *result)
376 {
377 char test_num_str[32];
378
379 snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
380
381 fprintf(env.stdout_saved, "#%-*s %s/%s",
382 TEST_NUM_WIDTH, test_num_str,
383 test_name, subtest_name);
384
385 if (result)
386 fprintf(env.stdout_saved, ":%s", result);
387
388 fprintf(env.stdout_saved, "\n");
389 }
390
391 static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
392 {
393 /* open_memstream (from stdio_hijack_init) ensures that log_buf is terminated by a
394 * null byte. Yet in parallel mode, log_buf will be NULL if there is no message.
395 */
396 if (log_cnt) {
397 jsonw_string_field(w, "message", log_buf);
398 } else {
399 jsonw_string_field(w, "message", "");
400 }
401 }
402
403 static void dump_test_log(const struct prog_test_def *test,
404 const struct test_state *test_state,
405 bool skip_ok_subtests,
406 bool par_exec_result,
407 json_writer_t *w)
408 {
409 bool test_failed = test_state->error_cnt > 0;
410 bool force_log = test_state->force_log;
411 bool print_test = verbose() || force_log || test_failed;
412 int i;
413 struct subtest_state *subtest_state;
414 bool subtest_failed;
415 bool subtest_filtered;
416 bool print_subtest;
417
418 /* we do not print anything in the worker thread */
419 if (env.worker_id != -1)
420 return;
421
422 /* there is nothing to print when verbose log is used and execution
423 * is not in parallel mode
424 */
425 if (verbose() && !par_exec_result)
426 return;
427
428 if (test_state->log_cnt && print_test)
429 print_test_log(test_state->log_buf, test_state->log_cnt);
430
431 if (w && print_test) {
432 jsonw_start_object(w);
433 jsonw_string_field(w, "name", test->test_name);
434 jsonw_uint_field(w, "number", test->test_num);
435 jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
436 jsonw_bool_field(w, "failed", test_failed);
437 jsonw_name(w, "subtests");
438 jsonw_start_array(w);
439 }
440
441 for (i = 0; i < test_state->subtest_num; i++) {
442 subtest_state = &test_state->subtest_states[i];
443 subtest_failed = subtest_state->error_cnt;
444 subtest_filtered = subtest_state->filtered;
445 print_subtest = verbose() || force_log || subtest_failed;
446
447 if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
448 continue;
449
450 if (subtest_state->log_cnt && print_subtest) {
451 print_test_log(subtest_state->log_buf,
452 subtest_state->log_cnt);
453 }
454
455 print_subtest_name(test->test_num, i + 1,
456 test->test_name, subtest_state->name,
457 test_result(subtest_state->error_cnt,
458 subtest_state->skipped));
459
460 if (w && print_subtest) {
461 jsonw_start_object(w);
462 jsonw_string_field(w, "name", subtest_state->name);
463 jsonw_uint_field(w, "number", i+1);
464 jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
465 jsonw_bool_field(w, "failed", subtest_failed);
466 jsonw_end_object(w);
467 }
468 }
469
470 if (w && print_test) {
471 jsonw_end_array(w);
472 jsonw_end_object(w);
473 }
474
475 print_test_result(test, test_state);
476 }
477
478 /* A bunch of tests set custom affinity per-thread and/or per-process. Reset
479 * it after each test/sub-test.
480 */
481 static void reset_affinity(void)
482 {
483 cpu_set_t cpuset;
484 int i, err;
485
486 CPU_ZERO(&cpuset);
487 for (i = 0; i < env.nr_cpus; i++)
488 CPU_SET(i, &cpuset);
489
490 err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
491 if (err < 0) {
492 fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
493 exit(EXIT_ERR_SETUP_INFRA);
494 }
495 err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
496 if (err < 0) {
497 fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
498 exit(EXIT_ERR_SETUP_INFRA);
499 }
500 }
501
502 static void save_netns(void)
503 {
504 env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
505 if (env.saved_netns_fd == -1) {
506 perror("open(/proc/self/ns/net)");
507 exit(EXIT_ERR_SETUP_INFRA);
508 }
509 }
510
511 static void restore_netns(void)
512 {
513 if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
514 perror("setns(CLONE_NEWNS)");
515 exit(EXIT_ERR_SETUP_INFRA);
516 }
517 }
518
519 void test__end_subtest(void)
520 {
521 struct prog_test_def *test = env.test;
522 struct test_state *test_state = env.test_state;
523 struct subtest_state *subtest_state = env.subtest_state;
524
525 if (subtest_state->error_cnt) {
526 test_state->error_cnt++;
527 } else {
528 if (!subtest_state->skipped)
529 test_state->sub_succ_cnt++;
530 else
531 test_state->skip_cnt++;
532 }
533
534 if (verbose() && !env.workers)
535 print_subtest_name(test->test_num, test_state->subtest_num,
536 test->test_name, subtest_state->name,
537 test_result(subtest_state->error_cnt,
538 subtest_state->skipped));
539
540 stdio_restore();
541
542 env.subtest_state = NULL;
543 }
544
545 bool test__start_subtest(const char *subtest_name)
546 {
547 struct prog_test_def *test = env.test;
548 struct test_state *state = env.test_state;
549 struct subtest_state *subtest_state;
550 size_t sub_state_size = sizeof(*subtest_state);
551
552 if (env.subtest_state)
553 test__end_subtest();
554
555 state->subtest_num++;
556 state->subtest_states =
557 realloc(state->subtest_states,
558 state->subtest_num * sub_state_size);
559 if (!state->subtest_states) {
560 fprintf(stderr, "Not enough memory to allocate subtest result\n");
561 return false;
562 }
563
564 subtest_state = &state->subtest_states[state->subtest_num - 1];
565
566 memset(subtest_state, 0, sub_state_size);
567
568 if (!subtest_name || !subtest_name[0]) {
569 fprintf(env.stderr_saved,
570 "Subtest #%d didn't provide sub-test name!\n",
571 state->subtest_num);
572 return false;
573 }
574
575 subtest_state->name = strdup(subtest_name);
576 if (!subtest_state->name) {
577 fprintf(env.stderr_saved,
578 "Subtest #%d: failed to copy subtest name!\n",
579 state->subtest_num);
580 return false;
581 }
582
583 if (!should_run_subtest(&env.test_selector,
584 &env.subtest_selector,
585 state->subtest_num,
586 test->test_name,
587 subtest_name)) {
588 subtest_state->filtered = true;
589 return false;
590 }
591
592 subtest_state->should_tmon = match_subtest(&env.tmon_selector.whitelist,
593 test->test_name,
594 subtest_name);
595
596 env.subtest_state = subtest_state;
597 stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
598 watchdog_start();
599
600 return true;
601 }
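/* Typical usage in a test function (illustrative sketch; the names below are
 * hypothetical):
 *
 *	void test_example(void)
 *	{
 *		if (test__start_subtest("first_case"))
 *			run_first_case();
 *		if (test__start_subtest("second_case"))
 *			run_second_case();
 *	}
 *
 * Each call finalizes the previous subtest via test__end_subtest() and
 * returns false when the subtest is filtered out or its setup fails.
 */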
602
603 void test__force_log(void)
604 {
605 env.test_state->force_log = true;
606 }
607
608 void test__skip(void)
609 {
610 if (env.subtest_state)
611 env.subtest_state->skipped = true;
612 else
613 env.test_state->skip_cnt++;
614 }
615
616 void test__fail(void)
617 {
618 if (env.subtest_state)
619 env.subtest_state->error_cnt++;
620 else
621 env.test_state->error_cnt++;
622 }
623
624 int test__join_cgroup(const char *path)
625 {
626 int fd;
627
628 if (!env.test->need_cgroup_cleanup) {
629 if (setup_cgroup_environment()) {
630 fprintf(stderr,
631 "#%d %s: Failed to setup cgroup environment\n",
632 env.test->test_num, env.test->test_name);
633 return -1;
634 }
635
636 env.test->need_cgroup_cleanup = true;
637 }
638
639 fd = create_and_get_cgroup(path);
640 if (fd < 0) {
641 fprintf(stderr,
642 "#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
643 env.test->test_num, env.test->test_name, path, errno);
644 return fd;
645 }
646
647 if (join_cgroup(path)) {
648 fprintf(stderr,
649 "#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
650 env.test->test_num, env.test->test_name, path, errno);
651 return -1;
652 }
653
654 return fd;
655 }
656
657 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
658 {
659 struct bpf_map *map;
660
661 map = bpf_object__find_map_by_name(obj, name);
662 if (!map) {
663 fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
664 test__fail();
665 return -1;
666 }
667 return bpf_map__fd(map);
668 }
669
670 int compare_map_keys(int map1_fd, int map2_fd)
671 {
672 __u32 key, next_key;
673 char val_buf[PERF_MAX_STACK_DEPTH *
674 sizeof(struct bpf_stack_build_id)];
675 int err;
676
677 err = bpf_map_get_next_key(map1_fd, NULL, &key);
678 if (err)
679 return err;
680 err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
681 if (err)
682 return err;
683
684 while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
685 err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
686 if (err)
687 return err;
688
689 key = next_key;
690 }
691 if (errno != ENOENT)
692 return -1;
693
694 return 0;
695 }
696
697 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
698 {
699 __u32 key, next_key, *cur_key_p, *next_key_p;
700 char *val_buf1, *val_buf2;
701 int i, err = 0;
702
703 val_buf1 = malloc(stack_trace_len);
704 val_buf2 = malloc(stack_trace_len);
705 cur_key_p = NULL;
706 next_key_p = &key;
707 while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
708 err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
709 if (err)
710 goto out;
711 err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
712 if (err)
713 goto out;
714 for (i = 0; i < stack_trace_len; i++) {
715 if (val_buf1[i] != val_buf2[i]) {
716 err = -1;
717 goto out;
718 }
719 }
720 key = *next_key_p;
721 cur_key_p = &key;
722 next_key_p = &next_key;
723 }
724 if (errno != ENOENT)
725 err = -1;
726
727 out:
728 free(val_buf1);
729 free(val_buf2);
730 return err;
731 }
732
733 struct netns_obj {
734 char *nsname;
735 struct tmonitor_ctx *tmon;
736 struct nstoken *nstoken;
737 };
738
739 /* Create a new network namespace with the given name.
740 *
741 * Create a new network namespace and set the network namespace of the
742 * current process to the new network namespace if the argument "open" is
743 * true. This function should be paired with netns_free() to release the
744 * resource and delete the network namespace.
745 *
746 * It also implements the functionality of the "-m" option by starting a
747 * traffic monitor in the background to capture packets in this network
748 * namespace if the current test or subtest matches the pattern.
749 *
750 * nsname: the name of the network namespace to create.
751 * open: open the network namespace if true.
752 *
753 * Return: the network namespace object on success, NULL on failure.
754 */
755 struct netns_obj *netns_new(const char *nsname, bool open)
756 {
757 struct netns_obj *netns_obj = malloc(sizeof(*netns_obj));
758 const char *test_name, *subtest_name;
759 int r;
760
761 if (!netns_obj)
762 return NULL;
763 memset(netns_obj, 0, sizeof(*netns_obj));
764
765 netns_obj->nsname = strdup(nsname);
766 if (!netns_obj->nsname)
767 goto fail;
768
769 /* Create the network namespace */
770 r = make_netns(nsname);
771 if (r)
772 goto fail;
773
774 /* Start traffic monitor */
775 if (env.test->should_tmon ||
776 (env.subtest_state && env.subtest_state->should_tmon)) {
777 test_name = env.test->test_name;
778 subtest_name = env.subtest_state ? env.subtest_state->name : NULL;
779 netns_obj->tmon = traffic_monitor_start(nsname, test_name, subtest_name);
780 if (!netns_obj->tmon) {
781 fprintf(stderr, "Failed to start traffic monitor for %s\n", nsname);
782 goto fail;
783 }
784 } else {
785 netns_obj->tmon = NULL;
786 }
787
788 if (open) {
789 netns_obj->nstoken = open_netns(nsname);
790 if (!netns_obj->nstoken)
791 goto fail;
792 }
793
794 return netns_obj;
795 fail:
796 traffic_monitor_stop(netns_obj->tmon);
797 remove_netns(nsname);
798 free(netns_obj->nsname);
799 free(netns_obj);
800 return NULL;
801 }
802
803 /* Delete the network namespace.
804 *
805 * This function should be paired with netns_new() to delete the namespace
806 * created by netns_new().
807 */
808 void netns_free(struct netns_obj *netns_obj)
809 {
810 if (!netns_obj)
811 return;
812 traffic_monitor_stop(netns_obj->tmon);
813 close_netns(netns_obj->nstoken);
814 remove_netns(netns_obj->nsname);
815 free(netns_obj->nsname);
816 free(netns_obj);
817 }
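/* Minimal usage sketch for netns_new()/netns_free() (illustrative; the
 * namespace name is made up):
 *
 *	struct netns_obj *ns;
 *
 *	ns = netns_new("ns_example", true);
 *	if (!ASSERT_OK_PTR(ns, "netns_new"))
 *		return;
 *	// ... run networking code inside the new namespace ...
 *	netns_free(ns);
 *
 * With open == true the caller is moved into the namespace; netns_free()
 * switches back via close_netns() before deleting it.
 */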
818
819 /* extern declarations for test funcs */
820 #define DEFINE_TEST(name) \
821 extern void test_##name(void) __weak; \
822 extern void serial_test_##name(void) __weak;
823 #include <prog_tests/tests.h>
824 #undef DEFINE_TEST
825
826 static struct prog_test_def prog_test_defs[] = {
827 #define DEFINE_TEST(name) { \
828 .test_name = #name, \
829 .run_test = &test_##name, \
830 .run_serial_test = &serial_test_##name, \
831 },
832 #include <prog_tests/tests.h>
833 #undef DEFINE_TEST
834 };
835
836 static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
837
838 static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
839
840 const char *argp_program_version = "test_progs 0.1";
841 const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
842 static const char argp_program_doc[] =
843 "BPF selftests test runner\v"
844 "Options accepting the NAMES parameter take either a comma-separated list\n"
845 "of test names, or a filename prefixed with @. The file contains one name\n"
846 "(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
847 "\n"
848 "These options can be passed repeatedly to read multiple files.\n";
849
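/* Example invocations of the NAMES options described above (illustrative
 * only; the test and file names are hypothetical):
 *
 *	./test_progs -t btf,some_test/subtest1      # comma-separated list
 *	./test_progs -a 'verif_*' -d 'verif_scale*' # glob allow/deny lists
 *	./test_progs -t @my_tests.txt               # one pattern per line
 */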
850 enum ARG_KEYS {
851 ARG_TEST_NUM = 'n',
852 ARG_TEST_NAME = 't',
853 ARG_TEST_NAME_BLACKLIST = 'b',
854 ARG_VERIFIER_STATS = 's',
855 ARG_VERBOSE = 'v',
856 ARG_GET_TEST_CNT = 'c',
857 ARG_LIST_TEST_NAMES = 'l',
858 ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
859 ARG_TEST_NAME_GLOB_DENYLIST = 'd',
860 ARG_NUM_WORKERS = 'j',
861 ARG_DEBUG = -1,
862 ARG_JSON_SUMMARY = 'J',
863 ARG_TRAFFIC_MONITOR = 'm',
864 ARG_WATCHDOG_TIMEOUT = 'w',
865 };
866
867 static const struct argp_option opts[] = {
868 { "num", ARG_TEST_NUM, "NUM", 0,
869 "Run test number NUM only " },
870 { "name", ARG_TEST_NAME, "NAMES", 0,
871 "Run tests with names containing any string from NAMES list" },
872 { "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
873 "Don't run tests with names containing any string from NAMES list" },
874 { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
875 "Output verifier statistics", },
876 { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
877 "Verbose output (use -vv or -vvv for progressively verbose output)" },
878 { "count", ARG_GET_TEST_CNT, NULL, 0,
879 "Get number of selected top-level tests " },
880 { "list", ARG_LIST_TEST_NAMES, NULL, 0,
881 "List test names that would run (without running them) " },
882 { "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
883 "Run tests with name matching the pattern (supports '*' wildcard)." },
884 { "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
885 "Don't run tests with name matching the pattern (supports '*' wildcard)." },
886 { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
887 "Number of workers to run in parallel, default to number of cpus." },
888 { "debug", ARG_DEBUG, NULL, 0,
889 "print extra debug information for test_progs." },
890 { "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
891 #ifdef TRAFFIC_MONITOR
892 { "traffic-monitor", ARG_TRAFFIC_MONITOR, "NAMES", 0,
893 "Monitor network traffic of tests with name matching the pattern (supports '*' wildcard)." },
894 #endif
895 { "watchdog-timeout", ARG_WATCHDOG_TIMEOUT, "SECONDS", 0,
896 "Kill the process if tests are not making progress for specified number of seconds." },
897 {},
898 };
899
900 static FILE *libbpf_capture_stream;
901
902 static struct {
903 char *buf;
904 size_t buf_sz;
905 } libbpf_output_capture;
906
907 /* Creates a global memstream capturing INFO and WARN level output
908 * passed to libbpf_print_fn.
909 * Returns 0 on success, negative value on failure.
910 * On failure, the error description is printed using PRINT_FAIL and
911 * the current test case is marked as failed.
912 */
913 int start_libbpf_log_capture(void)
914 {
915 if (libbpf_capture_stream) {
916 PRINT_FAIL("%s: libbpf_capture_stream != NULL\n", __func__);
917 return -EINVAL;
918 }
919
920 libbpf_capture_stream = open_memstream(&libbpf_output_capture.buf,
921 &libbpf_output_capture.buf_sz);
922 if (!libbpf_capture_stream) {
923 PRINT_FAIL("%s: open_memstream failed errno=%d\n", __func__, errno);
924 return -EINVAL;
925 }
926
927 return 0;
928 }
929
930 /* Destroys global memstream created by start_libbpf_log_capture().
931 * Returns a pointer to captured data which has to be freed.
932 * Returned buffer is null terminated.
933 */
934 char *stop_libbpf_log_capture(void)
935 {
936 char *buf;
937
938 if (!libbpf_capture_stream)
939 return NULL;
940
941 fputc(0, libbpf_capture_stream);
942 fclose(libbpf_capture_stream);
943 libbpf_capture_stream = NULL;
944 /* get 'buf' after fclose(), see open_memstream() documentation */
945 buf = libbpf_output_capture.buf;
946 memset(&libbpf_output_capture, 0, sizeof(libbpf_output_capture));
947 return buf;
948 }
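/* Illustrative usage of the two capture helpers above (the skeleton name is
 * a placeholder):
 *
 *	char *log;
 *
 *	start_libbpf_log_capture();
 *	skel = example__open_and_load();
 *	log = stop_libbpf_log_capture();
 *	if (!skel)
 *		printf("libbpf output:\n%s\n", log ?: "");
 *	free(log);
 */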
949
950 static int libbpf_print_fn(enum libbpf_print_level level,
951 const char *format, va_list args)
952 {
953 if (libbpf_capture_stream && level != LIBBPF_DEBUG) {
954 va_list args2;
955
956 va_copy(args2, args);
957 vfprintf(libbpf_capture_stream, format, args2);
958 va_end(args2);
959 }
960
961 if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
962 return 0;
963
964 vfprintf(stdout, format, args);
965 return 0;
966 }
967
968 static void free_test_filter_set(const struct test_filter_set *set)
969 {
970 int i, j;
971
972 if (!set)
973 return;
974
975 for (i = 0; i < set->cnt; i++) {
976 free((void *)set->tests[i].name);
977 for (j = 0; j < set->tests[i].subtest_cnt; j++)
978 free((void *)set->tests[i].subtests[j]);
979
980 free((void *)set->tests[i].subtests);
981 }
982
983 free((void *)set->tests);
984 }
985
986 static void free_test_selector(struct test_selector *test_selector)
987 {
988 free_test_filter_set(&test_selector->blacklist);
989 free_test_filter_set(&test_selector->whitelist);
990 free(test_selector->num_set);
991 }
992
993 extern int extra_prog_load_log_flags;
994
995 static error_t parse_arg(int key, char *arg, struct argp_state *state)
996 {
997 struct test_env *env = state->input;
998 int err = 0;
999
1000 switch (key) {
1001 case ARG_TEST_NUM: {
1002 char *subtest_str = strchr(arg, '/');
1003
1004 if (subtest_str) {
1005 *subtest_str = '\0';
1006 if (parse_num_list(subtest_str + 1,
1007 &env->subtest_selector.num_set,
1008 &env->subtest_selector.num_set_len)) {
1009 fprintf(stderr,
1010 "Failed to parse subtest numbers.\n");
1011 return -EINVAL;
1012 }
1013 }
1014 if (parse_num_list(arg, &env->test_selector.num_set,
1015 &env->test_selector.num_set_len)) {
1016 fprintf(stderr, "Failed to parse test numbers.\n");
1017 return -EINVAL;
1018 }
1019 break;
1020 }
1021 case ARG_TEST_NAME_GLOB_ALLOWLIST:
1022 case ARG_TEST_NAME: {
1023 if (arg[0] == '@')
1024 err = parse_test_list_file(arg + 1,
1025 &env->test_selector.whitelist,
1026 key == ARG_TEST_NAME_GLOB_ALLOWLIST);
1027 else
1028 err = parse_test_list(arg,
1029 &env->test_selector.whitelist,
1030 key == ARG_TEST_NAME_GLOB_ALLOWLIST);
1031
1032 break;
1033 }
1034 case ARG_TEST_NAME_GLOB_DENYLIST:
1035 case ARG_TEST_NAME_BLACKLIST: {
1036 if (arg[0] == '@')
1037 err = parse_test_list_file(arg + 1,
1038 &env->test_selector.blacklist,
1039 key == ARG_TEST_NAME_GLOB_DENYLIST);
1040 else
1041 err = parse_test_list(arg,
1042 &env->test_selector.blacklist,
1043 key == ARG_TEST_NAME_GLOB_DENYLIST);
1044
1045 break;
1046 }
1047 case ARG_VERIFIER_STATS:
1048 env->verifier_stats = true;
1049 break;
1050 case ARG_VERBOSE:
1051 env->verbosity = VERBOSE_NORMAL;
1052 if (arg) {
1053 if (strcmp(arg, "v") == 0) {
1054 env->verbosity = VERBOSE_VERY;
1055 extra_prog_load_log_flags = 1;
1056 } else if (strcmp(arg, "vv") == 0) {
1057 env->verbosity = VERBOSE_SUPER;
1058 extra_prog_load_log_flags = 2;
1059 } else {
1060 fprintf(stderr,
1061 "Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
1062 arg);
1063 return -EINVAL;
1064 }
1065 }
1066 env_verbosity = env->verbosity;
1067
1068 if (verbose()) {
1069 if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
1070 fprintf(stderr,
1071 "Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
1072 errno);
1073 return -EINVAL;
1074 }
1075 }
1076
1077 break;
1078 case ARG_GET_TEST_CNT:
1079 env->get_test_cnt = true;
1080 break;
1081 case ARG_LIST_TEST_NAMES:
1082 env->list_test_names = true;
1083 break;
1084 case ARG_NUM_WORKERS:
1085 if (arg) {
1086 env->workers = atoi(arg);
1087 if (!env->workers) {
1088 fprintf(stderr, "Invalid number of worker: %s.", arg);
1089 return -EINVAL;
1090 }
1091 } else {
1092 env->workers = get_nprocs();
1093 }
1094 break;
1095 case ARG_DEBUG:
1096 env->debug = true;
1097 break;
1098 case ARG_JSON_SUMMARY:
1099 env->json = fopen(arg, "w");
1100 if (env->json == NULL) {
1101 perror("Failed to open json summary file");
1102 return -errno;
1103 }
1104 break;
1105 case ARGP_KEY_ARG:
1106 argp_usage(state);
1107 break;
1108 case ARGP_KEY_END:
1109 break;
1110 #ifdef TRAFFIC_MONITOR
1111 case ARG_TRAFFIC_MONITOR:
1112 if (arg[0] == '@')
1113 err = parse_test_list_file(arg + 1,
1114 &env->tmon_selector.whitelist,
1115 true);
1116 else
1117 err = parse_test_list(arg,
1118 &env->tmon_selector.whitelist,
1119 true);
1120 break;
1121 #endif
1122 case ARG_WATCHDOG_TIMEOUT:
1123 env->secs_till_kill = atoi(arg);
1124 if (env->secs_till_kill < 0) {
1125 fprintf(stderr, "Invalid watchdog timeout: %s.\n", arg);
1126 return -EINVAL;
1127 }
1128 if (env->secs_till_kill < env->secs_till_notify) {
1129 env->secs_till_notify = 0;
1130 }
1131 break;
1132 default:
1133 return ARGP_ERR_UNKNOWN;
1134 }
1135 return err;
1136 }
1137
1138 /*
1139 * Determine if test_progs is running as a "flavored" test runner and switch
1140 * into the corresponding sub-directory to load the correct BPF objects.
1141 *
1142 * This is done by looking at the executable name. If it contains a
1143 * "-flavor" suffix, then we are running as a flavored test runner.
1144 */
1145 int cd_flavor_subdir(const char *exec_name)
1146 {
1147 /* General form of argv[0] passed here is:
1148 * some/path/to/test_progs[-flavor], where -flavor part is optional.
1149 * First cut out "test_progs[-flavor]" part, then extract "flavor"
1150 * part, if it's there.
1151 */
1152 const char *flavor = strrchr(exec_name, '/');
1153
1154 if (!flavor)
1155 flavor = exec_name;
1156 else
1157 flavor++;
1158
1159 flavor = strrchr(flavor, '-');
1160 if (!flavor)
1161 return 0;
1162 flavor++;
1163 if (verbose())
1164 fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
1165
1166 return chdir(flavor);
1167 }
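/* For example (illustrative): when the binary is invoked as
 * "./test_progs-no_alu32", the code above extracts "no_alu32" and chdir()s
 * into that subdirectory so the matching BPF objects are loaded; a plain
 * "./test_progs" has no '-' suffix and stays in the current directory.
 */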
1168
1169 int trigger_module_test_read(int read_sz)
1170 {
1171 int fd, err;
1172
1173 fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
1174 err = -errno;
1175 if (!ASSERT_GE(fd, 0, "testmod_file_open"))
1176 return err;
1177
1178 read(fd, NULL, read_sz);
1179 close(fd);
1180
1181 return 0;
1182 }
1183
1184 int trigger_module_test_write(int write_sz)
1185 {
1186 int fd, err;
1187 char *buf = malloc(write_sz);
1188
1189 if (!buf)
1190 return -ENOMEM;
1191
1192 memset(buf, 'a', write_sz);
1193 buf[write_sz-1] = '\0';
1194
1195 fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
1196 err = -errno;
1197 if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
1198 free(buf);
1199 return err;
1200 }
1201
1202 write(fd, buf, write_sz);
1203 close(fd);
1204 free(buf);
1205 return 0;
1206 }
1207
1208 int write_sysctl(const char *sysctl, const char *value)
1209 {
1210 int fd, err, len;
1211
1212 fd = open(sysctl, O_WRONLY);
1213 if (!ASSERT_NEQ(fd, -1, "open sysctl"))
1214 return -1;
1215
1216 len = strlen(value);
1217 err = write(fd, value, len);
1218 close(fd);
1219 if (!ASSERT_EQ(err, len, "write sysctl"))
1220 return -1;
1221
1222 return 0;
1223 }
1224
1225 int get_bpf_max_tramp_links_from(struct btf *btf)
1226 {
1227 const struct btf_enum *e;
1228 const struct btf_type *t;
1229 __u32 i, type_cnt;
1230 const char *name;
1231 __u16 j, vlen;
1232
1233 for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
1234 t = btf__type_by_id(btf, i);
1235 if (!t || !btf_is_enum(t) || t->name_off)
1236 continue;
1237 e = btf_enum(t);
1238 for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
1239 name = btf__str_by_offset(btf, e->name_off);
1240 if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
1241 return e->val;
1242 }
1243 }
1244
1245 return -1;
1246 }
1247
1248 int get_bpf_max_tramp_links(void)
1249 {
1250 struct btf *vmlinux_btf;
1251 int ret;
1252
1253 vmlinux_btf = btf__load_vmlinux_btf();
1254 if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
1255 return -1;
1256 ret = get_bpf_max_tramp_links_from(vmlinux_btf);
1257 btf__free(vmlinux_btf);
1258
1259 return ret;
1260 }
1261
1262 #define MAX_BACKTRACE_SZ 128
1263 void crash_handler(int signum)
1264 {
1265 void *bt[MAX_BACKTRACE_SZ];
1266 size_t sz;
1267
1268 sz = backtrace(bt, ARRAY_SIZE(bt));
1269
1270 fflush(stdout);
1271 stdout = env.stdout_saved;
1272 stderr = env.stderr_saved;
1273
1274 if (env.test) {
1275 env.test_state->error_cnt++;
1276 dump_test_log(env.test, env.test_state, true, false, NULL);
1277 }
1278 if (env.worker_id != -1)
1279 fprintf(stderr, "[%d]: ", env.worker_id);
1280 fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
1281 backtrace_symbols_fd(bt, sz, STDERR_FILENO);
1282 }
1283
1284 void hexdump(const char *prefix, const void *buf, size_t len)
1285 {
1286 for (int i = 0; i < len; i++) {
1287 if (!(i % 16)) {
1288 if (i)
1289 fprintf(stdout, "\n");
1290 fprintf(stdout, "%s", prefix);
1291 }
1292 if (i && !(i % 8) && (i % 16))
1293 fprintf(stdout, "\t");
1294 fprintf(stdout, "%02X ", ((uint8_t *)(buf))[i]);
1295 }
1296 fprintf(stdout, "\n");
1297 }
1298
1299 static void sigint_handler(int signum)
1300 {
1301 int i;
1302
1303 for (i = 0; i < env.workers; i++)
1304 if (env.worker_socks[i] > 0)
1305 close(env.worker_socks[i]);
1306 }
1307
1308 static int current_test_idx;
1309 static pthread_mutex_t current_test_lock;
1310 static pthread_mutex_t stdout_output_lock;
1311
1312 static inline const char *str_msg(const struct msg *msg, char *buf)
1313 {
1314 switch (msg->type) {
1315 case MSG_DO_TEST:
1316 sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
1317 break;
1318 case MSG_TEST_DONE:
1319 sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
1320 msg->test_done.num,
1321 msg->test_done.have_log);
1322 break;
1323 case MSG_SUBTEST_DONE:
1324 sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
1325 msg->subtest_done.num,
1326 msg->subtest_done.have_log);
1327 break;
1328 case MSG_TEST_LOG:
1329 sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
1330 strlen(msg->test_log.log_buf),
1331 msg->test_log.is_last);
1332 break;
1333 case MSG_EXIT:
1334 sprintf(buf, "MSG_EXIT");
1335 break;
1336 default:
1337 sprintf(buf, "UNKNOWN");
1338 break;
1339 }
1340
1341 return buf;
1342 }
1343
1344 static int send_message(int sock, const struct msg *msg)
1345 {
1346 char buf[256];
1347
1348 if (env.debug)
1349 fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
1350 return send(sock, msg, sizeof(*msg), 0);
1351 }
1352
1353 static int recv_message(int sock, struct msg *msg)
1354 {
1355 int ret;
1356 char buf[256];
1357
1358 memset(msg, 0, sizeof(*msg));
1359 ret = recv(sock, msg, sizeof(*msg), 0);
1360 if (ret >= 0) {
1361 if (env.debug)
1362 fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
1363 }
1364 return ret;
1365 }
1366
1367 static bool ns_is_needed(const char *test_name)
1368 {
1369 if (strlen(test_name) < 3)
1370 return false;
1371
1372 return !strncmp(test_name, "ns_", 3);
1373 }
1374
1375 static void run_one_test(int test_num)
1376 {
1377 struct prog_test_def *test = &prog_test_defs[test_num];
1378 struct test_state *state = &test_states[test_num];
1379 struct netns_obj *ns = NULL;
1380
1381 env.test = test;
1382 env.test_state = state;
1383
1384 stdio_hijack(&state->log_buf, &state->log_cnt);
1385
1386 watchdog_start();
1387 if (ns_is_needed(test->test_name))
1388 ns = netns_new(test->test_name, true);
1389 if (test->run_test)
1390 test->run_test();
1391 else if (test->run_serial_test)
1392 test->run_serial_test();
1393 netns_free(ns);
1394 watchdog_stop();
1395
1396 /* ensure last sub-test is finalized properly */
1397 if (env.subtest_state)
1398 test__end_subtest();
1399
1400 state->tested = true;
1401
1402 stdio_restore();
1403
1404 if (verbose() && env.worker_id == -1)
1405 print_test_result(test, state);
1406
1407 reset_affinity();
1408 restore_netns();
1409 if (test->need_cgroup_cleanup)
1410 cleanup_cgroup_environment();
1411
1412 free(stop_libbpf_log_capture());
1413
1414 dump_test_log(test, state, false, false, NULL);
1415 }
1416
1417 struct dispatch_data {
1418 int worker_id;
1419 int sock_fd;
1420 };
1421
1422 static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
1423 {
1424 if (recv_message(sock_fd, msg) < 0)
1425 return 1;
1426
1427 if (msg->type != type) {
1428 printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
1429 return 1;
1430 }
1431
1432 return 0;
1433 }
1434
1435 static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
1436 {
1437 FILE *log_fp = NULL;
1438 int result = 0;
1439
1440 log_fp = open_memstream(log_buf, log_cnt);
1441 if (!log_fp)
1442 return 1;
1443
1444 while (true) {
1445 struct msg msg;
1446
1447 if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
1448 result = 1;
1449 goto out;
1450 }
1451
1452 fprintf(log_fp, "%s", msg.test_log.log_buf);
1453 if (msg.test_log.is_last)
1454 break;
1455 }
1456
1457 out:
1458 fclose(log_fp);
1459 log_fp = NULL;
1460 return result;
1461 }
1462
1463 static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
1464 {
1465 struct msg msg;
1466 struct subtest_state *subtest_state;
1467 int subtest_num = state->subtest_num;
1468
1469 state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
1470
1471 for (int i = 0; i < subtest_num; i++) {
1472 subtest_state = &state->subtest_states[i];
1473
1474 memset(subtest_state, 0, sizeof(*subtest_state));
1475
1476 if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
1477 return 1;
1478
1479 subtest_state->name = strdup(msg.subtest_done.name);
1480 subtest_state->error_cnt = msg.subtest_done.error_cnt;
1481 subtest_state->skipped = msg.subtest_done.skipped;
1482 subtest_state->filtered = msg.subtest_done.filtered;
1483
1484 /* collect all logs */
1485 if (msg.subtest_done.have_log)
1486 if (dispatch_thread_read_log(sock_fd,
1487 &subtest_state->log_buf,
1488 &subtest_state->log_cnt))
1489 return 1;
1490 }
1491
1492 return 0;
1493 }
1494
1495 static void *dispatch_thread(void *ctx)
1496 {
1497 struct dispatch_data *data = ctx;
1498 int sock_fd;
1499
1500 sock_fd = data->sock_fd;
1501
1502 while (true) {
1503 int test_to_run = -1;
1504 struct prog_test_def *test;
1505 struct test_state *state;
1506
1507 /* grab a test */
1508 {
1509 pthread_mutex_lock(&current_test_lock);
1510
1511 if (current_test_idx >= prog_test_cnt) {
1512 pthread_mutex_unlock(&current_test_lock);
1513 goto done;
1514 }
1515
1516 test = &prog_test_defs[current_test_idx];
1517 test_to_run = current_test_idx;
1518 current_test_idx++;
1519
1520 pthread_mutex_unlock(&current_test_lock);
1521 }
1522
1523 if (!test->should_run || test->run_serial_test)
1524 continue;
1525
1526 /* run test through worker */
1527 {
1528 struct msg msg_do_test;
1529
1530 memset(&msg_do_test, 0, sizeof(msg_do_test));
1531 msg_do_test.type = MSG_DO_TEST;
1532 msg_do_test.do_test.num = test_to_run;
1533 if (send_message(sock_fd, &msg_do_test) < 0) {
1534 perror("Fail to send command");
1535 goto done;
1536 }
1537 env.worker_current_test[data->worker_id] = test_to_run;
1538 }
1539
1540 /* wait for test done */
1541 do {
1542 struct msg msg;
1543
1544 if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
1545 goto error;
1546 if (test_to_run != msg.test_done.num)
1547 goto error;
1548
1549 state = &test_states[test_to_run];
1550 state->tested = true;
1551 state->error_cnt = msg.test_done.error_cnt;
1552 state->skip_cnt = msg.test_done.skip_cnt;
1553 state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
1554 state->subtest_num = msg.test_done.subtest_num;
1555
1556 /* collect all logs */
1557 if (msg.test_done.have_log) {
1558 if (dispatch_thread_read_log(sock_fd,
1559 &state->log_buf,
1560 &state->log_cnt))
1561 goto error;
1562 }
1563
1564 /* collect all subtests and subtest logs */
1565 if (!state->subtest_num)
1566 break;
1567
1568 if (dispatch_thread_send_subtests(sock_fd, state))
1569 goto error;
1570 } while (false);
1571
1572 pthread_mutex_lock(&stdout_output_lock);
1573 dump_test_log(test, state, false, true, NULL);
1574 pthread_mutex_unlock(&stdout_output_lock);
1575 } /* while (true) */
1576 error:
1577 if (env.debug)
1578 fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
1579
1580 done:
1581 {
1582 struct msg msg_exit;
1583
1584 msg_exit.type = MSG_EXIT;
1585 if (send_message(sock_fd, &msg_exit) < 0) {
1586 if (env.debug)
1587 fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
1588 data->worker_id, strerror(errno));
1589 }
1590 }
1591 return NULL;
1592 }
1593
1594 static void calculate_summary_and_print_errors(struct test_env *env)
1595 {
1596 int i;
1597 int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
1598 json_writer_t *w = NULL;
1599
1600 for (i = 0; i < prog_test_cnt; i++) {
1601 struct test_state *state = &test_states[i];
1602
1603 if (!state->tested)
1604 continue;
1605
1606 sub_succ_cnt += state->sub_succ_cnt;
1607 skip_cnt += state->skip_cnt;
1608
1609 if (state->error_cnt)
1610 fail_cnt++;
1611 else
1612 succ_cnt++;
1613 }
1614
1615 if (env->json) {
1616 w = jsonw_new(env->json);
1617 if (!w)
1618 fprintf(env->stderr_saved, "Failed to create new JSON stream.");
1619 }
1620
1621 if (w) {
1622 jsonw_start_object(w);
1623 jsonw_uint_field(w, "success", succ_cnt);
1624 jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
1625 jsonw_uint_field(w, "skipped", skip_cnt);
1626 jsonw_uint_field(w, "failed", fail_cnt);
1627 jsonw_name(w, "results");
1628 jsonw_start_array(w);
1629 }
1630
1631 /*
1632 * We only print the error log summary when there are failed tests and
1633 * verbose mode is not enabled. Otherwise, results may be inconsistent.
1634 *
1635 */
1636 if (!verbose() && fail_cnt) {
1637 printf("\nAll error logs:\n");
1638
1639 /* print error logs again */
1640 for (i = 0; i < prog_test_cnt; i++) {
1641 struct prog_test_def *test = &prog_test_defs[i];
1642 struct test_state *state = &test_states[i];
1643
1644 if (!state->tested || !state->error_cnt)
1645 continue;
1646
1647 dump_test_log(test, state, true, true, w);
1648 }
1649 }
1650
1651 if (w) {
1652 jsonw_end_array(w);
1653 jsonw_end_object(w);
1654 jsonw_destroy(&w);
1655 }
1656
1657 if (env->json)
1658 fclose(env->json);
1659
1660 printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
1661 succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
1662
1663 env->succ_cnt = succ_cnt;
1664 env->sub_succ_cnt = sub_succ_cnt;
1665 env->fail_cnt = fail_cnt;
1666 env->skip_cnt = skip_cnt;
1667 }
1668
1669 static void server_main(void)
1670 {
1671 pthread_t *dispatcher_threads;
1672 struct dispatch_data *data;
1673 struct sigaction sigact_int = {
1674 .sa_handler = sigint_handler,
1675 .sa_flags = SA_RESETHAND,
1676 };
1677 int i;
1678
1679 sigaction(SIGINT, &sigact_int, NULL);
1680
1681 dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
1682 data = calloc(sizeof(struct dispatch_data), env.workers);
1683
1684 env.worker_current_test = calloc(sizeof(int), env.workers);
1685 for (i = 0; i < env.workers; i++) {
1686 int rc;
1687
1688 data[i].worker_id = i;
1689 data[i].sock_fd = env.worker_socks[i];
1690 rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
1691 if (rc < 0) {
1692 perror("Failed to launch dispatcher thread");
1693 exit(EXIT_ERR_SETUP_INFRA);
1694 }
1695 }
1696
1697 /* wait for all dispatchers to finish */
1698 for (i = 0; i < env.workers; i++) {
1699 while (true) {
1700 int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);
1701
1702 if (!ret) {
1703 break;
1704 } else if (ret == EBUSY) {
1705 if (env.debug)
1706 fprintf(stderr, "Still waiting for thread %d (test %d).\n",
1707 i, env.worker_current_test[i] + 1);
1708 usleep(1000 * 1000);
1709 continue;
1710 } else {
1711 fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
1712 break;
1713 }
1714 }
1715 }
1716 free(dispatcher_threads);
1717 free(env.worker_current_test);
1718 free(data);
1719
1720 /* run serial tests */
1721 save_netns();
1722
1723 for (int i = 0; i < prog_test_cnt; i++) {
1724 struct prog_test_def *test = &prog_test_defs[i];
1725
1726 if (!test->should_run || !test->run_serial_test)
1727 continue;
1728
1729 run_one_test(i);
1730 }
1731
1732 /* generate summary */
1733 fflush(stderr);
1734 fflush(stdout);
1735
1736 calculate_summary_and_print_errors(&env);
1737
1738 /* reap all workers */
1739 for (i = 0; i < env.workers; i++) {
1740 int wstatus, pid;
1741
1742 pid = waitpid(env.worker_pids[i], &wstatus, 0);
1743 if (pid != env.worker_pids[i])
1744 perror("Unable to reap worker");
1745 }
1746 }
1747
1748 static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
1749 {
1750 char *src;
1751 size_t slen;
1752
1753 src = log_buf;
1754 slen = log_cnt;
1755 while (slen) {
1756 struct msg msg_log;
1757 char *dest;
1758 size_t len;
1759
1760 memset(&msg_log, 0, sizeof(msg_log));
1761 msg_log.type = MSG_TEST_LOG;
1762 dest = msg_log.test_log.log_buf;
1763 len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
1764 memcpy(dest, src, len);
1765
1766 src += len;
1767 slen -= len;
1768 if (!slen)
1769 msg_log.test_log.is_last = true;
1770
1771 assert(send_message(sock, &msg_log) >= 0);
1772 }
1773 }
1774
1775 static void free_subtest_state(struct subtest_state *state)
1776 {
1777 if (state->log_buf) {
1778 free(state->log_buf);
1779 state->log_buf = NULL;
1780 state->log_cnt = 0;
1781 }
1782 free(state->name);
1783 state->name = NULL;
1784 }
1785
1786 static int worker_main_send_subtests(int sock, struct test_state *state)
1787 {
1788 int i, result = 0;
1789 struct msg msg;
1790 struct subtest_state *subtest_state;
1791
1792 memset(&msg, 0, sizeof(msg));
1793 msg.type = MSG_SUBTEST_DONE;
1794
1795 for (i = 0; i < state->subtest_num; i++) {
1796 subtest_state = &state->subtest_states[i];
1797
1798 msg.subtest_done.num = i;
1799
1800 strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
1801
1802 msg.subtest_done.error_cnt = subtest_state->error_cnt;
1803 msg.subtest_done.skipped = subtest_state->skipped;
1804 msg.subtest_done.filtered = subtest_state->filtered;
1805 msg.subtest_done.have_log = false;
1806
1807 if (verbose() || state->force_log || subtest_state->error_cnt) {
1808 if (subtest_state->log_cnt)
1809 msg.subtest_done.have_log = true;
1810 }
1811
1812 if (send_message(sock, &msg) < 0) {
1813 perror("Fail to send message done");
1814 result = 1;
1815 goto out;
1816 }
1817
1818 /* send logs */
1819 if (msg.subtest_done.have_log)
1820 worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);
1821
1822 free_subtest_state(subtest_state);
1823 free(subtest_state->name);
1824 }
1825
1826 out:
1827 for (; i < state->subtest_num; i++)
1828 free_subtest_state(&state->subtest_states[i]);
1829 free(state->subtest_states);
1830 return result;
1831 }
1832
1833 static int worker_main(int sock)
1834 {
1835 save_netns();
1836 watchdog_init();
1837
1838 while (true) {
1839 /* receive command */
1840 struct msg msg;
1841
1842 if (recv_message(sock, &msg) < 0)
1843 goto out;
1844
1845 switch (msg.type) {
1846 case MSG_EXIT:
1847 if (env.debug)
1848 fprintf(stderr, "[%d]: worker exit.\n",
1849 env.worker_id);
1850 goto out;
1851 case MSG_DO_TEST: {
1852 int test_to_run = msg.do_test.num;
1853 struct prog_test_def *test = &prog_test_defs[test_to_run];
1854 struct test_state *state = &test_states[test_to_run];
1855 struct msg msg;
1856
1857 if (env.debug)
1858 fprintf(stderr, "[%d]: #%d:%s running.\n",
1859 env.worker_id,
1860 test_to_run + 1,
1861 test->test_name);
1862
1863 run_one_test(test_to_run);
1864
1865 memset(&msg, 0, sizeof(msg));
1866 msg.type = MSG_TEST_DONE;
1867 msg.test_done.num = test_to_run;
1868 msg.test_done.error_cnt = state->error_cnt;
1869 msg.test_done.skip_cnt = state->skip_cnt;
1870 msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
1871 msg.test_done.subtest_num = state->subtest_num;
1872 msg.test_done.have_log = false;
1873
1874 if (verbose() || state->force_log || state->error_cnt) {
1875 if (state->log_cnt)
1876 msg.test_done.have_log = true;
1877 }
1878 if (send_message(sock, &msg) < 0) {
1879 perror("Fail to send message done");
1880 goto out;
1881 }
1882
1883 /* send logs */
1884 if (msg.test_done.have_log)
1885 worker_main_send_log(sock, state->log_buf, state->log_cnt);
1886
1887 if (state->log_buf) {
1888 free(state->log_buf);
1889 state->log_buf = NULL;
1890 state->log_cnt = 0;
1891 }
1892
1893 if (state->subtest_num)
1894 if (worker_main_send_subtests(sock, state))
1895 goto out;
1896
1897 if (env.debug)
1898 fprintf(stderr, "[%d]: #%d:%s done.\n",
1899 env.worker_id,
1900 test_to_run + 1,
1901 test->test_name);
1902 break;
1903 } /* case MSG_DO_TEST */
1904 default:
1905 if (env.debug)
1906 fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
1907 return -1;
1908 }
1909 }
1910 out:
1911 return 0;
1912 }
1913
1914 static void free_test_states(void)
1915 {
1916 int i, j;
1917
1918 for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
1919 struct test_state *test_state = &test_states[i];
1920
1921 for (j = 0; j < test_state->subtest_num; j++)
1922 free_subtest_state(&test_state->subtest_states[j]);
1923
1924 free(test_state->subtest_states);
1925 free(test_state->log_buf);
1926 test_state->subtest_states = NULL;
1927 test_state->log_buf = NULL;
1928 }
1929 }
1930
1931 int main(int argc, char **argv)
1932 {
1933 static const struct argp argp = {
1934 .options = opts,
1935 .parser = parse_arg,
1936 .doc = argp_program_doc,
1937 };
1938 struct sigaction sigact = {
1939 .sa_handler = crash_handler,
1940 .sa_flags = SA_RESETHAND,
1941 };
1942 int err, i;
1943
1944 sigaction(SIGSEGV, &sigact, NULL);
1945
1946 env.stdout_saved = stdout;
1947 env.stderr_saved = stderr;
1948
1949 env.secs_till_notify = 10;
1950 env.secs_till_kill = 120;
1951 err = argp_parse(&argp, argc, argv, 0, NULL, &env);
1952 if (err)
1953 return err;
1954
1955 err = cd_flavor_subdir(argv[0]);
1956 if (err)
1957 return err;
1958
1959 watchdog_init();
1960
1961 /* Use libbpf 1.0 API mode */
1962 libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
1963 libbpf_set_print(libbpf_print_fn);
1964
1965 traffic_monitor_set_print(traffic_monitor_print_fn);
1966
1967 srand(time(NULL));
1968
1969 env.jit_enabled = is_jit_enabled();
1970 env.nr_cpus = libbpf_num_possible_cpus();
1971 if (env.nr_cpus < 0) {
1972 fprintf(stderr, "Failed to get number of CPUs: %d!\n",
1973 env.nr_cpus);
1974 return -1;
1975 }
1976
1977 env.has_testmod = true;
1978 if (!env.list_test_names) {
1979 /* ensure previous instance of the module is unloaded */
1980 unload_bpf_testmod(verbose());
1981
1982 if (load_bpf_testmod(verbose())) {
1983 fprintf(env.stderr_saved, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
1984 env.has_testmod = false;
1985 }
1986 }
1987
1988 /* initializing tests */
1989 for (i = 0; i < prog_test_cnt; i++) {
1990 struct prog_test_def *test = &prog_test_defs[i];
1991
1992 test->test_num = i + 1;
1993 test->should_run = should_run(&env.test_selector,
1994 test->test_num, test->test_name);
1995
1996 if ((test->run_test == NULL && test->run_serial_test == NULL) ||
1997 (test->run_test != NULL && test->run_serial_test != NULL)) {
1998 fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%sl() defined.\n",
1999 test->test_num, test->test_name, test->test_name, test->test_name);
2000 exit(EXIT_ERR_SETUP_INFRA);
2001 }
2002 if (test->should_run)
2003 test->should_tmon = should_tmon(&env.tmon_selector, test->test_name);
2004 }
2005
2006 /* ignore workers if we are just listing */
2007 if (env.get_test_cnt || env.list_test_names)
2008 env.workers = 0;
2009
2010 /* launch workers if requested */
2011 env.worker_id = -1; /* main process */
2012 if (env.workers) {
2013 env.worker_pids = calloc(sizeof(pid_t), env.workers);
2014 env.worker_socks = calloc(sizeof(int), env.workers);
2015 if (env.debug)
2016 fprintf(stdout, "Launching %d workers.\n", env.workers);
2017 for (i = 0; i < env.workers; i++) {
2018 int sv[2];
2019 pid_t pid;
2020
2021 if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
2022 perror("Fail to create worker socket");
2023 return -1;
2024 }
2025 pid = fork();
2026 if (pid < 0) {
2027 perror("Failed to fork worker");
2028 return -1;
2029 } else if (pid != 0) { /* main process */
2030 close(sv[1]);
2031 env.worker_pids[i] = pid;
2032 env.worker_socks[i] = sv[0];
2033 } else { /* inside each worker process */
2034 close(sv[0]);
2035 env.worker_id = i;
2036 return worker_main(sv[1]);
2037 }
2038 }
2039
2040 if (env.worker_id == -1) {
2041 server_main();
2042 goto out;
2043 }
2044 }
2045
2046 /* The rest of the main process */
2047
2048 /* on single mode */
2049 save_netns();
2050
2051 for (i = 0; i < prog_test_cnt; i++) {
2052 struct prog_test_def *test = &prog_test_defs[i];
2053
2054 if (!test->should_run)
2055 continue;
2056
2057 if (env.get_test_cnt) {
2058 env.succ_cnt++;
2059 continue;
2060 }
2061
2062 if (env.list_test_names) {
2063 fprintf(env.stdout_saved, "%s\n", test->test_name);
2064 env.succ_cnt++;
2065 continue;
2066 }
2067
2068 run_one_test(i);
2069 }
2070
2071 if (env.get_test_cnt) {
2072 printf("%d\n", env.succ_cnt);
2073 goto out;
2074 }
2075
2076 if (env.list_test_names)
2077 goto out;
2078
2079 calculate_summary_and_print_errors(&env);
2080
2081 close(env.saved_netns_fd);
2082 out:
2083 if (!env.list_test_names && env.has_testmod)
2084 unload_bpf_testmod(verbose());
2085
2086 free_test_selector(&env.test_selector);
2087 free_test_selector(&env.subtest_selector);
2088 free_test_selector(&env.tmon_selector);
2089 free_test_states();
2090
2091 if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
2092 return EXIT_NO_TEST;
2093
2094 return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
2095 }
2096