xref: /linux/tools/perf/tests/builtin-test.c (revision a7bee4e7f78089c101be2ad51f4b5ec64782053e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * builtin-test.c
4  *
5  * Builtin regression testing command: ever growing number of sanity tests
6  */
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <poll.h>
10 #include <unistd.h>
11 #include <setjmp.h>
12 #include <string.h>
13 #include <stdlib.h>
14 #include <sys/types.h>
15 #include <dirent.h>
16 #include <sys/wait.h>
17 #include <sys/stat.h>
18 #include "builtin.h"
19 #include "config.h"
20 #include "hist.h"
21 #include "intlist.h"
22 #include "tests.h"
23 #include "debug.h"
24 #include "color.h"
25 #include <subcmd/parse-options.h>
26 #include <subcmd/run-command.h>
27 #include "string2.h"
28 #include "symbol.h"
29 #include "util/rlimit.h"
30 #include "util/strbuf.h"
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <subcmd/exec-cmd.h>
34 #include <linux/zalloc.h>
35 
36 #include "tests-scripts.h"
37 
/*
 * Command line option (-F/--dont-fork) to run the tests in the same process,
 * making them easier to debug.
 */
static bool dont_fork;
/* Run the tests one after another rather than in parallel (-S/--sequential). */
static bool sequential;
/* Number of times each test is run (-r/--runs-per-test). */
static unsigned int runs_per_test = 1;
/* DSO used by tests that need a binary to inspect (--dso). */
const char *dso_to_test;
/* objdump binary for disassembly tests (--objdump or annotate.objdump config). */
const char *test_objdump_path = "objdump";
49 
/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
/* Architectures without dedicated tests get an empty, NULL-terminated list. */
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif
62 
/* Architecture-independent test suites; NULL terminated for build_suites(). */
static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__python_use,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps__merge_in,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__demangle_rust,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	NULL,
};
141 
/* Built-in workloads runnable via -w/--workload for exercising perf commands. */
static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
};
151 
/* Iterate workload over every entry of the workloads[] array. */
#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

/* Iterate idx over the suite's test cases; idx ends equal to the case count. */
#define test_suite__for_each_test_case(suite, idx)			\
	for (idx = 0; (suite)->test_cases && (suite)->test_cases[idx].name != NULL; idx++)
157 
158 static int test_suite__num_test_cases(const struct test_suite *t)
159 {
160 	int num;
161 
162 	test_suite__for_each_test_case(t, num);
163 
164 	return num;
165 }
166 
167 static const char *skip_reason(const struct test_suite *t, int test_case)
168 {
169 	if (!t->test_cases)
170 		return NULL;
171 
172 	return t->test_cases[test_case >= 0 ? test_case : 0].skip_reason;
173 }
174 
175 static const char *test_description(const struct test_suite *t, int test_case)
176 {
177 	if (t->test_cases && test_case >= 0)
178 		return t->test_cases[test_case].desc;
179 
180 	return t->desc;
181 }
182 
183 static test_fnptr test_function(const struct test_suite *t, int test_case)
184 {
185 	if (test_case <= 0)
186 		return t->test_cases[0].run_case;
187 
188 	return t->test_cases[test_case].run_case;
189 }
190 
191 static bool test_exclusive(const struct test_suite *t, int test_case)
192 {
193 	if (test_case <= 0)
194 		return t->test_cases[0].exclusive;
195 
196 	return t->test_cases[test_case].exclusive;
197 }
198 
/*
 * Does the suite/test with the given description and (0 based) number match
 * any of the argv selectors? A selector that parses fully as a number selects
 * by 1-based test number; anything else is a case-insensitive substring match
 * on the description. With no selectors (argc == 0) everything matches.
 */
static bool perf_test__matches(const char *desc, int suite_num, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		/*
		 * Fix: was strtoul() assigned to a signed long (implicit
		 * unsigned->signed conversion); use strtol() for a matching
		 * signed parse.
		 */
		long nr = strtol(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == suite_num + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}
222 
/* State for one forked test case, embedding the subcmd child process. */
struct child_test {
	struct child_process process;
	struct test_suite *test;
	int suite_num;
	int test_case_num;
};

/* Jump target used to recover from fatal signals inside the test child. */
static jmp_buf run_test_jmp_buf;

/* Signal handler installed by run_test_child(): rethrow as a longjmp. */
static void child_test_sig_handler(int sig)
{
	siglongjmp(run_test_jmp_buf, sig);
}
236 
/*
 * Body of each forked test (invoked by start_command() via
 * process.no_exec_cmd, so nothing is exec'd). Fatal signals are caught via
 * sigsetjmp/siglongjmp so a crashing test case reports the signal number
 * instead of dying silently. Returns the negated test result, which becomes
 * the child's exit status.
 */
static int run_test_child(struct child_process *process)
{
	/* Signals turned into an error return rather than killing the child. */
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		/* Jumped here from child_test_sig_handler() with the signal number. */
		fprintf(stderr, "\n---- unexpected signal (%d) ----\n", err);
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->test_case_num)(child->test, child->test_case_num);
	pr_debug("---- end(%d) ----\n", err);

err_out:
	/* Flush all streams and restore default handlers before exiting. */
	fflush(NULL);
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}
266 
267 #define TEST_RUNNING -3
268 
269 static int print_test_result(struct test_suite *t, int curr_suite, int curr_test_case,
270 			     int result, int width, int running)
271 {
272 	if (test_suite__num_test_cases(t) > 1) {
273 		int subw = width > 2 ? width - 2 : width;
274 
275 		pr_info("%3d.%1d: %-*s:", curr_suite + 1, curr_test_case + 1, subw,
276 			test_description(t, curr_test_case));
277 	} else
278 		pr_info("%3d: %-*s:", curr_suite + 1, width, test_description(t, curr_test_case));
279 
280 	switch (result) {
281 	case TEST_RUNNING:
282 		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
283 		break;
284 	case TEST_OK:
285 		pr_info(" Ok\n");
286 		break;
287 	case TEST_SKIP: {
288 		const char *reason = skip_reason(t, curr_test_case);
289 
290 		if (reason)
291 			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
292 		else
293 			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
294 	}
295 		break;
296 	case TEST_FAIL:
297 	default:
298 		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
299 		break;
300 	}
301 
302 	return 0;
303 }
304 
/*
 * Reap the child test at child_tests[running_test]: drain its captured stderr
 * until the process exits, show a transient "Running (N active)" status line
 * while color output is enabled, then print the result line and free the
 * child_tests slot. child_test_num bounds the scan for still-running
 * siblings; width is the description column width for alignment.
 */
static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int curr_suite, curr_test_case, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	curr_suite = child_test->suite_num;
	curr_test_case = child_test->test_case_num;
	/* fd of the child's captured stderr pipe, or <= 0 when not captured. */
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (test_suite__num_test_cases(t) > 1 && curr_test_case == 0)
		pr_info("%3d: %-*s:\n", curr_suite + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (test_suite__num_test_cases(t) > 1)
			pr_info("%3d.%1d: %s:\n", curr_suite + 1, curr_test_case + 1,
				test_description(t, curr_test_case));
		else
			pr_info("%3d: %s:\n", curr_suite + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			/* Count children from running_test onward that haven't exited yet. */
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, curr_suite, curr_test_case, TEST_RUNNING,
						  width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					/* Got output; keep looping in case there is more. */
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	/* Replay the child's buffered output when verbose, or on failure at -v. */
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, curr_suite, curr_test_case, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}
411 
/*
 * Run or start test case (curr_suite, curr_test_case) of test according to
 * the scheduling pass:
 * - dont_fork: run the test inline (pass 1 only) and print its result.
 * - pass 1, parallel mode: start non-exclusive tests without waiting;
 *   exclusive tests are deferred to pass 2.
 * - pass 2, or sequential mode: start the test and immediately reap it via
 *   finish_test() (which also frees and NULLs *child again).
 * On return *child holds the started child's state, or NULL when nothing was
 * forked (or it was already reaped). Returns 0 or a negative error.
 */
static int start_test(struct test_suite *test, int curr_suite, int curr_test_case,
		struct child_test **child, int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, curr_test_case)(test, curr_test_case);
			pr_debug("---- end ----\n");
			print_test_result(test, curr_suite, curr_test_case, err, width,
					  /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, curr_test_case)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, curr_test_case))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->suite_num = curr_suite;
	(*child)->test_case_num = curr_test_case;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		/* Quiet mode: discard the child's stdout and stderr entirely. */
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		/* Verbose: capture output via pipes set up by start_command(). */
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}
463 
/* State outside of __cmd_test for the sake of the signal handler. */

/* Total number of test case runs, computed by __cmd_test. */
static size_t num_tests;
/* Per-run child state, indexed 0..num_tests-1; slots freed by finish_test(). */
static struct child_test **child_tests;
static jmp_buf cmd_test_jmp_buf;

/* On SIGINT/SIGTERM, jump back into __cmd_test to forward the signal. */
static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}
474 
475 static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
476 		      struct intlist *skiplist)
477 {
478 	static int width = 0;
479 	int err = 0;
480 
481 	for (struct test_suite **t = suites; *t; t++) {
482 		int i, len = strlen(test_description(*t, -1));
483 
484 		if (width < len)
485 			width = len;
486 
487 		test_suite__for_each_test_case(*t, i) {
488 			len = strlen(test_description(*t, i));
489 			if (width < len)
490 				width = len;
491 			num_tests += runs_per_test;
492 		}
493 	}
494 	child_tests = calloc(num_tests, sizeof(*child_tests));
495 	if (!child_tests)
496 		return -ENOMEM;
497 
498 	err = sigsetjmp(cmd_test_jmp_buf, 1);
499 	if (err) {
500 		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
501 		       err);
502 		for (size_t x = 0; x < num_tests; x++) {
503 			struct child_test *child_test = child_tests[x];
504 
505 			if (!child_test || child_test->process.pid <= 0)
506 				continue;
507 
508 			pr_debug3("Killing %d pid %d\n",
509 				  child_test->suite_num + 1,
510 				  child_test->process.pid);
511 			kill(child_test->process.pid, err);
512 		}
513 		goto err_out;
514 	}
515 	signal(SIGINT, cmd_test_sig_handler);
516 	signal(SIGTERM, cmd_test_sig_handler);
517 
518 	/*
519 	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
520 	 * runs the exclusive tests sequentially. In other modes all tests are
521 	 * run in pass 1.
522 	 */
523 	for (int pass = 1; pass <= 2; pass++) {
524 		int child_test_num = 0;
525 		int curr_suite = 0;
526 
527 		for (struct test_suite **t = suites; *t; t++, curr_suite++) {
528 			int curr_test_case;
529 
530 			if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv)) {
531 				/*
532 				 * Test suite shouldn't be run based on
533 				 * description. See if any test case should.
534 				 */
535 				bool skip = true;
536 
537 				test_suite__for_each_test_case(*t, curr_test_case) {
538 					if (perf_test__matches(test_description(*t, curr_test_case),
539 							       curr_suite, argc, argv)) {
540 						skip = false;
541 						break;
542 					}
543 				}
544 				if (skip)
545 					continue;
546 			}
547 
548 			if (intlist__find(skiplist, curr_suite + 1)) {
549 				pr_info("%3d: %-*s:", curr_suite + 1, width,
550 					test_description(*t, -1));
551 				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
552 				continue;
553 			}
554 
555 			for (unsigned int run = 0; run < runs_per_test; run++) {
556 				test_suite__for_each_test_case(*t, curr_test_case) {
557 					if (!perf_test__matches(test_description(*t, curr_test_case),
558 								curr_suite, argc, argv))
559 						continue;
560 
561 					err = start_test(*t, curr_suite, curr_test_case,
562 							 &child_tests[child_test_num++],
563 							 width, pass);
564 					if (err)
565 						goto err_out;
566 				}
567 			}
568 		}
569 		if (!sequential) {
570 			/* Parallel mode starts tests but doesn't finish them. Do that now. */
571 			for (size_t x = 0; x < num_tests; x++)
572 				finish_test(child_tests, x, num_tests, width);
573 		}
574 	}
575 err_out:
576 	signal(SIGINT, SIG_DFL);
577 	signal(SIGTERM, SIG_DFL);
578 	if (err) {
579 		pr_err("Internal test harness failure. Completing any started tests:\n:");
580 		for (size_t x = 0; x < num_tests; x++)
581 			finish_test(child_tests, x, num_tests, width);
582 	}
583 	free(child_tests);
584 	return err;
585 }
586 
587 static int perf_test__list(FILE *fp, struct test_suite **suites, int argc, const char **argv)
588 {
589 	int curr_suite = 0;
590 
591 	for (struct test_suite **t = suites; *t; t++, curr_suite++) {
592 		int curr_test_case;
593 
594 		if (!perf_test__matches(test_description(*t, -1), curr_suite, argc, argv))
595 			continue;
596 
597 		fprintf(fp, "%3d: %s\n", curr_suite + 1, test_description(*t, -1));
598 
599 		if (test_suite__num_test_cases(*t) <= 1)
600 			continue;
601 
602 		test_suite__for_each_test_case(*t, curr_test_case) {
603 			fprintf(fp, "%3d.%1d: %s\n", curr_suite + 1, curr_test_case + 1,
604 				test_description(*t, curr_test_case));
605 		}
606 	}
607 	return 0;
608 }
609 
610 static int workloads__fprintf_list(FILE *fp)
611 {
612 	struct test_workload *twl;
613 	int printed = 0;
614 
615 	workloads__for_each(twl)
616 		printed += fprintf(fp, "%s\n", twl->name);
617 
618 	return printed;
619 }
620 
621 static int run_workload(const char *work, int argc, const char **argv)
622 {
623 	struct test_workload *twl;
624 
625 	workloads__for_each(twl) {
626 		if (!strcmp(twl->name, work))
627 			return twl->func(argc, argv);
628 	}
629 
630 	pr_info("No workload found: %s\n", work);
631 	return -1;
632 }
633 
634 static int perf_test__config(const char *var, const char *value,
635 			     void *data __maybe_unused)
636 {
637 	if (!strcmp(var, "annotate.objdump"))
638 		test_objdump_path = value;
639 
640 	return 0;
641 }
642 
643 static struct test_suite **build_suites(void)
644 {
645 	/*
646 	 * TODO: suites is static to avoid needing to clean up the scripts tests
647 	 * for leak sanitizer.
648 	 */
649 	static struct test_suite **suites[] = {
650 		generic_tests,
651 		arch_tests,
652 		NULL,
653 	};
654 	struct test_suite **result;
655 	struct test_suite *t;
656 	size_t n = 0, num_suites = 0;
657 
658 	if (suites[2] == NULL)
659 		suites[2] = create_script_test_suites();
660 
661 #define for_each_suite(suite)						\
662 	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0)	\
663 		while ((suite = suites[i][j++]) != NULL)
664 
665 	for_each_suite(t)
666 		num_suites++;
667 
668 	result = calloc(num_suites + 1, sizeof(struct test_suite *));
669 
670 	for (int pass = 1; pass <= 2; pass++) {
671 		for_each_suite(t) {
672 			bool exclusive = false;
673 			int curr_test_case;
674 
675 			test_suite__for_each_test_case(t, curr_test_case) {
676 				if (test_exclusive(t, curr_test_case)) {
677 					exclusive = true;
678 					break;
679 				}
680 			}
681 			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
682 				result[n++] = t;
683 		}
684 	}
685 	return result;
686 #undef for_each_suite
687 }
688 
689 int cmd_test(int argc, const char **argv)
690 {
691 	const char *test_usage[] = {
692 	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
693 	NULL,
694 	};
695 	const char *skip = NULL;
696 	const char *workload = NULL;
697 	bool list_workloads = false;
698 	const struct option test_options[] = {
699 	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
700 	OPT_INCR('v', "verbose", &verbose,
701 		    "be more verbose (show symbol address, etc)"),
702 	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
703 		    "Do not fork for testcase"),
704 	OPT_BOOLEAN('S', "sequential", &sequential,
705 		    "Run the tests one after another rather than in parallel"),
706 	OPT_UINTEGER('r', "runs-per-test", &runs_per_test,
707 		     "Run each test the given number of times, default 1"),
708 	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
709 	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
710 	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
711 	OPT_STRING(0, "objdump", &test_objdump_path, "path",
712 		   "objdump binary to use for disassembly and annotations"),
713 	OPT_END()
714 	};
715 	const char * const test_subcommands[] = { "list", NULL };
716 	struct intlist *skiplist = NULL;
717         int ret = hists__init();
718 	struct test_suite **suites;
719 
720         if (ret < 0)
721                 return ret;
722 
723 	perf_config(perf_test__config, NULL);
724 
725 	/* Unbuffered output */
726 	setvbuf(stdout, NULL, _IONBF, 0);
727 
728 	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
729 	if (argc >= 1 && !strcmp(argv[0], "list")) {
730 		suites = build_suites();
731 		ret = perf_test__list(stdout, suites, argc - 1, argv + 1);
732 		free(suites);
733 		return ret;
734 	}
735 
736 	if (workload)
737 		return run_workload(workload, argc, argv);
738 
739 	if (list_workloads) {
740 		workloads__fprintf_list(stdout);
741 		return 0;
742 	}
743 
744 	if (dont_fork)
745 		sequential = true;
746 
747 	symbol_conf.priv_size = sizeof(int);
748 	symbol_conf.try_vmlinux_path = true;
749 
750 
751 	if (symbol__init(NULL) < 0)
752 		return -1;
753 
754 	if (skip != NULL)
755 		skiplist = intlist__new(skip);
756 	/*
757 	 * Tests that create BPF maps, for instance, need more than the 64K
758 	 * default:
759 	 */
760 	rlimit__bump_memlock();
761 
762 	suites = build_suites();
763 	ret = __cmd_test(suites, argc, argv, skiplist);
764 	free(suites);
765 	return ret;
766 }
767