Lines Matching +full:ftrace +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
3 * builtin-ftrace.c
25 #include <subcmd/parse-options.h>
36 #include "util/ftrace.h"
39 #include "util/parse-sublevel-options.h"
64 workload_exec_errno = info->si_value.sival_int; in ftrace__workload_exec_failed_signal()
78 pr_err("ftrace only works for %s!\n", in check_ftrace_capable()
106 int fd, ret = -1; in __write_tracing_file()
107 ssize_t size = strlen(val); in __write_tracing_file() local
115 return -1; in __write_tracing_file()
137 val_copy[size] = '\n'; in __write_tracing_file()
139 if (write(fd, val_copy, size + 1) == size + 1) in __write_tracing_file()
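
The __write_tracing_file() lines above show the core trick: the value gets a trailing '\n' appended and is written to the control file in a single write() of size + 1 bytes. A minimal standalone sketch of that idea, assuming tracefs is mounted at /sys/kernel/tracing (perf itself resolves the tracefs mount point at run time, which the listing does not show):

	#include <fcntl.h>
	#include <limits.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Write "<val>\n" to a tracefs control file in a single write() call. */
	static int write_tracing_file(const char *name, const char *val)
	{
		char path[PATH_MAX];
		char buf[256];
		int fd, ret = -1;
		int len;

		snprintf(path, sizeof(path), "/sys/kernel/tracing/%s", name);
		len = snprintf(buf, sizeof(buf), "%s\n", val);

		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;

		if (write(fd, buf, len) == len)
			ret = 0;

		close(fd);
		return ret;
	}

For example, write_tracing_file("current_tracer", "function_graph") would switch the active tracer, which is exactly the kind of call the set_tracing_*() helpers in this listing build on.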
168 int ret = -1; in read_tracing_file_to_stdout()
173 return -1; in read_tracing_file_to_stdout()
215 return -1; in read_tracing_file_by_line()
222 return -1; in read_tracing_file_by_line()
225 while (getline(&line, &len, fp) != -1) { in read_tracing_file_by_line()
243 return -1; in write_tracing_file_int()
254 return -1; in write_tracing_option_file()
264 static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused) in reset_tracing_options()
266 write_tracing_option_file("function-fork", "0"); in reset_tracing_options()
268 write_tracing_option_file("sleep-time", "1"); in reset_tracing_options()
269 write_tracing_option_file("funcgraph-irqs", "1"); in reset_tracing_options()
270 write_tracing_option_file("funcgraph-proc", "0"); in reset_tracing_options()
271 write_tracing_option_file("funcgraph-abstime", "0"); in reset_tracing_options()
272 write_tracing_option_file("funcgraph-tail", "0"); in reset_tracing_options()
273 write_tracing_option_file("latency-format", "0"); in reset_tracing_options()
274 write_tracing_option_file("irq-info", "0"); in reset_tracing_options()
277 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused) in reset_tracing_files()
280 return -1; in reset_tracing_files()
283 return -1; in reset_tracing_files()
286 return -1; in reset_tracing_files()
289 return -1; in reset_tracing_files()
292 return -1; in reset_tracing_files()
295 return -1; in reset_tracing_files()
298 reset_tracing_options(ftrace); in reset_tracing_files()
302 static int set_tracing_pid(struct perf_ftrace *ftrace) in set_tracing_pid() argument
307 if (target__has_cpu(&ftrace->target)) in set_tracing_pid()
310 for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) { in set_tracing_pid()
312 perf_thread_map__pid(ftrace->evlist->core.threads, i)); in set_tracing_pid()
314 return -1; in set_tracing_pid()
326 last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu; in set_tracing_cpumask()
333 return -1; in set_tracing_cpumask()
344 static int set_tracing_cpu(struct perf_ftrace *ftrace) in set_tracing_cpu() argument
346 struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus; in set_tracing_cpu()
348 if (!target__has_cpu(&ftrace->target)) in set_tracing_cpu()
354 static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace) in set_tracing_func_stack_trace() argument
356 if (!ftrace->func_stack_trace) in set_tracing_func_stack_trace()
360 return -1; in set_tracing_func_stack_trace()
365 static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace) in set_tracing_func_irqinfo() argument
367 if (!ftrace->func_irq_info) in set_tracing_func_irqinfo()
370 if (write_tracing_option_file("irq-info", "1") < 0) in set_tracing_func_irqinfo()
371 return -1; in set_tracing_func_irqinfo()
391 if (append_tracing_file(filter_file, pos->name) < 0) in __set_tracing_filter()
392 return -1; in __set_tracing_filter()
398 static int set_tracing_filters(struct perf_ftrace *ftrace) in set_tracing_filters() argument
402 ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters); in set_tracing_filters()
406 ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace); in set_tracing_filters()
410 ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs); in set_tracing_filters()
415 __set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs); in set_tracing_filters()
428 static int set_tracing_depth(struct perf_ftrace *ftrace) in set_tracing_depth() argument
430 if (ftrace->graph_depth == 0) in set_tracing_depth()
433 if (ftrace->graph_depth < 0) { in set_tracing_depth()
434 pr_err("invalid graph depth: %d\n", ftrace->graph_depth); in set_tracing_depth()
435 return -1; in set_tracing_depth()
438 if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0) in set_tracing_depth()
439 return -1; in set_tracing_depth()
444 static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace) in set_tracing_percpu_buffer_size() argument
448 if (ftrace->percpu_buffer_size == 0) in set_tracing_percpu_buffer_size()
452 ftrace->percpu_buffer_size / 1024); in set_tracing_percpu_buffer_size()
459 static int set_tracing_trace_inherit(struct perf_ftrace *ftrace) in set_tracing_trace_inherit() argument
461 if (!ftrace->inherit) in set_tracing_trace_inherit()
464 if (write_tracing_option_file("function-fork", "1") < 0) in set_tracing_trace_inherit()
465 return -1; in set_tracing_trace_inherit()
470 static int set_tracing_sleep_time(struct perf_ftrace *ftrace) in set_tracing_sleep_time() argument
472 if (!ftrace->graph_nosleep_time) in set_tracing_sleep_time()
475 if (write_tracing_option_file("sleep-time", "0") < 0) in set_tracing_sleep_time()
476 return -1; in set_tracing_sleep_time()
481 static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace) in set_tracing_funcgraph_irqs() argument
483 if (!ftrace->graph_noirqs) in set_tracing_funcgraph_irqs()
486 if (write_tracing_option_file("funcgraph-irqs", "0") < 0) in set_tracing_funcgraph_irqs()
487 return -1; in set_tracing_funcgraph_irqs()
492 static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace) in set_tracing_funcgraph_verbose() argument
494 if (!ftrace->graph_verbose) in set_tracing_funcgraph_verbose()
497 if (write_tracing_option_file("funcgraph-proc", "1") < 0) in set_tracing_funcgraph_verbose()
498 return -1; in set_tracing_funcgraph_verbose()
500 if (write_tracing_option_file("funcgraph-abstime", "1") < 0) in set_tracing_funcgraph_verbose()
501 return -1; in set_tracing_funcgraph_verbose()
503 if (write_tracing_option_file("latency-format", "1") < 0) in set_tracing_funcgraph_verbose()
504 return -1; in set_tracing_funcgraph_verbose()
509 static int set_tracing_funcgraph_tail(struct perf_ftrace *ftrace) in set_tracing_funcgraph_tail() argument
511 if (!ftrace->graph_tail) in set_tracing_funcgraph_tail()
514 if (write_tracing_option_file("funcgraph-tail", "1") < 0) in set_tracing_funcgraph_tail()
515 return -1; in set_tracing_funcgraph_tail()
520 static int set_tracing_thresh(struct perf_ftrace *ftrace) in set_tracing_thresh() argument
524 if (ftrace->graph_thresh == 0) in set_tracing_thresh()
527 ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh); in set_tracing_thresh()
534 static int set_tracing_options(struct perf_ftrace *ftrace) in set_tracing_options() argument
536 if (set_tracing_pid(ftrace) < 0) { in set_tracing_options()
537 pr_err("failed to set ftrace pid\n"); in set_tracing_options()
538 return -1; in set_tracing_options()
541 if (set_tracing_cpu(ftrace) < 0) { in set_tracing_options()
543 return -1; in set_tracing_options()
546 if (set_tracing_func_stack_trace(ftrace) < 0) { in set_tracing_options()
548 return -1; in set_tracing_options()
551 if (set_tracing_func_irqinfo(ftrace) < 0) { in set_tracing_options()
552 pr_err("failed to set tracing option irq-info\n"); in set_tracing_options()
553 return -1; in set_tracing_options()
556 if (set_tracing_filters(ftrace) < 0) { in set_tracing_options()
558 return -1; in set_tracing_options()
561 if (set_tracing_depth(ftrace) < 0) { in set_tracing_options()
563 return -1; in set_tracing_options()
566 if (set_tracing_percpu_buffer_size(ftrace) < 0) { in set_tracing_options()
567 pr_err("failed to set tracing per-cpu buffer size\n"); in set_tracing_options()
568 return -1; in set_tracing_options()
571 if (set_tracing_trace_inherit(ftrace) < 0) { in set_tracing_options()
572 pr_err("failed to set tracing option function-fork\n"); in set_tracing_options()
573 return -1; in set_tracing_options()
576 if (set_tracing_sleep_time(ftrace) < 0) { in set_tracing_options()
577 pr_err("failed to set tracing option sleep-time\n"); in set_tracing_options()
578 return -1; in set_tracing_options()
581 if (set_tracing_funcgraph_irqs(ftrace) < 0) { in set_tracing_options()
582 pr_err("failed to set tracing option funcgraph-irqs\n"); in set_tracing_options()
583 return -1; in set_tracing_options()
586 if (set_tracing_funcgraph_verbose(ftrace) < 0) { in set_tracing_options()
587 pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n"); in set_tracing_options()
588 return -1; in set_tracing_options()
591 if (set_tracing_thresh(ftrace) < 0) { in set_tracing_options()
593 return -1; in set_tracing_options()
596 if (set_tracing_funcgraph_tail(ftrace) < 0) { in set_tracing_options()
597 pr_err("failed to set tracing option funcgraph-tail\n"); in set_tracing_options()
598 return -1; in set_tracing_options()
604 static void select_tracer(struct perf_ftrace *ftrace) in select_tracer() argument
606 bool graph = !list_empty(&ftrace->graph_funcs) || in select_tracer()
607 !list_empty(&ftrace->nograph_funcs); in select_tracer()
608 bool func = !list_empty(&ftrace->filters) || in select_tracer()
609 !list_empty(&ftrace->notrace); in select_tracer()
613 ftrace->tracer = "function_graph"; in select_tracer()
615 ftrace->tracer = "function"; in select_tracer()
618 pr_debug("%s tracer is used\n", ftrace->tracer); in select_tracer()
621 static int __cmd_ftrace(struct perf_ftrace *ftrace) in __cmd_ftrace() argument
630 select_tracer(ftrace); in __cmd_ftrace()
632 if (reset_tracing_files(ftrace) < 0) { in __cmd_ftrace()
633 pr_err("failed to reset ftrace\n"); in __cmd_ftrace()
637 /* reset ftrace buffer */ in __cmd_ftrace()
641 if (set_tracing_options(ftrace) < 0) in __cmd_ftrace()
644 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { in __cmd_ftrace()
645 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); in __cmd_ftrace()
672 if (!ftrace->target.initial_delay) { in __cmd_ftrace()
679 evlist__start_workload(ftrace->evlist); in __cmd_ftrace()
681 if (ftrace->target.initial_delay > 0) { in __cmd_ftrace()
682 usleep(ftrace->target.initial_delay * 1000); in __cmd_ftrace()
690 if (poll(&pollfd, 1, -1) < 0) in __cmd_ftrace()
726 reset_tracing_files(ftrace); in __cmd_ftrace()
728 return (done && !workload_exec_errno) ? 0 : -1; in __cmd_ftrace()
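
Taken together, the __cmd_ftrace() lines describe a setup made entirely of tracefs writes (reset, per-option toggles, current_tracer) followed by a poll()/read() loop on the trace output. A condensed sketch of that flow, reusing the write_tracing_file() sketch above and assuming the conventional tracefs file names set_ftrace_pid, tracing_on and trace_pipe, which the listing only implies:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	static int write_tracing_file(const char *name, const char *val); /* sketch above */

	/* Trace one PID with the function_graph tracer and copy output to stdout. */
	static int trace_pid(pid_t pid)
	{
		char buf[4096], pidstr[16];
		struct pollfd pollfd;
		int trace_fd, n;

		snprintf(pidstr, sizeof(pidstr), "%d", pid);

		if (write_tracing_file("set_ftrace_pid", pidstr) < 0 ||
		    write_tracing_file("current_tracer", "function_graph") < 0 ||
		    write_tracing_file("tracing_on", "1") < 0)
			return -1;

		trace_fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
		if (trace_fd < 0)
			return -1;

		pollfd.fd = trace_fd;
		pollfd.events = POLLIN;

		/* A real tool loops until the workload exits; read once here. */
		if (poll(&pollfd, 1, -1) > 0 &&
		    (n = read(trace_fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);

		write_tracing_file("tracing_on", "0");
		close(trace_fd);
		return 0;
	}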
731 static void make_histogram(struct perf_ftrace *ftrace, int buckets[], in make_histogram() argument
734 int min_latency = ftrace->min_latency; in make_histogram()
735 int max_latency = ftrace->max_latency; in make_histogram()
736 unsigned int bucket_num = ftrace->bucket_num; in make_histogram()
782 if (ftrace->use_nsec) in make_histogram()
789 num -= min_latency; in make_histogram()
791 if (!ftrace->bucket_range) { in make_histogram()
799 i = num / ftrace->bucket_range + 1; in make_histogram()
800 if (num >= max_latency - min_latency) in make_histogram()
801			i = bucket_num - 1; in make_histogram()
804 i = bucket_num - 1; in make_histogram()
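
make_histogram() supports two bucketing modes: a log2 histogram when no --bucket-range is given, and linear buckets of bucket_range width between min_latency and max_latency, with the first and last buckets catching under- and overflow values. A sketch of that index computation with the parameters passed explicitly rather than read from struct perf_ftrace; the log2 branch is not visible in this listing, so floor(log2(num)) is assumed here:

	/*
	 * Map a latency sample (in the histogram's unit, usec or nsec) to a
	 * bucket index.  Bucket 0 holds values below min_latency and the
	 * last bucket holds values at or above max_latency.
	 */
	static unsigned int latency_bucket(unsigned int num,
					   unsigned int bucket_range,
					   unsigned int bucket_num,
					   unsigned int min_latency,
					   unsigned int max_latency)
	{
		unsigned int i = 0;

		if (num < min_latency)
			return 0;

		num -= min_latency;

		if (!bucket_range) {
			/* log2 mode (assumed): index = floor(log2(num)) */
			i = num ? 31 - __builtin_clz(num) : 0;
		} else {
			/* linear mode: one bucket per bucket_range slice */
			i = num / bucket_range + 1;
			if (num >= max_latency - min_latency)
				i = bucket_num - 1;
		}

		if (i >= bucket_num)
			i = bucket_num - 1;
		return i;
	}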
820 static void display_histogram(struct perf_ftrace *ftrace, int buckets[]) in display_histogram() argument
822 int min_latency = ftrace->min_latency; in display_histogram()
823 bool use_nsec = ftrace->use_nsec; in display_histogram()
824 unsigned int bucket_num = ftrace->bucket_num; in display_histogram()
839 printf("# %14s | %10s | %-*s |\n", in display_histogram()
844 if (!ftrace->hide_empty || buckets[0]) in display_histogram()
845 printf(" %4d - %4d %s | %10d | %.*s%*s |\n", in display_histogram()
847 buckets[0], bar_len, bar, bar_total - bar_len, ""); in display_histogram()
849 for (i = 1; i < bucket_num - 1; i++) { in display_histogram()
853 if (ftrace->hide_empty && !buckets[i]) in display_histogram()
855 if (!ftrace->bucket_range) { in display_histogram()
856 start = (1 << (i - 1)); in display_histogram()
865 start = (i - 1) * ftrace->bucket_range + min_latency; in display_histogram()
866 stop = i * ftrace->bucket_range + min_latency; in display_histogram()
868 if (start >= ftrace->max_latency) in display_histogram()
870 if (stop > ftrace->max_latency) in display_histogram()
871 stop = ftrace->max_latency; in display_histogram()
876 printf(" %4.2f - %-4.2f", dstart, dstop); in display_histogram()
882 printf(" %4d - %4d", start, stop); in display_histogram()
886 bar_total - bar_len, ""); in display_histogram()
889 bar_len = buckets[bucket_num - 1] * bar_total / total; in display_histogram()
890 if (ftrace->hide_empty && !buckets[bucket_num - 1]) in display_histogram()
892 if (!ftrace->bucket_range) { in display_histogram()
893 printf(" %4d - %-4s %s", 1, "...", use_nsec ? "ms" : "s "); in display_histogram()
895 unsigned int upper_outlier = (bucket_num - 2) * ftrace->bucket_range + min_latency; in display_histogram()
896 if (upper_outlier > ftrace->max_latency) in display_histogram()
897 upper_outlier = ftrace->max_latency; in display_histogram()
902 printf(" %4.2f - %-4s %s", dstart, "...", use_nsec ? "us" : "ms"); in display_histogram()
904 printf(" %4d - %4s %s", upper_outlier, "...", use_nsec ? "ns" : "us"); in display_histogram()
907 printf(" | %10d | %.*s%*s |\n", buckets[bucket_num - 1], in display_histogram()
908 bar_len, bar, bar_total - bar_len, ""); in display_histogram()
911 printf("\n# statistics (in %s)\n", ftrace->use_nsec ? "nsec" : "usec"); in display_histogram()
919 static int prepare_func_latency(struct perf_ftrace *ftrace) in prepare_func_latency() argument
924 if (ftrace->target.use_bpf) in prepare_func_latency()
925 return perf_ftrace__latency_prepare_bpf(ftrace); in prepare_func_latency()
927 if (reset_tracing_files(ftrace) < 0) { in prepare_func_latency()
928 pr_err("failed to reset ftrace\n"); in prepare_func_latency()
929 return -1; in prepare_func_latency()
932 /* reset ftrace buffer */ in prepare_func_latency()
934 return -1; in prepare_func_latency()
936 if (set_tracing_options(ftrace) < 0) in prepare_func_latency()
937 return -1; in prepare_func_latency()
942 return -1; in prepare_func_latency()
948 return -1; in prepare_func_latency()
961 static int start_func_latency(struct perf_ftrace *ftrace) in start_func_latency() argument
963 if (ftrace->target.use_bpf) in start_func_latency()
964 return perf_ftrace__latency_start_bpf(ftrace); in start_func_latency()
968 return -1; in start_func_latency()
974 static int stop_func_latency(struct perf_ftrace *ftrace) in stop_func_latency() argument
976 if (ftrace->target.use_bpf) in stop_func_latency()
977 return perf_ftrace__latency_stop_bpf(ftrace); in stop_func_latency()
983 static int read_func_latency(struct perf_ftrace *ftrace, int buckets[]) in read_func_latency() argument
985 if (ftrace->target.use_bpf) in read_func_latency()
986 return perf_ftrace__latency_read_bpf(ftrace, buckets, &latency_stats); in read_func_latency()
991 static int cleanup_func_latency(struct perf_ftrace *ftrace) in cleanup_func_latency() argument
993 if (ftrace->target.use_bpf) in cleanup_func_latency()
994 return perf_ftrace__latency_cleanup_bpf(ftrace); in cleanup_func_latency()
996 reset_tracing_files(ftrace); in cleanup_func_latency()
1000 static int __cmd_latency(struct perf_ftrace *ftrace) in __cmd_latency() argument
1010 trace_fd = prepare_func_latency(ftrace); in __cmd_latency()
1017 if (start_func_latency(ftrace) < 0) in __cmd_latency()
1020 evlist__start_workload(ftrace->evlist); in __cmd_latency()
1022 buckets = calloc(ftrace->bucket_num, sizeof(*buckets)); in __cmd_latency()
1030 if (poll(&pollfd, 1, -1) < 0) in __cmd_latency()
1034 int n = read(trace_fd, buf, sizeof(buf) - 1); in __cmd_latency()
1038 make_histogram(ftrace, buckets, buf, n, line); in __cmd_latency()
1042 stop_func_latency(ftrace); in __cmd_latency()
1051 while (!ftrace->target.use_bpf) { in __cmd_latency()
1052 int n = read(trace_fd, buf, sizeof(buf) - 1); in __cmd_latency()
1055 make_histogram(ftrace, buckets, buf, n, line); in __cmd_latency()
1058 read_func_latency(ftrace, buckets); in __cmd_latency()
1060 display_histogram(ftrace, buckets); in __cmd_latency()
1066 cleanup_func_latency(ftrace); in __cmd_latency()
1068 return (done && !workload_exec_errno) ? 0 : -1; in __cmd_latency()
1081 static int prepare_func_profile(struct perf_ftrace *ftrace) in prepare_func_profile() argument
1083 ftrace->tracer = "function_graph"; in prepare_func_profile()
1084 ftrace->graph_tail = 1; in prepare_func_profile()
1085 ftrace->graph_verbose = 0; in prepare_func_profile()
1087 ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL); in prepare_func_profile()
1088 if (ftrace->profile_hash == NULL) in prepare_func_profile()
1089 return -ENOMEM; in prepare_func_profile()
1099 static int add_func_duration(struct perf_ftrace *ftrace, char *func, double time_ns) in add_func_duration() argument
1103 if (!hashmap__find(ftrace->profile_hash, func, &prof)) { in add_func_duration()
1107 return -ENOMEM; in add_func_duration()
1112 return -ENOMEM; in add_func_duration()
1115 init_stats(&prof->st); in add_func_duration()
1116 hashmap__add(ftrace->profile_hash, key, prof); in add_func_duration()
1119 update_stats(&prof->st, time_ns); in add_func_duration()
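
add_func_duration() keeps one stats object per function name in a hashmap and feeds each measured duration into update_stats(). A self-contained sketch of that per-function bookkeeping, using a plain chained list and an incremental mean instead of perf's hashmap and struct stats (the names and fields here are illustrative, not perf's):

	#include <stdlib.h>
	#include <string.h>

	/* Per-function running statistics, analogous to perf's struct stats. */
	struct func_stats {
		char *name;
		unsigned long n;
		double mean;
		double max;
		struct func_stats *next;  /* simple list in place of a hashmap */
	};

	static struct func_stats *stats_list;

	static int add_func_duration(const char *func, double time_ns)
	{
		struct func_stats *st;

		for (st = stats_list; st; st = st->next)
			if (!strcmp(st->name, func))
				break;

		if (!st) {
			st = calloc(1, sizeof(*st));
			if (!st)
				return -1;
			st->name = strdup(func);
			if (!st->name) {
				free(st);
				return -1;
			}
			st->next = stats_list;
			stats_list = st;
		}

		/* incremental mean; perf's update_stats() works along these lines */
		st->n++;
		st->mean += (time_ns - st->mean) / st->n;
		if (time_ns > st->max)
			st->max = time_ns;
		return 0;
	}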
1124 * The ftrace function_graph text output normally looks like below:
1140 static int parse_func_duration(struct perf_ftrace *ftrace, char *line, size_t len) in parse_func_duration() argument
1165 pr_debug("non-usec time found.. ignoring\n"); in parse_func_duration()
1180 return -EINVAL; in parse_func_duration()
1189 /* remove semi-colon or end of comment at the end */ in parse_func_duration()
1190 p = line + len - 1; in parse_func_duration()
1193 --p; in parse_func_duration()
1196 return add_func_duration(ftrace, func, duration); in parse_func_duration()
1213 struct ftrace_profile_data *p1 = e1->pvalue; in cmp_profile_data()
1214 struct ftrace_profile_data *p2 = e2->pvalue; in cmp_profile_data()
1219 return strcmp(e1->pkey, e2->pkey); in cmp_profile_data()
1221 v1 = p1->st.mean; in cmp_profile_data()
1222 v2 = p2->st.mean; in cmp_profile_data()
1225 v1 = p1->st.max; in cmp_profile_data()
1226 v2 = p2->st.max; in cmp_profile_data()
1229 v1 = p1->st.n; in cmp_profile_data()
1230 v2 = p2->st.n; in cmp_profile_data()
1234 v1 = p1->st.n * p1->st.mean; in cmp_profile_data()
1235 v2 = p2->st.n * p2->st.mean; in cmp_profile_data()
1240 return -1; in cmp_profile_data()
1246 static void print_profile_result(struct perf_ftrace *ftrace) in print_profile_result() argument
1251 nr = hashmap__size(ftrace->profile_hash); in print_profile_result()
1262 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) in print_profile_result()
1274 const char *name = profile[i]->pkey; in print_profile_result()
1275 struct ftrace_profile_data *p = profile[i]->pvalue; in print_profile_result()
1278 p->st.n * p->st.mean / 1000, p->st.mean / 1000, in print_profile_result()
1279 p->st.max / 1000, p->st.max % 1000, p->st.n, name); in print_profile_result()
1284 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) { in print_profile_result()
1285 free((char *)entry->pkey); in print_profile_result()
1286 free(entry->pvalue); in print_profile_result()
1289 hashmap__free(ftrace->profile_hash); in print_profile_result()
1290 ftrace->profile_hash = NULL; in print_profile_result()
1293 static int __cmd_profile(struct perf_ftrace *ftrace) in __cmd_profile() argument
1302 if (prepare_func_profile(ftrace) < 0) { in __cmd_profile()
1307 if (reset_tracing_files(ftrace) < 0) { in __cmd_profile()
1308 pr_err("failed to reset ftrace\n"); in __cmd_profile()
1312 /* reset ftrace buffer */ in __cmd_profile()
1316 if (set_tracing_options(ftrace) < 0) in __cmd_profile()
1317 return -1; in __cmd_profile()
1319 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) { in __cmd_profile()
1320 pr_err("failed to set current_tracer to %s\n", ftrace->tracer); in __cmd_profile()
1348 evlist__start_workload(ftrace->evlist); in __cmd_profile()
1351 io.timeout_ms = -1; in __cmd_profile()
1357 if (parse_func_duration(ftrace, line, line_len) < 0) in __cmd_profile()
1377 if (parse_func_duration(ftrace, line, line_len) < 0) in __cmd_profile()
1381 print_profile_result(ftrace); in __cmd_profile()
1388 reset_tracing_files(ftrace); in __cmd_profile()
1390 return (done && !workload_exec_errno) ? 0 : -1; in __cmd_profile()
1395 struct perf_ftrace *ftrace = cb; in perf_ftrace_config() local
1397 if (!strstarts(var, "ftrace.")) in perf_ftrace_config()
1400 if (strcmp(var, "ftrace.tracer")) in perf_ftrace_config()
1401 return -1; in perf_ftrace_config()
1405 ftrace->tracer = value; in perf_ftrace_config()
1410 return -1; in perf_ftrace_config()
1429 return -1; in opt_list_avail_functions()
1433 return err ? -EINVAL : -ENOMEM; in opt_list_avail_functions()
1436 if (ret == -EINVAL) { in opt_list_avail_functions()
1437 pr_err("Filter parse error at %td.\n", err - str + 1); in opt_list_avail_functions()
1439 pr_err(" %*c\n", (int)(err - str + 1), '^'); in opt_list_avail_functions()
1456 struct list_head *head = opt->value; in parse_filter_func()
1461 return -ENOMEM; in parse_filter_func()
1463 strcpy(entry->name, str); in parse_filter_func()
1464 list_add_tail(&entry->list, head); in parse_filter_func()
1474 list_del_init(&pos->list); in delete_filter_func()
1482 unsigned long *s = (unsigned long *)opt->value; in parse_buffer_size()
1498 if (val != (unsigned long) -1) { in parse_buffer_size()
1500 pr_err("buffer size too small, must larger than 1KB."); in parse_buffer_size()
1501 return -1; in parse_buffer_size()
1507 return -1; in parse_buffer_size()
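
parse_buffer_size() accepts a value with a B, K, M or G suffix (see the --buffer-size/-m help text further down) and rejects anything below 1 KB, since the size is later written out in kilobytes. A minimal sketch of that suffix handling, independent of perf's own tag-parsing helper and with the lowercase suffixes accepted only as a convenience of this sketch:

	#include <errno.h>
	#include <stdlib.h>

	/* Parse "<num>[BKMG]" into bytes; returns 0 on success, -1 on error. */
	static int parse_buffer_size(const char *str, unsigned long *bytes)
	{
		char *end;
		unsigned long val, mult;

		errno = 0;
		val = strtoul(str, &end, 10);
		if (errno || end == str)
			return -1;

		switch (*end) {
		case 'B': case 'b': case '\0': mult = 1;          break;
		case 'K': case 'k':            mult = 1UL << 10;  break;
		case 'M': case 'm':            mult = 1UL << 20;  break;
		case 'G': case 'g':            mult = 1UL << 30;  break;
		default:
			return -1;
		}

		val *= mult;
		if (val < 1024)
			return -1;  /* must be at least 1 KB */

		*bytes = val;
		return 0;
	}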
1514 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; in parse_func_tracer_opts() local
1516 { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace }, in parse_func_tracer_opts()
1517 { .name = "irq-info", .value_ptr = &ftrace->func_irq_info }, in parse_func_tracer_opts()
1535 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; in parse_graph_tracer_opts() local
1537 { .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time }, in parse_graph_tracer_opts()
1538 { .name = "noirqs", .value_ptr = &ftrace->graph_noirqs }, in parse_graph_tracer_opts()
1539 { .name = "verbose", .value_ptr = &ftrace->graph_verbose }, in parse_graph_tracer_opts()
1540 { .name = "thresh", .value_ptr = &ftrace->graph_thresh }, in parse_graph_tracer_opts()
1541 { .name = "depth", .value_ptr = &ftrace->graph_depth }, in parse_graph_tracer_opts()
1542 { .name = "tail", .value_ptr = &ftrace->graph_tail }, in parse_graph_tracer_opts()
1558 enum perf_ftrace_profile_sort_key *key = (void *)opt->value; in parse_sort_key()
1575 return -1; in parse_sort_key()
1591 struct perf_ftrace ftrace = { in cmd_ftrace() local
1596 OPT_STRING('p', "pid", &ftrace.target.pid, "pid", in cmd_ftrace()
1598 /* TODO: Add short option -t after -t/--tracer can be removed. */ in cmd_ftrace()
1599 OPT_STRING(0, "tid", &ftrace.target.tid, "tid", in cmd_ftrace()
1600 "Trace on existing thread id (exclusive to --pid)"), in cmd_ftrace()
1603 OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide, in cmd_ftrace()
1604 "System-wide collection from all CPUs"), in cmd_ftrace()
1605 OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu", in cmd_ftrace()
1610 OPT_STRING('t', "tracer", &ftrace.tracer, "tracer", in cmd_ftrace()
1615 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", in cmd_ftrace()
1618 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", in cmd_ftrace()
1620 OPT_CALLBACK(0, "func-opts", &ftrace, "options", in cmd_ftrace()
1621 "Function tracer options, available options: call-graph,irq-info", in cmd_ftrace()
1623 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", in cmd_ftrace()
1626 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", in cmd_ftrace()
1628 OPT_CALLBACK(0, "graph-opts", &ftrace, "options", in cmd_ftrace()
1629 "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>", in cmd_ftrace()
1631 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", in cmd_ftrace()
1632 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), in cmd_ftrace()
1633 OPT_BOOLEAN(0, "inherit", &ftrace.inherit, in cmd_ftrace()
1635 OPT_INTEGER('D', "delay", &ftrace.target.initial_delay, in cmd_ftrace()
1640 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", in cmd_ftrace()
1643 OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, in cmd_ftrace()
1646 OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec, in cmd_ftrace()
1647 "Use nano-second histogram"), in cmd_ftrace()
1648 OPT_UINTEGER(0, "bucket-range", &ftrace.bucket_range, in cmd_ftrace()
1649 "Bucket range in ms or ns (-n/--use-nsec), default is log2() mode"), in cmd_ftrace()
1650 OPT_UINTEGER(0, "min-latency", &ftrace.min_latency, in cmd_ftrace()
1651 "Minimum latency (1st bucket). Works only with --bucket-range."), in cmd_ftrace()
1652 OPT_UINTEGER(0, "max-latency", &ftrace.max_latency, in cmd_ftrace()
1653 "Maximum latency (last bucket). Works only with --bucket-range."), in cmd_ftrace()
1654 OPT_BOOLEAN(0, "hide-empty", &ftrace.hide_empty, in cmd_ftrace()
1659 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func", in cmd_ftrace()
1662 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func", in cmd_ftrace()
1664 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func", in cmd_ftrace()
1667 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func", in cmd_ftrace()
1669 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size", in cmd_ftrace()
1670 "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size), in cmd_ftrace()
1674 OPT_CALLBACK(0, "graph-opts", &ftrace, "options", in cmd_ftrace()
1675 "Graph tracer options, available options: nosleep-time,noirqs,thresh=<n>,depth=<n>", in cmd_ftrace()
1682 "perf ftrace [<options>] [<command>]", in cmd_ftrace()
1683 "perf ftrace [<options>] -- [<command>] [<options>]", in cmd_ftrace()
1684 "perf ftrace {trace|latency|profile} [<options>] [<command>]", in cmd_ftrace()
1685 "perf ftrace {trace|latency|profile} [<options>] -- [<command>] [<options>]", in cmd_ftrace()
1690 INIT_LIST_HEAD(&ftrace.filters); in cmd_ftrace()
1691 INIT_LIST_HEAD(&ftrace.notrace); in cmd_ftrace()
1692 INIT_LIST_HEAD(&ftrace.graph_funcs); in cmd_ftrace()
1693 INIT_LIST_HEAD(&ftrace.nograph_funcs); in cmd_ftrace()
1701 return -1; in cmd_ftrace()
1704 pr_err("ftrace is not supported on this system\n"); in cmd_ftrace()
1705 return -ENOTSUP; in cmd_ftrace()
1708 ret = perf_config(perf_ftrace_config, &ftrace); in cmd_ftrace()
1710 return -1; in cmd_ftrace()
1724 argc--; in cmd_ftrace()
1735 ret = -EINVAL; in cmd_ftrace()
1739 /* Make system wide (-a) the default target. */ in cmd_ftrace()
1740 if (!argc && target__none(&ftrace.target)) in cmd_ftrace()
1741 ftrace.target.system_wide = true; in cmd_ftrace()
1748 if (list_empty(&ftrace.filters)) { in cmd_ftrace()
1751 ret = -EINVAL; in cmd_ftrace()
1754 if (!ftrace.bucket_range && ftrace.min_latency) { in cmd_ftrace()
1755 pr_err("--min-latency works only with --bucket-range\n"); in cmd_ftrace()
1757 "min-latency", /*short_opt=*/false); in cmd_ftrace()
1758 ret = -EINVAL; in cmd_ftrace()
1761 if (ftrace.bucket_range && !ftrace.min_latency) { in cmd_ftrace()
1763 ftrace.min_latency = ftrace.bucket_range; in cmd_ftrace()
1765 if (!ftrace.bucket_range && ftrace.max_latency) { in cmd_ftrace()
1766 pr_err("--max-latency works only with --bucket-range\n"); in cmd_ftrace()
1768 "max-latency", /*short_opt=*/false); in cmd_ftrace()
1769 ret = -EINVAL; in cmd_ftrace()
1772 if (ftrace.bucket_range && ftrace.max_latency && in cmd_ftrace()
1773 ftrace.max_latency < ftrace.min_latency + ftrace.bucket_range) { in cmd_ftrace()
1775 pr_err("--max-latency must be larger than min-latency + bucket-range\n"); in cmd_ftrace()
1777 "max-latency", /*short_opt=*/false); in cmd_ftrace()
1778 ret = -EINVAL; in cmd_ftrace()
1782 ftrace.bucket_num = NUM_BUCKET; in cmd_ftrace()
1783 if (ftrace.bucket_range) { in cmd_ftrace()
1784 if (ftrace.max_latency) in cmd_ftrace()
1785 ftrace.bucket_num = (ftrace.max_latency - ftrace.min_latency) / in cmd_ftrace()
1786 ftrace.bucket_range + 2; in cmd_ftrace()
1789 ftrace.max_latency = (NUM_BUCKET - 2) * ftrace.bucket_range + in cmd_ftrace()
1790 ftrace.min_latency; in cmd_ftrace()
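
The bucket_num setup above sizes the histogram from the latency options. As a worked example with hypothetical values --min-latency 100 --max-latency 600 --bucket-range 100, it yields (600 - 100) / 100 + 2 = 7 buckets: one underflow bucket for latencies below 100, five buckets each spanning 100 units (100-200 up to 500-600), and one overflow bucket for latencies at or above 600, in ms or ns depending on -n/--use-nsec.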
1800 ret = -EINVAL; in cmd_ftrace()
1804 ret = target__validate(&ftrace.target); in cmd_ftrace()
1808 target__strerror(&ftrace.target, ret, errbuf, 512); in cmd_ftrace()
1813 ftrace.evlist = evlist__new(); in cmd_ftrace()
1814 if (ftrace.evlist == NULL) { in cmd_ftrace()
1815 ret = -ENOMEM; in cmd_ftrace()
1819 ret = evlist__create_maps(ftrace.evlist, &ftrace.target); in cmd_ftrace()
1824 ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target, in cmd_ftrace()
1831 ret = cmd_func(&ftrace); in cmd_ftrace()
1834 evlist__delete(ftrace.evlist); in cmd_ftrace()
1837 delete_filter_func(&ftrace.filters); in cmd_ftrace()
1838 delete_filter_func(&ftrace.notrace); in cmd_ftrace()
1839 delete_filter_func(&ftrace.graph_funcs); in cmd_ftrace()
1840 delete_filter_func(&ftrace.nograph_funcs); in cmd_ftrace()