Lines Matching defs:ftrace (tools/perf/builtin-ftrace.c)

3  * builtin-ftrace.c
37 #include "util/ftrace.h"
81 pr_err("ftrace only works for %s!\n",
295 static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
312 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
333 reset_tracing_options(ftrace);
352 char dirname[] = "instances/perf-ftrace-XXXXXX";
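All of the setup and reset helpers in this file funnel through a small tracefs writer. A minimal sketch of that pattern, assuming tracefs is mounted at /sys/kernel/tracing (the real perf helper resolves the mount point via its tracing_path utilities and also has an append mode for filter files; the name write_tracing_file_sketch is illustrative):

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative tracefs writer: open the control file, write the value,
 * and report success only if the whole string was accepted. */
static int write_tracing_file_sketch(const char *name, const char *val)
{
	char path[PATH_MAX];
	ssize_t size = (ssize_t)strlen(val);
	int fd, ret = -1;

	snprintf(path, sizeof(path), "/sys/kernel/tracing/%s", name);

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	if (write(fd, val, size) == size)
		ret = 0;

	close(fd);
	return ret;
}

reset_tracing_files() issues exactly this kind of write to restore defaults, and the "XXXXXX" template at line 352 suggests a mkdtemp()-style unique instance directory, which keeps perf's changes out of the global tracefs state.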
370 static int set_tracing_pid(struct perf_ftrace *ftrace)
375 if (target__has_cpu(&ftrace->target))
378 for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
380 perf_thread_map__pid(ftrace->evlist->core.threads, i));
412 static int set_tracing_cpu(struct perf_ftrace *ftrace)
414 struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
416 if (!target__has_cpu(&ftrace->target))
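set_tracing_pid() walks the evlist's thread map and writes every TID into set_ftrace_pid so that only the target tasks are traced (the target__has_cpu() check at line 375 skips this in CPU-oriented runs), while set_tracing_cpu() writes the requested CPUs into tracing_cpumask. A hedged sketch of the PID loop, with a plain array standing in for perf_thread_map; note O_APPEND, since each write must add to, not replace, the kernel's PID list:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: restrict function tracing to a set of TIDs.  perf gets the
 * TIDs from perf_thread_map__pid(); a plain array stands in here. */
static int set_tracing_pids_sketch(const pid_t *pids, int nr)
{
	int fd, i, ret = 0;

	fd = open("/sys/kernel/tracing/set_ftrace_pid",
		  O_WRONLY | O_APPEND);
	if (fd < 0)
		return -1;

	for (i = 0; i < nr; i++) {
		if (dprintf(fd, "%d ", pids[i]) < 0) {
			ret = -1;
			break;
		}
	}
	close(fd);
	return ret;
}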
422 static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
424 if (!ftrace->func_stack_trace)
433 static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
435 if (!ftrace->func_irq_info)
466 static int set_tracing_filters(struct perf_ftrace *ftrace)
470 ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
474 ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
478 ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
483 __set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
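The four filter lists map one-to-one onto tracefs files: set_ftrace_filter / set_ftrace_notrace steer the function tracer, set_graph_function / set_graph_notrace the graph tracer. A sketch of the append loop behind __set_tracing_filter(), with a string array standing in for perf's filter-entry list (ftrace accepts glob patterns such as "tcp_*" in these files):

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: append each function pattern to one of the filter files. */
static int set_tracing_filter_sketch(const char *file,
				     const char **funcs, int nr)
{
	char path[PATH_MAX];
	int fd, i, ret = 0;

	snprintf(path, sizeof(path), "/sys/kernel/tracing/%s", file);

	fd = open(path, O_WRONLY | O_APPEND);
	if (fd < 0)
		return -1;

	for (i = 0; i < nr; i++) {
		if (dprintf(fd, "%s ", funcs[i]) < 0) {
			ret = -1;
			break;
		}
	}
	close(fd);
	return ret;
}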
496 static int set_tracing_depth(struct perf_ftrace *ftrace)
498 if (ftrace->graph_depth == 0)
501 if (ftrace->graph_depth < 0) {
502 pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
506 if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
512 static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
516 if (ftrace->percpu_buffer_size == 0)
520 ftrace->percpu_buffer_size / 1024);
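Numeric knobs (max_graph_depth here, buffer_size_kb, and tracing_thresh below) take plain decimal strings, so write_tracing_file_int() is little more than a formatting wrapper. A sketch building on the writer above; note the "/ 1024" at line 520: percpu_buffer_size is a byte count, but buffer_size_kb wants kilobytes:

/* Sketch: format the integer and reuse the string writer above. */
static int write_tracing_file_int_sketch(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	return write_tracing_file_sketch(name, buf);
}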
527 static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
529 if (!ftrace->inherit)
538 static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
540 if (!ftrace->graph_nosleep_time)
549 static int set_tracing_funcgraph_args(struct perf_ftrace *ftrace)
551 if (ftrace->graph_args) {
559 static int set_tracing_funcgraph_retval(struct perf_ftrace *ftrace)
561 if (ftrace->graph_retval || ftrace->graph_retval_hex) {
566 if (ftrace->graph_retval_hex) {
574 static int set_tracing_funcgraph_retaddr(struct perf_ftrace *ftrace)
576 if (ftrace->graph_retaddr) {
584 static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
586 if (!ftrace->graph_noirqs)
595 static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
597 if (!ftrace->graph_verbose)
612 static int set_tracing_funcgraph_tail(struct perf_ftrace *ftrace)
614 if (!ftrace->graph_tail)
623 static int set_tracing_thresh(struct perf_ftrace *ftrace)
627 if (ftrace->graph_thresh == 0)
630 ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
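Most of the boolean setters between lines 527 and 622 flip one trace option under options/ in tracefs (for example options/funcgraph-irqs for the noirqs flag, options/sleep-time for nosleep-time); only the file name and the enable/disable sense differ, while set_tracing_thresh() at line 623 writes tracing_thresh directly. A minimal sketch, again reusing the writer above; the two option names cited are the kernel's, but the exact flag-to-file mapping in each setter is inferred from the flag names:

#include <stdbool.h>

/* Sketch: a trace option is a file under options/ holding "0" or "1". */
static int set_tracing_option_sketch(const char *name, bool on)
{
	char path[64];

	snprintf(path, sizeof(path), "options/%s", name);
	return write_tracing_file_sketch(path, on ? "1" : "0");
}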
637 static int set_tracing_options(struct perf_ftrace *ftrace)
639 if (set_tracing_pid(ftrace) < 0) {
640 pr_err("failed to set ftrace pid\n");
644 if (set_tracing_cpu(ftrace) < 0) {
649 if (set_tracing_func_stack_trace(ftrace) < 0) {
654 if (set_tracing_func_irqinfo(ftrace) < 0) {
659 if (set_tracing_filters(ftrace) < 0) {
664 if (set_tracing_depth(ftrace) < 0) {
669 if (set_tracing_percpu_buffer_size(ftrace) < 0) {
674 if (set_tracing_trace_inherit(ftrace) < 0) {
679 if (set_tracing_sleep_time(ftrace) < 0) {
684 if (set_tracing_funcgraph_args(ftrace) < 0) {
689 if (set_tracing_funcgraph_retval(ftrace) < 0) {
694 if (set_tracing_funcgraph_retaddr(ftrace) < 0) {
699 if (set_tracing_funcgraph_irqs(ftrace) < 0) {
704 if (set_tracing_funcgraph_verbose(ftrace) < 0) {
709 if (set_tracing_thresh(ftrace) < 0) {
714 if (set_tracing_funcgraph_tail(ftrace) < 0) {
722 static void select_tracer(struct perf_ftrace *ftrace)
724 bool graph = !list_empty(&ftrace->graph_funcs) ||
725 !list_empty(&ftrace->nograph_funcs);
726 bool func = !list_empty(&ftrace->filters) ||
727 !list_empty(&ftrace->notrace);
731 ftrace->tracer = "function_graph";
733 ftrace->tracer = "function";
736 pr_debug("%s tracer is used\n", ftrace->tracer);
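Reconstructed from the fragments above, the selection rule in full: any -G/-g list forces function_graph, otherwise any -T/-N list forces function, and with neither the tracer picked by --tracer or the ftrace.tracer config is kept (this sketch assumes perf's internal headers, which are in scope in builtin-ftrace.c):

static void select_tracer_sketch(struct perf_ftrace *ftrace)
{
	bool graph = !list_empty(&ftrace->graph_funcs) ||
		     !list_empty(&ftrace->nograph_funcs);
	bool func = !list_empty(&ftrace->filters) ||
		    !list_empty(&ftrace->notrace);

	if (graph)
		ftrace->tracer = "function_graph";	/* graph wins */
	else if (func)
		ftrace->tracer = "function";
	/* else: keep the tracer chosen by option or config */

	pr_debug("%s tracer is used\n", ftrace->tracer);
}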
739 static int __cmd_ftrace(struct perf_ftrace *ftrace)
748 select_tracer(ftrace);
753 if (reset_tracing_files(ftrace) < 0) {
754 pr_err("failed to reset ftrace\n");
758 /* reset ftrace buffer */
762 if (set_tracing_options(ftrace) < 0)
765 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
766 pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
793 if (!ftrace->target.initial_delay) {
800 evlist__start_workload(ftrace->evlist);
802 if (ftrace->target.initial_delay > 0) {
803 usleep(ftrace->target.initial_delay * 1000);
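Lines 793-803 encode the --delay contract: with no delay, tracing is enabled before the workload starts so nothing around exec time is missed; with a positive delay, the workload starts first and tracing_on is written only after initial_delay milliseconds. Paraphrased as one unit (error messages and cleanup paths trimmed):

/* Sketch of the enable/workload ordering around --delay. */
static int start_tracing_sketch(struct perf_ftrace *ftrace)
{
	if (!ftrace->target.initial_delay &&
	    write_tracing_file("tracing_on", "1") < 0)
		return -1;

	evlist__start_workload(ftrace->evlist);	/* exec the command */

	if (ftrace->target.initial_delay > 0) {
		usleep(ftrace->target.initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0)
			return -1;
	}
	return 0;
}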
852 static void make_histogram(struct perf_ftrace *ftrace, int buckets[],
855 int min_latency = ftrace->min_latency;
856 int max_latency = ftrace->max_latency;
857 unsigned int bucket_num = ftrace->bucket_num;
903 if (ftrace->use_nsec)
912 if (!ftrace->bucket_range) {
920 i = num / ftrace->bucket_range + 1;
941 static void display_histogram(struct perf_ftrace *ftrace, int buckets[])
943 int min_latency = ftrace->min_latency;
944 bool use_nsec = ftrace->use_nsec;
945 unsigned int bucket_num = ftrace->bucket_num;
965 if (!ftrace->hide_empty || buckets[0])
974 if (ftrace->hide_empty && !buckets[i])
976 if (!ftrace->bucket_range) {
986 start = (i - 1) * ftrace->bucket_range + min_latency;
987 stop = i * ftrace->bucket_range + min_latency;
989 if (start >= ftrace->max_latency)
991 if (stop > ftrace->max_latency)
992 stop = ftrace->max_latency;
1011 if (ftrace->hide_empty && !buckets[bucket_num - 1])
1013 if (!ftrace->bucket_range) {
1016 unsigned int upper_outlier = (bucket_num - 2) * ftrace->bucket_range + min_latency;
1017 if (upper_outlier > ftrace->max_latency)
1018 upper_outlier = ftrace->max_latency;
1032 printf("\n# statistics (in %s)\n", ftrace->use_nsec ? "nsec" : "usec");
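make_histogram() and display_histogram() agree on one bucket layout: without --bucket-range the scale is log2; with it, bucket 0 catches samples below min_latency, fixed-width buckets cover [min_latency, max_latency), and the last bucket is the overflow clamped at lines 1016-1018. The index rule, restated as a hedged pure function (num is already in usec or nsec per --use-nsec):

#include <math.h>

static unsigned int latency_bucket_sketch(unsigned int num,
					  unsigned int min_latency,
					  unsigned int bucket_range,
					  unsigned int bucket_num)
{
	unsigned int i;

	if (!bucket_range) {
		i = num ? (unsigned int)log2(num) : 0;	/* log2 scale */
	} else {
		if (num < min_latency)
			return 0;			/* underflow */
		i = (num - min_latency) / bucket_range + 1;
	}
	if (i >= bucket_num)
		i = bucket_num - 1;			/* overflow */
	return i;
}

For example, with --bucket-range=100 and --min-latency=100, a 250 usec sample lands in bucket (250 - 100) / 100 + 1 = 2, which display_histogram() prints as the 200-300 range per the start/stop math at lines 986-987.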
1040 static int prepare_func_latency(struct perf_ftrace *ftrace)
1045 if (ftrace->target.use_bpf)
1046 return perf_ftrace__latency_prepare_bpf(ftrace);
1051 if (reset_tracing_files(ftrace) < 0) {
1052 pr_err("failed to reset ftrace\n");
1056 /* reset ftrace buffer */
1060 if (set_tracing_options(ftrace) < 0)
1085 static int start_func_latency(struct perf_ftrace *ftrace)
1087 if (ftrace->target.use_bpf)
1088 return perf_ftrace__latency_start_bpf(ftrace);
1098 static int stop_func_latency(struct perf_ftrace *ftrace)
1100 if (ftrace->target.use_bpf)
1101 return perf_ftrace__latency_stop_bpf(ftrace);
1107 static int read_func_latency(struct perf_ftrace *ftrace, int buckets[])
1109 if (ftrace->target.use_bpf)
1110 return perf_ftrace__latency_read_bpf(ftrace, buckets, &latency_stats);
1115 static int cleanup_func_latency(struct perf_ftrace *ftrace)
1117 if (ftrace->target.use_bpf)
1118 return perf_ftrace__latency_cleanup_bpf(ftrace);
1124 static int __cmd_latency(struct perf_ftrace *ftrace)
1134 trace_fd = prepare_func_latency(ftrace);
1141 if (start_func_latency(ftrace) < 0)
1144 evlist__start_workload(ftrace->evlist);
1146 buckets = calloc(ftrace->bucket_num, sizeof(*buckets));
1162 make_histogram(ftrace, buckets, buf, n, line);
1166 stop_func_latency(ftrace);
1175 while (!ftrace->target.use_bpf) {
1179 make_histogram(ftrace, buckets, buf, n, line);
1182 read_func_latency(ftrace, buckets);
1184 display_histogram(ftrace, buckets);
1190 cleanup_func_latency(ftrace);
1205 static int prepare_func_profile(struct perf_ftrace *ftrace)
1207 ftrace->tracer = "function_graph";
1208 ftrace->graph_tail = 1;
1209 ftrace->graph_verbose = 0;
1211 ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL);
1212 if (ftrace->profile_hash == NULL)
1223 static int add_func_duration(struct perf_ftrace *ftrace, char *func, double time_ns)
1227 if (!hashmap__find(ftrace->profile_hash, func, &prof)) {
1240 hashmap__add(ftrace->profile_hash, key, prof);
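add_func_duration() is a classic accumulate-into-hashmap step: look the function name up in profile_hash (created at line 1211 with string hash/equal callbacks), insert a zeroed entry keyed by a private copy of the name on first sight, then fold the new duration in. A sketch using perf's bundled hashmap API, with a simplified value type; the real entry keeps a struct stats plus a latency histogram:

#include <stdlib.h>
#include <string.h>
#include "util/hashmap.h"

struct func_prof_sketch {
	double total_ns;
	unsigned long calls;
};

static int add_func_duration_sketch(struct hashmap *hash,
				    const char *func, double time_ns)
{
	struct func_prof_sketch *prof;

	if (!hashmap__find(hash, func, &prof)) {
		char *key = strdup(func);	/* hashmap keeps the pointer */

		prof = calloc(1, sizeof(*prof));
		if (key == NULL || prof == NULL)
			return -1;
		hashmap__add(hash, key, prof);
	}
	prof->total_ns += time_ns;
	prof->calls++;
	return 0;
}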
1248 * The ftrace function_graph text output normally looks like below:
1264 static int parse_func_duration(struct perf_ftrace *ftrace, char *line, size_t len)
1320 return add_func_duration(ftrace, func, duration);
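parse_func_duration() scrapes each function_graph line for a duration and a function name; profile mode sets graph_tail at line 1208 precisely so the name also appears on closing-brace lines. A hedged sketch of the core extraction only; the real parser also handles CPU/task columns and the '+'/'!' delay markers:

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

static int parse_duration_sketch(char *line, char **func, double *dur_us)
{
	char *p = strstr(line, " us ");	/* the duration column */
	char *name;

	if (p == NULL)
		return -1;		/* entry line: no duration yet */

	/* walk back from " us " to the start of the number */
	while (p > line && (isdigit((unsigned char)p[-1]) || p[-1] == '.'))
		p--;
	*dur_us = strtod(p, NULL);

	name = strchr(p, '|');
	if (name == NULL)
		return -1;
	name++;
	while (*name == ' ' || *name == '\t')
		name++;

	if (*name == '}') {
		/* funcgraph-tail puts the name in a trailing comment */
		name = strchr(name, '/');
		if (name == NULL)
			return -1;
		name += 3;		/* skip slash, star, space */
	}
	name[strcspn(name, "( \n")] = '\0';
	*func = name;
	return 0;
}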
1370 static void print_profile_result(struct perf_ftrace *ftrace)
1375 nr = hashmap__size(ftrace->profile_hash);
1386 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt)
1408 hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) {
1413 hashmap__free(ftrace->profile_hash);
1414 ftrace->profile_hash = NULL;
1417 static int __cmd_profile(struct perf_ftrace *ftrace)
1426 if (prepare_func_profile(ftrace) < 0) {
1434 if (reset_tracing_files(ftrace) < 0) {
1435 pr_err("failed to reset ftrace\n");
1439 /* reset ftrace buffer */
1443 if (set_tracing_options(ftrace) < 0)
1446 if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
1447 pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
1475 evlist__start_workload(ftrace->evlist);
1484 if (parse_func_duration(ftrace, line, line_len) < 0)
1504 if (parse_func_duration(ftrace, line, line_len) < 0)
1508 print_profile_result(ftrace);
1522 struct perf_ftrace *ftrace = cb;
1524 if (!strstarts(var, "ftrace."))
1527 if (strcmp(var, "ftrace.tracer"))
1532 ftrace->tracer = value;
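The perfconfig hook consumes exactly one key, ftrace.tracer, and rejects anything else in the ftrace. section. Reconstructed around the fragments above; the value check and the error message are assumptions about the full body, inferred from the two tracer names used by select_tracer():

static int perf_ftrace_config_sketch(const char *var, const char *value,
				     void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;			/* not our section */

	if (strcmp(var, "ftrace.tracer"))
		return -1;			/* unknown ftrace.* key */

	if (!strcmp(value, "function_graph") || !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}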
1668 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
1670 { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace },
1671 { .name = "irq-info", .value_ptr = &ftrace->func_irq_info },
1689 struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
1691 { .name = "args", .value_ptr = &ftrace->graph_args },
1692 { .name = "retval", .value_ptr = &ftrace->graph_retval },
1693 { .name = "retval-hex", .value_ptr = &ftrace->graph_retval_hex },
1694 { .name = "retaddr", .value_ptr = &ftrace->graph_retaddr },
1695 { .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time },
1696 { .name = "noirqs", .value_ptr = &ftrace->graph_noirqs },
1697 { .name = "verbose", .value_ptr = &ftrace->graph_verbose },
1698 { .name = "thresh", .value_ptr = &ftrace->graph_thresh },
1699 { .name = "depth", .value_ptr = &ftrace->graph_depth },
1700 { .name = "tail", .value_ptr = &ftrace->graph_tail },
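Both tables drive the same sub-option parser: the argument of --func-opts/--graph-opts (e.g. "args,retval,thresh=100") is split on commas and each token is matched against the name column, storing into value_ptr. perf's real helper is perf_parse_sublevel_options(); this sketch shows only the plain and name=value forms:

#include <stdlib.h>
#include <string.h>

struct sublevel_option_sketch {
	const char *name;
	int *value_ptr;
};

static int parse_sublevel_sketch(char *str,
				 const struct sublevel_option_sketch *opts)
{
	char *tok, *saveptr = NULL;

	for (tok = strtok_r(str, ",", &saveptr); tok;
	     tok = strtok_r(NULL, ",", &saveptr)) {
		const struct sublevel_option_sketch *o;
		char *val = strchr(tok, '=');

		if (val)
			*val++ = '\0';	/* split "thresh=100" */

		for (o = opts; o->name; o++) {
			if (!strcmp(tok, o->name)) {
				*o->value_ptr = val ? atoi(val) : 1;
				break;
			}
		}
		if (o->name == NULL)
			return -1;	/* unknown sub-option */
	}
	return 0;
}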
1749 struct perf_ftrace ftrace = {
1753 OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
1756 OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
1760 OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
1762 OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
1767 OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
1772 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
1775 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
1777 OPT_CALLBACK(0, "func-opts", &ftrace, "options",
1780 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
1783 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
1785 OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
1788 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
1790 OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
1792 OPT_INTEGER('D', "delay", &ftrace.target.initial_delay,
1797 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
1799 OPT_CALLBACK('e', "events", &ftrace.event_pair, "event1,event2",
1802 OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
1805 OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec,
1807 OPT_UINTEGER(0, "bucket-range", &ftrace.bucket_range,
1809 OPT_UINTEGER(0, "min-latency", &ftrace.min_latency,
1811 OPT_UINTEGER(0, "max-latency", &ftrace.max_latency,
1813 OPT_BOOLEAN(0, "hide-empty", &ftrace.hide_empty,
1818 OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
1821 OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
1823 OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
1826 OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
1828 OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
1833 OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
1841 "perf ftrace [<options>] [<command>]",
1842 "perf ftrace [<options>] -- [<command>] [<options>]",
1843 "perf ftrace {trace|latency|profile} [<options>] [<command>]",
1844 "perf ftrace {trace|latency|profile} [<options>] -- [<command>] [<options>]",
1849 INIT_LIST_HEAD(&ftrace.filters);
1850 INIT_LIST_HEAD(&ftrace.notrace);
1851 INIT_LIST_HEAD(&ftrace.graph_funcs);
1852 INIT_LIST_HEAD(&ftrace.nograph_funcs);
1853 INIT_LIST_HEAD(&ftrace.event_pair);
1864 pr_err("ftrace is not supported on this system\n");
1868 ret = perf_config(perf_ftrace_config, &ftrace);
1900 if (!argc && target__none(&ftrace.target))
1901 ftrace.target.system_wide = true;
1908 if (list_empty(&ftrace.filters) && list_empty(&ftrace.event_pair)) {
1915 if (!list_empty(&ftrace.filters) && !list_empty(&ftrace.event_pair)) {
1922 if (!list_empty(&ftrace.event_pair) && !ftrace.target.use_bpf) {
1929 if (!ftrace.bucket_range && ftrace.min_latency) {
1936 if (ftrace.bucket_range && !ftrace.min_latency) {
1938 ftrace.min_latency = ftrace.bucket_range;
1940 if (!ftrace.bucket_range && ftrace.max_latency) {
1947 if (ftrace.bucket_range && ftrace.max_latency &&
1948 ftrace.max_latency < ftrace.min_latency + ftrace.bucket_range) {
1957 ftrace.bucket_num = NUM_BUCKET;
1958 if (ftrace.bucket_range) {
1959 if (ftrace.max_latency)
1960 ftrace.bucket_num = (ftrace.max_latency - ftrace.min_latency) /
1961 ftrace.bucket_range + 2;
1964 ftrace.max_latency = (NUM_BUCKET - 2) * ftrace.bucket_range +
1965 ftrace.min_latency;
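Worked numbers for the bucket sizing above, as a self-checking snippet (the option values are assumed for illustration): with --bucket-range=100, --min-latency=100 and --max-latency=600 the histogram needs one underflow bucket, five 100-usec ranges, and one overflow bucket:

#include <assert.h>

int main(void)
{
	unsigned int bucket_range = 100;
	unsigned int min_latency = 100;
	unsigned int max_latency = 600;
	unsigned int bucket_num;

	/* mirrors lines 1960-1961: ranges plus under/overflow buckets */
	bucket_num = (max_latency - min_latency) / bucket_range + 2;
	assert(bucket_num == 7);

	/* without --max-latency, lines 1964-1965 derive it instead */
	bucket_num = 22;	/* stands in for NUM_BUCKET, value assumed */
	max_latency = (bucket_num - 2) * bucket_range + min_latency;
	assert(max_latency == 2100);

	return 0;
}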
1979 ret = target__validate(&ftrace.target);
1983 target__strerror(&ftrace.target, ret, errbuf, 512);
1988 ftrace.evlist = evlist__new();
1989 if (ftrace.evlist == NULL) {
1994 ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
1999 ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target,
2006 ret = cmd_func(&ftrace);
2009 evlist__delete(ftrace.evlist);
2012 delete_filter_func(&ftrace.filters);
2013 delete_filter_func(&ftrace.notrace);
2014 delete_filter_func(&ftrace.graph_funcs);
2015 delete_filter_func(&ftrace.nograph_funcs);
2016 delete_filter_func(&ftrace.event_pair);