xref: /linux/tools/perf/util/ftrace.h (revision f4f346c3465949ebba80c6cc52cd8d2eeaa545fd)
#ifndef __PERF_FTRACE_H__
#define __PERF_FTRACE_H__

#include <linux/list.h>

#include "target.h"

struct evlist;
struct hashmap;
struct stats;

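/*
 * Per-invocation options and state for the "perf ftrace" subcommands.
 * The field comments below are editorial additions, inferred from the
 * names and the ftrace tracefs knobs they mirror.
 */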
struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;	/* e.g. "function" or "function_graph" */
	struct list_head	filters;	/* functions to trace */
	struct list_head	notrace;	/* functions to exclude from tracing */
	struct list_head	graph_funcs;	/* functions to graph */
	struct list_head	nograph_funcs;	/* functions to exclude from graphing */
	struct list_head	event_pair;
	struct hashmap		*profile_hash;
	unsigned long		percpu_buffer_size;
	bool			inherit;	/* also trace forked children */
	bool			use_nsec;	/* report latency in nanoseconds */
	unsigned int		bucket_range;	/* latency histogram bucket width */
	unsigned int		min_latency;
	unsigned int		max_latency;
	unsigned int		bucket_num;	/* number of histogram buckets */
	bool			hide_empty;	/* hide empty histogram buckets */
	int			graph_depth;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_args;
	int			graph_retval;
	int			graph_retval_hex;
	int			graph_retaddr;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	int			graph_tail;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};
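
/*
 * Illustrative sketch (not part of the original header): because "name"
 * is a flexible array member, an entry must be allocated with room for
 * the string plus its NUL terminator. The helper below is hypothetical;
 * it assumes <stdlib.h> and <string.h>, and leaves linking the list
 * node to the caller:
 *
 *	static struct filter_entry *filter_entry__new(const char *name)
 *	{
 *		struct filter_entry *entry;
 *
 *		entry = malloc(sizeof(*entry) + strlen(name) + 1);
 *		if (entry != NULL)
 *			strcpy(entry->name, name);
 *		return entry;
 *	}
 */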

#define NUM_BUCKET  22  /* 20 + 2 (for outliers in both directions) */

#ifdef HAVE_BPF_SKEL

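/*
 * BPF-based latency collection for "perf ftrace latency". The call
 * sequence below is a sketch inferred from the names (error handling
 * omitted); "buckets" presumably holds NUM_BUCKET entries:
 *
 *	perf_ftrace__latency_prepare_bpf(ftrace);
 *	perf_ftrace__latency_start_bpf(ftrace);
 *	... run the workload ...
 *	perf_ftrace__latency_stop_bpf(ftrace);
 *	perf_ftrace__latency_read_bpf(ftrace, buckets, &stats);
 *	perf_ftrace__latency_cleanup_bpf(ftrace);
 */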
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
				  int buckets[], struct stats *stats);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);

#else  /* !HAVE_BPF_SKEL */

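/* Without BPF skeleton support, the stubs below simply report failure. */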
static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
			      int buckets[] __maybe_unused,
			      struct stats *stats __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

#endif  /* HAVE_BPF_SKEL */

#endif  /* __PERF_FTRACE_H__ */