1 #ifndef __PERF_FTRACE_H__
2 #define __PERF_FTRACE_H__
3 
4 #include <linux/list.h>
5 
6 #include "target.h"
7 
/* Opaque types; only pointers/embedded instances are used in this header. */
struct evlist;
struct hashmap;		/* fixed typo: was "hashamp", which never matched profile_hash */
struct stats;
11 
/*
 * Options and state for the "perf ftrace" subcommand, filled in from the
 * command line and shared by its trace/latency/profile modes.
 */
struct perf_ftrace {
	struct evlist		*evlist;	/* selected events */
	struct target		target;		/* tasks/CPUs to trace */
	const char		*tracer;	/* ftrace tracer name -- presumably "function"/"function_graph"; confirm in ftrace.c */
	/* function-name filter lists; entries are struct filter_entry (below) */
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	struct hashmap		*profile_hash;	/* per-function profile data -- keying scheme defined in ftrace.c */
	unsigned long		percpu_buffer_size;
	bool			inherit;	/* trace children of traced tasks */
	bool			use_nsec;	/* report latency in nsec instead of usec -- TODO confirm unit */
	/* latency-histogram bucket configuration */
	unsigned int		bucket_range;
	unsigned int		min_latency;
	unsigned int		max_latency;
	unsigned int		bucket_num;
	bool			hide_empty;	/* skip empty buckets in output */
	int			graph_depth;
	/* tracer-specific toggles (see tracefs trace options) */
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	int			graph_tail;
};
38 
/*
 * One function-name filter, linked into a perf_ftrace filter list.
 * 'name' is a C99 flexible array member: the entry must be allocated with
 * room for the string beyond sizeof(struct filter_entry).
 */
struct filter_entry {
	struct list_head	list;
	char			name[];
};
43 
44 #define NUM_BUCKET  22  /* 20 + 2 (for outliers in both direction) */
45 
46 #ifdef HAVE_BPF_SKEL
47 
48 int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
49 int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
50 int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
51 int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
52 				  int buckets[], struct stats *stats);
53 int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);
54 
55 #else  /* !HAVE_BPF_SKEL */
56 
57 static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace * ftrace __maybe_unused)58 perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
59 {
60 	return -1;
61 }
62 
63 static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace * ftrace __maybe_unused)64 perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
65 {
66 	return -1;
67 }
68 
69 static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace * ftrace __maybe_unused)70 perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
71 {
72 	return -1;
73 }
74 
75 static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace * ftrace __maybe_unused,int buckets[]__maybe_unused,struct stats * stats __maybe_unused)76 perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
77 			      int buckets[] __maybe_unused,
78 			      struct stats *stats __maybe_unused)
79 {
80 	return -1;
81 }
82 
83 static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace * ftrace __maybe_unused)84 perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
85 {
86 	return -1;
87 }
88 
89 #endif  /* HAVE_BPF_SKEL */
90 
91 #endif  /* __PERF_FTRACE_H__ */
92