#ifndef __PERF_FTRACE_H__
#define __PERF_FTRACE_H__

#include <linux/list.h>

#include "target.h"

struct evlist;
struct hashmap;

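/*
 * Options for a single "perf ftrace" invocation: the tracing target,
 * the ftrace tracer to use, function filter lists and per-tracer knobs.
 */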
struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	struct hashmap		*profile_hash;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	bool			use_nsec;
	int			graph_depth;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	int			graph_tail;
};

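/* One function name on one of the filter lists above. */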
struct filter_entry {
	struct list_head	list;
	char			name[];
};

#define NUM_BUCKET  22  /* 20 + 2 (for outliers in both directions) */

#ifdef HAVE_BPF_SKEL

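/*
 * BPF-based function latency histogram (used by "perf ftrace latency").
 * The expected call sequence is prepare -> start -> stop -> read -> cleanup;
 * buckets[] is expected to have room for NUM_BUCKET entries.
 */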
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
				  int buckets[]);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);

#else  /* !HAVE_BPF_SKEL */

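/*
 * Without BPF skeleton support the BPF latency mode is unavailable,
 * so these stubs just report failure.
 */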
static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
			      int buckets[] __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

#endif  /* HAVE_BPF_SKEL */

#endif  /* __PERF_FTRACE_H__ */