1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licensing details see kernel-base/COPYING
13  */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16 
17 #include <linux/types.h>
18 #include <linux/ioctl.h>
19 #include <asm/byteorder.h>
20 
21 /*
22  * User-space ABI bits:
23  */
24 
25 /*
26  * attr.type
27  */
28 enum perf_type_id {
29 	PERF_TYPE_HARDWARE			= 0,
30 	PERF_TYPE_SOFTWARE			= 1,
31 	PERF_TYPE_TRACEPOINT			= 2,
32 	PERF_TYPE_HW_CACHE			= 3,
33 	PERF_TYPE_RAW				= 4,
34 	PERF_TYPE_BREAKPOINT			= 5,
35 
36 	PERF_TYPE_MAX,				/* non-ABI */
37 };
38 
39 /*
40  * Generalized performance event event_id types, used by the
41  * attr.config parameter of the sys_perf_event_open()
42  * syscall:
43  */
44 enum perf_hw_id {
45 	/*
46 	 * Common hardware events, generalized by the kernel:
47 	 */
48 	PERF_COUNT_HW_CPU_CYCLES		= 0,
49 	PERF_COUNT_HW_INSTRUCTIONS		= 1,
50 	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
51 	PERF_COUNT_HW_CACHE_MISSES		= 3,
52 	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
53 	PERF_COUNT_HW_BRANCH_MISSES		= 5,
54 	PERF_COUNT_HW_BUS_CYCLES		= 6,
55 	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
56 	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
57 	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,
58 
59 	PERF_COUNT_HW_MAX,			/* non-ABI */
60 };
61 
62 /*
63  * Generalized hardware cache events:
64  *
65  *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
66  *       { read, write, prefetch } x
67  *       { accesses, misses }
68  */
69 enum perf_hw_cache_id {
70 	PERF_COUNT_HW_CACHE_L1D			= 0,
71 	PERF_COUNT_HW_CACHE_L1I			= 1,
72 	PERF_COUNT_HW_CACHE_LL			= 2,
73 	PERF_COUNT_HW_CACHE_DTLB		= 3,
74 	PERF_COUNT_HW_CACHE_ITLB		= 4,
75 	PERF_COUNT_HW_CACHE_BPU			= 5,
76 	PERF_COUNT_HW_CACHE_NODE		= 6,
77 
78 	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
79 };
80 
81 enum perf_hw_cache_op_id {
82 	PERF_COUNT_HW_CACHE_OP_READ		= 0,
83 	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
84 	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,
85 
86 	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
87 };
88 
89 enum perf_hw_cache_op_result_id {
90 	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
91 	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,
92 
93 	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
94 };
95 
96 /*
97  * Special "software" events provided by the kernel, even if the hardware
98  * does not support performance events. These events measure various
99  * physical and software conditions in the kernel (and allow them to be
100  * profiled as well):
101  */
102 enum perf_sw_ids {
103 	PERF_COUNT_SW_CPU_CLOCK			= 0,
104 	PERF_COUNT_SW_TASK_CLOCK		= 1,
105 	PERF_COUNT_SW_PAGE_FAULTS		= 2,
106 	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
107 	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
108 	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
109 	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
110 	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
111 	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
112 
113 	PERF_COUNT_SW_MAX,			/* non-ABI */
114 };
115 
116 /*
117  * Bits that can be set in attr.sample_type to request information
118  * in the overflow packets.
119  */
120 enum perf_event_sample_format {
121 	PERF_SAMPLE_IP				= 1U << 0,
122 	PERF_SAMPLE_TID				= 1U << 1,
123 	PERF_SAMPLE_TIME			= 1U << 2,
124 	PERF_SAMPLE_ADDR			= 1U << 3,
125 	PERF_SAMPLE_READ			= 1U << 4,
126 	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
127 	PERF_SAMPLE_ID				= 1U << 6,
128 	PERF_SAMPLE_CPU				= 1U << 7,
129 	PERF_SAMPLE_PERIOD			= 1U << 8,
130 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
131 	PERF_SAMPLE_RAW				= 1U << 10,
132 
133 	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
134 };
135 
136 /*
137  * The format of the data returned by read() on a perf event fd,
138  * as specified by attr.read_format:
139  *
140  * struct read_format {
141  *	{ u64		value;
142  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
143  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
144  *	  { u64		id;           } && PERF_FORMAT_ID
145  *	} && !PERF_FORMAT_GROUP
146  *
147  *	{ u64		nr;
148  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
149  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
150  *	  { u64		value;
151  *	    { u64	id;           } && PERF_FORMAT_ID
152  *	  }		cntr[nr];
153  *	} && PERF_FORMAT_GROUP
154  * };
155  */
156 enum perf_event_read_format {
157 	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
158 	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
159 	PERF_FORMAT_ID				= 1U << 2,
160 	PERF_FORMAT_GROUP			= 1U << 3,
161 
162 	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
163 };
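
/*
 * Illustrative sketch (not part of the ABI; the struct names are made up
 * for this example): one way user-space could parse the read() result of
 * a group leader opened with PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING,
 * following the layout documented above:
 *
 *	struct group_value { __u64 value, id; };
 *	struct group_read {
 *		__u64			nr;
 *		__u64			time_enabled;
 *		__u64			time_running;
 *		struct group_value	cntr[];
 *	};
 *
 *	char buf[4096];
 *	struct group_read *gr = (struct group_read *)buf;
 *	__u64 i;
 *
 *	if (read(group_fd, buf, sizeof(buf)) < 0)
 *		return -1;
 *	for (i = 0; i < gr->nr; i++)
 *		printf("id %llu: %llu\n",
 *		       (unsigned long long)gr->cntr[i].id,
 *		       (unsigned long long)gr->cntr[i].value);
 */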
164 
165 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
166 
167 /*
168  * Hardware event_id to monitor via a performance monitoring event:
169  */
170 struct perf_event_attr {
171 
172 	/*
173 	 * Major type: hardware/software/tracepoint/etc.
174 	 */
175 	__u32			type;
176 
177 	/*
178 	 * Size of the attr structure, for fwd/bwd compat.
179 	 */
180 	__u32			size;
181 
182 	/*
183 	 * Type specific configuration information.
184 	 */
185 	__u64			config;
186 
187 	union {
188 		__u64		sample_period;
189 		__u64		sample_freq;
190 	};
191 
192 	__u64			sample_type;
193 	__u64			read_format;
194 
195 	__u64			disabled       :  1, /* off by default        */
196 				inherit	       :  1, /* children inherit it   */
197 				pinned	       :  1, /* must always be on PMU */
198 				exclusive      :  1, /* only group on PMU     */
199 				exclude_user   :  1, /* don't count user      */
200 				exclude_kernel :  1, /* ditto kernel          */
201 				exclude_hv     :  1, /* ditto hypervisor      */
202 				exclude_idle   :  1, /* don't count when idle */
203 				mmap           :  1, /* include mmap data     */
204 				comm	       :  1, /* include comm data     */
205 				freq           :  1, /* use freq, not period  */
206 				inherit_stat   :  1, /* per task counts       */
207 				enable_on_exec :  1, /* next exec enables     */
208 				task           :  1, /* trace fork/exit       */
209 				watermark      :  1, /* wakeup_watermark      */
210 				/*
211 				 * precise_ip:
212 				 *
213 				 *  0 - SAMPLE_IP can have arbitrary skid
214 				 *  1 - SAMPLE_IP must have constant skid
215 				 *  2 - SAMPLE_IP requested to have 0 skid
216 				 *  3 - SAMPLE_IP must have 0 skid
217 				 *
218 				 *  See also PERF_RECORD_MISC_EXACT_IP
219 				 */
220 				precise_ip     :  2, /* skid constraint       */
221 				mmap_data      :  1, /* non-exec mmap data    */
222 				sample_id_all  :  1, /* sample_type all events */
223 
224 				exclude_host   :  1, /* don't count in host   */
225 				exclude_guest  :  1, /* don't count in guest  */
226 
227 				__reserved_1   : 43;
228 
229 	union {
230 		__u32		wakeup_events;	  /* wakeup every n events */
231 		__u32		wakeup_watermark; /* bytes before wakeup   */
232 	};
233 
234 	__u32			bp_type;
235 	union {
236 		__u64		bp_addr;
237 		__u64		config1; /* extension of config */
238 	};
239 	union {
240 		__u64		bp_len;
241 		__u64		config2; /* extension of config1 */
242 	};
243 };
244 
245 /*
246  * Ioctls that can be done on a perf event fd:
247  */
248 #define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
249 #define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
250 #define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
251 #define PERF_EVENT_IOC_RESET		_IO ('$', 3)
252 #define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
253 #define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
254 #define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
255 
256 enum perf_event_ioc_flags {
257 	PERF_IOC_FLAG_GROUP		= 1U << 0,
258 };
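
/*
 * Illustrative sketch (headers and error handling omitted): a minimal
 * user-space sequence that opens a hardware counter via the raw
 * perf_event_open() syscall (there is usually no libc wrapper) and
 * drives it with the ioctls above:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	long long count;
 *	int fd;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	run_workload();				(whatever is being measured)
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */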
259 
260 /*
261  * Structure of the page that can be mapped via mmap
262  */
263 struct perf_event_mmap_page {
264 	__u32	version;		/* version number of this structure */
265 	__u32	compat_version;		/* lowest version this is compat with */
266 
267 	/*
268 	 * Bits needed to read the hw events in user-space.
269 	 *
270 	 *   u32 seq;
271 	 *   s64 count;
272 	 *
273 	 *   do {
274 	 *     seq = pc->lock;
275 	 *
276 	 *     barrier()
277 	 *     if (pc->index) {
278 	 *       count = pmc_read(pc->index - 1);
279 	 *       count += pc->offset;
280 	 *     } else
281 	 *       goto regular_read;
282 	 *
283 	 *     barrier();
284 	 *   } while (pc->lock != seq);
285 	 *
286 	 * NOTE: for obvious reasons this only works on self-monitoring
287 	 *       processes.
288 	 */
289 	__u32	lock;			/* seqlock for synchronization */
290 	__u32	index;			/* hardware event identifier */
291 	__s64	offset;			/* add to hardware event value */
292 	__u64	time_enabled;		/* time event active */
293 	__u64	time_running;		/* time event on cpu */
294 
295 		/*
296 		 * Hole for extension of the self monitor capabilities
297 		 */
298 
299 	__u64	__reserved[123];	/* align to 1k */
300 
301 	/*
302 	 * Control data for the mmap() data buffer.
303 	 *
304 	 * User-space reading the @data_head value should issue an rmb(), on
305 	 * SMP capable platforms, after reading this value -- see
306 	 * perf_event_wakeup().
307 	 *
308 	 * When the mapping is PROT_WRITE the @data_tail value should be
309 	 * written by userspace to reflect the last read data. In this case
310 	 * the kernel will not over-write unread data (see the sketch below).
311 	 */
312 	__u64   data_head;		/* head in the data section */
313 	__u64	data_tail;		/* user-space written tail */
314 };
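
/*
 * Illustrative sketch of the read loop referenced above (assumptions:
 * "base" is the mmap()ed area, "page_size" the system page size and
 * "buf_size" the power-of-two data size, i.e. the mapping length minus
 * one page; records start one page into the mapping; user-space must
 * supply its own read barrier where rmb() is written):
 *
 *	struct perf_event_mmap_page *pc = base;
 *	char *data = (char *)base + page_size;
 *	__u64 head, tail = pc->data_tail;
 *
 *	head = pc->data_head;
 *	rmb();
 *	while (tail < head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (void *)(data + (tail & (buf_size - 1)));
 *		(consume hdr->size bytes of type hdr->type, minding wrap)
 *		tail += hdr->size;
 *	}
 *	pc->data_tail = tail;		(tell the kernel what has been read)
 */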
315 
316 #define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
317 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
318 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
319 #define PERF_RECORD_MISC_USER			(2 << 0)
320 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
321 #define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
322 #define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
323 
324 /*
325  * Indicates that the content of PERF_SAMPLE_IP points to
326  * the actual instruction that triggered the event. See also
327  * perf_event_attr::precise_ip.
328  */
329 #define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
330 /*
331  * Reserve the last bit to indicate some extended misc field
332  */
333 #define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
334 
335 struct perf_event_header {
336 	__u32	type;
337 	__u16	misc;
338 	__u16	size;
339 };
340 
341 enum perf_event_type {
342 
343 	/*
344 	 * If perf_event_attr.sample_id_all is set then all event types will
345 	 * carry the sample_type-selected fields that identify where/when an
346 	 * event took place (TID, TIME, ID, CPU, STREAM_ID), as described for
347 	 * PERF_RECORD_SAMPLE below. These fields are stashed just after the
348 	 * perf_event_header and the fields already present for the record
349 	 * type, i.e. at the end of the payload. That way a newer perf.data
350 	 * file will be supported by older perf tools, with these new optional
351 	 * fields being ignored.
352 	 *
353 	 * The MMAP events record the PROT_EXEC mappings so that we can
354 	 * correlate userspace IPs to code. They have the following structure:
355 	 *
356 	 * struct {
357 	 *	struct perf_event_header	header;
358 	 *
359 	 *	u32				pid, tid;
360 	 *	u64				addr;
361 	 *	u64				len;
362 	 *	u64				pgoff;
363 	 *	char				filename[];
364 	 * };
365 	 */
366 	PERF_RECORD_MMAP			= 1,
367 
368 	/*
369 	 * struct {
370 	 *	struct perf_event_header	header;
371 	 *	u64				id;
372 	 *	u64				lost;
373 	 * };
374 	 */
375 	PERF_RECORD_LOST			= 2,
376 
377 	/*
378 	 * struct {
379 	 *	struct perf_event_header	header;
380 	 *
381 	 *	u32				pid, tid;
382 	 *	char				comm[];
383 	 * };
384 	 */
385 	PERF_RECORD_COMM			= 3,
386 
387 	/*
388 	 * struct {
389 	 *	struct perf_event_header	header;
390 	 *	u32				pid, ppid;
391 	 *	u32				tid, ptid;
392 	 *	u64				time;
393 	 * };
394 	 */
395 	PERF_RECORD_EXIT			= 4,
396 
397 	/*
398 	 * struct {
399 	 *	struct perf_event_header	header;
400 	 *	u64				time;
401 	 *	u64				id;
402 	 *	u64				stream_id;
403 	 * };
404 	 */
405 	PERF_RECORD_THROTTLE			= 5,
406 	PERF_RECORD_UNTHROTTLE			= 6,
407 
408 	/*
409 	 * struct {
410 	 *	struct perf_event_header	header;
411 	 *	u32				pid, ppid;
412 	 *	u32				tid, ptid;
413 	 *	u64				time;
414 	 * };
415 	 */
416 	PERF_RECORD_FORK			= 7,
417 
418 	/*
419 	 * struct {
420 	 *	struct perf_event_header	header;
421 	 *	u32				pid, tid;
422 	 *
423 	 *	struct read_format		values;
424 	 * };
425 	 */
426 	PERF_RECORD_READ			= 8,
427 
428 	/*
429 	 * struct {
430 	 *	struct perf_event_header	header;
431 	 *
432 	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
433 	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
434 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
435 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
436 	 *	{ u64			id;	  } && PERF_SAMPLE_ID
437 	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
438 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
439 	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
440 	 *
441 	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
442 	 *
443 	 *	{ u64			nr,
444 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
445 	 *
446 	 *	#
447 	 *	# The RAW record below is opaque data wrt the ABI
448 	 *	#
449 	 *	# That is, the ABI doesn't make any promises wrt to
450 	 *	# the stability of its content, it may vary depending
451 	 *	# on event, hardware, kernel version and phase of
452 	 *	# the moon.
453 	 *	#
454 	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
455 	 *	#
456 	 *
457 	 *	{ u32			size;
458 	 *	  char                  data[size];}&& PERF_SAMPLE_RAW
459 	 * };
460 	 */
461 	PERF_RECORD_SAMPLE			= 9,
462 
463 	PERF_RECORD_MAX,			/* non-ABI */
464 };
465 
466 enum perf_callchain_context {
467 	PERF_CONTEXT_HV			= (__u64)-32,
468 	PERF_CONTEXT_KERNEL		= (__u64)-128,
469 	PERF_CONTEXT_USER		= (__u64)-512,
470 
471 	PERF_CONTEXT_GUEST		= (__u64)-2048,
472 	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
473 	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,
474 
475 	PERF_CONTEXT_MAX		= (__u64)-4095,
476 };
477 
478 #define PERF_FLAG_FD_NO_GROUP		(1U << 0)
479 #define PERF_FLAG_FD_OUTPUT		(1U << 1)
480 #define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */
481 
482 #ifdef __KERNEL__
483 /*
484  * Kernel-internal data types and definitions:
485  */
486 
487 #ifdef CONFIG_PERF_EVENTS
488 # include <linux/cgroup.h>
489 # include <asm/perf_event.h>
490 # include <asm/local64.h>
491 #endif
492 
493 struct perf_guest_info_callbacks {
494 	int				(*is_in_guest)(void);
495 	int				(*is_user_mode)(void);
496 	unsigned long			(*get_guest_ip)(void);
497 };
498 
499 #ifdef CONFIG_HAVE_HW_BREAKPOINT
500 #include <asm/hw_breakpoint.h>
501 #endif
502 
503 #include <linux/list.h>
504 #include <linux/mutex.h>
505 #include <linux/rculist.h>
506 #include <linux/rcupdate.h>
507 #include <linux/spinlock.h>
508 #include <linux/hrtimer.h>
509 #include <linux/fs.h>
510 #include <linux/pid_namespace.h>
511 #include <linux/workqueue.h>
512 #include <linux/ftrace.h>
513 #include <linux/cpu.h>
514 #include <linux/irq_work.h>
515 #include <linux/jump_label.h>
516 #include <linux/atomic.h>
517 #include <asm/local.h>
518 
519 #define PERF_MAX_STACK_DEPTH		255
520 
521 struct perf_callchain_entry {
522 	__u64				nr;
523 	__u64				ip[PERF_MAX_STACK_DEPTH];
524 };
525 
526 struct perf_raw_record {
527 	u32				size;
528 	void				*data;
529 };
530 
531 struct perf_branch_entry {
532 	__u64				from;
533 	__u64				to;
534 	__u64				flags;
535 };
536 
537 struct perf_branch_stack {
538 	__u64				nr;
539 	struct perf_branch_entry	entries[0];
540 };
541 
542 struct task_struct;
543 
544 /*
545  * extra PMU register associated with an event
546  */
547 struct hw_perf_event_extra {
548 	u64		config;	/* register value */
549 	unsigned int	reg;	/* register address or index */
550 	int		alloc;	/* extra register already allocated */
551 	int		idx;	/* index in shared_regs->regs[] */
552 };
553 
554 /**
555  * struct hw_perf_event - performance event hardware details:
556  */
557 struct hw_perf_event {
558 #ifdef CONFIG_PERF_EVENTS
559 	union {
560 		struct { /* hardware */
561 			u64		config;
562 			u64		last_tag;
563 			unsigned long	config_base;
564 			unsigned long	event_base;
565 			int		idx;
566 			int		last_cpu;
567 			struct hw_perf_event_extra extra_reg;
568 		};
569 		struct { /* software */
570 			struct hrtimer	hrtimer;
571 		};
572 #ifdef CONFIG_HAVE_HW_BREAKPOINT
573 		struct { /* breakpoint */
574 			struct arch_hw_breakpoint	info;
575 			struct list_head		bp_list;
576 			/*
577 			 * Crufty hack to avoid the chicken and egg
578 			 * problem hw_breakpoint has with context
579 			 * creation and event initialization.
580 			 */
581 			struct task_struct		*bp_target;
582 		};
583 #endif
584 	};
585 	int				state;
586 	local64_t			prev_count;
587 	u64				sample_period;
588 	u64				last_period;
589 	local64_t			period_left;
590 	u64                             interrupts_seq;
591 	u64				interrupts;
592 
593 	u64				freq_time_stamp;
594 	u64				freq_count_stamp;
595 #endif
596 };
597 
598 /*
599  * hw_perf_event::state flags
600  */
601 #define PERF_HES_STOPPED	0x01 /* the counter is stopped */
602 #define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
603 #define PERF_HES_ARCH		0x04
604 
605 struct perf_event;
606 
607 /*
608  * Common implementation detail of pmu::{start,commit,cancel}_txn
609  */
610 #define PERF_EVENT_TXN 0x1
611 
612 /**
613  * struct pmu - generic performance monitoring unit
614  */
615 struct pmu {
616 	struct list_head		entry;
617 
618 	struct device			*dev;
619 	char				*name;
620 	int				type;
621 
622 	int * __percpu			pmu_disable_count;
623 	struct perf_cpu_context * __percpu pmu_cpu_context;
624 	int				task_ctx_nr;
625 
626 	/*
627 	 * Fully disable/enable this PMU, can be used to protect from the PMI
628 	 * as well as for lazy/batch writing of the MSRs.
629 	 */
630 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
631 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
632 
633 	/*
634 	 * Try and initialize the event for this PMU.
635 	 * Should return -ENOENT when the @event doesn't match this PMU.
636 	 */
637 	int (*event_init)		(struct perf_event *event);
638 
639 #define PERF_EF_START	0x01		/* start the counter when adding    */
640 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
641 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
642 
643 	/*
644 	 * Adds/Removes a counter to/from the PMU, can be done inside
645 	 * a transaction, see the ->*_txn() methods.
646 	 */
647 	int  (*add)			(struct perf_event *event, int flags);
648 	void (*del)			(struct perf_event *event, int flags);
649 
650 	/*
651 	 * Starts/Stops a counter present on the PMU. The PMI handler
652 	 * should stop the counter when perf_event_overflow() returns
653 	 * !0. ->start() will be used to continue.
654 	 */
655 	void (*start)			(struct perf_event *event, int flags);
656 	void (*stop)			(struct perf_event *event, int flags);
657 
658 	/*
659 	 * Updates the counter value of the event.
660 	 */
661 	void (*read)			(struct perf_event *event);
662 
663 	/*
664 	 * Group event scheduling is treated as a transaction: add the
665 	 * group's events as a whole and perform one schedulability test.
666 	 * If the test fails, roll back the whole group (outline below).
667 	 *
668 	 * Start the transaction, after this ->add() doesn't need to
669 	 * do schedulability tests.
670 	 */
671 	void (*start_txn)		(struct pmu *pmu); /* optional */
672 	/*
673 	 * If ->start_txn() disabled the ->add() schedulability test
674 	 * then ->commit_txn() is required to perform one. On success
675 	 * the transaction is closed. On error the transaction is kept
676 	 * open until ->cancel_txn() is called.
677 	 */
678 	int  (*commit_txn)		(struct pmu *pmu); /* optional */
679 	/*
680 	 * Will cancel the transaction, assumes ->del() is called
681 	 * for each successful ->add() during the transaction.
682 	 */
683 	void (*cancel_txn)		(struct pmu *pmu); /* optional */
684 };
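
/*
 * Simplified outline (pseudo-code; the iteration helpers are made up)
 * of how the core can use the transaction callbacks above to schedule a
 * whole group in:
 *
 *	pmu->start_txn(pmu);
 *	for_each_event_in_group(event) {
 *		if (pmu->add(event, PERF_EF_START))
 *			goto fail;
 *	}
 *	if (!pmu->commit_txn(pmu))
 *		return 0;			(whole group scheduled)
 * fail:
 *	for_each_added_event(event)
 *		pmu->del(event, 0);
 *	pmu->cancel_txn(pmu);
 *	return -EAGAIN;
 */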
685 
686 /**
687  * enum perf_event_active_state - the states of an event
688  */
689 enum perf_event_active_state {
690 	PERF_EVENT_STATE_ERROR		= -2,
691 	PERF_EVENT_STATE_OFF		= -1,
692 	PERF_EVENT_STATE_INACTIVE	=  0,
693 	PERF_EVENT_STATE_ACTIVE		=  1,
694 };
695 
696 struct file;
697 struct perf_sample_data;
698 
699 typedef void (*perf_overflow_handler_t)(struct perf_event *,
700 					struct perf_sample_data *,
701 					struct pt_regs *regs);
702 
703 enum perf_group_flag {
704 	PERF_GROUP_SOFTWARE		= 0x1,
705 };
706 
707 #define SWEVENT_HLIST_BITS		8
708 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
709 
710 struct swevent_hlist {
711 	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
712 	struct rcu_head			rcu_head;
713 };
714 
715 #define PERF_ATTACH_CONTEXT	0x01
716 #define PERF_ATTACH_GROUP	0x02
717 #define PERF_ATTACH_TASK	0x04
718 
719 #ifdef CONFIG_CGROUP_PERF
720 /*
721  * perf_cgroup_info keeps track of time_enabled for a cgroup.
722  * This is a per-cpu dynamically allocated data structure.
723  */
724 struct perf_cgroup_info {
725 	u64				time;
726 	u64				timestamp;
727 };
728 
729 struct perf_cgroup {
730 	struct				cgroup_subsys_state css;
731 	struct				perf_cgroup_info *info;	/* timing info, one per cpu */
732 };
733 #endif
734 
735 struct ring_buffer;
736 
737 /**
738  * struct perf_event - performance event kernel representation:
739  */
740 struct perf_event {
741 #ifdef CONFIG_PERF_EVENTS
742 	struct list_head		group_entry;
743 	struct list_head		event_entry;
744 	struct list_head		sibling_list;
745 	struct hlist_node		hlist_entry;
746 	int				nr_siblings;
747 	int				group_flags;
748 	struct perf_event		*group_leader;
749 	struct pmu			*pmu;
750 
751 	enum perf_event_active_state	state;
752 	unsigned int			attach_state;
753 	local64_t			count;
754 	atomic64_t			child_count;
755 
756 	/*
757 	 * These are the total time in nanoseconds that the event
758 	 * has been enabled (i.e. eligible to run, and the task has
759 	 * been scheduled in, if this is a per-task event)
760 	 * and running (scheduled onto the CPU), respectively.
761 	 *
762 	 * They are computed from tstamp_enabled, tstamp_running and
763 	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
764 	 */
765 	u64				total_time_enabled;
766 	u64				total_time_running;
767 
768 	/*
769 	 * These are timestamps used for computing total_time_enabled
770 	 * and total_time_running when the event is in INACTIVE or
771 	 * ACTIVE state, measured in nanoseconds from an arbitrary point
772 	 * in time.
773 	 * tstamp_enabled: the notional time when the event was enabled
774 	 * tstamp_running: the notional time when the event was scheduled on
775 	 * tstamp_stopped: in INACTIVE state, the notional time when the
776 	 *	event was scheduled off.
777 	 */
778 	u64				tstamp_enabled;
779 	u64				tstamp_running;
780 	u64				tstamp_stopped;
781 
782 	/*
783 	 * timestamp shadows the actual context timing but it can
784 	 * be safely used in NMI interrupt context. It reflects the
785 	 * context time as it was when the event was last scheduled in.
786 	 *
787 	 * ctx_time already accounts for ctx->timestamp. Therefore to
788 	 * compute ctx_time for a sample, simply add perf_clock().
789 	 */
790 	u64				shadow_ctx_time;
791 
792 	struct perf_event_attr		attr;
793 	u16				header_size;
794 	u16				id_header_size;
795 	u16				read_size;
796 	struct hw_perf_event		hw;
797 
798 	struct perf_event_context	*ctx;
799 	struct file			*filp;
800 
801 	/*
802 	 * These accumulate total time (in nanoseconds) that children
803 	 * events have been enabled and running, respectively.
804 	 */
805 	atomic64_t			child_total_time_enabled;
806 	atomic64_t			child_total_time_running;
807 
808 	/*
809 	 * Protect attach/detach and child_list:
810 	 */
811 	struct mutex			child_mutex;
812 	struct list_head		child_list;
813 	struct perf_event		*parent;
814 
815 	int				oncpu;
816 	int				cpu;
817 
818 	struct list_head		owner_entry;
819 	struct task_struct		*owner;
820 
821 	/* mmap bits */
822 	struct mutex			mmap_mutex;
823 	atomic_t			mmap_count;
824 	int				mmap_locked;
825 	struct user_struct		*mmap_user;
826 	struct ring_buffer		*rb;
827 	struct list_head		rb_entry;
828 
829 	/* poll related */
830 	wait_queue_head_t		waitq;
831 	struct fasync_struct		*fasync;
832 
833 	/* delayed work for NMIs and such */
834 	int				pending_wakeup;
835 	int				pending_kill;
836 	int				pending_disable;
837 	struct irq_work			pending;
838 
839 	atomic_t			event_limit;
840 
841 	void (*destroy)(struct perf_event *);
842 	struct rcu_head			rcu_head;
843 
844 	struct pid_namespace		*ns;
845 	u64				id;
846 
847 	perf_overflow_handler_t		overflow_handler;
848 	void				*overflow_handler_context;
849 
850 #ifdef CONFIG_EVENT_TRACING
851 	struct ftrace_event_call	*tp_event;
852 	struct event_filter		*filter;
853 #endif
854 
855 #ifdef CONFIG_CGROUP_PERF
856 	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
857 	int				cgrp_defer_enabled;
858 #endif
859 
860 #endif /* CONFIG_PERF_EVENTS */
861 };
862 
863 enum perf_event_context_type {
864 	task_context,
865 	cpu_context,
866 };
867 
868 /**
869  * struct perf_event_context - event context structure
870  *
871  * Used as a container for task events and CPU events as well:
872  */
873 struct perf_event_context {
874 	struct pmu			*pmu;
875 	enum perf_event_context_type	type;
876 	/*
877 	 * Protect the states of the events in the list,
878 	 * nr_active, and the list:
879 	 */
880 	raw_spinlock_t			lock;
881 	/*
882 	 * Protect the list of events.  Locking either mutex or lock
883 	 * is sufficient to ensure the list doesn't change; to change
884 	 * the list you need to lock both the mutex and the spinlock.
885 	 */
886 	struct mutex			mutex;
887 
888 	struct list_head		pinned_groups;
889 	struct list_head		flexible_groups;
890 	struct list_head		event_list;
891 	int				nr_events;
892 	int				nr_active;
893 	int				is_active;
894 	int				nr_stat;
895 	int				nr_freq;
896 	int				rotate_disable;
897 	atomic_t			refcount;
898 	struct task_struct		*task;
899 
900 	/*
901 	 * Context clock, runs when context enabled.
902 	 */
903 	u64				time;
904 	u64				timestamp;
905 
906 	/*
907 	 * These fields let us detect when two contexts have both
908 	 * been cloned (inherited) from a common ancestor.
909 	 */
910 	struct perf_event_context	*parent_ctx;
911 	u64				parent_gen;
912 	u64				generation;
913 	int				pin_count;
914 	int				nr_cgroups; /* cgroup events present */
915 	struct rcu_head			rcu_head;
916 };
917 
918 /*
919  * Number of contexts where an event can trigger:
920  *	task, softirq, hardirq, nmi.
921  */
922 #define PERF_NR_CONTEXTS	4
923 
924 /**
925  * struct perf_cpu_context - per cpu event context structure
926  */
927 struct perf_cpu_context {
928 	struct perf_event_context	ctx;
929 	struct perf_event_context	*task_ctx;
930 	int				active_oncpu;
931 	int				exclusive;
932 	struct list_head		rotation_list;
933 	int				jiffies_interval;
934 	struct pmu			*active_pmu;
935 	struct perf_cgroup		*cgrp;
936 };
937 
938 struct perf_output_handle {
939 	struct perf_event		*event;
940 	struct ring_buffer		*rb;
941 	unsigned long			wakeup;
942 	unsigned long			size;
943 	void				*addr;
944 	int				page;
945 };
946 
947 #ifdef CONFIG_PERF_EVENTS
948 
949 extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
950 extern void perf_pmu_unregister(struct pmu *pmu);
951 
952 extern int perf_num_counters(void);
953 extern const char *perf_pmu_name(void);
954 extern void __perf_event_task_sched_in(struct task_struct *prev,
955 				       struct task_struct *task);
956 extern void __perf_event_task_sched_out(struct task_struct *prev,
957 					struct task_struct *next);
958 extern int perf_event_init_task(struct task_struct *child);
959 extern void perf_event_exit_task(struct task_struct *child);
960 extern void perf_event_free_task(struct task_struct *task);
961 extern void perf_event_delayed_put(struct task_struct *task);
962 extern void perf_event_print_debug(void);
963 extern void perf_pmu_disable(struct pmu *pmu);
964 extern void perf_pmu_enable(struct pmu *pmu);
965 extern int perf_event_task_disable(void);
966 extern int perf_event_task_enable(void);
967 extern int perf_event_refresh(struct perf_event *event, int refresh);
968 extern void perf_event_update_userpage(struct perf_event *event);
969 extern int perf_event_release_kernel(struct perf_event *event);
970 extern struct perf_event *
971 perf_event_create_kernel_counter(struct perf_event_attr *attr,
972 				int cpu,
973 				struct task_struct *task,
974 				perf_overflow_handler_t callback,
975 				void *context);
976 extern u64 perf_event_read_value(struct perf_event *event,
977 				 u64 *enabled, u64 *running);
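
/*
 * Illustrative sketch (error handling shortened) of the in-kernel
 * counter API declared above, counting cycles on the current CPU:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *		.pinned	= 1,
 *	};
 *	struct perf_event *event;
 *	u64 count, enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, smp_processor_id(),
 *						 NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */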
978 
979 struct perf_sample_data {
980 	u64				type;
981 
982 	u64				ip;
983 	struct {
984 		u32	pid;
985 		u32	tid;
986 	}				tid_entry;
987 	u64				time;
988 	u64				addr;
989 	u64				id;
990 	u64				stream_id;
991 	struct {
992 		u32	cpu;
993 		u32	reserved;
994 	}				cpu_entry;
995 	u64				period;
996 	struct perf_callchain_entry	*callchain;
997 	struct perf_raw_record		*raw;
998 };
999 
1000 static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
1001 {
1002 	data->addr = addr;
1003 	data->raw  = NULL;
1004 }
1005 
1006 extern void perf_output_sample(struct perf_output_handle *handle,
1007 			       struct perf_event_header *header,
1008 			       struct perf_sample_data *data,
1009 			       struct perf_event *event);
1010 extern void perf_prepare_sample(struct perf_event_header *header,
1011 				struct perf_sample_data *data,
1012 				struct perf_event *event,
1013 				struct pt_regs *regs);
1014 
1015 extern int perf_event_overflow(struct perf_event *event,
1016 				 struct perf_sample_data *data,
1017 				 struct pt_regs *regs);
1018 
1019 static inline bool is_sampling_event(struct perf_event *event)
1020 {
1021 	return event->attr.sample_period != 0;
1022 }
1023 
1024 /*
1025  * Return 1 for a software event, 0 for a hardware event
1026  */
1027 static inline int is_software_event(struct perf_event *event)
1028 {
1029 	return event->pmu->task_ctx_nr == perf_sw_context;
1030 }
1031 
1032 extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1033 
1034 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1035 
1036 #ifndef perf_arch_fetch_caller_regs
1037 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1038 #endif
1039 
1040 /*
1041  * Take a snapshot of the regs. Skip ip and frame pointer to
1042  * the nth caller. We only need a few of the regs:
1043  * - ip for PERF_SAMPLE_IP
1044  * - cs for user_mode() tests
1045  * - bp for callchains
1046  * - eflags, for future purposes, just in case
1047  */
1048 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1049 {
1050 	memset(regs, 0, sizeof(*regs));
1051 
1052 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1053 }
1054 
1055 static __always_inline void
1056 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1057 {
1058 	struct pt_regs hot_regs;
1059 
1060 	if (static_branch(&perf_swevent_enabled[event_id])) {
1061 		if (!regs) {
1062 			perf_fetch_caller_regs(&hot_regs);
1063 			regs = &hot_regs;
1064 		}
1065 		__perf_sw_event(event_id, nr, regs, addr);
1066 	}
1067 }
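
/*
 * Typical usage (illustrative): callers such as fault handlers emit a
 * generic software event with a single call, e.g.:
 *
 *	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 */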
1068 
1069 extern struct jump_label_key_deferred perf_sched_events;
1070 
1071 static inline void perf_event_task_sched_in(struct task_struct *prev,
1072 					    struct task_struct *task)
1073 {
1074 	if (static_branch(&perf_sched_events.key))
1075 		__perf_event_task_sched_in(prev, task);
1076 }
1077 
1078 static inline void perf_event_task_sched_out(struct task_struct *prev,
1079 					     struct task_struct *next)
1080 {
1081 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1082 
1083 	if (static_branch(&perf_sched_events.key))
1084 		__perf_event_task_sched_out(prev, next);
1085 }
1086 
1087 extern void perf_event_mmap(struct vm_area_struct *vma);
1088 extern struct perf_guest_info_callbacks *perf_guest_cbs;
1089 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1090 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1091 
1092 extern void perf_event_comm(struct task_struct *tsk);
1093 extern void perf_event_fork(struct task_struct *tsk);
1094 
1095 /* Callchains */
1096 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1097 
1098 extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
1099 extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
1100 
1101 static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
1102 {
1103 	if (entry->nr < PERF_MAX_STACK_DEPTH)
1104 		entry->ip[entry->nr++] = ip;
1105 }
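
/*
 * Illustrative use (pseudo-code; the frame walker is architecture
 * specific and made up here): callchain code stores a context marker
 * followed by each recovered return address:
 *
 *	perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 *	perf_callchain_store(entry, instruction_pointer(regs));
 *	while (walk_stack_frame(&frame))
 *		perf_callchain_store(entry, frame.return_address);
 */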
1106 
1107 extern int sysctl_perf_event_paranoid;
1108 extern int sysctl_perf_event_mlock;
1109 extern int sysctl_perf_event_sample_rate;
1110 
1111 extern int perf_proc_update_handler(struct ctl_table *table, int write,
1112 		void __user *buffer, size_t *lenp,
1113 		loff_t *ppos);
1114 
1115 static inline bool perf_paranoid_tracepoint_raw(void)
1116 {
1117 	return sysctl_perf_event_paranoid > -1;
1118 }
1119 
1120 static inline bool perf_paranoid_cpu(void)
1121 {
1122 	return sysctl_perf_event_paranoid > 0;
1123 }
1124 
1125 static inline bool perf_paranoid_kernel(void)
1126 {
1127 	return sysctl_perf_event_paranoid > 1;
1128 }
1129 
1130 extern void perf_event_init(void);
1131 extern void perf_tp_event(u64 addr, u64 count, void *record,
1132 			  int entry_size, struct pt_regs *regs,
1133 			  struct hlist_head *head, int rctx);
1134 extern void perf_bp_event(struct perf_event *event, void *data);
1135 
1136 #ifndef perf_misc_flags
1137 # define perf_misc_flags(regs) \
1138 		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1139 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
1140 #endif
1141 
1142 extern int perf_output_begin(struct perf_output_handle *handle,
1143 			     struct perf_event *event, unsigned int size);
1144 extern void perf_output_end(struct perf_output_handle *handle);
1145 extern void perf_output_copy(struct perf_output_handle *handle,
1146 			     const void *buf, unsigned int len);
1147 extern int perf_swevent_get_recursion_context(void);
1148 extern void perf_swevent_put_recursion_context(int rctx);
1149 extern void perf_event_enable(struct perf_event *event);
1150 extern void perf_event_disable(struct perf_event *event);
1151 extern void perf_event_task_tick(void);
1152 #else
1153 static inline void
1154 perf_event_task_sched_in(struct task_struct *prev,
1155 			 struct task_struct *task)			{ }
1156 static inline void
1157 perf_event_task_sched_out(struct task_struct *prev,
1158 			  struct task_struct *next)			{ }
1159 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
1160 static inline void perf_event_exit_task(struct task_struct *child)	{ }
1161 static inline void perf_event_free_task(struct task_struct *task)	{ }
1162 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
1163 static inline void perf_event_print_debug(void)				{ }
1164 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
1165 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
1166 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1167 {
1168 	return -EINVAL;
1169 }
1170 
1171 static inline void
1172 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
1173 static inline void
1174 perf_bp_event(struct perf_event *event, void *data)			{ }
1175 
1176 static inline int perf_register_guest_info_callbacks
1177 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1178 static inline int perf_unregister_guest_info_callbacks
1179 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1180 
1181 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
1182 static inline void perf_event_comm(struct task_struct *tsk)		{ }
1183 static inline void perf_event_fork(struct task_struct *tsk)		{ }
1184 static inline void perf_event_init(void)				{ }
1185 static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
1186 static inline void perf_swevent_put_recursion_context(int rctx)		{ }
1187 static inline void perf_event_enable(struct perf_event *event)		{ }
1188 static inline void perf_event_disable(struct perf_event *event)		{ }
1189 static inline void perf_event_task_tick(void)				{ }
1190 #endif
1191 
1192 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1193 
1194 /*
1195  * This has to have a higher priority than migration_notifier in sched.c.
1196  */
1197 #define perf_cpu_notifier(fn)						\
1198 do {									\
1199 	static struct notifier_block fn##_nb __cpuinitdata =		\
1200 		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
1201 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
1202 		(void *)(unsigned long)smp_processor_id());		\
1203 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
1204 		(void *)(unsigned long)smp_processor_id());		\
1205 	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
1206 		(void *)(unsigned long)smp_processor_id());		\
1207 	register_cpu_notifier(&fn##_nb);				\
1208 } while (0)
1209 
1210 #endif /* __KERNEL__ */
1211 #endif /* _LINUX_PERF_EVENT_H */
1212