xref: /linux/kernel/events/core.c (revision 33c66eb5e9844429911bf5478c96c60f9f8af9d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Performance events core code:
4  *
5  *  Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner <tglx@kernel.org>
6  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
7  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
8  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9  */
10 
11 #include <linux/fs.h>
12 #include <linux/mm.h>
13 #include <linux/cpu.h>
14 #include <linux/smp.h>
15 #include <linux/idr.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/tick.h>
21 #include <linux/sysfs.h>
22 #include <linux/dcache.h>
23 #include <linux/percpu.h>
24 #include <linux/ptrace.h>
25 #include <linux/reboot.h>
26 #include <linux/vmstat.h>
27 #include <linux/device.h>
28 #include <linux/export.h>
29 #include <linux/vmalloc.h>
30 #include <linux/hardirq.h>
31 #include <linux/hugetlb.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/cgroup.h>
38 #include <linux/perf_event.h>
39 #include <linux/trace_events.h>
40 #include <linux/hw_breakpoint.h>
41 #include <linux/mm_types.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44 #include <linux/compat.h>
45 #include <linux/bpf.h>
46 #include <linux/filter.h>
47 #include <linux/namei.h>
48 #include <linux/parser.h>
49 #include <linux/sched/clock.h>
50 #include <linux/sched/mm.h>
51 #include <linux/proc_ns.h>
52 #include <linux/mount.h>
53 #include <linux/min_heap.h>
54 #include <linux/highmem.h>
55 #include <linux/pgtable.h>
56 #include <linux/buildid.h>
57 #include <linux/task_work.h>
58 #include <linux/percpu-rwsem.h>
59 #include <linux/unwind_deferred.h>
60 #include <linux/kvm_types.h>
61 
62 #include "internal.h"
63 
64 #include <asm/irq_regs.h>
65 
66 typedef int (*remote_function_f)(void *);
67 
68 struct remote_function_call {
69 	struct task_struct	*p;
70 	remote_function_f	func;
71 	void			*info;
72 	int			ret;
73 };
74 
remote_function(void * data)75 static void remote_function(void *data)
76 {
77 	struct remote_function_call *tfc = data;
78 	struct task_struct *p = tfc->p;
79 
80 	if (p) {
81 		/* -EAGAIN */
82 		if (task_cpu(p) != smp_processor_id())
83 			return;
84 
85 		/*
86 		 * Now that we're on right CPU with IRQs disabled, we can test
87 		 * if we hit the right task without races.
88 		 */
89 
90 		tfc->ret = -ESRCH; /* No such (running) process */
91 		if (p != current)
92 			return;
93 	}
94 
95 	tfc->ret = tfc->func(tfc->info);
96 }
97 
98 /**
99  * task_function_call - call a function on the cpu on which a task runs
100  * @p:		the task to evaluate
101  * @func:	the function to be called
102  * @info:	the function call argument
103  *
104  * Calls the function @func when the task is currently running. This might
105  * be on the current CPU, which just calls the function directly.  This will
106  * retry due to any failures in smp_call_function_single(), such as if the
107  * task_cpu() goes offline concurrently.
108  *
109  * returns @func return value or -ESRCH or -ENXIO when the process isn't running
110  */
111 static int
task_function_call(struct task_struct * p,remote_function_f func,void * info)112 task_function_call(struct task_struct *p, remote_function_f func, void *info)
113 {
114 	struct remote_function_call data = {
115 		.p	= p,
116 		.func	= func,
117 		.info	= info,
118 		.ret	= -EAGAIN,
119 	};
120 	int ret;
121 
122 	for (;;) {
123 		ret = smp_call_function_single(task_cpu(p), remote_function,
124 					       &data, 1);
125 		if (!ret)
126 			ret = data.ret;
127 
128 		if (ret != -EAGAIN)
129 			break;
130 
131 		cond_resched();
132 	}
133 
134 	return ret;
135 }
136 
137 /**
138  * cpu_function_call - call a function on the cpu
139  * @cpu:	target cpu to queue this function
140  * @func:	the function to be called
141  * @info:	the function call argument
142  *
143  * Calls the function @func on the remote cpu.
144  *
145  * returns: @func return value or -ENXIO when the cpu is offline
146  */
cpu_function_call(int cpu,remote_function_f func,void * info)147 static int cpu_function_call(int cpu, remote_function_f func, void *info)
148 {
149 	struct remote_function_call data = {
150 		.p	= NULL,
151 		.func	= func,
152 		.info	= info,
153 		.ret	= -ENXIO, /* No such CPU */
154 	};
155 
156 	smp_call_function_single(cpu, remote_function, &data, 1);
157 
158 	return data.ret;
159 }
160 
161 enum event_type_t {
162 	EVENT_FLEXIBLE	= 0x01,
163 	EVENT_PINNED	= 0x02,
164 	EVENT_TIME	= 0x04,
165 	EVENT_FROZEN	= 0x08,
166 	/* see ctx_resched() for details */
167 	EVENT_CPU	= 0x10,
168 	EVENT_CGROUP	= 0x20,
169 
170 	/*
171 	 * EVENT_GUEST is set when scheduling in/out events between the host
172 	 * and a guest with a mediated vPMU.  Among other things, EVENT_GUEST
173 	 * is used:
174 	 *
175 	 * - In for_each_epc() to skip PMUs that don't support events in a
176 	 *   MEDIATED_VPMU guest, i.e. don't need to be context switched.
177 	 * - To indicate the start/end point of the events in a guest.  Guest
178 	 *   running time is deducted for host-only (exclude_guest) events.
179 	 */
180 	EVENT_GUEST	= 0x40,
181 	EVENT_FLAGS	= EVENT_CGROUP | EVENT_GUEST,
182 	/* compound helpers */
183 	EVENT_ALL         = EVENT_FLEXIBLE | EVENT_PINNED,
184 	EVENT_TIME_FROZEN = EVENT_TIME | EVENT_FROZEN,
185 };
186 
__perf_ctx_lock(struct perf_event_context * ctx)187 static inline void __perf_ctx_lock(struct perf_event_context *ctx)
188 {
189 	raw_spin_lock(&ctx->lock);
190 	WARN_ON_ONCE(ctx->is_active & EVENT_FROZEN);
191 }
192 
perf_ctx_lock(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx)193 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
194 			  struct perf_event_context *ctx)
195 {
196 	__perf_ctx_lock(&cpuctx->ctx);
197 	if (ctx)
198 		__perf_ctx_lock(ctx);
199 }
200 
__perf_ctx_unlock(struct perf_event_context * ctx)201 static inline void __perf_ctx_unlock(struct perf_event_context *ctx)
202 {
203 	/*
204 	 * If ctx_sched_in() didn't again set any ALL flags, clean up
205 	 * after ctx_sched_out() by clearing is_active.
206 	 */
207 	if (ctx->is_active & EVENT_FROZEN) {
208 		if (!(ctx->is_active & EVENT_ALL))
209 			ctx->is_active = 0;
210 		else
211 			ctx->is_active &= ~EVENT_FROZEN;
212 	}
213 	raw_spin_unlock(&ctx->lock);
214 }
215 
perf_ctx_unlock(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx)216 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
217 			    struct perf_event_context *ctx)
218 {
219 	if (ctx)
220 		__perf_ctx_unlock(ctx);
221 	__perf_ctx_unlock(&cpuctx->ctx);
222 }
223 
224 typedef struct {
225 	struct perf_cpu_context *cpuctx;
226 	struct perf_event_context *ctx;
227 } class_perf_ctx_lock_t;
228 
class_perf_ctx_lock_destructor(class_perf_ctx_lock_t * _T)229 static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
230 { perf_ctx_unlock(_T->cpuctx, _T->ctx); }
231 
232 static inline class_perf_ctx_lock_t
class_perf_ctx_lock_constructor(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx)233 class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
234 				struct perf_event_context *ctx)
235 { perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
236 
237 #define TASK_TOMBSTONE ((void *)-1L)
238 
is_kernel_event(struct perf_event * event)239 static bool is_kernel_event(struct perf_event *event)
240 {
241 	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
242 }
243 
244 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
245 
perf_cpu_task_ctx(void)246 struct perf_event_context *perf_cpu_task_ctx(void)
247 {
248 	lockdep_assert_irqs_disabled();
249 	return this_cpu_ptr(&perf_cpu_context)->task_ctx;
250 }
251 
252 /*
253  * On task ctx scheduling...
254  *
255  * When !ctx->nr_events a task context will not be scheduled. This means
256  * we can disable the scheduler hooks (for performance) without leaving
257  * pending task ctx state.
258  *
259  * This however results in two special cases:
260  *
261  *  - removing the last event from a task ctx; this is relatively straight
262  *    forward and is done in __perf_remove_from_context.
263  *
264  *  - adding the first event to a task ctx; this is tricky because we cannot
265  *    rely on ctx->is_active and therefore cannot use event_function_call().
266  *    See perf_install_in_context().
267  *
268  * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
269  */
270 
271 typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
272 			struct perf_event_context *, void *);
273 
274 struct event_function_struct {
275 	struct perf_event *event;
276 	event_f func;
277 	void *data;
278 };
279 
event_function(void * info)280 static int event_function(void *info)
281 {
282 	struct event_function_struct *efs = info;
283 	struct perf_event *event = efs->event;
284 	struct perf_event_context *ctx = event->ctx;
285 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
286 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
287 	int ret = 0;
288 
289 	lockdep_assert_irqs_disabled();
290 
291 	perf_ctx_lock(cpuctx, task_ctx);
292 	/*
293 	 * Since we do the IPI call without holding ctx->lock things can have
294 	 * changed, double check we hit the task we set out to hit.
295 	 */
296 	if (ctx->task) {
297 		if (ctx->task != current) {
298 			ret = -ESRCH;
299 			goto unlock;
300 		}
301 
302 		/*
303 		 * We only use event_function_call() on established contexts,
304 		 * and event_function() is only ever called when active (or
305 		 * rather, we'll have bailed in task_function_call() or the
306 		 * above ctx->task != current test), therefore we must have
307 		 * ctx->is_active here.
308 		 */
309 		WARN_ON_ONCE(!ctx->is_active);
310 		/*
311 		 * And since we have ctx->is_active, cpuctx->task_ctx must
312 		 * match.
313 		 */
314 		WARN_ON_ONCE(task_ctx != ctx);
315 	} else {
316 		WARN_ON_ONCE(&cpuctx->ctx != ctx);
317 	}
318 
319 	efs->func(event, cpuctx, ctx, efs->data);
320 unlock:
321 	perf_ctx_unlock(cpuctx, task_ctx);
322 
323 	return ret;
324 }
325 
event_function_call(struct perf_event * event,event_f func,void * data)326 static void event_function_call(struct perf_event *event, event_f func, void *data)
327 {
328 	struct perf_event_context *ctx = event->ctx;
329 	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
330 	struct perf_cpu_context *cpuctx;
331 	struct event_function_struct efs = {
332 		.event = event,
333 		.func = func,
334 		.data = data,
335 	};
336 
337 	if (!event->parent) {
338 		/*
339 		 * If this is a !child event, we must hold ctx::mutex to
340 		 * stabilize the event->ctx relation. See
341 		 * perf_event_ctx_lock().
342 		 */
343 		lockdep_assert_held(&ctx->mutex);
344 	}
345 
346 	if (!task) {
347 		cpu_function_call(event->cpu, event_function, &efs);
348 		return;
349 	}
350 
351 	if (task == TASK_TOMBSTONE)
352 		return;
353 
354 again:
355 	if (!task_function_call(task, event_function, &efs))
356 		return;
357 
358 	local_irq_disable();
359 	cpuctx = this_cpu_ptr(&perf_cpu_context);
360 	perf_ctx_lock(cpuctx, ctx);
361 	/*
362 	 * Reload the task pointer, it might have been changed by
363 	 * a concurrent perf_event_context_sched_out().
364 	 */
365 	task = ctx->task;
366 	if (task == TASK_TOMBSTONE)
367 		goto unlock;
368 	if (ctx->is_active) {
369 		perf_ctx_unlock(cpuctx, ctx);
370 		local_irq_enable();
371 		goto again;
372 	}
373 	func(event, NULL, ctx, data);
374 unlock:
375 	perf_ctx_unlock(cpuctx, ctx);
376 	local_irq_enable();
377 }
378 
379 /*
380  * Similar to event_function_call() + event_function(), but hard assumes IRQs
381  * are already disabled and we're on the right CPU.
382  */
event_function_local(struct perf_event * event,event_f func,void * data)383 static void event_function_local(struct perf_event *event, event_f func, void *data)
384 {
385 	struct perf_event_context *ctx = event->ctx;
386 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
387 	struct task_struct *task = READ_ONCE(ctx->task);
388 	struct perf_event_context *task_ctx = NULL;
389 
390 	lockdep_assert_irqs_disabled();
391 
392 	if (task) {
393 		if (task == TASK_TOMBSTONE)
394 			return;
395 
396 		task_ctx = ctx;
397 	}
398 
399 	perf_ctx_lock(cpuctx, task_ctx);
400 
401 	task = ctx->task;
402 	if (task == TASK_TOMBSTONE)
403 		goto unlock;
404 
405 	if (task) {
406 		/*
407 		 * We must be either inactive or active and the right task,
408 		 * otherwise we're screwed, since we cannot IPI to somewhere
409 		 * else.
410 		 */
411 		if (ctx->is_active) {
412 			if (WARN_ON_ONCE(task != current))
413 				goto unlock;
414 
415 			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
416 				goto unlock;
417 		}
418 	} else {
419 		WARN_ON_ONCE(&cpuctx->ctx != ctx);
420 	}
421 
422 	func(event, cpuctx, ctx, data);
423 unlock:
424 	perf_ctx_unlock(cpuctx, task_ctx);
425 }
426 
427 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
428 		       PERF_FLAG_FD_OUTPUT  |\
429 		       PERF_FLAG_PID_CGROUP |\
430 		       PERF_FLAG_FD_CLOEXEC)
431 
432 /*
433  * branch priv levels that need permission checks
434  */
435 #define PERF_SAMPLE_BRANCH_PERM_PLM \
436 	(PERF_SAMPLE_BRANCH_KERNEL |\
437 	 PERF_SAMPLE_BRANCH_HV)
438 
439 /*
440  * perf_sched_events : >0 events exist
441  */
442 
443 static void perf_sched_delayed(struct work_struct *work);
444 DEFINE_STATIC_KEY_FALSE(perf_sched_events);
445 static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
446 static DEFINE_MUTEX(perf_sched_mutex);
447 static atomic_t perf_sched_count;
448 
449 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
450 
451 static atomic_t nr_mmap_events __read_mostly;
452 static atomic_t nr_comm_events __read_mostly;
453 static atomic_t nr_namespaces_events __read_mostly;
454 static atomic_t nr_task_events __read_mostly;
455 static atomic_t nr_freq_events __read_mostly;
456 static atomic_t nr_switch_events __read_mostly;
457 static atomic_t nr_ksymbol_events __read_mostly;
458 static atomic_t nr_bpf_events __read_mostly;
459 static atomic_t nr_cgroup_events __read_mostly;
460 static atomic_t nr_text_poke_events __read_mostly;
461 static atomic_t nr_build_id_events __read_mostly;
462 
463 static LIST_HEAD(pmus);
464 static DEFINE_MUTEX(pmus_lock);
465 static struct srcu_struct pmus_srcu;
466 static cpumask_var_t perf_online_mask;
467 static cpumask_var_t perf_online_core_mask;
468 static cpumask_var_t perf_online_die_mask;
469 static cpumask_var_t perf_online_cluster_mask;
470 static cpumask_var_t perf_online_pkg_mask;
471 static cpumask_var_t perf_online_sys_mask;
472 static struct kmem_cache *perf_event_cache;
473 
474 #ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
475 static DEFINE_PER_CPU(bool, guest_ctx_loaded);
476 
is_guest_mediated_pmu_loaded(void)477 static __always_inline bool is_guest_mediated_pmu_loaded(void)
478 {
479 	return __this_cpu_read(guest_ctx_loaded);
480 }
481 #else
is_guest_mediated_pmu_loaded(void)482 static __always_inline bool is_guest_mediated_pmu_loaded(void)
483 {
484 	return false;
485 }
486 #endif
487 
488 /*
489  * perf event paranoia level:
490  *  -1 - not paranoid at all
491  *   0 - disallow raw tracepoint access for unpriv
492  *   1 - disallow cpu events for unpriv
493  *   2 - disallow kernel profiling for unpriv
494  */
495 int sysctl_perf_event_paranoid __read_mostly = 2;
496 
497 /* Minimum for 512 kiB + 1 user control page. 'free' kiB per user. */
498 static int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024);
499 
500 /*
501  * max perf event sample rate
502  */
503 #define DEFAULT_MAX_SAMPLE_RATE		100000
504 #define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
505 #define DEFAULT_CPU_TIME_MAX_PERCENT	25
506 
507 int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;
508 static int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
509 
510 static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
511 static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;
512 
513 static int perf_sample_allowed_ns __read_mostly =
514 	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
515 
update_perf_cpu_limits(void)516 static void update_perf_cpu_limits(void)
517 {
518 	u64 tmp = perf_sample_period_ns;
519 
520 	tmp *= sysctl_perf_cpu_time_max_percent;
521 	tmp = div_u64(tmp, 100);
522 	if (!tmp)
523 		tmp = 1;
524 
525 	WRITE_ONCE(perf_sample_allowed_ns, tmp);
526 }
527 
528 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
529 
perf_event_max_sample_rate_handler(const struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)530 static int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write,
531 				       void *buffer, size_t *lenp, loff_t *ppos)
532 {
533 	int ret;
534 	int perf_cpu = sysctl_perf_cpu_time_max_percent;
535 	/*
536 	 * If throttling is disabled don't allow the write:
537 	 */
538 	if (write && (perf_cpu == 100 || perf_cpu == 0))
539 		return -EINVAL;
540 
541 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
542 	if (ret || !write)
543 		return ret;
544 
545 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
546 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
547 	update_perf_cpu_limits();
548 
549 	return 0;
550 }
551 
perf_cpu_time_max_percent_handler(const struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)552 static int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write,
553 		void *buffer, size_t *lenp, loff_t *ppos)
554 {
555 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
556 
557 	if (ret || !write)
558 		return ret;
559 
560 	if (sysctl_perf_cpu_time_max_percent == 100 ||
561 	    sysctl_perf_cpu_time_max_percent == 0) {
562 		printk(KERN_WARNING
563 		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
564 		WRITE_ONCE(perf_sample_allowed_ns, 0);
565 	} else {
566 		update_perf_cpu_limits();
567 	}
568 
569 	return 0;
570 }
571 
572 static const struct ctl_table events_core_sysctl_table[] = {
573 	/*
574 	 * User-space relies on this file as a feature check for
575 	 * perf_events being enabled. It's an ABI, do not remove!
576 	 */
577 	{
578 		.procname	= "perf_event_paranoid",
579 		.data		= &sysctl_perf_event_paranoid,
580 		.maxlen		= sizeof(sysctl_perf_event_paranoid),
581 		.mode		= 0644,
582 		.proc_handler	= proc_dointvec,
583 	},
584 	{
585 		.procname	= "perf_event_mlock_kb",
586 		.data		= &sysctl_perf_event_mlock,
587 		.maxlen		= sizeof(sysctl_perf_event_mlock),
588 		.mode		= 0644,
589 		.proc_handler	= proc_dointvec,
590 	},
591 	{
592 		.procname	= "perf_event_max_sample_rate",
593 		.data		= &sysctl_perf_event_sample_rate,
594 		.maxlen		= sizeof(sysctl_perf_event_sample_rate),
595 		.mode		= 0644,
596 		.proc_handler	= perf_event_max_sample_rate_handler,
597 		.extra1		= SYSCTL_ONE,
598 	},
599 	{
600 		.procname	= "perf_cpu_time_max_percent",
601 		.data		= &sysctl_perf_cpu_time_max_percent,
602 		.maxlen		= sizeof(sysctl_perf_cpu_time_max_percent),
603 		.mode		= 0644,
604 		.proc_handler	= perf_cpu_time_max_percent_handler,
605 		.extra1		= SYSCTL_ZERO,
606 		.extra2		= SYSCTL_ONE_HUNDRED,
607 	},
608 };
609 
init_events_core_sysctls(void)610 static int __init init_events_core_sysctls(void)
611 {
612 	register_sysctl_init("kernel", events_core_sysctl_table);
613 	return 0;
614 }
615 core_initcall(init_events_core_sysctls);
616 
617 
618 /*
619  * perf samples are done in some very critical code paths (NMIs).
620  * If they take too much CPU time, the system can lock up and not
621  * get any real work done.  This will drop the sample rate when
622  * we detect that events are taking too long.
623  */
624 #define NR_ACCUMULATED_SAMPLES 128
625 static DEFINE_PER_CPU(u64, running_sample_length);
626 
627 static u64 __report_avg;
628 static u64 __report_allowed;
629 
perf_duration_warn(struct irq_work * w)630 static void perf_duration_warn(struct irq_work *w)
631 {
632 	printk_ratelimited(KERN_INFO
633 		"perf: interrupt took too long (%lld > %lld), lowering "
634 		"kernel.perf_event_max_sample_rate to %d\n",
635 		__report_avg, __report_allowed,
636 		sysctl_perf_event_sample_rate);
637 }
638 
639 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
640 
perf_sample_event_took(u64 sample_len_ns)641 void perf_sample_event_took(u64 sample_len_ns)
642 {
643 	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
644 	u64 running_len;
645 	u64 avg_len;
646 	u32 max;
647 
648 	if (max_len == 0)
649 		return;
650 
651 	/* Decay the counter by 1 average sample. */
652 	running_len = __this_cpu_read(running_sample_length);
653 	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
654 	running_len += sample_len_ns;
655 	__this_cpu_write(running_sample_length, running_len);
656 
657 	/*
658 	 * Note: this will be biased artificially low until we have
659 	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
660 	 * from having to maintain a count.
661 	 */
662 	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
663 	if (avg_len <= max_len)
664 		return;
665 
666 	__report_avg = avg_len;
667 	__report_allowed = max_len;
668 
669 	/*
670 	 * Compute a throttle threshold 25% below the current duration.
671 	 */
672 	avg_len += avg_len / 4;
673 	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
674 	if (avg_len < max)
675 		max /= (u32)avg_len;
676 	else
677 		max = 1;
678 
679 	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
680 	WRITE_ONCE(max_samples_per_tick, max);
681 
682 	sysctl_perf_event_sample_rate = max * HZ;
683 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
684 
685 	if (!irq_work_queue(&perf_duration_work)) {
686 		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
687 			     "kernel.perf_event_max_sample_rate to %d\n",
688 			     __report_avg, __report_allowed,
689 			     sysctl_perf_event_sample_rate);
690 	}
691 }
692 
693 static atomic64_t perf_event_id;
694 
695 static void update_context_time(struct perf_event_context *ctx);
696 static u64 perf_event_time(struct perf_event *event);
697 
perf_event_print_debug(void)698 void __weak perf_event_print_debug(void)	{ }
699 
perf_clock(void)700 static inline u64 perf_clock(void)
701 {
702 	return local_clock();
703 }
704 
perf_event_clock(struct perf_event * event)705 static inline u64 perf_event_clock(struct perf_event *event)
706 {
707 	return event->clock();
708 }
709 
710 /*
711  * State based event timekeeping...
712  *
713  * The basic idea is to use event->state to determine which (if any) time
714  * fields to increment with the current delta. This means we only need to
715  * update timestamps when we change state or when they are explicitly requested
716  * (read).
717  *
718  * Event groups make things a little more complicated, but not terribly so. The
719  * rules for a group are that if the group leader is OFF the entire group is
720  * OFF, irrespective of what the group member states are. This results in
721  * __perf_effective_state().
722  *
723  * A further ramification is that when a group leader flips between OFF and
724  * !OFF, we need to update all group member times.
725  *
726  *
727  * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
728  * need to make sure the relevant context time is updated before we try and
729  * update our timestamps.
730  */
731 
732 static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event * event)733 __perf_effective_state(struct perf_event *event)
734 {
735 	struct perf_event *leader = event->group_leader;
736 
737 	if (leader->state <= PERF_EVENT_STATE_OFF)
738 		return leader->state;
739 
740 	return event->state;
741 }
742 
743 static __always_inline void
__perf_update_times(struct perf_event * event,u64 now,u64 * enabled,u64 * running)744 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
745 {
746 	enum perf_event_state state = __perf_effective_state(event);
747 	u64 delta = now - event->tstamp;
748 
749 	*enabled = event->total_time_enabled;
750 	if (state >= PERF_EVENT_STATE_INACTIVE)
751 		*enabled += delta;
752 
753 	*running = event->total_time_running;
754 	if (state >= PERF_EVENT_STATE_ACTIVE)
755 		*running += delta;
756 }
757 
perf_event_update_time(struct perf_event * event)758 static void perf_event_update_time(struct perf_event *event)
759 {
760 	u64 now = perf_event_time(event);
761 
762 	__perf_update_times(event, now, &event->total_time_enabled,
763 					&event->total_time_running);
764 	event->tstamp = now;
765 }
766 
perf_event_update_sibling_time(struct perf_event * leader)767 static void perf_event_update_sibling_time(struct perf_event *leader)
768 {
769 	struct perf_event *sibling;
770 
771 	for_each_sibling_event(sibling, leader)
772 		perf_event_update_time(sibling);
773 }
774 
775 static void
perf_event_set_state(struct perf_event * event,enum perf_event_state state)776 perf_event_set_state(struct perf_event *event, enum perf_event_state state)
777 {
778 	if (event->state == state)
779 		return;
780 
781 	perf_event_update_time(event);
782 	/*
783 	 * If a group leader gets enabled/disabled all its siblings
784 	 * are affected too.
785 	 */
786 	if ((event->state < 0) ^ (state < 0))
787 		perf_event_update_sibling_time(event);
788 
789 	WRITE_ONCE(event->state, state);
790 }
791 
792 /*
793  * UP store-release, load-acquire
794  */
795 
796 #define __store_release(ptr, val)					\
797 do {									\
798 	barrier();							\
799 	WRITE_ONCE(*(ptr), (val));					\
800 } while (0)
801 
802 #define __load_acquire(ptr)						\
803 ({									\
804 	__unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr));	\
805 	barrier();							\
806 	___p;								\
807 })
808 
perf_skip_pmu_ctx(struct perf_event_pmu_context * pmu_ctx,enum event_type_t event_type)809 static bool perf_skip_pmu_ctx(struct perf_event_pmu_context *pmu_ctx,
810 			      enum event_type_t event_type)
811 {
812 	if ((event_type & EVENT_CGROUP) && !pmu_ctx->nr_cgroups)
813 		return true;
814 	if ((event_type & EVENT_GUEST) &&
815 	    !(pmu_ctx->pmu->capabilities & PERF_PMU_CAP_MEDIATED_VPMU))
816 		return true;
817 	return false;
818 }
819 
820 #define for_each_epc(_epc, _ctx, _pmu, _event_type)			\
821 	list_for_each_entry(_epc, &((_ctx)->pmu_ctx_list), pmu_ctx_entry) \
822 		if (perf_skip_pmu_ctx(_epc, _event_type))		\
823 			continue;					\
824 		else if (_pmu && _epc->pmu != _pmu)			\
825 			continue;					\
826 		else
827 
perf_ctx_disable(struct perf_event_context * ctx,enum event_type_t event_type)828 static void perf_ctx_disable(struct perf_event_context *ctx,
829 			     enum event_type_t event_type)
830 {
831 	struct perf_event_pmu_context *pmu_ctx;
832 
833 	for_each_epc(pmu_ctx, ctx, NULL, event_type)
834 		perf_pmu_disable(pmu_ctx->pmu);
835 }
836 
perf_ctx_enable(struct perf_event_context * ctx,enum event_type_t event_type)837 static void perf_ctx_enable(struct perf_event_context *ctx,
838 			    enum event_type_t event_type)
839 {
840 	struct perf_event_pmu_context *pmu_ctx;
841 
842 	for_each_epc(pmu_ctx, ctx, NULL, event_type)
843 		perf_pmu_enable(pmu_ctx->pmu);
844 }
845 
846 static void ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
847 static void ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
848 
update_perf_time_ctx(struct perf_time_ctx * time,u64 now,bool adv)849 static inline void update_perf_time_ctx(struct perf_time_ctx *time, u64 now, bool adv)
850 {
851 	if (adv)
852 		time->time += now - time->stamp;
853 	time->stamp = now;
854 
855 	/*
856 	 * The above: time' = time + (now - timestamp), can be re-arranged
857 	 * into: time` = now + (time - timestamp), which gives a single value
858 	 * offset to compute future time without locks on.
859 	 *
860 	 * See perf_event_time_now(), which can be used from NMI context where
861 	 * it's (obviously) not possible to acquire ctx->lock in order to read
862 	 * both the above values in a consistent manner.
863 	 */
864 	WRITE_ONCE(time->offset, time->time - time->stamp);
865 }
866 
867 static_assert(offsetof(struct perf_event_context, timeguest) -
868 	      offsetof(struct perf_event_context, time) ==
869 	      sizeof(struct perf_time_ctx));
870 
871 #define T_TOTAL		0
872 #define T_GUEST		1
873 
__perf_event_time_ctx(struct perf_event * event,struct perf_time_ctx * times)874 static inline u64 __perf_event_time_ctx(struct perf_event *event,
875 					struct perf_time_ctx *times)
876 {
877 	u64 time = times[T_TOTAL].time;
878 
879 	if (event->attr.exclude_guest)
880 		time -= times[T_GUEST].time;
881 
882 	return time;
883 }
884 
__perf_event_time_ctx_now(struct perf_event * event,struct perf_time_ctx * times,u64 now)885 static inline u64 __perf_event_time_ctx_now(struct perf_event *event,
886 					    struct perf_time_ctx *times,
887 					    u64 now)
888 {
889 	if (is_guest_mediated_pmu_loaded() && event->attr.exclude_guest) {
890 		/*
891 		 * (now + times[total].offset) - (now + times[guest].offset) :=
892 		 * times[total].offset - times[guest].offset
893 		 */
894 		return READ_ONCE(times[T_TOTAL].offset) - READ_ONCE(times[T_GUEST].offset);
895 	}
896 
897 	return now + READ_ONCE(times[T_TOTAL].offset);
898 }
899 
900 #ifdef CONFIG_CGROUP_PERF
901 
902 static inline bool
perf_cgroup_match(struct perf_event * event)903 perf_cgroup_match(struct perf_event *event)
904 {
905 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
906 
907 	/* @event doesn't care about cgroup */
908 	if (!event->cgrp)
909 		return true;
910 
911 	/* wants specific cgroup scope but @cpuctx isn't associated with any */
912 	if (!cpuctx->cgrp)
913 		return false;
914 
915 	/*
916 	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
917 	 * also enabled for all its descendant cgroups.  If @cpuctx's
918 	 * cgroup is a descendant of @event's (the test covers identity
919 	 * case), it's a match.
920 	 */
921 	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
922 				    event->cgrp->css.cgroup);
923 }
924 
perf_detach_cgroup(struct perf_event * event)925 static inline void perf_detach_cgroup(struct perf_event *event)
926 {
927 	css_put(&event->cgrp->css);
928 	event->cgrp = NULL;
929 }
930 
is_cgroup_event(struct perf_event * event)931 static inline int is_cgroup_event(struct perf_event *event)
932 {
933 	return event->cgrp != NULL;
934 }
935 
936 static_assert(offsetof(struct perf_cgroup_info, timeguest) -
937 	      offsetof(struct perf_cgroup_info, time) ==
938 	      sizeof(struct perf_time_ctx));
939 
perf_cgroup_event_time(struct perf_event * event)940 static inline u64 perf_cgroup_event_time(struct perf_event *event)
941 {
942 	struct perf_cgroup_info *t;
943 
944 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
945 	return __perf_event_time_ctx(event, &t->time);
946 }
947 
perf_cgroup_event_time_now(struct perf_event * event,u64 now)948 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
949 {
950 	struct perf_cgroup_info *t;
951 
952 	t = per_cpu_ptr(event->cgrp->info, event->cpu);
953 	if (!__load_acquire(&t->active))
954 		return __perf_event_time_ctx(event, &t->time);
955 
956 	return __perf_event_time_ctx_now(event, &t->time, now);
957 }
958 
__update_cgrp_guest_time(struct perf_cgroup_info * info,u64 now,bool adv)959 static inline void __update_cgrp_guest_time(struct perf_cgroup_info *info, u64 now, bool adv)
960 {
961 	update_perf_time_ctx(&info->timeguest, now, adv);
962 }
963 
update_cgrp_time(struct perf_cgroup_info * info,u64 now)964 static inline void update_cgrp_time(struct perf_cgroup_info *info, u64 now)
965 {
966 	update_perf_time_ctx(&info->time, now, true);
967 	if (is_guest_mediated_pmu_loaded())
968 		__update_cgrp_guest_time(info, now, true);
969 }
970 
update_cgrp_time_from_cpuctx(struct perf_cpu_context * cpuctx,bool final)971 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
972 {
973 	struct perf_cgroup *cgrp = cpuctx->cgrp;
974 	struct cgroup_subsys_state *css;
975 	struct perf_cgroup_info *info;
976 
977 	if (cgrp) {
978 		u64 now = perf_clock();
979 
980 		for (css = &cgrp->css; css; css = css->parent) {
981 			cgrp = container_of(css, struct perf_cgroup, css);
982 			info = this_cpu_ptr(cgrp->info);
983 
984 			update_cgrp_time(info, now);
985 			if (final)
986 				__store_release(&info->active, 0);
987 		}
988 	}
989 }
990 
update_cgrp_time_from_event(struct perf_event * event)991 static inline void update_cgrp_time_from_event(struct perf_event *event)
992 {
993 	struct perf_cgroup_info *info;
994 
995 	/*
996 	 * ensure we access cgroup data only when needed and
997 	 * when we know the cgroup is pinned (css_get)
998 	 */
999 	if (!is_cgroup_event(event))
1000 		return;
1001 
1002 	info = this_cpu_ptr(event->cgrp->info);
1003 	/*
1004 	 * Do not update time when cgroup is not active
1005 	 */
1006 	if (info->active)
1007 		update_cgrp_time(info, perf_clock());
1008 }
1009 
1010 static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context * cpuctx,bool guest)1011 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx, bool guest)
1012 {
1013 	struct perf_event_context *ctx = &cpuctx->ctx;
1014 	struct perf_cgroup *cgrp = cpuctx->cgrp;
1015 	struct perf_cgroup_info *info;
1016 	struct cgroup_subsys_state *css;
1017 
1018 	/*
1019 	 * ctx->lock held by caller
1020 	 * ensure we do not access cgroup data
1021 	 * unless we have the cgroup pinned (css_get)
1022 	 */
1023 	if (!cgrp)
1024 		return;
1025 
1026 	WARN_ON_ONCE(!ctx->nr_cgroups);
1027 
1028 	for (css = &cgrp->css; css; css = css->parent) {
1029 		cgrp = container_of(css, struct perf_cgroup, css);
1030 		info = this_cpu_ptr(cgrp->info);
1031 		if (guest) {
1032 			__update_cgrp_guest_time(info, ctx->time.stamp, false);
1033 		} else {
1034 			update_perf_time_ctx(&info->time, ctx->time.stamp, false);
1035 			__store_release(&info->active, 1);
1036 		}
1037 	}
1038 }
1039 
1040 /*
1041  * reschedule events based on the cgroup constraint of task.
1042  */
perf_cgroup_switch(struct task_struct * task)1043 static void perf_cgroup_switch(struct task_struct *task)
1044 {
1045 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
1046 	struct perf_cgroup *cgrp;
1047 
1048 	/*
1049 	 * cpuctx->cgrp is set when the first cgroup event enabled,
1050 	 * and is cleared when the last cgroup event disabled.
1051 	 */
1052 	if (READ_ONCE(cpuctx->cgrp) == NULL)
1053 		return;
1054 
1055 	cgrp = perf_cgroup_from_task(task, NULL);
1056 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
1057 		return;
1058 
1059 	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
1060 	/*
1061 	 * Re-check, could've raced vs perf_remove_from_context().
1062 	 */
1063 	if (READ_ONCE(cpuctx->cgrp) == NULL)
1064 		return;
1065 
1066 	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
1067 	perf_ctx_disable(&cpuctx->ctx, EVENT_CGROUP);
1068 
1069 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
1070 	/*
1071 	 * must not be done before ctxswout due
1072 	 * to update_cgrp_time_from_cpuctx() in
1073 	 * ctx_sched_out()
1074 	 */
1075 	cpuctx->cgrp = cgrp;
1076 	/*
1077 	 * set cgrp before ctxsw in to allow
1078 	 * perf_cgroup_set_timestamp() in ctx_sched_in()
1079 	 * to not have to pass task around
1080 	 */
1081 	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
1082 
1083 	perf_ctx_enable(&cpuctx->ctx, EVENT_CGROUP);
1084 }
1085 
perf_cgroup_ensure_storage(struct perf_event * event,struct cgroup_subsys_state * css)1086 static int perf_cgroup_ensure_storage(struct perf_event *event,
1087 				struct cgroup_subsys_state *css)
1088 {
1089 	struct perf_cpu_context *cpuctx;
1090 	struct perf_event **storage;
1091 	int cpu, heap_size, ret = 0;
1092 
1093 	/*
1094 	 * Allow storage to have sufficient space for an iterator for each
1095 	 * possibly nested cgroup plus an iterator for events with no cgroup.
1096 	 */
1097 	for (heap_size = 1; css; css = css->parent)
1098 		heap_size++;
1099 
1100 	for_each_possible_cpu(cpu) {
1101 		cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
1102 		if (heap_size <= cpuctx->heap_size)
1103 			continue;
1104 
1105 		storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
1106 				       GFP_KERNEL, cpu_to_node(cpu));
1107 		if (!storage) {
1108 			ret = -ENOMEM;
1109 			break;
1110 		}
1111 
1112 		raw_spin_lock_irq(&cpuctx->ctx.lock);
1113 		if (cpuctx->heap_size < heap_size) {
1114 			swap(cpuctx->heap, storage);
1115 			if (storage == cpuctx->heap_default)
1116 				storage = NULL;
1117 			cpuctx->heap_size = heap_size;
1118 		}
1119 		raw_spin_unlock_irq(&cpuctx->ctx.lock);
1120 
1121 		kfree(storage);
1122 	}
1123 
1124 	return ret;
1125 }
1126 
perf_cgroup_connect(int fd,struct perf_event * event,struct perf_event_attr * attr,struct perf_event * group_leader)1127 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
1128 				      struct perf_event_attr *attr,
1129 				      struct perf_event *group_leader)
1130 {
1131 	struct perf_cgroup *cgrp;
1132 	struct cgroup_subsys_state *css;
1133 	CLASS(fd, f)(fd);
1134 	int ret = 0;
1135 
1136 	if (fd_empty(f))
1137 		return -EBADF;
1138 
1139 	css = css_tryget_online_from_dir(fd_file(f)->f_path.dentry,
1140 					 &perf_event_cgrp_subsys);
1141 	if (IS_ERR(css))
1142 		return PTR_ERR(css);
1143 
1144 	ret = perf_cgroup_ensure_storage(event, css);
1145 	if (ret)
1146 		return ret;
1147 
1148 	cgrp = container_of(css, struct perf_cgroup, css);
1149 	event->cgrp = cgrp;
1150 
1151 	/*
1152 	 * all events in a group must monitor
1153 	 * the same cgroup because a task belongs
1154 	 * to only one perf cgroup at a time
1155 	 */
1156 	if (group_leader && group_leader->cgrp != cgrp) {
1157 		perf_detach_cgroup(event);
1158 		ret = -EINVAL;
1159 	}
1160 	return ret;
1161 }
1162 
1163 static inline void
perf_cgroup_event_enable(struct perf_event * event,struct perf_event_context * ctx)1164 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
1165 {
1166 	struct perf_cpu_context *cpuctx;
1167 
1168 	if (!is_cgroup_event(event))
1169 		return;
1170 
1171 	event->pmu_ctx->nr_cgroups++;
1172 
1173 	/*
1174 	 * Because cgroup events are always per-cpu events,
1175 	 * @ctx == &cpuctx->ctx.
1176 	 */
1177 	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1178 
1179 	if (ctx->nr_cgroups++)
1180 		return;
1181 
1182 	cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
1183 }
1184 
1185 static inline void
perf_cgroup_event_disable(struct perf_event * event,struct perf_event_context * ctx)1186 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1187 {
1188 	struct perf_cpu_context *cpuctx;
1189 
1190 	if (!is_cgroup_event(event))
1191 		return;
1192 
1193 	event->pmu_ctx->nr_cgroups--;
1194 
1195 	/*
1196 	 * Because cgroup events are always per-cpu events,
1197 	 * @ctx == &cpuctx->ctx.
1198 	 */
1199 	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1200 
1201 	if (--ctx->nr_cgroups)
1202 		return;
1203 
1204 	cpuctx->cgrp = NULL;
1205 }
1206 
1207 #else /* !CONFIG_CGROUP_PERF */
1208 
1209 static inline bool
perf_cgroup_match(struct perf_event * event)1210 perf_cgroup_match(struct perf_event *event)
1211 {
1212 	return true;
1213 }
1214 
perf_detach_cgroup(struct perf_event * event)1215 static inline void perf_detach_cgroup(struct perf_event *event)
1216 {}
1217 
is_cgroup_event(struct perf_event * event)1218 static inline int is_cgroup_event(struct perf_event *event)
1219 {
1220 	return 0;
1221 }
1222 
update_cgrp_time_from_event(struct perf_event * event)1223 static inline void update_cgrp_time_from_event(struct perf_event *event)
1224 {
1225 }
1226 
update_cgrp_time_from_cpuctx(struct perf_cpu_context * cpuctx,bool final)1227 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
1228 						bool final)
1229 {
1230 }
1231 
perf_cgroup_connect(pid_t pid,struct perf_event * event,struct perf_event_attr * attr,struct perf_event * group_leader)1232 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
1233 				      struct perf_event_attr *attr,
1234 				      struct perf_event *group_leader)
1235 {
1236 	return -EINVAL;
1237 }
1238 
1239 static inline void
perf_cgroup_set_timestamp(struct perf_cpu_context * cpuctx,bool guest)1240 perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx, bool guest)
1241 {
1242 }
1243 
perf_cgroup_event_time(struct perf_event * event)1244 static inline u64 perf_cgroup_event_time(struct perf_event *event)
1245 {
1246 	return 0;
1247 }
1248 
perf_cgroup_event_time_now(struct perf_event * event,u64 now)1249 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
1250 {
1251 	return 0;
1252 }
1253 
1254 static inline void
perf_cgroup_event_enable(struct perf_event * event,struct perf_event_context * ctx)1255 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
1256 {
1257 }
1258 
1259 static inline void
perf_cgroup_event_disable(struct perf_event * event,struct perf_event_context * ctx)1260 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1261 {
1262 }
1263 
perf_cgroup_switch(struct task_struct * task)1264 static void perf_cgroup_switch(struct task_struct *task)
1265 {
1266 }
1267 #endif
1268 
1269 /*
1270  * set default to be dependent on timer tick just
1271  * like original code
1272  */
1273 #define PERF_CPU_HRTIMER (1000 / HZ)
1274 /*
1275  * function must be called with interrupts disabled
1276  */
perf_mux_hrtimer_handler(struct hrtimer * hr)1277 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
1278 {
1279 	struct perf_cpu_pmu_context *cpc;
1280 	bool rotations;
1281 
1282 	lockdep_assert_irqs_disabled();
1283 
1284 	cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
1285 	rotations = perf_rotate_context(cpc);
1286 
1287 	raw_spin_lock(&cpc->hrtimer_lock);
1288 	if (rotations)
1289 		hrtimer_forward_now(hr, cpc->hrtimer_interval);
1290 	else
1291 		cpc->hrtimer_active = 0;
1292 	raw_spin_unlock(&cpc->hrtimer_lock);
1293 
1294 	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
1295 }
1296 
__perf_mux_hrtimer_init(struct perf_cpu_pmu_context * cpc,int cpu)1297 static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
1298 {
1299 	struct hrtimer *timer = &cpc->hrtimer;
1300 	struct pmu *pmu = cpc->epc.pmu;
1301 	u64 interval;
1302 
1303 	/*
1304 	 * check default is sane, if not set then force to
1305 	 * default interval (1/tick)
1306 	 */
1307 	interval = pmu->hrtimer_interval_ms;
1308 	if (interval < 1)
1309 		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
1310 
1311 	cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
1312 
1313 	raw_spin_lock_init(&cpc->hrtimer_lock);
1314 	hrtimer_setup(timer, perf_mux_hrtimer_handler, CLOCK_MONOTONIC,
1315 		      HRTIMER_MODE_ABS_PINNED_HARD);
1316 }
1317 
perf_mux_hrtimer_restart(struct perf_cpu_pmu_context * cpc)1318 static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
1319 {
1320 	struct hrtimer *timer = &cpc->hrtimer;
1321 	unsigned long flags;
1322 
1323 	raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
1324 	if (!cpc->hrtimer_active) {
1325 		cpc->hrtimer_active = 1;
1326 		hrtimer_forward_now(timer, cpc->hrtimer_interval);
1327 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
1328 	}
1329 	raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);
1330 
1331 	return 0;
1332 }
1333 
perf_mux_hrtimer_restart_ipi(void * arg)1334 static int perf_mux_hrtimer_restart_ipi(void *arg)
1335 {
1336 	return perf_mux_hrtimer_restart(arg);
1337 }
1338 
this_cpc(struct pmu * pmu)1339 static __always_inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
1340 {
1341 	return *this_cpu_ptr(pmu->cpu_pmu_context);
1342 }
1343 
perf_pmu_disable(struct pmu * pmu)1344 void perf_pmu_disable(struct pmu *pmu)
1345 {
1346 	int *count = &this_cpc(pmu)->pmu_disable_count;
1347 	if (!(*count)++)
1348 		pmu->pmu_disable(pmu);
1349 }
1350 
perf_pmu_enable(struct pmu * pmu)1351 void perf_pmu_enable(struct pmu *pmu)
1352 {
1353 	int *count = &this_cpc(pmu)->pmu_disable_count;
1354 	if (!--(*count))
1355 		pmu->pmu_enable(pmu);
1356 }
1357 
perf_assert_pmu_disabled(struct pmu * pmu)1358 static void perf_assert_pmu_disabled(struct pmu *pmu)
1359 {
1360 	int *count = &this_cpc(pmu)->pmu_disable_count;
1361 	WARN_ON_ONCE(*count == 0);
1362 }
1363 
perf_pmu_read(struct perf_event * event)1364 static inline void perf_pmu_read(struct perf_event *event)
1365 {
1366 	if (event->state == PERF_EVENT_STATE_ACTIVE)
1367 		event->pmu->read(event);
1368 }
1369 
get_ctx(struct perf_event_context * ctx)1370 static void get_ctx(struct perf_event_context *ctx)
1371 {
1372 	refcount_inc(&ctx->refcount);
1373 }
1374 
free_ctx(struct rcu_head * head)1375 static void free_ctx(struct rcu_head *head)
1376 {
1377 	struct perf_event_context *ctx;
1378 
1379 	ctx = container_of(head, struct perf_event_context, rcu_head);
1380 	kfree(ctx);
1381 }
1382 
put_ctx(struct perf_event_context * ctx)1383 static void put_ctx(struct perf_event_context *ctx)
1384 {
1385 	if (refcount_dec_and_test(&ctx->refcount)) {
1386 		if (ctx->parent_ctx)
1387 			put_ctx(ctx->parent_ctx);
1388 		if (ctx->task && ctx->task != TASK_TOMBSTONE)
1389 			put_task_struct(ctx->task);
1390 		call_rcu(&ctx->rcu_head, free_ctx);
1391 	} else {
1392 		smp_mb__after_atomic(); /* pairs with wait_var_event() */
1393 		if (ctx->task == TASK_TOMBSTONE)
1394 			wake_up_var(&ctx->refcount);
1395 	}
1396 }
1397 
1398 /*
1399  * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1400  * perf_pmu_migrate_context() we need some magic.
1401  *
1402  * Those places that change perf_event::ctx will hold both
1403  * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1404  *
1405  * Lock ordering is by mutex address. There are two other sites where
1406  * perf_event_context::mutex nests and those are:
1407  *
1408  *  - perf_event_exit_task_context()	[ child , 0 ]
1409  *      perf_event_exit_event()
1410  *        put_event()			[ parent, 1 ]
1411  *
1412  *  - perf_event_init_context()		[ parent, 0 ]
1413  *      inherit_task_group()
1414  *        inherit_group()
1415  *          inherit_event()
1416  *            perf_event_alloc()
1417  *              perf_init_event()
1418  *                perf_try_init_event()	[ child , 1 ]
1419  *
1420  * While it appears there is an obvious deadlock here -- the parent and child
1421  * nesting levels are inverted between the two. This is in fact safe because
1422  * life-time rules separate them. That is an exiting task cannot fork, and a
1423  * spawning task cannot (yet) exit.
1424  *
1425  * But remember that these are parent<->child context relations, and
1426  * migration does not affect children, therefore these two orderings should not
1427  * interact.
1428  *
1429  * The change in perf_event::ctx does not affect children (as claimed above)
1430  * because the sys_perf_event_open() case will install a new event and break
1431  * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1432  * concerned with cpuctx and that doesn't have children.
1433  *
1434  * The places that change perf_event::ctx will issue:
1435  *
1436  *   perf_remove_from_context();
1437  *   synchronize_rcu();
1438  *   perf_install_in_context();
1439  *
1440  * to affect the change. The remove_from_context() + synchronize_rcu() should
1441  * quiesce the event, after which we can install it in the new location. This
1442  * means that only external vectors (perf_fops, prctl) can perturb the event
1443  * while in transit. Therefore all such accessors should also acquire
1444  * perf_event_context::mutex to serialize against this.
1445  *
1446  * However; because event->ctx can change while we're waiting to acquire
1447  * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1448  * function.
1449  *
1450  * Lock order:
1451  *    exec_update_lock
1452  *	task_struct::perf_event_mutex
1453  *	  perf_event_context::mutex
1454  *	    perf_event::child_mutex;
1455  *	      perf_event_context::lock
1456  *	    mmap_lock
1457  *	      perf_event::mmap_mutex
1458  *	        perf_buffer::aux_mutex
1459  *	      perf_addr_filters_head::lock
1460  *
1461  *    cpu_hotplug_lock
1462  *      pmus_lock
1463  *	  cpuctx->mutex / perf_event_context::mutex
1464  */
1465 static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event * event,int nesting)1466 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1467 {
1468 	struct perf_event_context *ctx;
1469 
1470 again:
1471 	rcu_read_lock();
1472 	ctx = READ_ONCE(event->ctx);
1473 	if (!refcount_inc_not_zero(&ctx->refcount)) {
1474 		rcu_read_unlock();
1475 		goto again;
1476 	}
1477 	rcu_read_unlock();
1478 
1479 	mutex_lock_nested(&ctx->mutex, nesting);
1480 	if (event->ctx != ctx) {
1481 		mutex_unlock(&ctx->mutex);
1482 		put_ctx(ctx);
1483 		goto again;
1484 	}
1485 
1486 	return ctx;
1487 }
1488 
1489 static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event * event)1490 perf_event_ctx_lock(struct perf_event *event)
1491 {
1492 	return perf_event_ctx_lock_nested(event, 0);
1493 }
1494 
perf_event_ctx_unlock(struct perf_event * event,struct perf_event_context * ctx)1495 static void perf_event_ctx_unlock(struct perf_event *event,
1496 				  struct perf_event_context *ctx)
1497 {
1498 	mutex_unlock(&ctx->mutex);
1499 	put_ctx(ctx);
1500 }
1501 
1502 /*
1503  * This must be done under the ctx->lock, such as to serialize against
1504  * context_equiv(), therefore we cannot call put_ctx() since that might end up
1505  * calling scheduler related locks and ctx->lock nests inside those.
1506  */
1507 static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context * ctx)1508 unclone_ctx(struct perf_event_context *ctx)
1509 {
1510 	struct perf_event_context *parent_ctx = ctx->parent_ctx;
1511 
1512 	lockdep_assert_held(&ctx->lock);
1513 
1514 	if (parent_ctx)
1515 		ctx->parent_ctx = NULL;
1516 	ctx->generation++;
1517 
1518 	return parent_ctx;
1519 }
1520 
perf_event_pid_type(struct perf_event * event,struct task_struct * p,enum pid_type type)1521 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
1522 				enum pid_type type)
1523 {
1524 	u32 nr;
1525 	/*
1526 	 * only top level events have the pid namespace they were created in
1527 	 */
1528 	if (event->parent)
1529 		event = event->parent;
1530 
1531 	nr = __task_pid_nr_ns(p, type, event->ns);
1532 	/* avoid -1 if it is idle thread or runs in another ns */
1533 	if (!nr && !pid_alive(p))
1534 		nr = -1;
1535 	return nr;
1536 }
1537 
perf_event_pid(struct perf_event * event,struct task_struct * p)1538 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1539 {
1540 	return perf_event_pid_type(event, p, PIDTYPE_TGID);
1541 }
1542 
perf_event_tid(struct perf_event * event,struct task_struct * p)1543 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1544 {
1545 	return perf_event_pid_type(event, p, PIDTYPE_PID);
1546 }
1547 
1548 /*
1549  * If we inherit events we want to return the parent event id
1550  * to userspace.
1551  */
primary_event_id(struct perf_event * event)1552 static u64 primary_event_id(struct perf_event *event)
1553 {
1554 	u64 id = event->id;
1555 
1556 	if (event->parent)
1557 		id = event->parent->id;
1558 
1559 	return id;
1560 }
1561 
1562 /*
1563  * Get the perf_event_context for a task and lock it.
1564  *
1565  * This has to cope with the fact that until it is locked,
1566  * the context could get moved to another task.
1567  */
1568 static struct perf_event_context *
perf_lock_task_context(struct task_struct * task,unsigned long * flags)1569 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
1570 {
1571 	struct perf_event_context *ctx;
1572 
1573 retry:
1574 	/*
1575 	 * One of the few rules of preemptible RCU is that one cannot do
1576 	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1577 	 * part of the read side critical section was irqs-enabled -- see
1578 	 * rcu_read_unlock_special().
1579 	 *
1580 	 * Since ctx->lock nests under rq->lock we must ensure the entire read
1581 	 * side critical section has interrupts disabled.
1582 	 */
1583 	local_irq_save(*flags);
1584 	rcu_read_lock();
1585 	ctx = rcu_dereference(task->perf_event_ctxp);
1586 	if (ctx) {
1587 		/*
1588 		 * If this context is a clone of another, it might
1589 		 * get swapped for another underneath us by
1590 		 * perf_event_task_sched_out, though the
1591 		 * rcu_read_lock() protects us from any context
1592 		 * getting freed.  Lock the context and check if it
1593 		 * got swapped before we could get the lock, and retry
1594 		 * if so.  If we locked the right context, then it
1595 		 * can't get swapped on us any more.
1596 		 */
1597 		raw_spin_lock(&ctx->lock);
1598 		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
1599 			raw_spin_unlock(&ctx->lock);
1600 			rcu_read_unlock();
1601 			local_irq_restore(*flags);
1602 			goto retry;
1603 		}
1604 
1605 		if (ctx->task == TASK_TOMBSTONE ||
1606 		    !refcount_inc_not_zero(&ctx->refcount)) {
1607 			raw_spin_unlock(&ctx->lock);
1608 			ctx = NULL;
1609 		} else {
1610 			WARN_ON_ONCE(ctx->task != task);
1611 		}
1612 	}
1613 	rcu_read_unlock();
1614 	if (!ctx)
1615 		local_irq_restore(*flags);
1616 	return ctx;
1617 }
1618 
1619 /*
1620  * Get the context for a task and increment its pin_count so it
1621  * can't get swapped to another task.  This also increments its
1622  * reference count so that the context can't get freed.
1623  */
1624 static struct perf_event_context *
perf_pin_task_context(struct task_struct * task)1625 perf_pin_task_context(struct task_struct *task)
1626 {
1627 	struct perf_event_context *ctx;
1628 	unsigned long flags;
1629 
1630 	ctx = perf_lock_task_context(task, &flags);
1631 	if (ctx) {
1632 		++ctx->pin_count;
1633 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
1634 	}
1635 	return ctx;
1636 }
1637 
perf_unpin_context(struct perf_event_context * ctx)1638 static void perf_unpin_context(struct perf_event_context *ctx)
1639 {
1640 	unsigned long flags;
1641 
1642 	raw_spin_lock_irqsave(&ctx->lock, flags);
1643 	--ctx->pin_count;
1644 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
1645 }
1646 
1647 /*
1648  * Update the record of the current time in a context.
1649  */
__update_context_time(struct perf_event_context * ctx,bool adv)1650 static void __update_context_time(struct perf_event_context *ctx, bool adv)
1651 {
1652 	lockdep_assert_held(&ctx->lock);
1653 
1654 	update_perf_time_ctx(&ctx->time, perf_clock(), adv);
1655 }
1656 
__update_context_guest_time(struct perf_event_context * ctx,bool adv)1657 static void __update_context_guest_time(struct perf_event_context *ctx, bool adv)
1658 {
1659 	lockdep_assert_held(&ctx->lock);
1660 
1661 	/* must be called after __update_context_time(); */
1662 	update_perf_time_ctx(&ctx->timeguest, ctx->time.stamp, adv);
1663 }
1664 
update_context_time(struct perf_event_context * ctx)1665 static void update_context_time(struct perf_event_context *ctx)
1666 {
1667 	__update_context_time(ctx, true);
1668 	if (is_guest_mediated_pmu_loaded())
1669 		__update_context_guest_time(ctx, true);
1670 }
1671 
perf_event_time(struct perf_event * event)1672 static u64 perf_event_time(struct perf_event *event)
1673 {
1674 	struct perf_event_context *ctx = event->ctx;
1675 
1676 	if (unlikely(!ctx))
1677 		return 0;
1678 
1679 	if (is_cgroup_event(event))
1680 		return perf_cgroup_event_time(event);
1681 
1682 	return __perf_event_time_ctx(event, &ctx->time);
1683 }
1684 
perf_event_time_now(struct perf_event * event,u64 now)1685 static u64 perf_event_time_now(struct perf_event *event, u64 now)
1686 {
1687 	struct perf_event_context *ctx = event->ctx;
1688 
1689 	if (unlikely(!ctx))
1690 		return 0;
1691 
1692 	if (is_cgroup_event(event))
1693 		return perf_cgroup_event_time_now(event, now);
1694 
1695 	if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
1696 		return __perf_event_time_ctx(event, &ctx->time);
1697 
1698 	return __perf_event_time_ctx_now(event, &ctx->time, now);
1699 }
1700 
get_event_type(struct perf_event * event)1701 static enum event_type_t get_event_type(struct perf_event *event)
1702 {
1703 	struct perf_event_context *ctx = event->ctx;
1704 	enum event_type_t event_type;
1705 
1706 	lockdep_assert_held(&ctx->lock);
1707 
1708 	/*
1709 	 * It's 'group type', really, because if our group leader is
1710 	 * pinned, so are we.
1711 	 */
1712 	if (event->group_leader != event)
1713 		event = event->group_leader;
1714 
1715 	event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1716 	if (!ctx->task)
1717 		event_type |= EVENT_CPU;
1718 
1719 	return event_type;
1720 }
1721 
1722 /*
1723  * Helper function to initialize event group nodes.
1724  */
init_event_group(struct perf_event * event)1725 static void init_event_group(struct perf_event *event)
1726 {
1727 	RB_CLEAR_NODE(&event->group_node);
1728 	event->group_index = 0;
1729 }
1730 
1731 /*
1732  * Extract pinned or flexible groups from the context
1733  * based on event attrs bits.
1734  */
1735 static struct perf_event_groups *
get_event_groups(struct perf_event * event,struct perf_event_context * ctx)1736 get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
1737 {
1738 	if (event->attr.pinned)
1739 		return &ctx->pinned_groups;
1740 	else
1741 		return &ctx->flexible_groups;
1742 }
1743 
1744 /*
1745  * Helper function to initializes perf_event_group trees.
1746  */
perf_event_groups_init(struct perf_event_groups * groups)1747 static void perf_event_groups_init(struct perf_event_groups *groups)
1748 {
1749 	groups->tree = RB_ROOT;
1750 	groups->index = 0;
1751 }
1752 
event_cgroup(const struct perf_event * event)1753 static inline struct cgroup *event_cgroup(const struct perf_event *event)
1754 {
1755 	struct cgroup *cgroup = NULL;
1756 
1757 #ifdef CONFIG_CGROUP_PERF
1758 	if (event->cgrp)
1759 		cgroup = event->cgrp->css.cgroup;
1760 #endif
1761 
1762 	return cgroup;
1763 }
1764 
1765 /*
1766  * Compare function for event groups;
1767  *
1768  * Implements complex key that first sorts by CPU and then by virtual index
1769  * which provides ordering when rotating groups for the same CPU.
1770  */
1771 static __always_inline int
perf_event_groups_cmp(const int left_cpu,const struct pmu * left_pmu,const struct cgroup * left_cgroup,const u64 left_group_index,const struct perf_event * right)1772 perf_event_groups_cmp(const int left_cpu, const struct pmu *left_pmu,
1773 		      const struct cgroup *left_cgroup, const u64 left_group_index,
1774 		      const struct perf_event *right)
1775 {
1776 	if (left_cpu < right->cpu)
1777 		return -1;
1778 	if (left_cpu > right->cpu)
1779 		return 1;
1780 
1781 	if (left_pmu) {
1782 		if (left_pmu < right->pmu_ctx->pmu)
1783 			return -1;
1784 		if (left_pmu > right->pmu_ctx->pmu)
1785 			return 1;
1786 	}
1787 
1788 #ifdef CONFIG_CGROUP_PERF
1789 	{
1790 		const struct cgroup *right_cgroup = event_cgroup(right);
1791 
1792 		if (left_cgroup != right_cgroup) {
1793 			if (!left_cgroup) {
1794 				/*
1795 				 * Left has no cgroup but right does, no
1796 				 * cgroups come first.
1797 				 */
1798 				return -1;
1799 			}
1800 			if (!right_cgroup) {
1801 				/*
1802 				 * Right has no cgroup but left does, no
1803 				 * cgroups come first.
1804 				 */
1805 				return 1;
1806 			}
1807 			/* Two dissimilar cgroups, order by id. */
1808 			if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
1809 				return -1;
1810 
1811 			return 1;
1812 		}
1813 	}
1814 #endif
1815 
1816 	if (left_group_index < right->group_index)
1817 		return -1;
1818 	if (left_group_index > right->group_index)
1819 		return 1;
1820 
1821 	return 0;
1822 }
1823 
1824 #define __node_2_pe(node) \
1825 	rb_entry((node), struct perf_event, group_node)
1826 
__group_less(struct rb_node * a,const struct rb_node * b)1827 static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
1828 {
1829 	struct perf_event *e = __node_2_pe(a);
1830 	return perf_event_groups_cmp(e->cpu, e->pmu_ctx->pmu, event_cgroup(e),
1831 				     e->group_index, __node_2_pe(b)) < 0;
1832 }
1833 
1834 struct __group_key {
1835 	int cpu;
1836 	struct pmu *pmu;
1837 	struct cgroup *cgroup;
1838 };
1839 
__group_cmp(const void * key,const struct rb_node * node)1840 static inline int __group_cmp(const void *key, const struct rb_node *node)
1841 {
1842 	const struct __group_key *a = key;
1843 	const struct perf_event *b = __node_2_pe(node);
1844 
1845 	/* partial/subtree match: @cpu, @pmu, @cgroup; ignore: @group_index */
1846 	return perf_event_groups_cmp(a->cpu, a->pmu, a->cgroup, b->group_index, b);
1847 }
1848 
1849 static inline int
__group_cmp_ignore_cgroup(const void * key,const struct rb_node * node)1850 __group_cmp_ignore_cgroup(const void *key, const struct rb_node *node)
1851 {
1852 	const struct __group_key *a = key;
1853 	const struct perf_event *b = __node_2_pe(node);
1854 
1855 	/* partial/subtree match: @cpu, @pmu, ignore: @cgroup, @group_index */
1856 	return perf_event_groups_cmp(a->cpu, a->pmu, event_cgroup(b),
1857 				     b->group_index, b);
1858 }
1859 
1860 /*
1861  * Insert @event into @groups' tree; using
1862  *   {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1863  * as key. This places it last inside the {cpu,pmu,cgroup} subtree.
1864  */
1865 static void
perf_event_groups_insert(struct perf_event_groups * groups,struct perf_event * event)1866 perf_event_groups_insert(struct perf_event_groups *groups,
1867 			 struct perf_event *event)
1868 {
1869 	event->group_index = ++groups->index;
1870 
1871 	rb_add(&event->group_node, &groups->tree, __group_less);
1872 }
1873 
1874 /*
1875  * Helper function to insert event into the pinned or flexible groups.
1876  */
1877 static void
add_event_to_groups(struct perf_event * event,struct perf_event_context * ctx)1878 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1879 {
1880 	struct perf_event_groups *groups;
1881 
1882 	groups = get_event_groups(event, ctx);
1883 	perf_event_groups_insert(groups, event);
1884 }
1885 
1886 /*
1887  * Delete a group from a tree.
1888  */
1889 static void
perf_event_groups_delete(struct perf_event_groups * groups,struct perf_event * event)1890 perf_event_groups_delete(struct perf_event_groups *groups,
1891 			 struct perf_event *event)
1892 {
1893 	WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
1894 		     RB_EMPTY_ROOT(&groups->tree));
1895 
1896 	rb_erase(&event->group_node, &groups->tree);
1897 	init_event_group(event);
1898 }
1899 
1900 /*
1901  * Helper function to delete event from its groups.
1902  */
1903 static void
del_event_from_groups(struct perf_event * event,struct perf_event_context * ctx)1904 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1905 {
1906 	struct perf_event_groups *groups;
1907 
1908 	groups = get_event_groups(event, ctx);
1909 	perf_event_groups_delete(groups, event);
1910 }
1911 
1912 /*
1913  * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1914  */
1915 static struct perf_event *
perf_event_groups_first(struct perf_event_groups * groups,int cpu,struct pmu * pmu,struct cgroup * cgrp)1916 perf_event_groups_first(struct perf_event_groups *groups, int cpu,
1917 			struct pmu *pmu, struct cgroup *cgrp)
1918 {
1919 	struct __group_key key = {
1920 		.cpu = cpu,
1921 		.pmu = pmu,
1922 		.cgroup = cgrp,
1923 	};
1924 	struct rb_node *node;
1925 
1926 	node = rb_find_first(&key, &groups->tree, __group_cmp);
1927 	if (node)
1928 		return __node_2_pe(node);
1929 
1930 	return NULL;
1931 }
1932 
1933 static struct perf_event *
perf_event_groups_next(struct perf_event * event,struct pmu * pmu)1934 perf_event_groups_next(struct perf_event *event, struct pmu *pmu)
1935 {
1936 	struct __group_key key = {
1937 		.cpu = event->cpu,
1938 		.pmu = pmu,
1939 		.cgroup = event_cgroup(event),
1940 	};
1941 	struct rb_node *next;
1942 
1943 	next = rb_next_match(&key, &event->group_node, __group_cmp);
1944 	if (next)
1945 		return __node_2_pe(next);
1946 
1947 	return NULL;
1948 }
1949 
1950 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu)		\
1951 	for (event = perf_event_groups_first(groups, cpu, pmu, NULL);	\
1952 	     event; event = perf_event_groups_next(event, pmu))
1953 
1954 /*
1955  * Iterate through the whole groups tree.
1956  */
1957 #define perf_event_groups_for_each(event, groups)			\
1958 	for (event = rb_entry_safe(rb_first(&((groups)->tree)),		\
1959 				typeof(*event), group_node); event;	\
1960 		event = rb_entry_safe(rb_next(&event->group_node),	\
1961 				typeof(*event), group_node))
1962 
1963 /*
1964  * Does the event attribute request inherit with PERF_SAMPLE_READ
1965  */
has_inherit_and_sample_read(struct perf_event_attr * attr)1966 static inline bool has_inherit_and_sample_read(struct perf_event_attr *attr)
1967 {
1968 	return attr->inherit && (attr->sample_type & PERF_SAMPLE_READ);
1969 }
1970 
1971 /*
1972  * Add an event from the lists for its context.
1973  * Must be called with ctx->mutex and ctx->lock held.
1974  */
1975 static void
list_add_event(struct perf_event * event,struct perf_event_context * ctx)1976 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1977 {
1978 	lockdep_assert_held(&ctx->lock);
1979 
1980 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1981 	event->attach_state |= PERF_ATTACH_CONTEXT;
1982 
1983 	event->tstamp = perf_event_time(event);
1984 
1985 	/*
1986 	 * If we're a stand alone event or group leader, we go to the context
1987 	 * list, group events are kept attached to the group so that
1988 	 * perf_group_detach can, at all times, locate all siblings.
1989 	 */
1990 	if (event->group_leader == event) {
1991 		event->group_caps = event->event_caps;
1992 		add_event_to_groups(event, ctx);
1993 	}
1994 
1995 	list_add_rcu(&event->event_entry, &ctx->event_list);
1996 	ctx->nr_events++;
1997 	if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
1998 		ctx->nr_user++;
1999 	if (event->attr.inherit_stat)
2000 		ctx->nr_stat++;
2001 	if (has_inherit_and_sample_read(&event->attr))
2002 		local_inc(&ctx->nr_no_switch_fast);
2003 
2004 	if (event->state > PERF_EVENT_STATE_OFF)
2005 		perf_cgroup_event_enable(event, ctx);
2006 
2007 	ctx->generation++;
2008 	event->pmu_ctx->nr_events++;
2009 }
2010 
2011 /*
2012  * Initialize event state based on the perf_event_attr::disabled.
2013  */
perf_event__state_init(struct perf_event * event)2014 static inline void perf_event__state_init(struct perf_event *event)
2015 {
2016 	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
2017 					      PERF_EVENT_STATE_INACTIVE;
2018 }
2019 
__perf_event_read_size(u64 read_format,int nr_siblings)2020 static int __perf_event_read_size(u64 read_format, int nr_siblings)
2021 {
2022 	int entry = sizeof(u64); /* value */
2023 	int size = 0;
2024 	int nr = 1;
2025 
2026 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2027 		size += sizeof(u64);
2028 
2029 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2030 		size += sizeof(u64);
2031 
2032 	if (read_format & PERF_FORMAT_ID)
2033 		entry += sizeof(u64);
2034 
2035 	if (read_format & PERF_FORMAT_LOST)
2036 		entry += sizeof(u64);
2037 
2038 	if (read_format & PERF_FORMAT_GROUP) {
2039 		nr += nr_siblings;
2040 		size += sizeof(u64);
2041 	}
2042 
2043 	/*
2044 	 * Since perf_event_validate_size() limits this to 16k and inhibits
2045 	 * adding more siblings, this will never overflow.
2046 	 */
2047 	return size + nr * entry;
2048 }
2049 
__perf_event_header_size(struct perf_event * event,u64 sample_type)2050 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
2051 {
2052 	struct perf_sample_data *data;
2053 	u16 size = 0;
2054 
2055 	if (sample_type & PERF_SAMPLE_IP)
2056 		size += sizeof(data->ip);
2057 
2058 	if (sample_type & PERF_SAMPLE_ADDR)
2059 		size += sizeof(data->addr);
2060 
2061 	if (sample_type & PERF_SAMPLE_PERIOD)
2062 		size += sizeof(data->period);
2063 
2064 	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
2065 		size += sizeof(data->weight.full);
2066 
2067 	if (sample_type & PERF_SAMPLE_READ)
2068 		size += event->read_size;
2069 
2070 	if (sample_type & PERF_SAMPLE_DATA_SRC)
2071 		size += sizeof(data->data_src.val);
2072 
2073 	if (sample_type & PERF_SAMPLE_TRANSACTION)
2074 		size += sizeof(data->txn);
2075 
2076 	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
2077 		size += sizeof(data->phys_addr);
2078 
2079 	if (sample_type & PERF_SAMPLE_CGROUP)
2080 		size += sizeof(data->cgroup);
2081 
2082 	if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
2083 		size += sizeof(data->data_page_size);
2084 
2085 	if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
2086 		size += sizeof(data->code_page_size);
2087 
2088 	event->header_size = size;
2089 }
2090 
2091 /*
2092  * Called at perf_event creation and when events are attached/detached from a
2093  * group.
2094  */
perf_event__header_size(struct perf_event * event)2095 static void perf_event__header_size(struct perf_event *event)
2096 {
2097 	event->read_size =
2098 		__perf_event_read_size(event->attr.read_format,
2099 				       event->group_leader->nr_siblings);
2100 	__perf_event_header_size(event, event->attr.sample_type);
2101 }
2102 
perf_event__id_header_size(struct perf_event * event)2103 static void perf_event__id_header_size(struct perf_event *event)
2104 {
2105 	struct perf_sample_data *data;
2106 	u64 sample_type = event->attr.sample_type;
2107 	u16 size = 0;
2108 
2109 	if (sample_type & PERF_SAMPLE_TID)
2110 		size += sizeof(data->tid_entry);
2111 
2112 	if (sample_type & PERF_SAMPLE_TIME)
2113 		size += sizeof(data->time);
2114 
2115 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
2116 		size += sizeof(data->id);
2117 
2118 	if (sample_type & PERF_SAMPLE_ID)
2119 		size += sizeof(data->id);
2120 
2121 	if (sample_type & PERF_SAMPLE_STREAM_ID)
2122 		size += sizeof(data->stream_id);
2123 
2124 	if (sample_type & PERF_SAMPLE_CPU)
2125 		size += sizeof(data->cpu_entry);
2126 
2127 	event->id_header_size = size;
2128 }
2129 
2130 /*
2131  * Check that adding an event to the group does not result in anybody
2132  * overflowing the 64k event limit imposed by the output buffer.
2133  *
2134  * Specifically, check that the read_size for the event does not exceed 16k,
2135  * read_size being the one term that grows with groups size. Since read_size
2136  * depends on per-event read_format, also (re)check the existing events.
2137  *
2138  * This leaves 48k for the constant size fields and things like callchains,
2139  * branch stacks and register sets.
2140  */
perf_event_validate_size(struct perf_event * event)2141 static bool perf_event_validate_size(struct perf_event *event)
2142 {
2143 	struct perf_event *sibling, *group_leader = event->group_leader;
2144 
2145 	if (__perf_event_read_size(event->attr.read_format,
2146 				   group_leader->nr_siblings + 1) > 16*1024)
2147 		return false;
2148 
2149 	if (__perf_event_read_size(group_leader->attr.read_format,
2150 				   group_leader->nr_siblings + 1) > 16*1024)
2151 		return false;
2152 
2153 	/*
2154 	 * When creating a new group leader, group_leader->ctx is initialized
2155 	 * after the size has been validated, but we cannot safely use
2156 	 * for_each_sibling_event() until group_leader->ctx is set. A new group
2157 	 * leader cannot have any siblings yet, so we can safely skip checking
2158 	 * the non-existent siblings.
2159 	 */
2160 	if (event == group_leader)
2161 		return true;
2162 
2163 	for_each_sibling_event(sibling, group_leader) {
2164 		if (__perf_event_read_size(sibling->attr.read_format,
2165 					   group_leader->nr_siblings + 1) > 16*1024)
2166 			return false;
2167 	}
2168 
2169 	return true;
2170 }
2171 
perf_group_attach(struct perf_event * event)2172 static void perf_group_attach(struct perf_event *event)
2173 {
2174 	struct perf_event *group_leader = event->group_leader, *pos;
2175 
2176 	lockdep_assert_held(&event->ctx->lock);
2177 
2178 	/*
2179 	 * We can have double attach due to group movement (move_group) in
2180 	 * perf_event_open().
2181 	 */
2182 	if (event->attach_state & PERF_ATTACH_GROUP)
2183 		return;
2184 
2185 	event->attach_state |= PERF_ATTACH_GROUP;
2186 
2187 	if (group_leader == event)
2188 		return;
2189 
2190 	WARN_ON_ONCE(group_leader->ctx != event->ctx);
2191 
2192 	group_leader->group_caps &= event->event_caps;
2193 
2194 	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
2195 	group_leader->nr_siblings++;
2196 	group_leader->group_generation++;
2197 
2198 	perf_event__header_size(group_leader);
2199 
2200 	for_each_sibling_event(pos, group_leader)
2201 		perf_event__header_size(pos);
2202 }
2203 
2204 /*
2205  * Remove an event from the lists for its context.
2206  * Must be called with ctx->mutex and ctx->lock held.
2207  */
2208 static void
list_del_event(struct perf_event * event,struct perf_event_context * ctx)2209 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
2210 {
2211 	WARN_ON_ONCE(event->ctx != ctx);
2212 	lockdep_assert_held(&ctx->lock);
2213 
2214 	/*
2215 	 * We can have double detach due to exit/hot-unplug + close.
2216 	 */
2217 	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
2218 		return;
2219 
2220 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
2221 
2222 	ctx->nr_events--;
2223 	if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)
2224 		ctx->nr_user--;
2225 	if (event->attr.inherit_stat)
2226 		ctx->nr_stat--;
2227 	if (has_inherit_and_sample_read(&event->attr))
2228 		local_dec(&ctx->nr_no_switch_fast);
2229 
2230 	list_del_rcu(&event->event_entry);
2231 
2232 	if (event->group_leader == event)
2233 		del_event_from_groups(event, ctx);
2234 
2235 	ctx->generation++;
2236 	event->pmu_ctx->nr_events--;
2237 }
2238 
2239 static int
perf_aux_output_match(struct perf_event * event,struct perf_event * aux_event)2240 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
2241 {
2242 	if (!has_aux(aux_event))
2243 		return 0;
2244 
2245 	if (!event->pmu->aux_output_match)
2246 		return 0;
2247 
2248 	return event->pmu->aux_output_match(aux_event);
2249 }
2250 
2251 static void put_event(struct perf_event *event);
2252 static void __event_disable(struct perf_event *event,
2253 			    struct perf_event_context *ctx,
2254 			    enum perf_event_state state);
2255 
perf_put_aux_event(struct perf_event * event)2256 static void perf_put_aux_event(struct perf_event *event)
2257 {
2258 	struct perf_event_context *ctx = event->ctx;
2259 	struct perf_event *iter;
2260 
2261 	/*
2262 	 * If event uses aux_event tear down the link
2263 	 */
2264 	if (event->aux_event) {
2265 		iter = event->aux_event;
2266 		event->aux_event = NULL;
2267 		put_event(iter);
2268 		return;
2269 	}
2270 
2271 	/*
2272 	 * If the event is an aux_event, tear down all links to
2273 	 * it from other events.
2274 	 */
2275 	for_each_sibling_event(iter, event) {
2276 		if (iter->aux_event != event)
2277 			continue;
2278 
2279 		iter->aux_event = NULL;
2280 		put_event(event);
2281 
2282 		/*
2283 		 * If it's ACTIVE, schedule it out and put it into ERROR
2284 		 * state so that we don't try to schedule it again. Note
2285 		 * that perf_event_enable() will clear the ERROR status.
2286 		 */
2287 		__event_disable(iter, ctx, PERF_EVENT_STATE_ERROR);
2288 	}
2289 }
2290 
perf_need_aux_event(struct perf_event * event)2291 static bool perf_need_aux_event(struct perf_event *event)
2292 {
2293 	return event->attr.aux_output || has_aux_action(event);
2294 }
2295 
perf_get_aux_event(struct perf_event * event,struct perf_event * group_leader)2296 static int perf_get_aux_event(struct perf_event *event,
2297 			      struct perf_event *group_leader)
2298 {
2299 	/*
2300 	 * Our group leader must be an aux event if we want to be
2301 	 * an aux_output. This way, the aux event will precede its
2302 	 * aux_output events in the group, and therefore will always
2303 	 * schedule first.
2304 	 */
2305 	if (!group_leader)
2306 		return 0;
2307 
2308 	/*
2309 	 * aux_output and aux_sample_size are mutually exclusive.
2310 	 */
2311 	if (event->attr.aux_output && event->attr.aux_sample_size)
2312 		return 0;
2313 
2314 	if (event->attr.aux_output &&
2315 	    !perf_aux_output_match(event, group_leader))
2316 		return 0;
2317 
2318 	if ((event->attr.aux_pause || event->attr.aux_resume) &&
2319 	    !(group_leader->pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
2320 		return 0;
2321 
2322 	if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
2323 		return 0;
2324 
2325 	if (!atomic_long_inc_not_zero(&group_leader->refcount))
2326 		return 0;
2327 
2328 	/*
2329 	 * Link aux_outputs to their aux event; this is undone in
2330 	 * perf_group_detach() by perf_put_aux_event(). When the
2331 	 * group in torn down, the aux_output events loose their
2332 	 * link to the aux_event and can't schedule any more.
2333 	 */
2334 	event->aux_event = group_leader;
2335 
2336 	return 1;
2337 }
2338 
get_event_list(struct perf_event * event)2339 static inline struct list_head *get_event_list(struct perf_event *event)
2340 {
2341 	return event->attr.pinned ? &event->pmu_ctx->pinned_active :
2342 				    &event->pmu_ctx->flexible_active;
2343 }
2344 
perf_group_detach(struct perf_event * event)2345 static void perf_group_detach(struct perf_event *event)
2346 {
2347 	struct perf_event *leader = event->group_leader;
2348 	struct perf_event *sibling, *tmp;
2349 	struct perf_event_context *ctx = event->ctx;
2350 
2351 	lockdep_assert_held(&ctx->lock);
2352 
2353 	/*
2354 	 * We can have double detach due to exit/hot-unplug + close.
2355 	 */
2356 	if (!(event->attach_state & PERF_ATTACH_GROUP))
2357 		return;
2358 
2359 	event->attach_state &= ~PERF_ATTACH_GROUP;
2360 
2361 	perf_put_aux_event(event);
2362 
2363 	/*
2364 	 * If this is a sibling, remove it from its group.
2365 	 */
2366 	if (leader != event) {
2367 		list_del_init(&event->sibling_list);
2368 		event->group_leader->nr_siblings--;
2369 		event->group_leader->group_generation++;
2370 		goto out;
2371 	}
2372 
2373 	/*
2374 	 * If this was a group event with sibling events then
2375 	 * upgrade the siblings to singleton events by adding them
2376 	 * to whatever list we are on.
2377 	 */
2378 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
2379 
2380 		/*
2381 		 * Events that have PERF_EV_CAP_SIBLING require being part of
2382 		 * a group and cannot exist on their own, schedule them out
2383 		 * and move them into the ERROR state. Also see
2384 		 * _perf_event_enable(), it will not be able to recover this
2385 		 * ERROR state.
2386 		 */
2387 		if (sibling->event_caps & PERF_EV_CAP_SIBLING)
2388 			__event_disable(sibling, ctx, PERF_EVENT_STATE_ERROR);
2389 
2390 		sibling->group_leader = sibling;
2391 		list_del_init(&sibling->sibling_list);
2392 
2393 		/* Inherit group flags from the previous leader */
2394 		sibling->group_caps = event->group_caps;
2395 
2396 		if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
2397 			add_event_to_groups(sibling, event->ctx);
2398 
2399 			if (sibling->state == PERF_EVENT_STATE_ACTIVE)
2400 				list_add_tail(&sibling->active_list, get_event_list(sibling));
2401 		}
2402 
2403 		WARN_ON_ONCE(sibling->ctx != event->ctx);
2404 	}
2405 
2406 out:
2407 	for_each_sibling_event(tmp, leader)
2408 		perf_event__header_size(tmp);
2409 
2410 	perf_event__header_size(leader);
2411 }
2412 
perf_child_detach(struct perf_event * event)2413 static void perf_child_detach(struct perf_event *event)
2414 {
2415 	struct perf_event *parent_event = event->parent;
2416 
2417 	if (!(event->attach_state & PERF_ATTACH_CHILD))
2418 		return;
2419 
2420 	event->attach_state &= ~PERF_ATTACH_CHILD;
2421 
2422 	if (WARN_ON_ONCE(!parent_event))
2423 		return;
2424 
2425 	/*
2426 	 * Can't check this from an IPI, the holder is likey another CPU.
2427 	 *
2428 	lockdep_assert_held(&parent_event->child_mutex);
2429 	 */
2430 
2431 	list_del_init(&event->child_list);
2432 }
2433 
is_orphaned_event(struct perf_event * event)2434 static bool is_orphaned_event(struct perf_event *event)
2435 {
2436 	return event->state == PERF_EVENT_STATE_DEAD;
2437 }
2438 
2439 static inline int
event_filter_match(struct perf_event * event)2440 event_filter_match(struct perf_event *event)
2441 {
2442 	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
2443 	       perf_cgroup_match(event);
2444 }
2445 
is_event_in_freq_mode(struct perf_event * event)2446 static inline bool is_event_in_freq_mode(struct perf_event *event)
2447 {
2448 	return event->attr.freq && event->attr.sample_freq;
2449 }
2450 
2451 static void
event_sched_out(struct perf_event * event,struct perf_event_context * ctx)2452 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
2453 {
2454 	struct perf_event_pmu_context *epc = event->pmu_ctx;
2455 	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
2456 	enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
2457 
2458 	// XXX cpc serialization, probably per-cpu IRQ disabled
2459 
2460 	WARN_ON_ONCE(event->ctx != ctx);
2461 	lockdep_assert_held(&ctx->lock);
2462 
2463 	if (event->state != PERF_EVENT_STATE_ACTIVE)
2464 		return;
2465 
2466 	/*
2467 	 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
2468 	 * we can schedule events _OUT_ individually through things like
2469 	 * __perf_remove_from_context().
2470 	 */
2471 	list_del_init(&event->active_list);
2472 
2473 	perf_pmu_disable(event->pmu);
2474 
2475 	event->pmu->del(event, 0);
2476 	event->oncpu = -1;
2477 
2478 	if (event->pending_disable) {
2479 		event->pending_disable = 0;
2480 		perf_cgroup_event_disable(event, ctx);
2481 		state = PERF_EVENT_STATE_OFF;
2482 	}
2483 
2484 	perf_event_set_state(event, state);
2485 
2486 	if (!is_software_event(event))
2487 		cpc->active_oncpu--;
2488 	if (is_event_in_freq_mode(event)) {
2489 		ctx->nr_freq--;
2490 		epc->nr_freq--;
2491 	}
2492 	if (event->attr.exclusive || !cpc->active_oncpu)
2493 		cpc->exclusive = 0;
2494 
2495 	perf_pmu_enable(event->pmu);
2496 }
2497 
2498 static void
group_sched_out(struct perf_event * group_event,struct perf_event_context * ctx)2499 group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx)
2500 {
2501 	struct perf_event *event;
2502 
2503 	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
2504 		return;
2505 
2506 	perf_assert_pmu_disabled(group_event->pmu_ctx->pmu);
2507 
2508 	event_sched_out(group_event, ctx);
2509 
2510 	/*
2511 	 * Schedule out siblings (if any):
2512 	 */
2513 	for_each_sibling_event(event, group_event)
2514 		event_sched_out(event, ctx);
2515 }
2516 
2517 static inline void
__ctx_time_update(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx,bool final,enum event_type_t event_type)2518 __ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx,
2519 		  bool final, enum event_type_t event_type)
2520 {
2521 	if (ctx->is_active & EVENT_TIME) {
2522 		if (ctx->is_active & EVENT_FROZEN)
2523 			return;
2524 
2525 		update_context_time(ctx);
2526 		/* vPMU should not stop time */
2527 		update_cgrp_time_from_cpuctx(cpuctx, !(event_type & EVENT_GUEST) && final);
2528 	}
2529 }
2530 
2531 static inline void
ctx_time_update(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx)2532 ctx_time_update(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
2533 {
2534 	__ctx_time_update(cpuctx, ctx, false, 0);
2535 }
2536 
2537 /*
2538  * To be used inside perf_ctx_lock() / perf_ctx_unlock(). Lasts until perf_ctx_unlock().
2539  */
2540 static inline void
ctx_time_freeze(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx)2541 ctx_time_freeze(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
2542 {
2543 	ctx_time_update(cpuctx, ctx);
2544 	if (ctx->is_active & EVENT_TIME)
2545 		ctx->is_active |= EVENT_FROZEN;
2546 }
2547 
2548 static inline void
ctx_time_update_event(struct perf_event_context * ctx,struct perf_event * event)2549 ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event)
2550 {
2551 	if (ctx->is_active & EVENT_TIME) {
2552 		if (ctx->is_active & EVENT_FROZEN)
2553 			return;
2554 		update_context_time(ctx);
2555 		update_cgrp_time_from_event(event);
2556 	}
2557 }
2558 
2559 #define DETACH_GROUP	0x01UL
2560 #define DETACH_CHILD	0x02UL
2561 #define DETACH_EXIT	0x04UL
2562 #define DETACH_REVOKE	0x08UL
2563 #define DETACH_DEAD	0x10UL
2564 
2565 /*
2566  * Cross CPU call to remove a performance event
2567  *
2568  * We disable the event on the hardware level first. After that we
2569  * remove it from the context list.
2570  */
2571 static void
__perf_remove_from_context(struct perf_event * event,struct perf_cpu_context * cpuctx,struct perf_event_context * ctx,void * info)2572 __perf_remove_from_context(struct perf_event *event,
2573 			   struct perf_cpu_context *cpuctx,
2574 			   struct perf_event_context *ctx,
2575 			   void *info)
2576 {
2577 	struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx;
2578 	enum perf_event_state state = PERF_EVENT_STATE_OFF;
2579 	unsigned long flags = (unsigned long)info;
2580 
2581 	ctx_time_update(cpuctx, ctx);
2582 
2583 	/*
2584 	 * Ensure event_sched_out() switches to OFF, at the very least
2585 	 * this avoids raising perf_pending_task() at this time.
2586 	 */
2587 	if (flags & DETACH_EXIT)
2588 		state = PERF_EVENT_STATE_EXIT;
2589 	if (flags & DETACH_REVOKE)
2590 		state = PERF_EVENT_STATE_REVOKED;
2591 	if (flags & DETACH_DEAD)
2592 		state = PERF_EVENT_STATE_DEAD;
2593 
2594 	event_sched_out(event, ctx);
2595 
2596 	if (event->state > PERF_EVENT_STATE_OFF)
2597 		perf_cgroup_event_disable(event, ctx);
2598 
2599 	perf_event_set_state(event, min(event->state, state));
2600 
2601 	if (flags & DETACH_GROUP)
2602 		perf_group_detach(event);
2603 	if (flags & DETACH_CHILD)
2604 		perf_child_detach(event);
2605 	list_del_event(event, ctx);
2606 
2607 	if (!pmu_ctx->nr_events) {
2608 		pmu_ctx->rotate_necessary = 0;
2609 
2610 		if (ctx->task && ctx->is_active) {
2611 			struct perf_cpu_pmu_context *cpc = this_cpc(pmu_ctx->pmu);
2612 
2613 			WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
2614 			cpc->task_epc = NULL;
2615 		}
2616 	}
2617 
2618 	if (!ctx->nr_events && ctx->is_active) {
2619 		if (ctx == &cpuctx->ctx)
2620 			update_cgrp_time_from_cpuctx(cpuctx, true);
2621 
2622 		ctx->is_active = 0;
2623 		if (ctx->task) {
2624 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2625 			cpuctx->task_ctx = NULL;
2626 		}
2627 	}
2628 }
2629 
2630 /*
2631  * Remove the event from a task's (or a CPU's) list of events.
2632  *
2633  * If event->ctx is a cloned context, callers must make sure that
2634  * every task struct that event->ctx->task could possibly point to
2635  * remains valid.  This is OK when called from perf_release since
2636  * that only calls us on the top-level context, which can't be a clone.
2637  * When called from perf_event_exit_task, it's OK because the
2638  * context has been detached from its task.
2639  */
perf_remove_from_context(struct perf_event * event,unsigned long flags)2640 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
2641 {
2642 	struct perf_event_context *ctx = event->ctx;
2643 
2644 	lockdep_assert_held(&ctx->mutex);
2645 
2646 	/*
2647 	 * Because of perf_event_exit_task(), perf_remove_from_context() ought
2648 	 * to work in the face of TASK_TOMBSTONE, unlike every other
2649 	 * event_function_call() user.
2650 	 */
2651 	raw_spin_lock_irq(&ctx->lock);
2652 	if (!ctx->is_active) {
2653 		__perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context),
2654 					   ctx, (void *)flags);
2655 		raw_spin_unlock_irq(&ctx->lock);
2656 		return;
2657 	}
2658 	raw_spin_unlock_irq(&ctx->lock);
2659 
2660 	event_function_call(event, __perf_remove_from_context, (void *)flags);
2661 }
2662 
__event_disable(struct perf_event * event,struct perf_event_context * ctx,enum perf_event_state state)2663 static void __event_disable(struct perf_event *event,
2664 			    struct perf_event_context *ctx,
2665 			    enum perf_event_state state)
2666 {
2667 	event_sched_out(event, ctx);
2668 	perf_cgroup_event_disable(event, ctx);
2669 	perf_event_set_state(event, state);
2670 }
2671 
2672 /*
2673  * Cross CPU call to disable a performance event
2674  */
__perf_event_disable(struct perf_event * event,struct perf_cpu_context * cpuctx,struct perf_event_context * ctx,void * info)2675 static void __perf_event_disable(struct perf_event *event,
2676 				 struct perf_cpu_context *cpuctx,
2677 				 struct perf_event_context *ctx,
2678 				 void *info)
2679 {
2680 	if (event->state < PERF_EVENT_STATE_INACTIVE)
2681 		return;
2682 
2683 	perf_pmu_disable(event->pmu_ctx->pmu);
2684 	ctx_time_update_event(ctx, event);
2685 
2686 	/*
2687 	 * When disabling a group leader, the whole group becomes ineligible
2688 	 * to run, so schedule out the full group.
2689 	 */
2690 	if (event == event->group_leader)
2691 		group_sched_out(event, ctx);
2692 
2693 	/*
2694 	 * But only mark the leader OFF; the siblings will remain
2695 	 * INACTIVE.
2696 	 */
2697 	__event_disable(event, ctx, PERF_EVENT_STATE_OFF);
2698 
2699 	perf_pmu_enable(event->pmu_ctx->pmu);
2700 }
2701 
2702 /*
2703  * Disable an event.
2704  *
2705  * If event->ctx is a cloned context, callers must make sure that
2706  * every task struct that event->ctx->task could possibly point to
2707  * remains valid.  This condition is satisfied when called through
2708  * perf_event_for_each_child or perf_event_for_each because they
2709  * hold the top-level event's child_mutex, so any descendant that
2710  * goes to exit will block in perf_event_exit_event().
2711  *
2712  * When called from perf_pending_disable it's OK because event->ctx
2713  * is the current context on this CPU and preemption is disabled,
2714  * hence we can't get into perf_event_task_sched_out for this context.
2715  */
_perf_event_disable(struct perf_event * event)2716 static void _perf_event_disable(struct perf_event *event)
2717 {
2718 	struct perf_event_context *ctx = event->ctx;
2719 
2720 	raw_spin_lock_irq(&ctx->lock);
2721 	if (event->state <= PERF_EVENT_STATE_OFF) {
2722 		raw_spin_unlock_irq(&ctx->lock);
2723 		return;
2724 	}
2725 	raw_spin_unlock_irq(&ctx->lock);
2726 
2727 	event_function_call(event, __perf_event_disable, NULL);
2728 }
2729 
perf_event_disable_local(struct perf_event * event)2730 void perf_event_disable_local(struct perf_event *event)
2731 {
2732 	event_function_local(event, __perf_event_disable, NULL);
2733 }
2734 
2735 /*
2736  * Strictly speaking kernel users cannot create groups and therefore this
2737  * interface does not need the perf_event_ctx_lock() magic.
2738  */
perf_event_disable(struct perf_event * event)2739 void perf_event_disable(struct perf_event *event)
2740 {
2741 	struct perf_event_context *ctx;
2742 
2743 	ctx = perf_event_ctx_lock(event);
2744 	_perf_event_disable(event);
2745 	perf_event_ctx_unlock(event, ctx);
2746 }
2747 EXPORT_SYMBOL_GPL(perf_event_disable);
2748 
perf_event_disable_inatomic(struct perf_event * event)2749 void perf_event_disable_inatomic(struct perf_event *event)
2750 {
2751 	event->pending_disable = 1;
2752 	irq_work_queue(&event->pending_disable_irq);
2753 }
2754 
2755 #define MAX_INTERRUPTS (~0ULL)
2756 
2757 static void perf_log_throttle(struct perf_event *event, int enable);
2758 static void perf_log_itrace_start(struct perf_event *event);
2759 
perf_event_unthrottle(struct perf_event * event,bool start)2760 static void perf_event_unthrottle(struct perf_event *event, bool start)
2761 {
2762 	if (event->state != PERF_EVENT_STATE_ACTIVE)
2763 		return;
2764 
2765 	event->hw.interrupts = 0;
2766 	if (start)
2767 		event->pmu->start(event, 0);
2768 	if (event == event->group_leader)
2769 		perf_log_throttle(event, 1);
2770 }
2771 
perf_event_throttle(struct perf_event * event)2772 static void perf_event_throttle(struct perf_event *event)
2773 {
2774 	if (event->state != PERF_EVENT_STATE_ACTIVE)
2775 		return;
2776 
2777 	event->hw.interrupts = MAX_INTERRUPTS;
2778 	event->pmu->stop(event, 0);
2779 	if (event == event->group_leader)
2780 		perf_log_throttle(event, 0);
2781 }
2782 
perf_event_unthrottle_group(struct perf_event * event,bool skip_start_event)2783 static void perf_event_unthrottle_group(struct perf_event *event, bool skip_start_event)
2784 {
2785 	struct perf_event *sibling, *leader = event->group_leader;
2786 
2787 	perf_event_unthrottle(leader, skip_start_event ? leader != event : true);
2788 	for_each_sibling_event(sibling, leader)
2789 		perf_event_unthrottle(sibling, skip_start_event ? sibling != event : true);
2790 }
2791 
perf_event_throttle_group(struct perf_event * event)2792 static void perf_event_throttle_group(struct perf_event *event)
2793 {
2794 	struct perf_event *sibling, *leader = event->group_leader;
2795 
2796 	perf_event_throttle(leader);
2797 	for_each_sibling_event(sibling, leader)
2798 		perf_event_throttle(sibling);
2799 }
2800 
2801 static int
event_sched_in(struct perf_event * event,struct perf_event_context * ctx)2802 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
2803 {
2804 	struct perf_event_pmu_context *epc = event->pmu_ctx;
2805 	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
2806 	int ret = 0;
2807 
2808 	WARN_ON_ONCE(event->ctx != ctx);
2809 
2810 	lockdep_assert_held(&ctx->lock);
2811 
2812 	if (event->state <= PERF_EVENT_STATE_OFF)
2813 		return 0;
2814 
2815 	WRITE_ONCE(event->oncpu, smp_processor_id());
2816 	/*
2817 	 * Order event::oncpu write to happen before the ACTIVE state is
2818 	 * visible. This allows perf_event_{stop,read}() to observe the correct
2819 	 * ->oncpu if it sees ACTIVE.
2820 	 */
2821 	smp_wmb();
2822 	perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
2823 
2824 	/*
2825 	 * Unthrottle events, since we scheduled we might have missed several
2826 	 * ticks already, also for a heavily scheduling task there is little
2827 	 * guarantee it'll get a tick in a timely manner.
2828 	 */
2829 	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS))
2830 		perf_event_unthrottle(event, false);
2831 
2832 	perf_pmu_disable(event->pmu);
2833 
2834 	perf_log_itrace_start(event);
2835 
2836 	if (event->pmu->add(event, PERF_EF_START)) {
2837 		perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
2838 		event->oncpu = -1;
2839 		ret = -EAGAIN;
2840 		goto out;
2841 	}
2842 
2843 	if (!is_software_event(event))
2844 		cpc->active_oncpu++;
2845 	if (is_event_in_freq_mode(event)) {
2846 		ctx->nr_freq++;
2847 		epc->nr_freq++;
2848 	}
2849 	if (event->attr.exclusive)
2850 		cpc->exclusive = 1;
2851 
2852 out:
2853 	perf_pmu_enable(event->pmu);
2854 
2855 	return ret;
2856 }
2857 
2858 static int
group_sched_in(struct perf_event * group_event,struct perf_event_context * ctx)2859 group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
2860 {
2861 	struct perf_event *event, *partial_group = NULL;
2862 	struct pmu *pmu = group_event->pmu_ctx->pmu;
2863 
2864 	if (group_event->state == PERF_EVENT_STATE_OFF)
2865 		return 0;
2866 
2867 	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2868 
2869 	if (event_sched_in(group_event, ctx))
2870 		goto error;
2871 
2872 	/*
2873 	 * Schedule in siblings as one group (if any):
2874 	 */
2875 	for_each_sibling_event(event, group_event) {
2876 		if (event_sched_in(event, ctx)) {
2877 			partial_group = event;
2878 			goto group_error;
2879 		}
2880 	}
2881 
2882 	if (!pmu->commit_txn(pmu))
2883 		return 0;
2884 
2885 group_error:
2886 	/*
2887 	 * Groups can be scheduled in as one unit only, so undo any
2888 	 * partial group before returning:
2889 	 * The events up to the failed event are scheduled out normally.
2890 	 */
2891 	for_each_sibling_event(event, group_event) {
2892 		if (event == partial_group)
2893 			break;
2894 
2895 		event_sched_out(event, ctx);
2896 	}
2897 	event_sched_out(group_event, ctx);
2898 
2899 error:
2900 	pmu->cancel_txn(pmu);
2901 	return -EAGAIN;
2902 }
2903 
2904 /*
2905  * Work out whether we can put this event group on the CPU now.
2906  */
group_can_go_on(struct perf_event * event,int can_add_hw)2907 static int group_can_go_on(struct perf_event *event, int can_add_hw)
2908 {
2909 	struct perf_event_pmu_context *epc = event->pmu_ctx;
2910 	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
2911 
2912 	/*
2913 	 * Groups consisting entirely of software events can always go on.
2914 	 */
2915 	if (event->group_caps & PERF_EV_CAP_SOFTWARE)
2916 		return 1;
2917 	/*
2918 	 * If an exclusive group is already on, no other hardware
2919 	 * events can go on.
2920 	 */
2921 	if (cpc->exclusive)
2922 		return 0;
2923 	/*
2924 	 * If this group is exclusive and there are already
2925 	 * events on the CPU, it can't go on.
2926 	 */
2927 	if (event->attr.exclusive && !list_empty(get_event_list(event)))
2928 		return 0;
2929 	/*
2930 	 * Otherwise, try to add it if all previous groups were able
2931 	 * to go on.
2932 	 */
2933 	return can_add_hw;
2934 }
2935 
add_event_to_ctx(struct perf_event * event,struct perf_event_context * ctx)2936 static void add_event_to_ctx(struct perf_event *event,
2937 			       struct perf_event_context *ctx)
2938 {
2939 	list_add_event(event, ctx);
2940 	perf_group_attach(event);
2941 }
2942 
task_ctx_sched_out(struct perf_event_context * ctx,struct pmu * pmu,enum event_type_t event_type)2943 static void task_ctx_sched_out(struct perf_event_context *ctx,
2944 			       struct pmu *pmu,
2945 			       enum event_type_t event_type)
2946 {
2947 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
2948 
2949 	if (!cpuctx->task_ctx)
2950 		return;
2951 
2952 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2953 		return;
2954 
2955 	ctx_sched_out(ctx, pmu, event_type);
2956 }
2957 
perf_event_sched_in(struct perf_cpu_context * cpuctx,struct perf_event_context * ctx,struct pmu * pmu,enum event_type_t event_type)2958 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2959 				struct perf_event_context *ctx,
2960 				struct pmu *pmu,
2961 				enum event_type_t event_type)
2962 {
2963 	ctx_sched_in(&cpuctx->ctx, pmu, EVENT_PINNED | event_type);
2964 	if (ctx)
2965 		ctx_sched_in(ctx, pmu, EVENT_PINNED | event_type);
2966 	ctx_sched_in(&cpuctx->ctx, pmu, EVENT_FLEXIBLE | event_type);
2967 	if (ctx)
2968 		ctx_sched_in(ctx, pmu, EVENT_FLEXIBLE | event_type);
2969 }
2970 
2971 /*
2972  * We want to maintain the following priority of scheduling:
2973  *  - CPU pinned (EVENT_CPU | EVENT_PINNED)
2974  *  - task pinned (EVENT_PINNED)
2975  *  - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2976  *  - task flexible (EVENT_FLEXIBLE).
2977  *
2978  * In order to avoid unscheduling and scheduling back in everything every
2979  * time an event is added, only do it for the groups of equal priority and
2980  * below.
2981  *
2982  * This can be called after a batch operation on task events, in which case
2983  * event_type is a bit mask of the types of events involved. For CPU events,
2984  * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2985  */
ctx_resched(struct perf_cpu_context * cpuctx,struct perf_event_context * task_ctx,struct pmu * pmu,enum event_type_t event_type)2986 static void ctx_resched(struct perf_cpu_context *cpuctx,
2987 			struct perf_event_context *task_ctx,
2988 			struct pmu *pmu, enum event_type_t event_type)
2989 {
2990 	bool cpu_event = !!(event_type & EVENT_CPU);
2991 	struct perf_event_pmu_context *epc;
2992 
2993 	/*
2994 	 * If pinned groups are involved, flexible groups also need to be
2995 	 * scheduled out.
2996 	 */
2997 	if (event_type & EVENT_PINNED)
2998 		event_type |= EVENT_FLEXIBLE;
2999 
3000 	event_type &= EVENT_ALL;
3001 
3002 	for_each_epc(epc, &cpuctx->ctx, pmu, 0)
3003 		perf_pmu_disable(epc->pmu);
3004 
3005 	if (task_ctx) {
3006 		for_each_epc(epc, task_ctx, pmu, 0)
3007 			perf_pmu_disable(epc->pmu);
3008 
3009 		task_ctx_sched_out(task_ctx, pmu, event_type);
3010 	}
3011 
3012 	/*
3013 	 * Decide which cpu ctx groups to schedule out based on the types
3014 	 * of events that caused rescheduling:
3015 	 *  - EVENT_CPU: schedule out corresponding groups;
3016 	 *  - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
3017 	 *  - otherwise, do nothing more.
3018 	 */
3019 	if (cpu_event)
3020 		ctx_sched_out(&cpuctx->ctx, pmu, event_type);
3021 	else if (event_type & EVENT_PINNED)
3022 		ctx_sched_out(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
3023 
3024 	perf_event_sched_in(cpuctx, task_ctx, pmu, 0);
3025 
3026 	for_each_epc(epc, &cpuctx->ctx, pmu, 0)
3027 		perf_pmu_enable(epc->pmu);
3028 
3029 	if (task_ctx) {
3030 		for_each_epc(epc, task_ctx, pmu, 0)
3031 			perf_pmu_enable(epc->pmu);
3032 	}
3033 }
3034 
perf_pmu_resched(struct pmu * pmu)3035 void perf_pmu_resched(struct pmu *pmu)
3036 {
3037 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3038 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
3039 
3040 	perf_ctx_lock(cpuctx, task_ctx);
3041 	ctx_resched(cpuctx, task_ctx, pmu, EVENT_ALL|EVENT_CPU);
3042 	perf_ctx_unlock(cpuctx, task_ctx);
3043 }
3044 
3045 /*
3046  * Cross CPU call to install and enable a performance event
3047  *
3048  * Very similar to remote_function() + event_function() but cannot assume that
3049  * things like ctx->is_active and cpuctx->task_ctx are set.
3050  */
__perf_install_in_context(void * info)3051 static int  __perf_install_in_context(void *info)
3052 {
3053 	struct perf_event *event = info;
3054 	struct perf_event_context *ctx = event->ctx;
3055 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3056 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
3057 	bool reprogram = true;
3058 	int ret = 0;
3059 
3060 	raw_spin_lock(&cpuctx->ctx.lock);
3061 	if (ctx->task) {
3062 		raw_spin_lock(&ctx->lock);
3063 		task_ctx = ctx;
3064 
3065 		reprogram = (ctx->task == current);
3066 
3067 		/*
3068 		 * If the task is running, it must be running on this CPU,
3069 		 * otherwise we cannot reprogram things.
3070 		 *
3071 		 * If its not running, we don't care, ctx->lock will
3072 		 * serialize against it becoming runnable.
3073 		 */
3074 		if (task_curr(ctx->task) && !reprogram) {
3075 			ret = -ESRCH;
3076 			goto unlock;
3077 		}
3078 
3079 		WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
3080 	} else if (task_ctx) {
3081 		raw_spin_lock(&task_ctx->lock);
3082 	}
3083 
3084 #ifdef CONFIG_CGROUP_PERF
3085 	if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
3086 		/*
3087 		 * If the current cgroup doesn't match the event's
3088 		 * cgroup, we should not try to schedule it.
3089 		 */
3090 		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
3091 		reprogram = cgroup_is_descendant(cgrp->css.cgroup,
3092 					event->cgrp->css.cgroup);
3093 	}
3094 #endif
3095 
3096 	if (reprogram) {
3097 		ctx_time_freeze(cpuctx, ctx);
3098 		add_event_to_ctx(event, ctx);
3099 		ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu,
3100 			    get_event_type(event));
3101 	} else {
3102 		add_event_to_ctx(event, ctx);
3103 	}
3104 
3105 unlock:
3106 	perf_ctx_unlock(cpuctx, task_ctx);
3107 
3108 	return ret;
3109 }
3110 
3111 static bool exclusive_event_installable(struct perf_event *event,
3112 					struct perf_event_context *ctx);
3113 
3114 /*
3115  * Attach a performance event to a context.
3116  *
3117  * Very similar to event_function_call, see comment there.
3118  */
3119 static void
perf_install_in_context(struct perf_event_context * ctx,struct perf_event * event,int cpu)3120 perf_install_in_context(struct perf_event_context *ctx,
3121 			struct perf_event *event,
3122 			int cpu)
3123 {
3124 	struct task_struct *task = READ_ONCE(ctx->task);
3125 
3126 	lockdep_assert_held(&ctx->mutex);
3127 
3128 	WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
3129 
3130 	if (event->cpu != -1)
3131 		WARN_ON_ONCE(event->cpu != cpu);
3132 
3133 	/*
3134 	 * Ensures that if we can observe event->ctx, both the event and ctx
3135 	 * will be 'complete'. See perf_iterate_sb_cpu().
3136 	 */
3137 	smp_store_release(&event->ctx, ctx);
3138 
3139 	/*
3140 	 * perf_event_attr::disabled events will not run and can be initialized
3141 	 * without IPI. Except when this is the first event for the context, in
3142 	 * that case we need the magic of the IPI to set ctx->is_active.
3143 	 *
3144 	 * The IOC_ENABLE that is sure to follow the creation of a disabled
3145 	 * event will issue the IPI and reprogram the hardware.
3146 	 */
3147 	if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
3148 	    ctx->nr_events && !is_cgroup_event(event)) {
3149 		raw_spin_lock_irq(&ctx->lock);
3150 		if (ctx->task == TASK_TOMBSTONE) {
3151 			raw_spin_unlock_irq(&ctx->lock);
3152 			return;
3153 		}
3154 		add_event_to_ctx(event, ctx);
3155 		raw_spin_unlock_irq(&ctx->lock);
3156 		return;
3157 	}
3158 
3159 	if (!task) {
3160 		cpu_function_call(cpu, __perf_install_in_context, event);
3161 		return;
3162 	}
3163 
3164 	/*
3165 	 * Should not happen, we validate the ctx is still alive before calling.
3166 	 */
3167 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
3168 		return;
3169 
3170 	/*
3171 	 * Installing events is tricky because we cannot rely on ctx->is_active
3172 	 * to be set in case this is the nr_events 0 -> 1 transition.
3173 	 *
3174 	 * Instead we use task_curr(), which tells us if the task is running.
3175 	 * However, since we use task_curr() outside of rq::lock, we can race
3176 	 * against the actual state. This means the result can be wrong.
3177 	 *
3178 	 * If we get a false positive, we retry, this is harmless.
3179 	 *
3180 	 * If we get a false negative, things are complicated. If we are after
3181 	 * perf_event_context_sched_in() ctx::lock will serialize us, and the
3182 	 * value must be correct. If we're before, it doesn't matter since
3183 	 * perf_event_context_sched_in() will program the counter.
3184 	 *
3185 	 * However, this hinges on the remote context switch having observed
3186 	 * our task->perf_event_ctxp[] store, such that it will in fact take
3187 	 * ctx::lock in perf_event_context_sched_in().
3188 	 *
3189 	 * We do this by task_function_call(), if the IPI fails to hit the task
3190 	 * we know any future context switch of task must see the
3191 	 * perf_event_ctpx[] store.
3192 	 */
3193 
3194 	/*
3195 	 * This smp_mb() orders the task->perf_event_ctxp[] store with the
3196 	 * task_cpu() load, such that if the IPI then does not find the task
3197 	 * running, a future context switch of that task must observe the
3198 	 * store.
3199 	 */
3200 	smp_mb();
3201 again:
3202 	if (!task_function_call(task, __perf_install_in_context, event))
3203 		return;
3204 
3205 	raw_spin_lock_irq(&ctx->lock);
3206 	task = ctx->task;
3207 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
3208 		/*
3209 		 * Cannot happen because we already checked above (which also
3210 		 * cannot happen), and we hold ctx->mutex, which serializes us
3211 		 * against perf_event_exit_task_context().
3212 		 */
3213 		raw_spin_unlock_irq(&ctx->lock);
3214 		return;
3215 	}
3216 	/*
3217 	 * If the task is not running, ctx->lock will avoid it becoming so,
3218 	 * thus we can safely install the event.
3219 	 */
3220 	if (task_curr(task)) {
3221 		raw_spin_unlock_irq(&ctx->lock);
3222 		goto again;
3223 	}
3224 	add_event_to_ctx(event, ctx);
3225 	raw_spin_unlock_irq(&ctx->lock);
3226 }
3227 
3228 /*
3229  * Cross CPU call to enable a performance event
3230  */
__perf_event_enable(struct perf_event * event,struct perf_cpu_context * cpuctx,struct perf_event_context * ctx,void * info)3231 static void __perf_event_enable(struct perf_event *event,
3232 				struct perf_cpu_context *cpuctx,
3233 				struct perf_event_context *ctx,
3234 				void *info)
3235 {
3236 	struct perf_event *leader = event->group_leader;
3237 	struct perf_event_context *task_ctx;
3238 
3239 	if (event->state >= PERF_EVENT_STATE_INACTIVE ||
3240 	    event->state <= PERF_EVENT_STATE_ERROR)
3241 		return;
3242 
3243 	ctx_time_freeze(cpuctx, ctx);
3244 
3245 	perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
3246 	perf_cgroup_event_enable(event, ctx);
3247 
3248 	if (!ctx->is_active)
3249 		return;
3250 
3251 	if (!event_filter_match(event))
3252 		return;
3253 
3254 	/*
3255 	 * If the event is in a group and isn't the group leader,
3256 	 * then don't put it on unless the group is on.
3257 	 */
3258 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
3259 		return;
3260 
3261 	task_ctx = cpuctx->task_ctx;
3262 	if (ctx->task)
3263 		WARN_ON_ONCE(task_ctx != ctx);
3264 
3265 	ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event));
3266 }
3267 
3268 /*
3269  * Enable an event.
3270  *
3271  * If event->ctx is a cloned context, callers must make sure that
3272  * every task struct that event->ctx->task could possibly point to
3273  * remains valid.  This condition is satisfied when called through
3274  * perf_event_for_each_child or perf_event_for_each as described
3275  * for perf_event_disable.
3276  */
_perf_event_enable(struct perf_event * event)3277 static void _perf_event_enable(struct perf_event *event)
3278 {
3279 	struct perf_event_context *ctx = event->ctx;
3280 
3281 	raw_spin_lock_irq(&ctx->lock);
3282 	if (event->state >= PERF_EVENT_STATE_INACTIVE ||
3283 	    event->state <  PERF_EVENT_STATE_ERROR) {
3284 out:
3285 		raw_spin_unlock_irq(&ctx->lock);
3286 		return;
3287 	}
3288 
3289 	/*
3290 	 * If the event is in error state, clear that first.
3291 	 *
3292 	 * That way, if we see the event in error state below, we know that it
3293 	 * has gone back into error state, as distinct from the task having
3294 	 * been scheduled away before the cross-call arrived.
3295 	 */
3296 	if (event->state == PERF_EVENT_STATE_ERROR) {
3297 		/*
3298 		 * Detached SIBLING events cannot leave ERROR state.
3299 		 */
3300 		if (event->event_caps & PERF_EV_CAP_SIBLING &&
3301 		    event->group_leader == event)
3302 			goto out;
3303 
3304 		event->state = PERF_EVENT_STATE_OFF;
3305 	}
3306 	raw_spin_unlock_irq(&ctx->lock);
3307 
3308 	event_function_call(event, __perf_event_enable, NULL);
3309 }
3310 
3311 /*
3312  * See perf_event_disable();
3313  */
perf_event_enable(struct perf_event * event)3314 void perf_event_enable(struct perf_event *event)
3315 {
3316 	struct perf_event_context *ctx;
3317 
3318 	ctx = perf_event_ctx_lock(event);
3319 	_perf_event_enable(event);
3320 	perf_event_ctx_unlock(event, ctx);
3321 }
3322 EXPORT_SYMBOL_GPL(perf_event_enable);
3323 
3324 struct stop_event_data {
3325 	struct perf_event	*event;
3326 	unsigned int		restart;
3327 };
3328 
__perf_event_stop(void * info)3329 static int __perf_event_stop(void *info)
3330 {
3331 	struct stop_event_data *sd = info;
3332 	struct perf_event *event = sd->event;
3333 
3334 	/* if it's already INACTIVE, do nothing */
3335 	if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3336 		return 0;
3337 
3338 	/* matches smp_wmb() in event_sched_in() */
3339 	smp_rmb();
3340 
3341 	/*
3342 	 * There is a window with interrupts enabled before we get here,
3343 	 * so we need to check again lest we try to stop another CPU's event.
3344 	 */
3345 	if (READ_ONCE(event->oncpu) != smp_processor_id())
3346 		return -EAGAIN;
3347 
3348 	event->pmu->stop(event, PERF_EF_UPDATE);
3349 
3350 	/*
3351 	 * May race with the actual stop (through perf_pmu_output_stop()),
3352 	 * but it is only used for events with AUX ring buffer, and such
3353 	 * events will refuse to restart because of rb::aux_mmap_count==0,
3354 	 * see comments in perf_aux_output_begin().
3355 	 *
3356 	 * Since this is happening on an event-local CPU, no trace is lost
3357 	 * while restarting.
3358 	 */
3359 	if (sd->restart)
3360 		event->pmu->start(event, 0);
3361 
3362 	return 0;
3363 }
3364 
perf_event_stop(struct perf_event * event,int restart)3365 static int perf_event_stop(struct perf_event *event, int restart)
3366 {
3367 	struct stop_event_data sd = {
3368 		.event		= event,
3369 		.restart	= restart,
3370 	};
3371 	int ret = 0;
3372 
3373 	do {
3374 		if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3375 			return 0;
3376 
3377 		/* matches smp_wmb() in event_sched_in() */
3378 		smp_rmb();
3379 
3380 		/*
3381 		 * We only want to restart ACTIVE events, so if the event goes
3382 		 * inactive here (event->oncpu==-1), there's nothing more to do;
3383 		 * fall through with ret==-ENXIO.
3384 		 */
3385 		ret = cpu_function_call(READ_ONCE(event->oncpu),
3386 					__perf_event_stop, &sd);
3387 	} while (ret == -EAGAIN);
3388 
3389 	return ret;
3390 }
3391 
3392 /*
3393  * In order to contain the amount of racy and tricky in the address filter
3394  * configuration management, it is a two part process:
3395  *
3396  * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
3397  *      we update the addresses of corresponding vmas in
3398  *	event::addr_filter_ranges array and bump the event::addr_filters_gen;
3399  * (p2) when an event is scheduled in (pmu::add), it calls
3400  *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
3401  *      if the generation has changed since the previous call.
3402  *
3403  * If (p1) happens while the event is active, we restart it to force (p2).
3404  *
3405  * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
3406  *     pre-existing mappings, called once when new filters arrive via SET_FILTER
3407  *     ioctl;
3408  * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
3409  *     registered mapping, called for every new mmap(), with mm::mmap_lock down
3410  *     for reading;
3411  * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
3412  *     of exec.
3413  */
perf_event_addr_filters_sync(struct perf_event * event)3414 void perf_event_addr_filters_sync(struct perf_event *event)
3415 {
3416 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
3417 
3418 	if (!has_addr_filter(event))
3419 		return;
3420 
3421 	raw_spin_lock(&ifh->lock);
3422 	if (event->addr_filters_gen != event->hw.addr_filters_gen) {
3423 		event->pmu->addr_filters_sync(event);
3424 		event->hw.addr_filters_gen = event->addr_filters_gen;
3425 	}
3426 	raw_spin_unlock(&ifh->lock);
3427 }
3428 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
3429 
_perf_event_refresh(struct perf_event * event,int refresh)3430 static int _perf_event_refresh(struct perf_event *event, int refresh)
3431 {
3432 	/*
3433 	 * not supported on inherited events
3434 	 */
3435 	if (event->attr.inherit || !is_sampling_event(event))
3436 		return -EINVAL;
3437 
3438 	atomic_add(refresh, &event->event_limit);
3439 	_perf_event_enable(event);
3440 
3441 	return 0;
3442 }
3443 
3444 /*
3445  * See perf_event_disable()
3446  */
perf_event_refresh(struct perf_event * event,int refresh)3447 int perf_event_refresh(struct perf_event *event, int refresh)
3448 {
3449 	struct perf_event_context *ctx;
3450 	int ret;
3451 
3452 	ctx = perf_event_ctx_lock(event);
3453 	ret = _perf_event_refresh(event, refresh);
3454 	perf_event_ctx_unlock(event, ctx);
3455 
3456 	return ret;
3457 }
3458 EXPORT_SYMBOL_GPL(perf_event_refresh);
3459 
perf_event_modify_breakpoint(struct perf_event * bp,struct perf_event_attr * attr)3460 static int perf_event_modify_breakpoint(struct perf_event *bp,
3461 					 struct perf_event_attr *attr)
3462 {
3463 	int err;
3464 
3465 	_perf_event_disable(bp);
3466 
3467 	err = modify_user_hw_breakpoint_check(bp, attr, true);
3468 
3469 	if (!bp->attr.disabled)
3470 		_perf_event_enable(bp);
3471 
3472 	return err;
3473 }
3474 
3475 /*
3476  * Copy event-type-independent attributes that may be modified.
3477  */
perf_event_modify_copy_attr(struct perf_event_attr * to,const struct perf_event_attr * from)3478 static void perf_event_modify_copy_attr(struct perf_event_attr *to,
3479 					const struct perf_event_attr *from)
3480 {
3481 	to->sig_data = from->sig_data;
3482 }
3483 
perf_event_modify_attr(struct perf_event * event,struct perf_event_attr * attr)3484 static int perf_event_modify_attr(struct perf_event *event,
3485 				  struct perf_event_attr *attr)
3486 {
3487 	int (*func)(struct perf_event *, struct perf_event_attr *);
3488 	struct perf_event *child;
3489 	int err;
3490 
3491 	if (event->attr.type != attr->type)
3492 		return -EINVAL;
3493 
3494 	switch (event->attr.type) {
3495 	case PERF_TYPE_BREAKPOINT:
3496 		func = perf_event_modify_breakpoint;
3497 		break;
3498 	default:
3499 		/* Place holder for future additions. */
3500 		return -EOPNOTSUPP;
3501 	}
3502 
3503 	WARN_ON_ONCE(event->ctx->parent_ctx);
3504 
3505 	mutex_lock(&event->child_mutex);
3506 	/*
3507 	 * Event-type-independent attributes must be copied before event-type
3508 	 * modification, which will validate that final attributes match the
3509 	 * source attributes after all relevant attributes have been copied.
3510 	 */
3511 	perf_event_modify_copy_attr(&event->attr, attr);
3512 	err = func(event, attr);
3513 	if (err)
3514 		goto out;
3515 	list_for_each_entry(child, &event->child_list, child_list) {
3516 		perf_event_modify_copy_attr(&child->attr, attr);
3517 		err = func(child, attr);
3518 		if (err)
3519 			goto out;
3520 	}
3521 out:
3522 	mutex_unlock(&event->child_mutex);
3523 	return err;
3524 }
3525 
__pmu_ctx_sched_out(struct perf_event_pmu_context * pmu_ctx,enum event_type_t event_type)3526 static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
3527 				enum event_type_t event_type)
3528 {
3529 	struct perf_event_context *ctx = pmu_ctx->ctx;
3530 	struct perf_event *event, *tmp;
3531 	struct pmu *pmu = pmu_ctx->pmu;
3532 
3533 	if (ctx->task && !(ctx->is_active & EVENT_ALL)) {
3534 		struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
3535 
3536 		WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3537 		cpc->task_epc = NULL;
3538 	}
3539 
3540 	if (!(event_type & EVENT_ALL))
3541 		return;
3542 
3543 	perf_pmu_disable(pmu);
3544 	if (event_type & EVENT_PINNED) {
3545 		list_for_each_entry_safe(event, tmp,
3546 					 &pmu_ctx->pinned_active,
3547 					 active_list)
3548 			group_sched_out(event, ctx);
3549 	}
3550 
3551 	if (event_type & EVENT_FLEXIBLE) {
3552 		list_for_each_entry_safe(event, tmp,
3553 					 &pmu_ctx->flexible_active,
3554 					 active_list)
3555 			group_sched_out(event, ctx);
3556 		/*
3557 		 * Since we cleared EVENT_FLEXIBLE, also clear
3558 		 * rotate_necessary, is will be reset by
3559 		 * ctx_flexible_sched_in() when needed.
3560 		 */
3561 		pmu_ctx->rotate_necessary = 0;
3562 	}
3563 	perf_pmu_enable(pmu);
3564 }
3565 
3566 /*
3567  * Be very careful with the @pmu argument since this will change ctx state.
3568  * The @pmu argument works for ctx_resched(), because that is symmetric in
3569  * ctx_sched_out() / ctx_sched_in() usage and the ctx state ends up invariant.
3570  *
3571  * However, if you were to be asymmetrical, you could end up with messed up
3572  * state, eg. ctx->is_active cleared even though most EPCs would still actually
3573  * be active.
3574  */
3575 static void
ctx_sched_out(struct perf_event_context * ctx,struct pmu * pmu,enum event_type_t event_type)3576 ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
3577 {
3578 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3579 	enum event_type_t active_type = event_type & ~EVENT_FLAGS;
3580 	struct perf_event_pmu_context *pmu_ctx;
3581 	int is_active = ctx->is_active;
3582 
3583 
3584 	lockdep_assert_held(&ctx->lock);
3585 
3586 	if (likely(!ctx->nr_events)) {
3587 		/*
3588 		 * See __perf_remove_from_context().
3589 		 */
3590 		WARN_ON_ONCE(ctx->is_active);
3591 		if (ctx->task)
3592 			WARN_ON_ONCE(cpuctx->task_ctx);
3593 		return;
3594 	}
3595 
3596 	/*
3597 	 * Always update time if it was set; not only when it changes.
3598 	 * Otherwise we can 'forget' to update time for any but the last
3599 	 * context we sched out. For example:
3600 	 *
3601 	 *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
3602 	 *   ctx_sched_out(.event_type = EVENT_PINNED)
3603 	 *
3604 	 * would only update time for the pinned events.
3605 	 */
3606 	__ctx_time_update(cpuctx, ctx, ctx == &cpuctx->ctx, event_type);
3607 
3608 	/*
3609 	 * CPU-release for the below ->is_active store,
3610 	 * see __load_acquire() in perf_event_time_now()
3611 	 */
3612 	barrier();
3613 	ctx->is_active &= ~active_type;
3614 
3615 	if (!(ctx->is_active & EVENT_ALL)) {
3616 		/*
3617 		 * For FROZEN, preserve TIME|FROZEN such that perf_event_time_now()
3618 		 * does not observe a hole. perf_ctx_unlock() will clean up.
3619 		 */
3620 		if (ctx->is_active & EVENT_FROZEN)
3621 			ctx->is_active &= EVENT_TIME_FROZEN;
3622 		else
3623 			ctx->is_active = 0;
3624 	}
3625 
3626 	if (ctx->task) {
3627 		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3628 		if (!(ctx->is_active & EVENT_ALL))
3629 			cpuctx->task_ctx = NULL;
3630 	}
3631 
3632 	if (event_type & EVENT_GUEST) {
3633 		/*
3634 		 * Schedule out all exclude_guest events of PMU
3635 		 * with PERF_PMU_CAP_MEDIATED_VPMU.
3636 		 */
3637 		is_active = EVENT_ALL;
3638 		__update_context_guest_time(ctx, false);
3639 		perf_cgroup_set_timestamp(cpuctx, true);
3640 		barrier();
3641 	} else {
3642 		is_active ^= ctx->is_active; /* changed bits */
3643 	}
3644 
3645 	for_each_epc(pmu_ctx, ctx, pmu, event_type)
3646 		__pmu_ctx_sched_out(pmu_ctx, is_active);
3647 }
3648 
3649 /*
3650  * Test whether two contexts are equivalent, i.e. whether they have both been
3651  * cloned from the same version of the same context.
3652  *
3653  * Equivalence is measured using a generation number in the context that is
3654  * incremented on each modification to it; see unclone_ctx(), list_add_event()
3655  * and list_del_event().
3656  */
context_equiv(struct perf_event_context * ctx1,struct perf_event_context * ctx2)3657 static int context_equiv(struct perf_event_context *ctx1,
3658 			 struct perf_event_context *ctx2)
3659 {
3660 	lockdep_assert_held(&ctx1->lock);
3661 	lockdep_assert_held(&ctx2->lock);
3662 
3663 	/* Pinning disables the swap optimization */
3664 	if (ctx1->pin_count || ctx2->pin_count)
3665 		return 0;
3666 
3667 	/* If ctx1 is the parent of ctx2 */
3668 	if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
3669 		return 1;
3670 
3671 	/* If ctx2 is the parent of ctx1 */
3672 	if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
3673 		return 1;
3674 
3675 	/*
3676 	 * If ctx1 and ctx2 have the same parent; we flatten the parent
3677 	 * hierarchy, see perf_event_init_context().
3678 	 */
3679 	if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
3680 			ctx1->parent_gen == ctx2->parent_gen)
3681 		return 1;
3682 
3683 	/* Unmatched */
3684 	return 0;
3685 }
3686 
__perf_event_sync_stat(struct perf_event * event,struct perf_event * next_event)3687 static void __perf_event_sync_stat(struct perf_event *event,
3688 				     struct perf_event *next_event)
3689 {
3690 	u64 value;
3691 
3692 	if (!event->attr.inherit_stat)
3693 		return;
3694 
3695 	/*
3696 	 * Update the event value, we cannot use perf_event_read()
3697 	 * because we're in the middle of a context switch and have IRQs
3698 	 * disabled, which upsets smp_call_function_single(), however
3699 	 * we know the event must be on the current CPU, therefore we
3700 	 * don't need to use it.
3701 	 */
3702 	perf_pmu_read(event);
3703 
3704 	perf_event_update_time(event);
3705 
3706 	/*
3707 	 * In order to keep per-task stats reliable we need to flip the event
3708 	 * values when we flip the contexts.
3709 	 */
3710 	value = local64_read(&next_event->count);
3711 	value = local64_xchg(&event->count, value);
3712 	local64_set(&next_event->count, value);
3713 
3714 	swap(event->total_time_enabled, next_event->total_time_enabled);
3715 	swap(event->total_time_running, next_event->total_time_running);
3716 
3717 	/*
3718 	 * Since we swizzled the values, update the user visible data too.
3719 	 */
3720 	perf_event_update_userpage(event);
3721 	perf_event_update_userpage(next_event);
3722 }
3723 
perf_event_sync_stat(struct perf_event_context * ctx,struct perf_event_context * next_ctx)3724 static void perf_event_sync_stat(struct perf_event_context *ctx,
3725 				   struct perf_event_context *next_ctx)
3726 {
3727 	struct perf_event *event, *next_event;
3728 
3729 	if (!ctx->nr_stat)
3730 		return;
3731 
3732 	update_context_time(ctx);
3733 
3734 	event = list_first_entry(&ctx->event_list,
3735 				   struct perf_event, event_entry);
3736 
3737 	next_event = list_first_entry(&next_ctx->event_list,
3738 					struct perf_event, event_entry);
3739 
3740 	while (&event->event_entry != &ctx->event_list &&
3741 	       &next_event->event_entry != &next_ctx->event_list) {
3742 
3743 		__perf_event_sync_stat(event, next_event);
3744 
3745 		event = list_next_entry(event, event_entry);
3746 		next_event = list_next_entry(next_event, event_entry);
3747 	}
3748 }
3749 
perf_ctx_sched_task_cb(struct perf_event_context * ctx,struct task_struct * task,bool sched_in)3750 static void perf_ctx_sched_task_cb(struct perf_event_context *ctx,
3751 				   struct task_struct *task, bool sched_in)
3752 {
3753 	struct perf_event_pmu_context *pmu_ctx;
3754 	struct perf_cpu_pmu_context *cpc;
3755 
3756 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
3757 		cpc = this_cpc(pmu_ctx->pmu);
3758 
3759 		if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
3760 			pmu_ctx->pmu->sched_task(pmu_ctx, task, sched_in);
3761 	}
3762 }
3763 
3764 static void
perf_event_context_sched_out(struct task_struct * task,struct task_struct * next)3765 perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
3766 {
3767 	struct perf_event_context *ctx = task->perf_event_ctxp;
3768 	struct perf_event_context *next_ctx;
3769 	struct perf_event_context *parent, *next_parent;
3770 	int do_switch = 1;
3771 
3772 	if (likely(!ctx))
3773 		return;
3774 
3775 	rcu_read_lock();
3776 	next_ctx = rcu_dereference(next->perf_event_ctxp);
3777 	if (!next_ctx)
3778 		goto unlock;
3779 
3780 	parent = rcu_dereference(ctx->parent_ctx);
3781 	next_parent = rcu_dereference(next_ctx->parent_ctx);
3782 
3783 	/* If neither context have a parent context; they cannot be clones. */
3784 	if (!parent && !next_parent)
3785 		goto unlock;
3786 
3787 	if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
3788 		/*
3789 		 * Looks like the two contexts are clones, so we might be
3790 		 * able to optimize the context switch.  We lock both
3791 		 * contexts and check that they are clones under the
3792 		 * lock (including re-checking that neither has been
3793 		 * uncloned in the meantime).  It doesn't matter which
3794 		 * order we take the locks because no other cpu could
3795 		 * be trying to lock both of these tasks.
3796 		 */
3797 		raw_spin_lock(&ctx->lock);
3798 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
3799 		if (context_equiv(ctx, next_ctx)) {
3800 
3801 			perf_ctx_disable(ctx, 0);
3802 
3803 			/* PMIs are disabled; ctx->nr_no_switch_fast is stable. */
3804 			if (local_read(&ctx->nr_no_switch_fast) ||
3805 			    local_read(&next_ctx->nr_no_switch_fast)) {
3806 				/*
3807 				 * Must not swap out ctx when there's pending
3808 				 * events that rely on the ctx->task relation.
3809 				 *
3810 				 * Likewise, when a context contains inherit +
3811 				 * SAMPLE_READ events they should be switched
3812 				 * out using the slow path so that they are
3813 				 * treated as if they were distinct contexts.
3814 				 */
3815 				raw_spin_unlock(&next_ctx->lock);
3816 				rcu_read_unlock();
3817 				goto inside_switch;
3818 			}
3819 
3820 			WRITE_ONCE(ctx->task, next);
3821 			WRITE_ONCE(next_ctx->task, task);
3822 
3823 			perf_ctx_sched_task_cb(ctx, task, false);
3824 
3825 			perf_ctx_enable(ctx, 0);
3826 
3827 			/*
3828 			 * RCU_INIT_POINTER here is safe because we've not
3829 			 * modified the ctx and the above modification of
3830 			 * ctx->task is immaterial since this value is
3831 			 * always verified under ctx->lock which we're now
3832 			 * holding.
3833 			 */
3834 			RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
3835 			RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
3836 
3837 			do_switch = 0;
3838 
3839 			perf_event_sync_stat(ctx, next_ctx);
3840 		}
3841 		raw_spin_unlock(&next_ctx->lock);
3842 		raw_spin_unlock(&ctx->lock);
3843 	}
3844 unlock:
3845 	rcu_read_unlock();
3846 
3847 	if (do_switch) {
3848 		raw_spin_lock(&ctx->lock);
3849 		perf_ctx_disable(ctx, 0);
3850 
3851 inside_switch:
3852 		perf_ctx_sched_task_cb(ctx, task, false);
3853 		task_ctx_sched_out(ctx, NULL, EVENT_ALL);
3854 
3855 		perf_ctx_enable(ctx, 0);
3856 		raw_spin_unlock(&ctx->lock);
3857 	}
3858 }
3859 
3860 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
3861 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
3862 
perf_sched_cb_dec(struct pmu * pmu)3863 void perf_sched_cb_dec(struct pmu *pmu)
3864 {
3865 	struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
3866 
3867 	this_cpu_dec(perf_sched_cb_usages);
3868 	barrier();
3869 
3870 	if (!--cpc->sched_cb_usage)
3871 		list_del(&cpc->sched_cb_entry);
3872 }
3873 
3874 
perf_sched_cb_inc(struct pmu * pmu)3875 void perf_sched_cb_inc(struct pmu *pmu)
3876 {
3877 	struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
3878 
3879 	if (!cpc->sched_cb_usage++)
3880 		list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3881 
3882 	barrier();
3883 	this_cpu_inc(perf_sched_cb_usages);
3884 }
3885 
3886 /*
3887  * This function provides the context switch callback to the lower code
3888  * layer. It is invoked ONLY when the context switch callback is enabled.
3889  *
3890  * This callback is relevant even to per-cpu events; for example multi event
3891  * PEBS requires this to provide PID/TID information. This requires we flush
3892  * all queued PEBS records before we context switch to a new task.
3893  */
__perf_pmu_sched_task(struct perf_cpu_pmu_context * cpc,struct task_struct * task,bool sched_in)3894 static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc,
3895 				  struct task_struct *task, bool sched_in)
3896 {
3897 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3898 	struct pmu *pmu;
3899 
3900 	pmu = cpc->epc.pmu;
3901 
3902 	/* software PMUs will not have sched_task */
3903 	if (WARN_ON_ONCE(!pmu->sched_task))
3904 		return;
3905 
3906 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3907 	perf_pmu_disable(pmu);
3908 
3909 	pmu->sched_task(cpc->task_epc, task, sched_in);
3910 
3911 	perf_pmu_enable(pmu);
3912 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3913 }
3914 
perf_pmu_sched_task(struct task_struct * prev,struct task_struct * next,bool sched_in)3915 static void perf_pmu_sched_task(struct task_struct *prev,
3916 				struct task_struct *next,
3917 				bool sched_in)
3918 {
3919 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
3920 	struct perf_cpu_pmu_context *cpc;
3921 
3922 	/* cpuctx->task_ctx will be handled in perf_event_context_sched_in/out */
3923 	if (prev == next || cpuctx->task_ctx)
3924 		return;
3925 
3926 	list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
3927 		__perf_pmu_sched_task(cpc, sched_in ? next : prev, sched_in);
3928 }
3929 
3930 static void perf_event_switch(struct task_struct *task,
3931 			      struct task_struct *next_prev, bool sched_in);
3932 
3933 /*
3934  * Called from scheduler to remove the events of the current task,
3935  * with interrupts disabled.
3936  *
3937  * We stop each event and update the event value in event->count.
3938  *
3939  * This does not protect us against NMI, but disable()
3940  * sets the disabled bit in the control field of event _before_
3941  * accessing the event control register. If a NMI hits, then it will
3942  * not restart the event.
3943  */
__perf_event_task_sched_out(struct task_struct * task,struct task_struct * next)3944 void __perf_event_task_sched_out(struct task_struct *task,
3945 				 struct task_struct *next)
3946 {
3947 	if (__this_cpu_read(perf_sched_cb_usages))
3948 		perf_pmu_sched_task(task, next, false);
3949 
3950 	if (atomic_read(&nr_switch_events))
3951 		perf_event_switch(task, next, false);
3952 
3953 	perf_event_context_sched_out(task, next);
3954 
3955 	/*
3956 	 * if cgroup events exist on this CPU, then we need
3957 	 * to check if we have to switch out PMU state.
3958 	 * cgroup event are system-wide mode only
3959 	 */
3960 	perf_cgroup_switch(next);
3961 }
3962 
perf_less_group_idx(const void * l,const void * r,void __always_unused * args)3963 static bool perf_less_group_idx(const void *l, const void *r, void __always_unused *args)
3964 {
3965 	const struct perf_event *le = *(const struct perf_event **)l;
3966 	const struct perf_event *re = *(const struct perf_event **)r;
3967 
3968 	return le->group_index < re->group_index;
3969 }
3970 
3971 DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap);
3972 
3973 static const struct min_heap_callbacks perf_min_heap = {
3974 	.less = perf_less_group_idx,
3975 	.swp = NULL,
3976 };
3977 
__heap_add(struct perf_event_min_heap * heap,struct perf_event * event)3978 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
3979 {
3980 	struct perf_event **itrs = heap->data;
3981 
3982 	if (event) {
3983 		itrs[heap->nr] = event;
3984 		heap->nr++;
3985 	}
3986 }
3987 
__link_epc(struct perf_event_pmu_context * pmu_ctx)3988 static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
3989 {
3990 	struct perf_cpu_pmu_context *cpc;
3991 
3992 	if (!pmu_ctx->ctx->task)
3993 		return;
3994 
3995 	cpc = this_cpc(pmu_ctx->pmu);
3996 	WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3997 	cpc->task_epc = pmu_ctx;
3998 }
3999 
visit_groups_merge(struct perf_event_context * ctx,struct perf_event_groups * groups,int cpu,struct pmu * pmu,int (* func)(struct perf_event *,void *),void * data)4000 static noinline int visit_groups_merge(struct perf_event_context *ctx,
4001 				struct perf_event_groups *groups, int cpu,
4002 				struct pmu *pmu,
4003 				int (*func)(struct perf_event *, void *),
4004 				void *data)
4005 {
4006 #ifdef CONFIG_CGROUP_PERF
4007 	struct cgroup_subsys_state *css = NULL;
4008 #endif
4009 	struct perf_cpu_context *cpuctx = NULL;
4010 	/* Space for per CPU and/or any CPU event iterators. */
4011 	struct perf_event *itrs[2];
4012 	struct perf_event_min_heap event_heap;
4013 	struct perf_event **evt;
4014 	int ret;
4015 
4016 	if (pmu->filter && pmu->filter(pmu, cpu))
4017 		return 0;
4018 
4019 	if (!ctx->task) {
4020 		cpuctx = this_cpu_ptr(&perf_cpu_context);
4021 		event_heap = (struct perf_event_min_heap){
4022 			.data = cpuctx->heap,
4023 			.nr = 0,
4024 			.size = cpuctx->heap_size,
4025 		};
4026 
4027 		lockdep_assert_held(&cpuctx->ctx.lock);
4028 
4029 #ifdef CONFIG_CGROUP_PERF
4030 		if (cpuctx->cgrp)
4031 			css = &cpuctx->cgrp->css;
4032 #endif
4033 	} else {
4034 		event_heap = (struct perf_event_min_heap){
4035 			.data = itrs,
4036 			.nr = 0,
4037 			.size = ARRAY_SIZE(itrs),
4038 		};
4039 		/* Events not within a CPU context may be on any CPU. */
4040 		__heap_add(&event_heap, perf_event_groups_first(groups, -1, pmu, NULL));
4041 	}
4042 	evt = event_heap.data;
4043 
4044 	__heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, NULL));
4045 
4046 #ifdef CONFIG_CGROUP_PERF
4047 	for (; css; css = css->parent)
4048 		__heap_add(&event_heap, perf_event_groups_first(groups, cpu, pmu, css->cgroup));
4049 #endif
4050 
4051 	if (event_heap.nr) {
4052 		__link_epc((*evt)->pmu_ctx);
4053 		perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
4054 	}
4055 
4056 	min_heapify_all_inline(&event_heap, &perf_min_heap, NULL);
4057 
4058 	while (event_heap.nr) {
4059 		ret = func(*evt, data);
4060 		if (ret)
4061 			return ret;
4062 
4063 		*evt = perf_event_groups_next(*evt, pmu);
4064 		if (*evt)
4065 			min_heap_sift_down_inline(&event_heap, 0, &perf_min_heap, NULL);
4066 		else
4067 			min_heap_pop_inline(&event_heap, &perf_min_heap, NULL);
4068 	}
4069 
4070 	return 0;
4071 }
4072 
4073 /*
4074  * Because the userpage is strictly per-event (there is no concept of context,
4075  * so there cannot be a context indirection), every userpage must be updated
4076  * when context time starts :-(
4077  *
4078  * IOW, we must not miss EVENT_TIME edges.
4079  */
event_update_userpage(struct perf_event * event)4080 static inline bool event_update_userpage(struct perf_event *event)
4081 {
4082 	if (likely(!refcount_read(&event->mmap_count)))
4083 		return false;
4084 
4085 	perf_event_update_time(event);
4086 	perf_event_update_userpage(event);
4087 
4088 	return true;
4089 }
4090 
group_update_userpage(struct perf_event * group_event)4091 static inline void group_update_userpage(struct perf_event *group_event)
4092 {
4093 	struct perf_event *event;
4094 
4095 	if (!event_update_userpage(group_event))
4096 		return;
4097 
4098 	for_each_sibling_event(event, group_event)
4099 		event_update_userpage(event);
4100 }
4101 
4102 struct merge_sched_data {
4103 	int can_add_hw;
4104 	enum event_type_t event_type;
4105 };
4106 
merge_sched_in(struct perf_event * event,void * data)4107 static int merge_sched_in(struct perf_event *event, void *data)
4108 {
4109 	struct perf_event_context *ctx = event->ctx;
4110 	struct merge_sched_data *msd = data;
4111 
4112 	if (event->state <= PERF_EVENT_STATE_OFF)
4113 		return 0;
4114 
4115 	if (!event_filter_match(event))
4116 		return 0;
4117 
4118 	/*
4119 	 * Don't schedule in any host events from PMU with
4120 	 * PERF_PMU_CAP_MEDIATED_VPMU, while a guest is running.
4121 	 */
4122 	if (is_guest_mediated_pmu_loaded() &&
4123 	    event->pmu_ctx->pmu->capabilities & PERF_PMU_CAP_MEDIATED_VPMU &&
4124 	    !(msd->event_type & EVENT_GUEST))
4125 		return 0;
4126 
4127 	if (group_can_go_on(event, msd->can_add_hw)) {
4128 		if (!group_sched_in(event, ctx))
4129 			list_add_tail(&event->active_list, get_event_list(event));
4130 	}
4131 
4132 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
4133 		msd->can_add_hw = 0;
4134 		if (event->attr.pinned) {
4135 			perf_cgroup_event_disable(event, ctx);
4136 			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
4137 
4138 			if (*perf_event_fasync(event))
4139 				event->pending_kill = POLL_ERR;
4140 
4141 			event->pending_wakeup = 1;
4142 			irq_work_queue(&event->pending_irq);
4143 		} else {
4144 			struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu);
4145 
4146 			event->pmu_ctx->rotate_necessary = 1;
4147 			perf_mux_hrtimer_restart(cpc);
4148 			group_update_userpage(event);
4149 		}
4150 	}
4151 
4152 	return 0;
4153 }
4154 
pmu_groups_sched_in(struct perf_event_context * ctx,struct perf_event_groups * groups,struct pmu * pmu,enum event_type_t event_type)4155 static void pmu_groups_sched_in(struct perf_event_context *ctx,
4156 				struct perf_event_groups *groups,
4157 				struct pmu *pmu,
4158 				enum event_type_t event_type)
4159 {
4160 	struct merge_sched_data msd = {
4161 		.can_add_hw = 1,
4162 		.event_type = event_type,
4163 	};
4164 	visit_groups_merge(ctx, groups, smp_processor_id(), pmu,
4165 			   merge_sched_in, &msd);
4166 }
4167 
__pmu_ctx_sched_in(struct perf_event_pmu_context * pmu_ctx,enum event_type_t event_type)4168 static void __pmu_ctx_sched_in(struct perf_event_pmu_context *pmu_ctx,
4169 			       enum event_type_t event_type)
4170 {
4171 	struct perf_event_context *ctx = pmu_ctx->ctx;
4172 
4173 	if (event_type & EVENT_PINNED)
4174 		pmu_groups_sched_in(ctx, &ctx->pinned_groups, pmu_ctx->pmu, event_type);
4175 	if (event_type & EVENT_FLEXIBLE)
4176 		pmu_groups_sched_in(ctx, &ctx->flexible_groups, pmu_ctx->pmu, event_type);
4177 }
4178 
4179 static void
ctx_sched_in(struct perf_event_context * ctx,struct pmu * pmu,enum event_type_t event_type)4180 ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type)
4181 {
4182 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4183 	enum event_type_t active_type = event_type & ~EVENT_FLAGS;
4184 	struct perf_event_pmu_context *pmu_ctx;
4185 	int is_active = ctx->is_active;
4186 
4187 	lockdep_assert_held(&ctx->lock);
4188 
4189 	if (likely(!ctx->nr_events))
4190 		return;
4191 
4192 	if (!(is_active & EVENT_TIME)) {
4193 		/* EVENT_TIME should be active while the guest runs */
4194 		WARN_ON_ONCE(event_type & EVENT_GUEST);
4195 		/* start ctx time */
4196 		__update_context_time(ctx, false);
4197 		perf_cgroup_set_timestamp(cpuctx, false);
4198 		/*
4199 		 * CPU-release for the below ->is_active store,
4200 		 * see __load_acquire() in perf_event_time_now()
4201 		 */
4202 		barrier();
4203 	}
4204 
4205 	ctx->is_active |= active_type | EVENT_TIME;
4206 	if (ctx->task) {
4207 		if (!(is_active & EVENT_ALL))
4208 			cpuctx->task_ctx = ctx;
4209 		else
4210 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
4211 	}
4212 
4213 	if (event_type & EVENT_GUEST) {
4214 		/*
4215 		 * Schedule in the required exclude_guest events of PMU
4216 		 * with PERF_PMU_CAP_MEDIATED_VPMU.
4217 		 */
4218 		is_active = event_type & EVENT_ALL;
4219 
4220 		/*
4221 		 * Update ctx time to set the new start time for
4222 		 * the exclude_guest events.
4223 		 */
4224 		update_context_time(ctx);
4225 		update_cgrp_time_from_cpuctx(cpuctx, false);
4226 		barrier();
4227 	} else {
4228 		is_active ^= ctx->is_active; /* changed bits */
4229 	}
4230 
4231 	/*
4232 	 * First go through the list and put on any pinned groups
4233 	 * in order to give them the best chance of going on.
4234 	 */
4235 	if (is_active & EVENT_PINNED) {
4236 		for_each_epc(pmu_ctx, ctx, pmu, event_type)
4237 			__pmu_ctx_sched_in(pmu_ctx, EVENT_PINNED | (event_type & EVENT_GUEST));
4238 	}
4239 
4240 	/* Then walk through the lower prio flexible groups */
4241 	if (is_active & EVENT_FLEXIBLE) {
4242 		for_each_epc(pmu_ctx, ctx, pmu, event_type)
4243 			__pmu_ctx_sched_in(pmu_ctx, EVENT_FLEXIBLE | (event_type & EVENT_GUEST));
4244 	}
4245 }
4246 
perf_event_context_sched_in(struct task_struct * task)4247 static void perf_event_context_sched_in(struct task_struct *task)
4248 {
4249 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4250 	struct perf_event_context *ctx;
4251 
4252 	rcu_read_lock();
4253 	ctx = rcu_dereference(task->perf_event_ctxp);
4254 	if (!ctx)
4255 		goto rcu_unlock;
4256 
4257 	if (cpuctx->task_ctx == ctx) {
4258 		perf_ctx_lock(cpuctx, ctx);
4259 		perf_ctx_disable(ctx, 0);
4260 
4261 		perf_ctx_sched_task_cb(ctx, task, true);
4262 
4263 		perf_ctx_enable(ctx, 0);
4264 		perf_ctx_unlock(cpuctx, ctx);
4265 		goto rcu_unlock;
4266 	}
4267 
4268 	perf_ctx_lock(cpuctx, ctx);
4269 	/*
4270 	 * We must check ctx->nr_events while holding ctx->lock, such
4271 	 * that we serialize against perf_install_in_context().
4272 	 */
4273 	if (!ctx->nr_events)
4274 		goto unlock;
4275 
4276 	perf_ctx_disable(ctx, 0);
4277 	/*
4278 	 * We want to keep the following priority order:
4279 	 * cpu pinned (that don't need to move), task pinned,
4280 	 * cpu flexible, task flexible.
4281 	 *
4282 	 * However, if task's ctx is not carrying any pinned
4283 	 * events, no need to flip the cpuctx's events around.
4284 	 */
4285 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
4286 		perf_ctx_disable(&cpuctx->ctx, 0);
4287 		ctx_sched_out(&cpuctx->ctx, NULL, EVENT_FLEXIBLE);
4288 	}
4289 
4290 	perf_event_sched_in(cpuctx, ctx, NULL, 0);
4291 
4292 	perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);
4293 
4294 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
4295 		perf_ctx_enable(&cpuctx->ctx, 0);
4296 
4297 	perf_ctx_enable(ctx, 0);
4298 
4299 unlock:
4300 	perf_ctx_unlock(cpuctx, ctx);
4301 rcu_unlock:
4302 	rcu_read_unlock();
4303 }
4304 
4305 /*
4306  * Called from scheduler to add the events of the current task
4307  * with interrupts disabled.
4308  *
4309  * We restore the event value and then enable it.
4310  *
4311  * This does not protect us against NMI, but enable()
4312  * sets the enabled bit in the control field of event _before_
4313  * accessing the event control register. If a NMI hits, then it will
4314  * keep the event running.
4315  */
__perf_event_task_sched_in(struct task_struct * prev,struct task_struct * task)4316 void __perf_event_task_sched_in(struct task_struct *prev,
4317 				struct task_struct *task)
4318 {
4319 	perf_event_context_sched_in(task);
4320 
4321 	if (atomic_read(&nr_switch_events))
4322 		perf_event_switch(task, prev, true);
4323 
4324 	if (__this_cpu_read(perf_sched_cb_usages))
4325 		perf_pmu_sched_task(prev, task, true);
4326 }
4327 
perf_calculate_period(struct perf_event * event,u64 nsec,u64 count)4328 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
4329 {
4330 	u64 frequency = event->attr.sample_freq;
4331 	u64 sec = NSEC_PER_SEC;
4332 	u64 divisor, dividend;
4333 
4334 	int count_fls, nsec_fls, frequency_fls, sec_fls;
4335 
4336 	count_fls = fls64(count);
4337 	nsec_fls = fls64(nsec);
4338 	frequency_fls = fls64(frequency);
4339 	sec_fls = 30;
4340 
4341 	/*
4342 	 * We got @count in @nsec, with a target of sample_freq HZ
4343 	 * the target period becomes:
4344 	 *
4345 	 *             @count * 10^9
4346 	 * period = -------------------
4347 	 *          @nsec * sample_freq
4348 	 *
4349 	 */
4350 
4351 	/*
4352 	 * Reduce accuracy by one bit such that @a and @b converge
4353 	 * to a similar magnitude.
4354 	 */
4355 #define REDUCE_FLS(a, b)		\
4356 do {					\
4357 	if (a##_fls > b##_fls) {	\
4358 		a >>= 1;		\
4359 		a##_fls--;		\
4360 	} else {			\
4361 		b >>= 1;		\
4362 		b##_fls--;		\
4363 	}				\
4364 } while (0)
4365 
4366 	/*
4367 	 * Reduce accuracy until either term fits in a u64, then proceed with
4368 	 * the other, so that finally we can do a u64/u64 division.
4369 	 */
4370 	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
4371 		REDUCE_FLS(nsec, frequency);
4372 		REDUCE_FLS(sec, count);
4373 	}
4374 
4375 	if (count_fls + sec_fls > 64) {
4376 		divisor = nsec * frequency;
4377 
4378 		while (count_fls + sec_fls > 64) {
4379 			REDUCE_FLS(count, sec);
4380 			divisor >>= 1;
4381 		}
4382 
4383 		dividend = count * sec;
4384 	} else {
4385 		dividend = count * sec;
4386 
4387 		while (nsec_fls + frequency_fls > 64) {
4388 			REDUCE_FLS(nsec, frequency);
4389 			dividend >>= 1;
4390 		}
4391 
4392 		divisor = nsec * frequency;
4393 	}
4394 
4395 	if (!divisor)
4396 		return dividend;
4397 
4398 	return div64_u64(dividend, divisor);
4399 }
4400 
4401 static DEFINE_PER_CPU(int, perf_throttled_count);
4402 static DEFINE_PER_CPU(u64, perf_throttled_seq);
4403 
perf_adjust_period(struct perf_event * event,u64 nsec,u64 count,bool disable)4404 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
4405 {
4406 	struct hw_perf_event *hwc = &event->hw;
4407 	s64 period, sample_period;
4408 	s64 delta;
4409 
4410 	period = perf_calculate_period(event, nsec, count);
4411 
4412 	delta = (s64)(period - hwc->sample_period);
4413 	if (delta >= 0)
4414 		delta += 7;
4415 	else
4416 		delta -= 7;
4417 	delta /= 8; /* low pass filter */
4418 
4419 	sample_period = hwc->sample_period + delta;
4420 
4421 	if (!sample_period)
4422 		sample_period = 1;
4423 
4424 	hwc->sample_period = sample_period;
4425 
4426 	if (local64_read(&hwc->period_left) > 8*sample_period) {
4427 		if (disable)
4428 			event->pmu->stop(event, PERF_EF_UPDATE);
4429 
4430 		local64_set(&hwc->period_left, 0);
4431 
4432 		if (disable)
4433 			event->pmu->start(event, PERF_EF_RELOAD);
4434 	}
4435 }
4436 
perf_adjust_freq_unthr_events(struct list_head * event_list)4437 static void perf_adjust_freq_unthr_events(struct list_head *event_list)
4438 {
4439 	struct perf_event *event;
4440 	struct hw_perf_event *hwc;
4441 	u64 now, period = TICK_NSEC;
4442 	s64 delta;
4443 
4444 	list_for_each_entry(event, event_list, active_list) {
4445 		if (event->state != PERF_EVENT_STATE_ACTIVE)
4446 			continue;
4447 
4448 		// XXX use visit thingy to avoid the -1,cpu match
4449 		if (!event_filter_match(event))
4450 			continue;
4451 
4452 		hwc = &event->hw;
4453 
4454 		if (hwc->interrupts == MAX_INTERRUPTS)
4455 			perf_event_unthrottle_group(event, is_event_in_freq_mode(event));
4456 
4457 		if (!is_event_in_freq_mode(event))
4458 			continue;
4459 
4460 		/*
4461 		 * stop the event and update event->count
4462 		 */
4463 		event->pmu->stop(event, PERF_EF_UPDATE);
4464 
4465 		now = local64_read(&event->count);
4466 		delta = now - hwc->freq_count_stamp;
4467 		hwc->freq_count_stamp = now;
4468 
4469 		/*
4470 		 * restart the event
4471 		 * reload only if value has changed
4472 		 * we have stopped the event so tell that
4473 		 * to perf_adjust_period() to avoid stopping it
4474 		 * twice.
4475 		 */
4476 		if (delta > 0)
4477 			perf_adjust_period(event, period, delta, false);
4478 
4479 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
4480 	}
4481 }
4482 
4483 /*
4484  * combine freq adjustment with unthrottling to avoid two passes over the
4485  * events. At the same time, make sure, having freq events does not change
4486  * the rate of unthrottling as that would introduce bias.
4487  */
4488 static void
perf_adjust_freq_unthr_context(struct perf_event_context * ctx,bool unthrottle)4489 perf_adjust_freq_unthr_context(struct perf_event_context *ctx, bool unthrottle)
4490 {
4491 	struct perf_event_pmu_context *pmu_ctx;
4492 
4493 	/*
4494 	 * only need to iterate over all events iff:
4495 	 * - context have events in frequency mode (needs freq adjust)
4496 	 * - there are events to unthrottle on this cpu
4497 	 */
4498 	if (!(ctx->nr_freq || unthrottle))
4499 		return;
4500 
4501 	raw_spin_lock(&ctx->lock);
4502 
4503 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
4504 		if (!(pmu_ctx->nr_freq || unthrottle))
4505 			continue;
4506 		if (!perf_pmu_ctx_is_active(pmu_ctx))
4507 			continue;
4508 		if (pmu_ctx->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)
4509 			continue;
4510 
4511 		perf_pmu_disable(pmu_ctx->pmu);
4512 		perf_adjust_freq_unthr_events(&pmu_ctx->pinned_active);
4513 		perf_adjust_freq_unthr_events(&pmu_ctx->flexible_active);
4514 		perf_pmu_enable(pmu_ctx->pmu);
4515 	}
4516 
4517 	raw_spin_unlock(&ctx->lock);
4518 }
4519 
4520 /*
4521  * Move @event to the tail of the @ctx's elegible events.
4522  */
rotate_ctx(struct perf_event_context * ctx,struct perf_event * event)4523 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
4524 {
4525 	/*
4526 	 * Rotate the first entry last of non-pinned groups. Rotation might be
4527 	 * disabled by the inheritance code.
4528 	 */
4529 	if (ctx->rotate_disable)
4530 		return;
4531 
4532 	perf_event_groups_delete(&ctx->flexible_groups, event);
4533 	perf_event_groups_insert(&ctx->flexible_groups, event);
4534 }
4535 
4536 /* pick an event from the flexible_groups to rotate */
4537 static inline struct perf_event *
ctx_event_to_rotate(struct perf_event_pmu_context * pmu_ctx)4538 ctx_event_to_rotate(struct perf_event_pmu_context *pmu_ctx)
4539 {
4540 	struct perf_event *event;
4541 	struct rb_node *node;
4542 	struct rb_root *tree;
4543 	struct __group_key key = {
4544 		.pmu = pmu_ctx->pmu,
4545 	};
4546 
4547 	/* pick the first active flexible event */
4548 	event = list_first_entry_or_null(&pmu_ctx->flexible_active,
4549 					 struct perf_event, active_list);
4550 	if (event)
4551 		goto out;
4552 
4553 	/* if no active flexible event, pick the first event */
4554 	tree = &pmu_ctx->ctx->flexible_groups.tree;
4555 
4556 	if (!pmu_ctx->ctx->task) {
4557 		key.cpu = smp_processor_id();
4558 
4559 		node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4560 		if (node)
4561 			event = __node_2_pe(node);
4562 		goto out;
4563 	}
4564 
4565 	key.cpu = -1;
4566 	node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4567 	if (node) {
4568 		event = __node_2_pe(node);
4569 		goto out;
4570 	}
4571 
4572 	key.cpu = smp_processor_id();
4573 	node = rb_find_first(&key, tree, __group_cmp_ignore_cgroup);
4574 	if (node)
4575 		event = __node_2_pe(node);
4576 
4577 out:
4578 	/*
4579 	 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
4580 	 * finds there are unschedulable events, it will set it again.
4581 	 */
4582 	pmu_ctx->rotate_necessary = 0;
4583 
4584 	return event;
4585 }
4586 
perf_rotate_context(struct perf_cpu_pmu_context * cpc)4587 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
4588 {
4589 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4590 	struct perf_event_pmu_context *cpu_epc, *task_epc = NULL;
4591 	struct perf_event *cpu_event = NULL, *task_event = NULL;
4592 	int cpu_rotate, task_rotate;
4593 	struct pmu *pmu;
4594 
4595 	/*
4596 	 * Since we run this from IRQ context, nobody can install new
4597 	 * events, thus the event count values are stable.
4598 	 */
4599 
4600 	cpu_epc = &cpc->epc;
4601 	pmu = cpu_epc->pmu;
4602 	task_epc = cpc->task_epc;
4603 
4604 	cpu_rotate = cpu_epc->rotate_necessary;
4605 	task_rotate = task_epc ? task_epc->rotate_necessary : 0;
4606 
4607 	if (!(cpu_rotate || task_rotate))
4608 		return false;
4609 
4610 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
4611 	perf_pmu_disable(pmu);
4612 
4613 	if (task_rotate)
4614 		task_event = ctx_event_to_rotate(task_epc);
4615 	if (cpu_rotate)
4616 		cpu_event = ctx_event_to_rotate(cpu_epc);
4617 
4618 	/*
4619 	 * As per the order given at ctx_resched() first 'pop' task flexible
4620 	 * and then, if needed CPU flexible.
4621 	 */
4622 	if (task_event || (task_epc && cpu_event)) {
4623 		update_context_time(task_epc->ctx);
4624 		__pmu_ctx_sched_out(task_epc, EVENT_FLEXIBLE);
4625 	}
4626 
4627 	if (cpu_event) {
4628 		update_context_time(&cpuctx->ctx);
4629 		__pmu_ctx_sched_out(cpu_epc, EVENT_FLEXIBLE);
4630 		rotate_ctx(&cpuctx->ctx, cpu_event);
4631 		__pmu_ctx_sched_in(cpu_epc, EVENT_FLEXIBLE);
4632 	}
4633 
4634 	if (task_event)
4635 		rotate_ctx(task_epc->ctx, task_event);
4636 
4637 	if (task_event || (task_epc && cpu_event))
4638 		__pmu_ctx_sched_in(task_epc, EVENT_FLEXIBLE);
4639 
4640 	perf_pmu_enable(pmu);
4641 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
4642 
4643 	return true;
4644 }
4645 
perf_event_task_tick(void)4646 void perf_event_task_tick(void)
4647 {
4648 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4649 	struct perf_event_context *ctx;
4650 	int throttled;
4651 
4652 	lockdep_assert_irqs_disabled();
4653 
4654 	__this_cpu_inc(perf_throttled_seq);
4655 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
4656 	tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
4657 
4658 	perf_adjust_freq_unthr_context(&cpuctx->ctx, !!throttled);
4659 
4660 	rcu_read_lock();
4661 	ctx = rcu_dereference(current->perf_event_ctxp);
4662 	if (ctx)
4663 		perf_adjust_freq_unthr_context(ctx, !!throttled);
4664 	rcu_read_unlock();
4665 }
4666 
event_enable_on_exec(struct perf_event * event,struct perf_event_context * ctx)4667 static int event_enable_on_exec(struct perf_event *event,
4668 				struct perf_event_context *ctx)
4669 {
4670 	if (!event->attr.enable_on_exec)
4671 		return 0;
4672 
4673 	event->attr.enable_on_exec = 0;
4674 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
4675 		return 0;
4676 
4677 	perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
4678 
4679 	return 1;
4680 }
4681 
4682 /*
4683  * Enable all of a task's events that have been marked enable-on-exec.
4684  * This expects task == current.
4685  */
perf_event_enable_on_exec(struct perf_event_context * ctx)4686 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
4687 {
4688 	struct perf_event_context *clone_ctx = NULL;
4689 	enum event_type_t event_type = 0;
4690 	struct perf_cpu_context *cpuctx;
4691 	struct perf_event *event;
4692 	unsigned long flags;
4693 	int enabled = 0;
4694 
4695 	local_irq_save(flags);
4696 	if (WARN_ON_ONCE(current->perf_event_ctxp != ctx))
4697 		goto out;
4698 
4699 	if (!ctx->nr_events)
4700 		goto out;
4701 
4702 	cpuctx = this_cpu_ptr(&perf_cpu_context);
4703 	perf_ctx_lock(cpuctx, ctx);
4704 	ctx_time_freeze(cpuctx, ctx);
4705 
4706 	list_for_each_entry(event, &ctx->event_list, event_entry) {
4707 		enabled |= event_enable_on_exec(event, ctx);
4708 		event_type |= get_event_type(event);
4709 	}
4710 
4711 	/*
4712 	 * Unclone and reschedule this context if we enabled any event.
4713 	 */
4714 	if (enabled) {
4715 		clone_ctx = unclone_ctx(ctx);
4716 		ctx_resched(cpuctx, ctx, NULL, event_type);
4717 	}
4718 	perf_ctx_unlock(cpuctx, ctx);
4719 
4720 out:
4721 	local_irq_restore(flags);
4722 
4723 	if (clone_ctx)
4724 		put_ctx(clone_ctx);
4725 }
4726 
4727 static void perf_remove_from_owner(struct perf_event *event);
4728 static void perf_event_exit_event(struct perf_event *event,
4729 				  struct perf_event_context *ctx,
4730 				  struct task_struct *task,
4731 				  bool revoke);
4732 
4733 /*
4734  * Removes all events from the current task that have been marked
4735  * remove-on-exec, and feeds their values back to parent events.
4736  */
perf_event_remove_on_exec(struct perf_event_context * ctx)4737 static void perf_event_remove_on_exec(struct perf_event_context *ctx)
4738 {
4739 	struct perf_event_context *clone_ctx = NULL;
4740 	struct perf_event *event, *next;
4741 	unsigned long flags;
4742 	bool modified = false;
4743 
4744 	mutex_lock(&ctx->mutex);
4745 
4746 	if (WARN_ON_ONCE(ctx->task != current))
4747 		goto unlock;
4748 
4749 	list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) {
4750 		if (!event->attr.remove_on_exec)
4751 			continue;
4752 
4753 		if (!is_kernel_event(event))
4754 			perf_remove_from_owner(event);
4755 
4756 		modified = true;
4757 
4758 		perf_event_exit_event(event, ctx, ctx->task, false);
4759 	}
4760 
4761 	raw_spin_lock_irqsave(&ctx->lock, flags);
4762 	if (modified)
4763 		clone_ctx = unclone_ctx(ctx);
4764 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
4765 
4766 unlock:
4767 	mutex_unlock(&ctx->mutex);
4768 
4769 	if (clone_ctx)
4770 		put_ctx(clone_ctx);
4771 }
4772 
4773 struct perf_read_data {
4774 	struct perf_event *event;
4775 	bool group;
4776 	int ret;
4777 };
4778 
4779 static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu);
4780 
__perf_event_read_cpu(struct perf_event * event,int event_cpu)4781 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
4782 {
4783 	int local_cpu = smp_processor_id();
4784 	u16 local_pkg, event_pkg;
4785 
4786 	if ((unsigned)event_cpu >= nr_cpu_ids)
4787 		return event_cpu;
4788 
4789 	if (event->group_caps & PERF_EV_CAP_READ_SCOPE) {
4790 		const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu);
4791 
4792 		if (cpumask && cpumask_test_cpu(local_cpu, cpumask))
4793 			return local_cpu;
4794 	}
4795 
4796 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
4797 		event_pkg = topology_physical_package_id(event_cpu);
4798 		local_pkg = topology_physical_package_id(local_cpu);
4799 
4800 		if (event_pkg == local_pkg)
4801 			return local_cpu;
4802 	}
4803 
4804 	return event_cpu;
4805 }
4806 
4807 /*
4808  * Cross CPU call to read the hardware event
4809  */
__perf_event_read(void * info)4810 static void __perf_event_read(void *info)
4811 {
4812 	struct perf_read_data *data = info;
4813 	struct perf_event *sub, *event = data->event;
4814 	struct perf_event_context *ctx = event->ctx;
4815 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4816 	struct pmu *pmu;
4817 
4818 	/*
4819 	 * If this is a task context, we need to check whether it is
4820 	 * the current task context of this cpu.  If not it has been
4821 	 * scheduled out before the smp call arrived.  In that case
4822 	 * event->count would have been updated to a recent sample
4823 	 * when the event was scheduled out.
4824 	 */
4825 	if (ctx->task && cpuctx->task_ctx != ctx)
4826 		return;
4827 
4828 	guard(raw_spinlock)(&ctx->lock);
4829 	ctx_time_update_event(ctx, event);
4830 
4831 	perf_event_update_time(event);
4832 	if (data->group)
4833 		perf_event_update_sibling_time(event);
4834 
4835 	if (event->state != PERF_EVENT_STATE_ACTIVE)
4836 		return;
4837 
4838 	if (!data->group) {
4839 		perf_pmu_read(event);
4840 		data->ret = 0;
4841 		return;
4842 	}
4843 
4844 	pmu = event->pmu_ctx->pmu;
4845 	pmu->start_txn(pmu, PERF_PMU_TXN_READ);
4846 
4847 	perf_pmu_read(event);
4848 	for_each_sibling_event(sub, event)
4849 		perf_pmu_read(sub);
4850 
4851 	data->ret = pmu->commit_txn(pmu);
4852 }
4853 
perf_event_count(struct perf_event * event,bool self)4854 static inline u64 perf_event_count(struct perf_event *event, bool self)
4855 {
4856 	if (self)
4857 		return local64_read(&event->count);
4858 
4859 	return local64_read(&event->count) + atomic64_read(&event->child_count);
4860 }
4861 
calc_timer_values(struct perf_event * event,u64 * now,u64 * enabled,u64 * running)4862 static void calc_timer_values(struct perf_event *event,
4863 				u64 *now,
4864 				u64 *enabled,
4865 				u64 *running)
4866 {
4867 	u64 ctx_time;
4868 
4869 	*now = perf_clock();
4870 	ctx_time = perf_event_time_now(event, *now);
4871 	__perf_update_times(event, ctx_time, enabled, running);
4872 }
4873 
4874 /*
4875  * NMI-safe method to read a local event, that is an event that
4876  * is:
4877  *   - either for the current task, or for this CPU
4878  *   - does not have inherit set, for inherited task events
4879  *     will not be local and we cannot read them atomically
4880  *   - must not have a pmu::count method
4881  */
perf_event_read_local(struct perf_event * event,u64 * value,u64 * enabled,u64 * running)4882 int perf_event_read_local(struct perf_event *event, u64 *value,
4883 			  u64 *enabled, u64 *running)
4884 {
4885 	unsigned long flags;
4886 	int event_oncpu;
4887 	int event_cpu;
4888 	int ret = 0;
4889 
4890 	/*
4891 	 * Disabling interrupts avoids all counter scheduling (context
4892 	 * switches, timer based rotation and IPIs).
4893 	 */
4894 	local_irq_save(flags);
4895 
4896 	/*
4897 	 * It must not be an event with inherit set, we cannot read
4898 	 * all child counters from atomic context.
4899 	 */
4900 	if (event->attr.inherit) {
4901 		ret = -EOPNOTSUPP;
4902 		goto out;
4903 	}
4904 
4905 	/* If this is a per-task event, it must be for current */
4906 	if ((event->attach_state & PERF_ATTACH_TASK) &&
4907 	    event->hw.target != current) {
4908 		ret = -EINVAL;
4909 		goto out;
4910 	}
4911 
4912 	/*
4913 	 * Get the event CPU numbers, and adjust them to local if the event is
4914 	 * a per-package event that can be read locally
4915 	 */
4916 	event_oncpu = __perf_event_read_cpu(event, event->oncpu);
4917 	event_cpu = __perf_event_read_cpu(event, event->cpu);
4918 
4919 	/* If this is a per-CPU event, it must be for this CPU */
4920 	if (!(event->attach_state & PERF_ATTACH_TASK) &&
4921 	    event_cpu != smp_processor_id()) {
4922 		ret = -EINVAL;
4923 		goto out;
4924 	}
4925 
4926 	/* If this is a pinned event it must be running on this CPU */
4927 	if (event->attr.pinned && event_oncpu != smp_processor_id()) {
4928 		ret = -EBUSY;
4929 		goto out;
4930 	}
4931 
4932 	/*
4933 	 * If the event is currently on this CPU, its either a per-task event,
4934 	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
4935 	 * oncpu == -1).
4936 	 */
4937 	if (event_oncpu == smp_processor_id())
4938 		event->pmu->read(event);
4939 
4940 	*value = local64_read(&event->count);
4941 	if (enabled || running) {
4942 		u64 __enabled, __running, __now;
4943 
4944 		calc_timer_values(event, &__now, &__enabled, &__running);
4945 		if (enabled)
4946 			*enabled = __enabled;
4947 		if (running)
4948 			*running = __running;
4949 	}
4950 out:
4951 	local_irq_restore(flags);
4952 
4953 	return ret;
4954 }
4955 
perf_event_read(struct perf_event * event,bool group)4956 static int perf_event_read(struct perf_event *event, bool group)
4957 {
4958 	enum perf_event_state state = READ_ONCE(event->state);
4959 	int event_cpu, ret = 0;
4960 
4961 	/*
4962 	 * If event is enabled and currently active on a CPU, update the
4963 	 * value in the event structure:
4964 	 */
4965 again:
4966 	if (state == PERF_EVENT_STATE_ACTIVE) {
4967 		struct perf_read_data data;
4968 
4969 		/*
4970 		 * Orders the ->state and ->oncpu loads such that if we see
4971 		 * ACTIVE we must also see the right ->oncpu.
4972 		 *
4973 		 * Matches the smp_wmb() from event_sched_in().
4974 		 */
4975 		smp_rmb();
4976 
4977 		event_cpu = READ_ONCE(event->oncpu);
4978 		if ((unsigned)event_cpu >= nr_cpu_ids)
4979 			return 0;
4980 
4981 		data = (struct perf_read_data){
4982 			.event = event,
4983 			.group = group,
4984 			.ret = 0,
4985 		};
4986 
4987 		preempt_disable();
4988 		event_cpu = __perf_event_read_cpu(event, event_cpu);
4989 
4990 		/*
4991 		 * Purposely ignore the smp_call_function_single() return
4992 		 * value.
4993 		 *
4994 		 * If event_cpu isn't a valid CPU it means the event got
4995 		 * scheduled out and that will have updated the event count.
4996 		 *
4997 		 * Therefore, either way, we'll have an up-to-date event count
4998 		 * after this.
4999 		 */
5000 		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
5001 		preempt_enable();
5002 		ret = data.ret;
5003 
5004 	} else if (state == PERF_EVENT_STATE_INACTIVE) {
5005 		struct perf_event_context *ctx = event->ctx;
5006 		unsigned long flags;
5007 
5008 		raw_spin_lock_irqsave(&ctx->lock, flags);
5009 		state = event->state;
5010 		if (state != PERF_EVENT_STATE_INACTIVE) {
5011 			raw_spin_unlock_irqrestore(&ctx->lock, flags);
5012 			goto again;
5013 		}
5014 
5015 		/*
5016 		 * May read while context is not active (e.g., thread is
5017 		 * blocked), in that case we cannot update context time
5018 		 */
5019 		ctx_time_update_event(ctx, event);
5020 
5021 		perf_event_update_time(event);
5022 		if (group)
5023 			perf_event_update_sibling_time(event);
5024 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
5025 	}
5026 
5027 	return ret;
5028 }
5029 
5030 /*
5031  * Initialize the perf_event context in a task_struct:
5032  */
__perf_event_init_context(struct perf_event_context * ctx)5033 static void __perf_event_init_context(struct perf_event_context *ctx)
5034 {
5035 	raw_spin_lock_init(&ctx->lock);
5036 	mutex_init(&ctx->mutex);
5037 	INIT_LIST_HEAD(&ctx->pmu_ctx_list);
5038 	perf_event_groups_init(&ctx->pinned_groups);
5039 	perf_event_groups_init(&ctx->flexible_groups);
5040 	INIT_LIST_HEAD(&ctx->event_list);
5041 	refcount_set(&ctx->refcount, 1);
5042 }
5043 
5044 static void
__perf_init_event_pmu_context(struct perf_event_pmu_context * epc,struct pmu * pmu)5045 __perf_init_event_pmu_context(struct perf_event_pmu_context *epc, struct pmu *pmu)
5046 {
5047 	epc->pmu = pmu;
5048 	INIT_LIST_HEAD(&epc->pmu_ctx_entry);
5049 	INIT_LIST_HEAD(&epc->pinned_active);
5050 	INIT_LIST_HEAD(&epc->flexible_active);
5051 	atomic_set(&epc->refcount, 1);
5052 }
5053 
5054 static struct perf_event_context *
alloc_perf_context(struct task_struct * task)5055 alloc_perf_context(struct task_struct *task)
5056 {
5057 	struct perf_event_context *ctx;
5058 
5059 	ctx = kzalloc_obj(struct perf_event_context);
5060 	if (!ctx)
5061 		return NULL;
5062 
5063 	__perf_event_init_context(ctx);
5064 	if (task)
5065 		ctx->task = get_task_struct(task);
5066 
5067 	return ctx;
5068 }
5069 
5070 static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)5071 find_lively_task_by_vpid(pid_t vpid)
5072 {
5073 	struct task_struct *task;
5074 
5075 	rcu_read_lock();
5076 	if (!vpid)
5077 		task = current;
5078 	else
5079 		task = find_task_by_vpid(vpid);
5080 	if (task)
5081 		get_task_struct(task);
5082 	rcu_read_unlock();
5083 
5084 	if (!task)
5085 		return ERR_PTR(-ESRCH);
5086 
5087 	return task;
5088 }
5089 
5090 /*
5091  * Returns a matching context with refcount and pincount.
5092  */
5093 static struct perf_event_context *
find_get_context(struct task_struct * task,struct perf_event * event)5094 find_get_context(struct task_struct *task, struct perf_event *event)
5095 {
5096 	struct perf_event_context *ctx, *clone_ctx = NULL;
5097 	struct perf_cpu_context *cpuctx;
5098 	unsigned long flags;
5099 	int err;
5100 
5101 	if (!task) {
5102 		/* Must be root to operate on a CPU event: */
5103 		err = perf_allow_cpu();
5104 		if (err)
5105 			return ERR_PTR(err);
5106 
5107 		cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
5108 		ctx = &cpuctx->ctx;
5109 		get_ctx(ctx);
5110 		raw_spin_lock_irqsave(&ctx->lock, flags);
5111 		++ctx->pin_count;
5112 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
5113 
5114 		return ctx;
5115 	}
5116 
5117 	err = -EINVAL;
5118 retry:
5119 	ctx = perf_lock_task_context(task, &flags);
5120 	if (ctx) {
5121 		clone_ctx = unclone_ctx(ctx);
5122 		++ctx->pin_count;
5123 
5124 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
5125 
5126 		if (clone_ctx)
5127 			put_ctx(clone_ctx);
5128 	} else {
5129 		ctx = alloc_perf_context(task);
5130 		err = -ENOMEM;
5131 		if (!ctx)
5132 			goto errout;
5133 
5134 		err = 0;
5135 		mutex_lock(&task->perf_event_mutex);
5136 		/*
5137 		 * If it has already passed perf_event_exit_task().
5138 		 * we must see PF_EXITING, it takes this mutex too.
5139 		 */
5140 		if (task->flags & PF_EXITING)
5141 			err = -ESRCH;
5142 		else if (task->perf_event_ctxp)
5143 			err = -EAGAIN;
5144 		else {
5145 			get_ctx(ctx);
5146 			++ctx->pin_count;
5147 			rcu_assign_pointer(task->perf_event_ctxp, ctx);
5148 		}
5149 		mutex_unlock(&task->perf_event_mutex);
5150 
5151 		if (unlikely(err)) {
5152 			put_ctx(ctx);
5153 
5154 			if (err == -EAGAIN)
5155 				goto retry;
5156 			goto errout;
5157 		}
5158 	}
5159 
5160 	return ctx;
5161 
5162 errout:
5163 	return ERR_PTR(err);
5164 }
5165 
5166 static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu * pmu,struct perf_event_context * ctx,struct perf_event * event)5167 find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
5168 		     struct perf_event *event)
5169 {
5170 	struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
5171 
5172 	if (!ctx->task) {
5173 		/*
5174 		 * perf_pmu_migrate_context() / __perf_pmu_install_event()
5175 		 * relies on the fact that find_get_pmu_context() cannot fail
5176 		 * for CPU contexts.
5177 		 */
5178 		struct perf_cpu_pmu_context *cpc;
5179 
5180 		cpc = *per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
5181 		epc = &cpc->epc;
5182 		raw_spin_lock_irq(&ctx->lock);
5183 		if (!epc->ctx) {
5184 			/*
5185 			 * One extra reference for the pmu; see perf_pmu_free().
5186 			 */
5187 			atomic_set(&epc->refcount, 2);
5188 			epc->embedded = 1;
5189 			list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
5190 			epc->ctx = ctx;
5191 		} else {
5192 			WARN_ON_ONCE(epc->ctx != ctx);
5193 			atomic_inc(&epc->refcount);
5194 		}
5195 		raw_spin_unlock_irq(&ctx->lock);
5196 		return epc;
5197 	}
5198 
5199 	new = kzalloc_obj(*epc);
5200 	if (!new)
5201 		return ERR_PTR(-ENOMEM);
5202 
5203 	__perf_init_event_pmu_context(new, pmu);
5204 
5205 	/*
5206 	 * XXX
5207 	 *
5208 	 * lockdep_assert_held(&ctx->mutex);
5209 	 *
5210 	 * can't because perf_event_init_task() doesn't actually hold the
5211 	 * child_ctx->mutex.
5212 	 */
5213 
5214 	raw_spin_lock_irq(&ctx->lock);
5215 	list_for_each_entry(epc, &ctx->pmu_ctx_list, pmu_ctx_entry) {
5216 		if (epc->pmu == pmu) {
5217 			WARN_ON_ONCE(epc->ctx != ctx);
5218 			atomic_inc(&epc->refcount);
5219 			goto found_epc;
5220 		}
5221 		/* Make sure the pmu_ctx_list is sorted by PMU type: */
5222 		if (!pos && epc->pmu->type > pmu->type)
5223 			pos = epc;
5224 	}
5225 
5226 	epc = new;
5227 	new = NULL;
5228 
5229 	if (!pos)
5230 		list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
5231 	else
5232 		list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
5233 
5234 	epc->ctx = ctx;
5235 
5236 found_epc:
5237 	raw_spin_unlock_irq(&ctx->lock);
5238 	kfree(new);
5239 
5240 	return epc;
5241 }
5242 
get_pmu_ctx(struct perf_event_pmu_context * epc)5243 static void get_pmu_ctx(struct perf_event_pmu_context *epc)
5244 {
5245 	WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount));
5246 }
5247 
free_cpc_rcu(struct rcu_head * head)5248 static void free_cpc_rcu(struct rcu_head *head)
5249 {
5250 	struct perf_cpu_pmu_context *cpc =
5251 		container_of(head, typeof(*cpc), epc.rcu_head);
5252 
5253 	kfree(cpc);
5254 }
5255 
free_epc_rcu(struct rcu_head * head)5256 static void free_epc_rcu(struct rcu_head *head)
5257 {
5258 	struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
5259 
5260 	kfree(epc);
5261 }
5262 
put_pmu_ctx(struct perf_event_pmu_context * epc)5263 static void put_pmu_ctx(struct perf_event_pmu_context *epc)
5264 {
5265 	struct perf_event_context *ctx = epc->ctx;
5266 	unsigned long flags;
5267 
5268 	/*
5269 	 * XXX
5270 	 *
5271 	 * lockdep_assert_held(&ctx->mutex);
5272 	 *
5273 	 * can't because of the call-site in _free_event()/put_event()
5274 	 * which isn't always called under ctx->mutex.
5275 	 */
5276 	if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
5277 		return;
5278 
5279 	WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
5280 
5281 	list_del_init(&epc->pmu_ctx_entry);
5282 	epc->ctx = NULL;
5283 
5284 	WARN_ON_ONCE(!list_empty(&epc->pinned_active));
5285 	WARN_ON_ONCE(!list_empty(&epc->flexible_active));
5286 
5287 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
5288 
5289 	if (epc->embedded) {
5290 		call_rcu(&epc->rcu_head, free_cpc_rcu);
5291 		return;
5292 	}
5293 
5294 	call_rcu(&epc->rcu_head, free_epc_rcu);
5295 }
5296 
5297 static void perf_event_free_filter(struct perf_event *event);
5298 
free_event_rcu(struct rcu_head * head)5299 static void free_event_rcu(struct rcu_head *head)
5300 {
5301 	struct perf_event *event = container_of(head, typeof(*event), rcu_head);
5302 
5303 	if (event->ns)
5304 		put_pid_ns(event->ns);
5305 	perf_event_free_filter(event);
5306 	kmem_cache_free(perf_event_cache, event);
5307 }
5308 
5309 static void ring_buffer_attach(struct perf_event *event,
5310 			       struct perf_buffer *rb);
5311 
detach_sb_event(struct perf_event * event)5312 static void detach_sb_event(struct perf_event *event)
5313 {
5314 	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
5315 
5316 	raw_spin_lock(&pel->lock);
5317 	list_del_rcu(&event->sb_list);
5318 	raw_spin_unlock(&pel->lock);
5319 }
5320 
is_sb_event(struct perf_event * event)5321 static bool is_sb_event(struct perf_event *event)
5322 {
5323 	struct perf_event_attr *attr = &event->attr;
5324 
5325 	if (event->parent)
5326 		return false;
5327 
5328 	if (event->attach_state & PERF_ATTACH_TASK)
5329 		return false;
5330 
5331 	if (attr->mmap || attr->mmap_data || attr->mmap2 ||
5332 	    attr->comm || attr->comm_exec ||
5333 	    attr->task || attr->ksymbol ||
5334 	    attr->context_switch || attr->text_poke ||
5335 	    attr->bpf_event)
5336 		return true;
5337 
5338 	return false;
5339 }
5340 
unaccount_pmu_sb_event(struct perf_event * event)5341 static void unaccount_pmu_sb_event(struct perf_event *event)
5342 {
5343 	if (is_sb_event(event))
5344 		detach_sb_event(event);
5345 }
5346 
5347 #ifdef CONFIG_NO_HZ_FULL
5348 static DEFINE_SPINLOCK(nr_freq_lock);
5349 #endif
5350 
unaccount_freq_event_nohz(void)5351 static void unaccount_freq_event_nohz(void)
5352 {
5353 #ifdef CONFIG_NO_HZ_FULL
5354 	spin_lock(&nr_freq_lock);
5355 	if (atomic_dec_and_test(&nr_freq_events))
5356 		tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
5357 	spin_unlock(&nr_freq_lock);
5358 #endif
5359 }
5360 
unaccount_freq_event(void)5361 static void unaccount_freq_event(void)
5362 {
5363 	if (tick_nohz_full_enabled())
5364 		unaccount_freq_event_nohz();
5365 	else
5366 		atomic_dec(&nr_freq_events);
5367 }
5368 
5369 
5370 static struct perf_ctx_data *
alloc_perf_ctx_data(struct kmem_cache * ctx_cache,bool global,gfp_t gfp_flags)5371 alloc_perf_ctx_data(struct kmem_cache *ctx_cache, bool global, gfp_t gfp_flags)
5372 {
5373 	struct perf_ctx_data *cd;
5374 
5375 	cd = kzalloc_obj(*cd, gfp_flags);
5376 	if (!cd)
5377 		return NULL;
5378 
5379 	cd->data = kmem_cache_zalloc(ctx_cache, gfp_flags);
5380 	if (!cd->data) {
5381 		kfree(cd);
5382 		return NULL;
5383 	}
5384 
5385 	cd->global = global;
5386 	cd->ctx_cache = ctx_cache;
5387 	refcount_set(&cd->refcount, 1);
5388 
5389 	return cd;
5390 }
5391 
free_perf_ctx_data(struct perf_ctx_data * cd)5392 static void free_perf_ctx_data(struct perf_ctx_data *cd)
5393 {
5394 	kmem_cache_free(cd->ctx_cache, cd->data);
5395 	kfree(cd);
5396 }
5397 
__free_perf_ctx_data_rcu(struct rcu_head * rcu_head)5398 static void __free_perf_ctx_data_rcu(struct rcu_head *rcu_head)
5399 {
5400 	struct perf_ctx_data *cd;
5401 
5402 	cd = container_of(rcu_head, struct perf_ctx_data, rcu_head);
5403 	free_perf_ctx_data(cd);
5404 }
5405 
perf_free_ctx_data_rcu(struct perf_ctx_data * cd)5406 static inline void perf_free_ctx_data_rcu(struct perf_ctx_data *cd)
5407 {
5408 	call_rcu(&cd->rcu_head, __free_perf_ctx_data_rcu);
5409 }
5410 
5411 static int
attach_task_ctx_data(struct task_struct * task,struct kmem_cache * ctx_cache,bool global,gfp_t gfp_flags)5412 attach_task_ctx_data(struct task_struct *task, struct kmem_cache *ctx_cache,
5413 		     bool global, gfp_t gfp_flags)
5414 {
5415 	struct perf_ctx_data *cd, *old = NULL;
5416 
5417 	cd = alloc_perf_ctx_data(ctx_cache, global, gfp_flags);
5418 	if (!cd)
5419 		return -ENOMEM;
5420 
5421 	for (;;) {
5422 		if (try_cmpxchg(&task->perf_ctx_data, &old, cd)) {
5423 			if (old)
5424 				perf_free_ctx_data_rcu(old);
5425 			/*
5426 			 * Above try_cmpxchg() pairs with try_cmpxchg() from
5427 			 * detach_task_ctx_data() such that
5428 			 * if we race with perf_event_exit_task(), we must
5429 			 * observe PF_EXITING.
5430 			 */
5431 			if (task->flags & PF_EXITING) {
5432 				/* detach_task_ctx_data() may free it already */
5433 				if (try_cmpxchg(&task->perf_ctx_data, &cd, NULL))
5434 					perf_free_ctx_data_rcu(cd);
5435 			}
5436 			return 0;
5437 		}
5438 
5439 		if (!old) {
5440 			/*
5441 			 * After seeing a dead @old, we raced with
5442 			 * removal and lost, try again to install @cd.
5443 			 */
5444 			continue;
5445 		}
5446 
5447 		if (refcount_inc_not_zero(&old->refcount)) {
5448 			free_perf_ctx_data(cd); /* unused */
5449 			return 0;
5450 		}
5451 
5452 		/*
5453 		 * @old is a dead object, refcount==0 is stable, try and
5454 		 * replace it with @cd.
5455 		 */
5456 	}
5457 	return 0;
5458 }
5459 
5460 static void __detach_global_ctx_data(void);
5461 DEFINE_STATIC_PERCPU_RWSEM(global_ctx_data_rwsem);
5462 static refcount_t global_ctx_data_ref;
5463 
5464 static int
attach_global_ctx_data(struct kmem_cache * ctx_cache)5465 attach_global_ctx_data(struct kmem_cache *ctx_cache)
5466 {
5467 	struct task_struct *g, *p;
5468 	struct perf_ctx_data *cd;
5469 	int ret;
5470 
5471 	if (refcount_inc_not_zero(&global_ctx_data_ref))
5472 		return 0;
5473 
5474 	guard(percpu_write)(&global_ctx_data_rwsem);
5475 	if (refcount_inc_not_zero(&global_ctx_data_ref))
5476 		return 0;
5477 again:
5478 	/* Allocate everything */
5479 	scoped_guard (rcu) {
5480 		for_each_process_thread(g, p) {
5481 			if (p->flags & PF_EXITING)
5482 				continue;
5483 			cd = rcu_dereference(p->perf_ctx_data);
5484 			if (cd && !cd->global) {
5485 				cd->global = 1;
5486 				if (!refcount_inc_not_zero(&cd->refcount))
5487 					cd = NULL;
5488 			}
5489 			if (!cd) {
5490 				/*
5491 				 * Try to allocate context quickly before
5492 				 * traversing the whole thread list again.
5493 				 */
5494 				if (!attach_task_ctx_data(p, ctx_cache, true, GFP_NOWAIT))
5495 					continue;
5496 				get_task_struct(p);
5497 				goto alloc;
5498 			}
5499 		}
5500 	}
5501 
5502 	refcount_set(&global_ctx_data_ref, 1);
5503 
5504 	return 0;
5505 alloc:
5506 	ret = attach_task_ctx_data(p, ctx_cache, true, GFP_KERNEL);
5507 	put_task_struct(p);
5508 	if (ret) {
5509 		__detach_global_ctx_data();
5510 		return ret;
5511 	}
5512 	goto again;
5513 }
5514 
5515 static int
attach_perf_ctx_data(struct perf_event * event)5516 attach_perf_ctx_data(struct perf_event *event)
5517 {
5518 	struct task_struct *task = event->hw.target;
5519 	struct kmem_cache *ctx_cache = event->pmu->task_ctx_cache;
5520 	int ret;
5521 
5522 	if (!ctx_cache)
5523 		return -ENOMEM;
5524 
5525 	if (task)
5526 		return attach_task_ctx_data(task, ctx_cache, false, GFP_KERNEL);
5527 
5528 	ret = attach_global_ctx_data(ctx_cache);
5529 	if (ret)
5530 		return ret;
5531 
5532 	event->attach_state |= PERF_ATTACH_GLOBAL_DATA;
5533 	return 0;
5534 }
5535 
5536 static void
detach_task_ctx_data(struct task_struct * p)5537 detach_task_ctx_data(struct task_struct *p)
5538 {
5539 	struct perf_ctx_data *cd;
5540 
5541 	scoped_guard (rcu) {
5542 		cd = rcu_dereference(p->perf_ctx_data);
5543 		if (!cd || !refcount_dec_and_test(&cd->refcount))
5544 			return;
5545 	}
5546 
5547 	/*
5548 	 * The old ctx_data may be lost because of the race.
5549 	 * Nothing is required to do for the case.
5550 	 * See attach_task_ctx_data().
5551 	 */
5552 	if (try_cmpxchg((struct perf_ctx_data **)&p->perf_ctx_data, &cd, NULL))
5553 		perf_free_ctx_data_rcu(cd);
5554 }
5555 
__detach_global_ctx_data(void)5556 static void __detach_global_ctx_data(void)
5557 {
5558 	struct task_struct *g, *p;
5559 	struct perf_ctx_data *cd;
5560 
5561 	scoped_guard (rcu) {
5562 		for_each_process_thread(g, p) {
5563 			cd = rcu_dereference(p->perf_ctx_data);
5564 			if (cd && cd->global) {
5565 				cd->global = 0;
5566 				detach_task_ctx_data(p);
5567 			}
5568 		}
5569 	}
5570 }
5571 
detach_global_ctx_data(void)5572 static void detach_global_ctx_data(void)
5573 {
5574 	if (refcount_dec_not_one(&global_ctx_data_ref))
5575 		return;
5576 
5577 	guard(percpu_write)(&global_ctx_data_rwsem);
5578 	if (!refcount_dec_and_test(&global_ctx_data_ref))
5579 		return;
5580 
5581 	/* remove everything */
5582 	__detach_global_ctx_data();
5583 }
5584 
detach_perf_ctx_data(struct perf_event * event)5585 static void detach_perf_ctx_data(struct perf_event *event)
5586 {
5587 	struct task_struct *task = event->hw.target;
5588 
5589 	event->attach_state &= ~PERF_ATTACH_TASK_DATA;
5590 
5591 	if (task)
5592 		return detach_task_ctx_data(task);
5593 
5594 	if (event->attach_state & PERF_ATTACH_GLOBAL_DATA) {
5595 		detach_global_ctx_data();
5596 		event->attach_state &= ~PERF_ATTACH_GLOBAL_DATA;
5597 	}
5598 }
5599 
unaccount_event(struct perf_event * event)5600 static void unaccount_event(struct perf_event *event)
5601 {
5602 	bool dec = false;
5603 
5604 	if (event->parent)
5605 		return;
5606 
5607 	if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
5608 		dec = true;
5609 	if (event->attr.mmap || event->attr.mmap_data)
5610 		atomic_dec(&nr_mmap_events);
5611 	if (event->attr.build_id)
5612 		atomic_dec(&nr_build_id_events);
5613 	if (event->attr.comm)
5614 		atomic_dec(&nr_comm_events);
5615 	if (event->attr.namespaces)
5616 		atomic_dec(&nr_namespaces_events);
5617 	if (event->attr.cgroup)
5618 		atomic_dec(&nr_cgroup_events);
5619 	if (event->attr.task)
5620 		atomic_dec(&nr_task_events);
5621 	if (event->attr.freq)
5622 		unaccount_freq_event();
5623 	if (event->attr.context_switch) {
5624 		dec = true;
5625 		atomic_dec(&nr_switch_events);
5626 	}
5627 	if (is_cgroup_event(event))
5628 		dec = true;
5629 	if (has_branch_stack(event))
5630 		dec = true;
5631 	if (event->attr.ksymbol)
5632 		atomic_dec(&nr_ksymbol_events);
5633 	if (event->attr.bpf_event)
5634 		atomic_dec(&nr_bpf_events);
5635 	if (event->attr.text_poke)
5636 		atomic_dec(&nr_text_poke_events);
5637 
5638 	if (dec) {
5639 		if (!atomic_add_unless(&perf_sched_count, -1, 1))
5640 			schedule_delayed_work(&perf_sched_work, HZ);
5641 	}
5642 
5643 	unaccount_pmu_sb_event(event);
5644 }
5645 
perf_sched_delayed(struct work_struct * work)5646 static void perf_sched_delayed(struct work_struct *work)
5647 {
5648 	mutex_lock(&perf_sched_mutex);
5649 	if (atomic_dec_and_test(&perf_sched_count))
5650 		static_branch_disable(&perf_sched_events);
5651 	mutex_unlock(&perf_sched_mutex);
5652 }
5653 
5654 /*
5655  * The following implement mutual exclusion of events on "exclusive" pmus
5656  * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5657  * at a time, so we disallow creating events that might conflict, namely:
5658  *
5659  *  1) cpu-wide events in the presence of per-task events,
5660  *  2) per-task events in the presence of cpu-wide events,
5661  *  3) two matching events on the same perf_event_context.
5662  *
5663  * The former two cases are handled in the allocation path (perf_event_alloc(),
5664  * _free_event()), the latter -- before the first perf_install_in_context().
5665  */
exclusive_event_init(struct perf_event * event)5666 static int exclusive_event_init(struct perf_event *event)
5667 {
5668 	struct pmu *pmu = event->pmu;
5669 
5670 	if (!is_exclusive_pmu(pmu))
5671 		return 0;
5672 
5673 	/*
5674 	 * Prevent co-existence of per-task and cpu-wide events on the
5675 	 * same exclusive pmu.
5676 	 *
5677 	 * Negative pmu::exclusive_cnt means there are cpu-wide
5678 	 * events on this "exclusive" pmu, positive means there are
5679 	 * per-task events.
5680 	 *
5681 	 * Since this is called in perf_event_alloc() path, event::ctx
5682 	 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
5683 	 * to mean "per-task event", because unlike other attach states it
5684 	 * never gets cleared.
5685 	 */
5686 	if (event->attach_state & PERF_ATTACH_TASK) {
5687 		if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
5688 			return -EBUSY;
5689 	} else {
5690 		if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
5691 			return -EBUSY;
5692 	}
5693 
5694 	event->attach_state |= PERF_ATTACH_EXCLUSIVE;
5695 
5696 	return 0;
5697 }
5698 
exclusive_event_destroy(struct perf_event * event)5699 static void exclusive_event_destroy(struct perf_event *event)
5700 {
5701 	struct pmu *pmu = event->pmu;
5702 
5703 	/* see comment in exclusive_event_init() */
5704 	if (event->attach_state & PERF_ATTACH_TASK)
5705 		atomic_dec(&pmu->exclusive_cnt);
5706 	else
5707 		atomic_inc(&pmu->exclusive_cnt);
5708 
5709 	event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
5710 }
5711 
exclusive_event_match(struct perf_event * e1,struct perf_event * e2)5712 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
5713 {
5714 	if ((e1->pmu == e2->pmu) &&
5715 	    (e1->cpu == e2->cpu ||
5716 	     e1->cpu == -1 ||
5717 	     e2->cpu == -1))
5718 		return true;
5719 	return false;
5720 }
5721 
exclusive_event_installable(struct perf_event * event,struct perf_event_context * ctx)5722 static bool exclusive_event_installable(struct perf_event *event,
5723 					struct perf_event_context *ctx)
5724 {
5725 	struct perf_event *iter_event;
5726 	struct pmu *pmu = event->pmu;
5727 
5728 	lockdep_assert_held(&ctx->mutex);
5729 
5730 	if (!is_exclusive_pmu(pmu))
5731 		return true;
5732 
5733 	list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
5734 		if (exclusive_event_match(iter_event, event))
5735 			return false;
5736 	}
5737 
5738 	return true;
5739 }
5740 
5741 static void perf_free_addr_filters(struct perf_event *event);
5742 
5743 /* vs perf_event_alloc() error */
__free_event(struct perf_event * event)5744 static void __free_event(struct perf_event *event)
5745 {
5746 	struct pmu *pmu = event->pmu;
5747 
5748 	security_perf_event_free(event);
5749 
5750 	if (event->attach_state & PERF_ATTACH_CALLCHAIN)
5751 		put_callchain_buffers();
5752 
5753 	kfree(event->addr_filter_ranges);
5754 
5755 	if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
5756 		exclusive_event_destroy(event);
5757 
5758 	if (is_cgroup_event(event))
5759 		perf_detach_cgroup(event);
5760 
5761 	if (event->attach_state & PERF_ATTACH_TASK_DATA)
5762 		detach_perf_ctx_data(event);
5763 
5764 	if (event->destroy)
5765 		event->destroy(event);
5766 
5767 	/*
5768 	 * Must be after ->destroy(), due to uprobe_perf_close() using
5769 	 * hw.target.
5770 	 */
5771 	if (event->hw.target)
5772 		put_task_struct(event->hw.target);
5773 
5774 	if (event->pmu_ctx) {
5775 		/*
5776 		 * put_pmu_ctx() needs an event->ctx reference, because of
5777 		 * epc->ctx.
5778 		 */
5779 		WARN_ON_ONCE(!pmu);
5780 		WARN_ON_ONCE(!event->ctx);
5781 		WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
5782 		put_pmu_ctx(event->pmu_ctx);
5783 	}
5784 
5785 	/*
5786 	 * perf_event_free_task() relies on put_ctx() being 'last', in
5787 	 * particular all task references must be cleaned up.
5788 	 */
5789 	if (event->ctx)
5790 		put_ctx(event->ctx);
5791 
5792 	if (pmu) {
5793 		module_put(pmu->module);
5794 		scoped_guard (spinlock, &pmu->events_lock) {
5795 			list_del(&event->pmu_list);
5796 			wake_up_var(pmu);
5797 		}
5798 	}
5799 
5800 	call_rcu(&event->rcu_head, free_event_rcu);
5801 }
5802 
5803 static void mediated_pmu_unaccount_event(struct perf_event *event);
5804 
DEFINE_FREE(__free_event,struct perf_event *,if (_T)__free_event (_T))5805 DEFINE_FREE(__free_event, struct perf_event *, if (_T) __free_event(_T))
5806 
5807 /* vs perf_event_alloc() success */
5808 static void _free_event(struct perf_event *event)
5809 {
5810 	irq_work_sync(&event->pending_irq);
5811 	irq_work_sync(&event->pending_disable_irq);
5812 
5813 	unaccount_event(event);
5814 	mediated_pmu_unaccount_event(event);
5815 
5816 	if (event->rb) {
5817 		/*
5818 		 * Can happen when we close an event with re-directed output.
5819 		 *
5820 		 * Since we have a 0 refcount, perf_mmap_close() will skip
5821 		 * over us; possibly making our ring_buffer_put() the last.
5822 		 */
5823 		mutex_lock(&event->mmap_mutex);
5824 		ring_buffer_attach(event, NULL);
5825 		mutex_unlock(&event->mmap_mutex);
5826 	}
5827 
5828 	perf_event_free_bpf_prog(event);
5829 	perf_free_addr_filters(event);
5830 
5831 	__free_event(event);
5832 }
5833 
5834 /*
5835  * Used to free events which have a known refcount of 1, such as in error paths
5836  * of inherited events.
5837  */
free_event(struct perf_event * event)5838 static void free_event(struct perf_event *event)
5839 {
5840 	if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
5841 				     "unexpected event refcount: %ld; ptr=%p\n",
5842 				     atomic_long_read(&event->refcount), event)) {
5843 		/* leak to avoid use-after-free */
5844 		return;
5845 	}
5846 
5847 	_free_event(event);
5848 }
5849 
5850 /*
5851  * Remove user event from the owner task.
5852  */
perf_remove_from_owner(struct perf_event * event)5853 static void perf_remove_from_owner(struct perf_event *event)
5854 {
5855 	struct task_struct *owner;
5856 
5857 	rcu_read_lock();
5858 	/*
5859 	 * Matches the smp_store_release() in perf_event_exit_task(). If we
5860 	 * observe !owner it means the list deletion is complete and we can
5861 	 * indeed free this event, otherwise we need to serialize on
5862 	 * owner->perf_event_mutex.
5863 	 */
5864 	owner = READ_ONCE(event->owner);
5865 	if (owner) {
5866 		/*
5867 		 * Since delayed_put_task_struct() also drops the last
5868 		 * task reference we can safely take a new reference
5869 		 * while holding the rcu_read_lock().
5870 		 */
5871 		get_task_struct(owner);
5872 	}
5873 	rcu_read_unlock();
5874 
5875 	if (owner) {
5876 		/*
5877 		 * If we're here through perf_event_exit_task() we're already
5878 		 * holding ctx->mutex which would be an inversion wrt. the
5879 		 * normal lock order.
5880 		 *
5881 		 * However we can safely take this lock because its the child
5882 		 * ctx->mutex.
5883 		 */
5884 		mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
5885 
5886 		/*
5887 		 * We have to re-check the event->owner field, if it is cleared
5888 		 * we raced with perf_event_exit_task(), acquiring the mutex
5889 		 * ensured they're done, and we can proceed with freeing the
5890 		 * event.
5891 		 */
5892 		if (event->owner) {
5893 			list_del_init(&event->owner_entry);
5894 			smp_store_release(&event->owner, NULL);
5895 		}
5896 		mutex_unlock(&owner->perf_event_mutex);
5897 		put_task_struct(owner);
5898 	}
5899 }
5900 
put_event(struct perf_event * event)5901 static void put_event(struct perf_event *event)
5902 {
5903 	struct perf_event *parent;
5904 
5905 	if (!atomic_long_dec_and_test(&event->refcount))
5906 		return;
5907 
5908 	parent = event->parent;
5909 	_free_event(event);
5910 
5911 	/* Matches the refcount bump in inherit_event() */
5912 	if (parent)
5913 		put_event(parent);
5914 }
5915 
5916 /*
5917  * Kill an event dead; while event:refcount will preserve the event
5918  * object, it will not preserve its functionality. Once the last 'user'
5919  * gives up the object, we'll destroy the thing.
5920  */
perf_event_release_kernel(struct perf_event * event)5921 int perf_event_release_kernel(struct perf_event *event)
5922 {
5923 	struct perf_event_context *ctx = event->ctx;
5924 	struct perf_event *child, *tmp;
5925 
5926 	/*
5927 	 * If we got here through err_alloc: free_event(event); we will not
5928 	 * have attached to a context yet.
5929 	 */
5930 	if (!ctx) {
5931 		WARN_ON_ONCE(event->attach_state &
5932 				(PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
5933 		goto no_ctx;
5934 	}
5935 
5936 	if (!is_kernel_event(event))
5937 		perf_remove_from_owner(event);
5938 
5939 	ctx = perf_event_ctx_lock(event);
5940 	WARN_ON_ONCE(ctx->parent_ctx);
5941 
5942 	/*
5943 	 * Mark this event as STATE_DEAD, there is no external reference to it
5944 	 * anymore.
5945 	 *
5946 	 * Anybody acquiring event->child_mutex after the below loop _must_
5947 	 * also see this, most importantly inherit_event() which will avoid
5948 	 * placing more children on the list.
5949 	 *
5950 	 * Thus this guarantees that we will in fact observe and kill _ALL_
5951 	 * child events.
5952 	 */
5953 	if (event->state > PERF_EVENT_STATE_REVOKED) {
5954 		perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
5955 	} else {
5956 		event->state = PERF_EVENT_STATE_DEAD;
5957 	}
5958 
5959 	perf_event_ctx_unlock(event, ctx);
5960 
5961 again:
5962 	mutex_lock(&event->child_mutex);
5963 	list_for_each_entry(child, &event->child_list, child_list) {
5964 		/*
5965 		 * Cannot change, child events are not migrated, see the
5966 		 * comment with perf_event_ctx_lock_nested().
5967 		 */
5968 		ctx = READ_ONCE(child->ctx);
5969 		/*
5970 		 * Since child_mutex nests inside ctx::mutex, we must jump
5971 		 * through hoops. We start by grabbing a reference on the ctx.
5972 		 *
5973 		 * Since the event cannot get freed while we hold the
5974 		 * child_mutex, the context must also exist and have a !0
5975 		 * reference count.
5976 		 */
5977 		get_ctx(ctx);
5978 
5979 		/*
5980 		 * Now that we have a ctx ref, we can drop child_mutex, and
5981 		 * acquire ctx::mutex without fear of it going away. Then we
5982 		 * can re-acquire child_mutex.
5983 		 */
5984 		mutex_unlock(&event->child_mutex);
5985 		mutex_lock(&ctx->mutex);
5986 		mutex_lock(&event->child_mutex);
5987 
5988 		/*
5989 		 * Now that we hold ctx::mutex and child_mutex, revalidate our
5990 		 * state, if child is still the first entry, it didn't get freed
5991 		 * and we can continue doing so.
5992 		 */
5993 		tmp = list_first_entry_or_null(&event->child_list,
5994 					       struct perf_event, child_list);
5995 		if (tmp == child) {
5996 			perf_remove_from_context(child, DETACH_GROUP | DETACH_CHILD);
5997 		} else {
5998 			child = NULL;
5999 		}
6000 
6001 		mutex_unlock(&event->child_mutex);
6002 		mutex_unlock(&ctx->mutex);
6003 
6004 		if (child) {
6005 			/* Last reference unless ->pending_task work is pending */
6006 			put_event(child);
6007 		}
6008 		put_ctx(ctx);
6009 
6010 		goto again;
6011 	}
6012 	mutex_unlock(&event->child_mutex);
6013 
6014 no_ctx:
6015 	/*
6016 	 * Last reference unless ->pending_task work is pending on this event
6017 	 * or any of its children.
6018 	 */
6019 	put_event(event);
6020 	return 0;
6021 }
6022 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
6023 
6024 /*
6025  * Called when the last reference to the file is gone.
6026  */
perf_release(struct inode * inode,struct file * file)6027 static int perf_release(struct inode *inode, struct file *file)
6028 {
6029 	perf_event_release_kernel(file->private_data);
6030 	return 0;
6031 }
6032 
__perf_event_read_value(struct perf_event * event,u64 * enabled,u64 * running)6033 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
6034 {
6035 	struct perf_event *child;
6036 	u64 total = 0;
6037 
6038 	*enabled = 0;
6039 	*running = 0;
6040 
6041 	mutex_lock(&event->child_mutex);
6042 
6043 	(void)perf_event_read(event, false);
6044 	total += perf_event_count(event, false);
6045 
6046 	*enabled += event->total_time_enabled +
6047 			atomic64_read(&event->child_total_time_enabled);
6048 	*running += event->total_time_running +
6049 			atomic64_read(&event->child_total_time_running);
6050 
6051 	list_for_each_entry(child, &event->child_list, child_list) {
6052 		(void)perf_event_read(child, false);
6053 		total += perf_event_count(child, false);
6054 		*enabled += child->total_time_enabled;
6055 		*running += child->total_time_running;
6056 	}
6057 	mutex_unlock(&event->child_mutex);
6058 
6059 	return total;
6060 }
6061 
perf_event_read_value(struct perf_event * event,u64 * enabled,u64 * running)6062 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
6063 {
6064 	struct perf_event_context *ctx;
6065 	u64 count;
6066 
6067 	ctx = perf_event_ctx_lock(event);
6068 	count = __perf_event_read_value(event, enabled, running);
6069 	perf_event_ctx_unlock(event, ctx);
6070 
6071 	return count;
6072 }
6073 EXPORT_SYMBOL_GPL(perf_event_read_value);
6074 
__perf_read_group_add(struct perf_event * leader,u64 read_format,u64 * values)6075 static int __perf_read_group_add(struct perf_event *leader,
6076 					u64 read_format, u64 *values)
6077 {
6078 	struct perf_event_context *ctx = leader->ctx;
6079 	struct perf_event *sub, *parent;
6080 	unsigned long flags;
6081 	int n = 1; /* skip @nr */
6082 	int ret;
6083 
6084 	ret = perf_event_read(leader, true);
6085 	if (ret)
6086 		return ret;
6087 
6088 	raw_spin_lock_irqsave(&ctx->lock, flags);
6089 	/*
6090 	 * Verify the grouping between the parent and child (inherited)
6091 	 * events is still in tact.
6092 	 *
6093 	 * Specifically:
6094 	 *  - leader->ctx->lock pins leader->sibling_list
6095 	 *  - parent->child_mutex pins parent->child_list
6096 	 *  - parent->ctx->mutex pins parent->sibling_list
6097 	 *
6098 	 * Because parent->ctx != leader->ctx (and child_list nests inside
6099 	 * ctx->mutex), group destruction is not atomic between children, also
6100 	 * see perf_event_release_kernel(). Additionally, parent can grow the
6101 	 * group.
6102 	 *
6103 	 * Therefore it is possible to have parent and child groups in a
6104 	 * different configuration and summing over such a beast makes no sense
6105 	 * what so ever.
6106 	 *
6107 	 * Reject this.
6108 	 */
6109 	parent = leader->parent;
6110 	if (parent &&
6111 	    (parent->group_generation != leader->group_generation ||
6112 	     parent->nr_siblings != leader->nr_siblings)) {
6113 		ret = -ECHILD;
6114 		goto unlock;
6115 	}
6116 
6117 	/*
6118 	 * Since we co-schedule groups, {enabled,running} times of siblings
6119 	 * will be identical to those of the leader, so we only publish one
6120 	 * set.
6121 	 */
6122 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
6123 		values[n++] += leader->total_time_enabled +
6124 			atomic64_read(&leader->child_total_time_enabled);
6125 	}
6126 
6127 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
6128 		values[n++] += leader->total_time_running +
6129 			atomic64_read(&leader->child_total_time_running);
6130 	}
6131 
6132 	/*
6133 	 * Write {count,id} tuples for every sibling.
6134 	 */
6135 	values[n++] += perf_event_count(leader, false);
6136 	if (read_format & PERF_FORMAT_ID)
6137 		values[n++] = primary_event_id(leader);
6138 	if (read_format & PERF_FORMAT_LOST)
6139 		values[n++] = atomic64_read(&leader->lost_samples);
6140 
6141 	for_each_sibling_event(sub, leader) {
6142 		values[n++] += perf_event_count(sub, false);
6143 		if (read_format & PERF_FORMAT_ID)
6144 			values[n++] = primary_event_id(sub);
6145 		if (read_format & PERF_FORMAT_LOST)
6146 			values[n++] = atomic64_read(&sub->lost_samples);
6147 	}
6148 
6149 unlock:
6150 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
6151 	return ret;
6152 }
6153 
perf_read_group(struct perf_event * event,u64 read_format,char __user * buf)6154 static int perf_read_group(struct perf_event *event,
6155 				   u64 read_format, char __user *buf)
6156 {
6157 	struct perf_event *leader = event->group_leader, *child;
6158 	struct perf_event_context *ctx = leader->ctx;
6159 	int ret;
6160 	u64 *values;
6161 
6162 	lockdep_assert_held(&ctx->mutex);
6163 
6164 	values = kzalloc(event->read_size, GFP_KERNEL);
6165 	if (!values)
6166 		return -ENOMEM;
6167 
6168 	values[0] = 1 + leader->nr_siblings;
6169 
6170 	mutex_lock(&leader->child_mutex);
6171 
6172 	ret = __perf_read_group_add(leader, read_format, values);
6173 	if (ret)
6174 		goto unlock;
6175 
6176 	list_for_each_entry(child, &leader->child_list, child_list) {
6177 		ret = __perf_read_group_add(child, read_format, values);
6178 		if (ret)
6179 			goto unlock;
6180 	}
6181 
6182 	mutex_unlock(&leader->child_mutex);
6183 
6184 	ret = event->read_size;
6185 	if (copy_to_user(buf, values, event->read_size))
6186 		ret = -EFAULT;
6187 	goto out;
6188 
6189 unlock:
6190 	mutex_unlock(&leader->child_mutex);
6191 out:
6192 	kfree(values);
6193 	return ret;
6194 }
6195 
perf_read_one(struct perf_event * event,u64 read_format,char __user * buf)6196 static int perf_read_one(struct perf_event *event,
6197 				 u64 read_format, char __user *buf)
6198 {
6199 	u64 enabled, running;
6200 	u64 values[5];
6201 	int n = 0;
6202 
6203 	values[n++] = __perf_event_read_value(event, &enabled, &running);
6204 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
6205 		values[n++] = enabled;
6206 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
6207 		values[n++] = running;
6208 	if (read_format & PERF_FORMAT_ID)
6209 		values[n++] = primary_event_id(event);
6210 	if (read_format & PERF_FORMAT_LOST)
6211 		values[n++] = atomic64_read(&event->lost_samples);
6212 
6213 	if (copy_to_user(buf, values, n * sizeof(u64)))
6214 		return -EFAULT;
6215 
6216 	return n * sizeof(u64);
6217 }
6218 
is_event_hup(struct perf_event * event)6219 static bool is_event_hup(struct perf_event *event)
6220 {
6221 	bool no_children;
6222 
6223 	if (event->state > PERF_EVENT_STATE_EXIT)
6224 		return false;
6225 
6226 	mutex_lock(&event->child_mutex);
6227 	no_children = list_empty(&event->child_list);
6228 	mutex_unlock(&event->child_mutex);
6229 	return no_children;
6230 }
6231 
6232 /*
6233  * Read the performance event - simple non blocking version for now
6234  */
6235 static ssize_t
__perf_read(struct perf_event * event,char __user * buf,size_t count)6236 __perf_read(struct perf_event *event, char __user *buf, size_t count)
6237 {
6238 	u64 read_format = event->attr.read_format;
6239 	int ret;
6240 
6241 	/*
6242 	 * Return end-of-file for a read on an event that is in
6243 	 * error state (i.e. because it was pinned but it couldn't be
6244 	 * scheduled on to the CPU at some point).
6245 	 */
6246 	if (event->state == PERF_EVENT_STATE_ERROR)
6247 		return 0;
6248 
6249 	if (count < event->read_size)
6250 		return -ENOSPC;
6251 
6252 	WARN_ON_ONCE(event->ctx->parent_ctx);
6253 	if (read_format & PERF_FORMAT_GROUP)
6254 		ret = perf_read_group(event, read_format, buf);
6255 	else
6256 		ret = perf_read_one(event, read_format, buf);
6257 
6258 	return ret;
6259 }
6260 
6261 static ssize_t
perf_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)6262 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
6263 {
6264 	struct perf_event *event = file->private_data;
6265 	struct perf_event_context *ctx;
6266 	int ret;
6267 
6268 	ret = security_perf_event_read(event);
6269 	if (ret)
6270 		return ret;
6271 
6272 	ctx = perf_event_ctx_lock(event);
6273 	ret = __perf_read(event, buf, count);
6274 	perf_event_ctx_unlock(event, ctx);
6275 
6276 	return ret;
6277 }
6278 
perf_poll(struct file * file,poll_table * wait)6279 static __poll_t perf_poll(struct file *file, poll_table *wait)
6280 {
6281 	struct perf_event *event = file->private_data;
6282 	struct perf_buffer *rb;
6283 	__poll_t events = EPOLLHUP;
6284 
6285 	if (event->state <= PERF_EVENT_STATE_REVOKED)
6286 		return EPOLLERR;
6287 
6288 	poll_wait(file, &event->waitq, wait);
6289 
6290 	if (event->state <= PERF_EVENT_STATE_REVOKED)
6291 		return EPOLLERR;
6292 
6293 	if (is_event_hup(event))
6294 		return events;
6295 
6296 	if (unlikely(READ_ONCE(event->state) == PERF_EVENT_STATE_ERROR &&
6297 		     event->attr.pinned))
6298 		return EPOLLERR;
6299 
6300 	/*
6301 	 * Pin the event->rb by taking event->mmap_mutex; otherwise
6302 	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
6303 	 */
6304 	mutex_lock(&event->mmap_mutex);
6305 	rb = event->rb;
6306 	if (rb)
6307 		events = atomic_xchg(&rb->poll, 0);
6308 	mutex_unlock(&event->mmap_mutex);
6309 	return events;
6310 }
6311 
_perf_event_reset(struct perf_event * event)6312 static void _perf_event_reset(struct perf_event *event)
6313 {
6314 	(void)perf_event_read(event, false);
6315 	local64_set(&event->count, 0);
6316 	perf_event_update_userpage(event);
6317 }
6318 
6319 /* Assume it's not an event with inherit set. */
perf_event_pause(struct perf_event * event,bool reset)6320 u64 perf_event_pause(struct perf_event *event, bool reset)
6321 {
6322 	struct perf_event_context *ctx;
6323 	u64 count;
6324 
6325 	ctx = perf_event_ctx_lock(event);
6326 	WARN_ON_ONCE(event->attr.inherit);
6327 	_perf_event_disable(event);
6328 	count = local64_read(&event->count);
6329 	if (reset)
6330 		local64_set(&event->count, 0);
6331 	perf_event_ctx_unlock(event, ctx);
6332 
6333 	return count;
6334 }
6335 EXPORT_SYMBOL_GPL(perf_event_pause);
6336 
6337 #ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
6338 static atomic_t nr_include_guest_events __read_mostly;
6339 
6340 static atomic_t nr_mediated_pmu_vms __read_mostly;
6341 static DEFINE_MUTEX(perf_mediated_pmu_mutex);
6342 
6343 /* !exclude_guest event of PMU with PERF_PMU_CAP_MEDIATED_VPMU */
is_include_guest_event(struct perf_event * event)6344 static inline bool is_include_guest_event(struct perf_event *event)
6345 {
6346 	if ((event->pmu->capabilities & PERF_PMU_CAP_MEDIATED_VPMU) &&
6347 	    !event->attr.exclude_guest)
6348 		return true;
6349 
6350 	return false;
6351 }
6352 
mediated_pmu_account_event(struct perf_event * event)6353 static int mediated_pmu_account_event(struct perf_event *event)
6354 {
6355 	if (!is_include_guest_event(event))
6356 		return 0;
6357 
6358 	if (atomic_inc_not_zero(&nr_include_guest_events))
6359 		return 0;
6360 
6361 	guard(mutex)(&perf_mediated_pmu_mutex);
6362 	if (atomic_read(&nr_mediated_pmu_vms))
6363 		return -EOPNOTSUPP;
6364 
6365 	atomic_inc(&nr_include_guest_events);
6366 	return 0;
6367 }
6368 
mediated_pmu_unaccount_event(struct perf_event * event)6369 static void mediated_pmu_unaccount_event(struct perf_event *event)
6370 {
6371 	if (!is_include_guest_event(event))
6372 		return;
6373 
6374 	if (WARN_ON_ONCE(!atomic_read(&nr_include_guest_events)))
6375 		return;
6376 
6377 	atomic_dec(&nr_include_guest_events);
6378 }
6379 
6380 /*
6381  * Currently invoked at VM creation to
6382  * - Check whether there are existing !exclude_guest events of PMU with
6383  *   PERF_PMU_CAP_MEDIATED_VPMU
6384  * - Set nr_mediated_pmu_vms to prevent !exclude_guest event creation on
6385  *   PMUs with PERF_PMU_CAP_MEDIATED_VPMU
6386  *
6387  * No impact for the PMU without PERF_PMU_CAP_MEDIATED_VPMU. The perf
6388  * still owns all the PMU resources.
6389  */
perf_create_mediated_pmu(void)6390 int perf_create_mediated_pmu(void)
6391 {
6392 	if (atomic_inc_not_zero(&nr_mediated_pmu_vms))
6393 		return 0;
6394 
6395 	guard(mutex)(&perf_mediated_pmu_mutex);
6396 	if (atomic_read(&nr_include_guest_events))
6397 		return -EBUSY;
6398 
6399 	atomic_inc(&nr_mediated_pmu_vms);
6400 	return 0;
6401 }
6402 EXPORT_SYMBOL_FOR_KVM(perf_create_mediated_pmu);
6403 
perf_release_mediated_pmu(void)6404 void perf_release_mediated_pmu(void)
6405 {
6406 	if (WARN_ON_ONCE(!atomic_read(&nr_mediated_pmu_vms)))
6407 		return;
6408 
6409 	atomic_dec(&nr_mediated_pmu_vms);
6410 }
6411 EXPORT_SYMBOL_FOR_KVM(perf_release_mediated_pmu);
6412 
6413 /* When loading a guest's mediated PMU, schedule out all exclude_guest events. */
perf_load_guest_context(void)6414 void perf_load_guest_context(void)
6415 {
6416 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
6417 
6418 	lockdep_assert_irqs_disabled();
6419 
6420 	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
6421 
6422 	if (WARN_ON_ONCE(__this_cpu_read(guest_ctx_loaded)))
6423 		return;
6424 
6425 	perf_ctx_disable(&cpuctx->ctx, EVENT_GUEST);
6426 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_GUEST);
6427 	if (cpuctx->task_ctx) {
6428 		perf_ctx_disable(cpuctx->task_ctx, EVENT_GUEST);
6429 		task_ctx_sched_out(cpuctx->task_ctx, NULL, EVENT_GUEST);
6430 	}
6431 
6432 	perf_ctx_enable(&cpuctx->ctx, EVENT_GUEST);
6433 	if (cpuctx->task_ctx)
6434 		perf_ctx_enable(cpuctx->task_ctx, EVENT_GUEST);
6435 
6436 	__this_cpu_write(guest_ctx_loaded, true);
6437 }
6438 EXPORT_SYMBOL_GPL(perf_load_guest_context);
6439 
perf_put_guest_context(void)6440 void perf_put_guest_context(void)
6441 {
6442 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
6443 
6444 	lockdep_assert_irqs_disabled();
6445 
6446 	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
6447 
6448 	if (WARN_ON_ONCE(!__this_cpu_read(guest_ctx_loaded)))
6449 		return;
6450 
6451 	perf_ctx_disable(&cpuctx->ctx, EVENT_GUEST);
6452 	if (cpuctx->task_ctx)
6453 		perf_ctx_disable(cpuctx->task_ctx, EVENT_GUEST);
6454 
6455 	perf_event_sched_in(cpuctx, cpuctx->task_ctx, NULL, EVENT_GUEST);
6456 
6457 	if (cpuctx->task_ctx)
6458 		perf_ctx_enable(cpuctx->task_ctx, EVENT_GUEST);
6459 	perf_ctx_enable(&cpuctx->ctx, EVENT_GUEST);
6460 
6461 	__this_cpu_write(guest_ctx_loaded, false);
6462 }
6463 EXPORT_SYMBOL_GPL(perf_put_guest_context);
6464 #else
mediated_pmu_account_event(struct perf_event * event)6465 static int mediated_pmu_account_event(struct perf_event *event) { return 0; }
mediated_pmu_unaccount_event(struct perf_event * event)6466 static void mediated_pmu_unaccount_event(struct perf_event *event) {}
6467 #endif
6468 
6469 /*
6470  * Holding the top-level event's child_mutex means that any
6471  * descendant process that has inherited this event will block
6472  * in perf_event_exit_event() if it goes to exit, thus satisfying the
6473  * task existence requirements of perf_event_enable/disable.
6474  */
perf_event_for_each_child(struct perf_event * event,void (* func)(struct perf_event *))6475 static void perf_event_for_each_child(struct perf_event *event,
6476 					void (*func)(struct perf_event *))
6477 {
6478 	struct perf_event *child;
6479 
6480 	WARN_ON_ONCE(event->ctx->parent_ctx);
6481 
6482 	mutex_lock(&event->child_mutex);
6483 	func(event);
6484 	list_for_each_entry(child, &event->child_list, child_list)
6485 		func(child);
6486 	mutex_unlock(&event->child_mutex);
6487 }
6488 
perf_event_for_each(struct perf_event * event,void (* func)(struct perf_event *))6489 static void perf_event_for_each(struct perf_event *event,
6490 				  void (*func)(struct perf_event *))
6491 {
6492 	struct perf_event_context *ctx = event->ctx;
6493 	struct perf_event *sibling;
6494 
6495 	lockdep_assert_held(&ctx->mutex);
6496 
6497 	event = event->group_leader;
6498 
6499 	perf_event_for_each_child(event, func);
6500 	for_each_sibling_event(sibling, event)
6501 		perf_event_for_each_child(sibling, func);
6502 }
6503 
__perf_event_period(struct perf_event * event,struct perf_cpu_context * cpuctx,struct perf_event_context * ctx,void * info)6504 static void __perf_event_period(struct perf_event *event,
6505 				struct perf_cpu_context *cpuctx,
6506 				struct perf_event_context *ctx,
6507 				void *info)
6508 {
6509 	u64 value = *((u64 *)info);
6510 	bool active;
6511 
6512 	if (event->attr.freq) {
6513 		event->attr.sample_freq = value;
6514 	} else {
6515 		event->attr.sample_period = value;
6516 		event->hw.sample_period = value;
6517 	}
6518 
6519 	active = (event->state == PERF_EVENT_STATE_ACTIVE);
6520 	if (active) {
6521 		perf_pmu_disable(event->pmu);
6522 		event->pmu->stop(event, PERF_EF_UPDATE);
6523 	}
6524 
6525 	local64_set(&event->hw.period_left, 0);
6526 
6527 	if (active) {
6528 		event->pmu->start(event, PERF_EF_RELOAD);
6529 		/*
6530 		 * Once the period is force-reset, the event starts immediately.
6531 		 * But the event/group could be throttled. Unthrottle the
6532 		 * event/group now to avoid the next tick trying to unthrottle
6533 		 * while we already re-started the event/group.
6534 		 */
6535 		if (event->hw.interrupts == MAX_INTERRUPTS)
6536 			perf_event_unthrottle_group(event, true);
6537 		perf_pmu_enable(event->pmu);
6538 	}
6539 }
6540 
perf_event_check_period(struct perf_event * event,u64 value)6541 static int perf_event_check_period(struct perf_event *event, u64 value)
6542 {
6543 	return event->pmu->check_period(event, value);
6544 }
6545 
_perf_event_period(struct perf_event * event,u64 value)6546 static int _perf_event_period(struct perf_event *event, u64 value)
6547 {
6548 	if (!is_sampling_event(event))
6549 		return -EINVAL;
6550 
6551 	if (!value)
6552 		return -EINVAL;
6553 
6554 	if (event->attr.freq) {
6555 		if (value > sysctl_perf_event_sample_rate)
6556 			return -EINVAL;
6557 	} else {
6558 		if (perf_event_check_period(event, value))
6559 			return -EINVAL;
6560 		if (value & (1ULL << 63))
6561 			return -EINVAL;
6562 	}
6563 
6564 	event_function_call(event, __perf_event_period, &value);
6565 
6566 	return 0;
6567 }
6568 
perf_event_period(struct perf_event * event,u64 value)6569 int perf_event_period(struct perf_event *event, u64 value)
6570 {
6571 	struct perf_event_context *ctx;
6572 	int ret;
6573 
6574 	ctx = perf_event_ctx_lock(event);
6575 	ret = _perf_event_period(event, value);
6576 	perf_event_ctx_unlock(event, ctx);
6577 
6578 	return ret;
6579 }
6580 EXPORT_SYMBOL_GPL(perf_event_period);
6581 
6582 static const struct file_operations perf_fops;
6583 
is_perf_file(struct fd f)6584 static inline bool is_perf_file(struct fd f)
6585 {
6586 	return !fd_empty(f) && fd_file(f)->f_op == &perf_fops;
6587 }
6588 
6589 static int perf_event_set_output(struct perf_event *event,
6590 				 struct perf_event *output_event);
6591 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
6592 static int perf_copy_attr(struct perf_event_attr __user *uattr,
6593 			  struct perf_event_attr *attr);
6594 static int __perf_event_set_bpf_prog(struct perf_event *event,
6595 				     struct bpf_prog *prog,
6596 				     u64 bpf_cookie);
6597 
_perf_ioctl(struct perf_event * event,unsigned int cmd,unsigned long arg)6598 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
6599 {
6600 	void (*func)(struct perf_event *);
6601 	u32 flags = arg;
6602 
6603 	if (event->state <= PERF_EVENT_STATE_REVOKED)
6604 		return -ENODEV;
6605 
6606 	switch (cmd) {
6607 	case PERF_EVENT_IOC_ENABLE:
6608 		func = _perf_event_enable;
6609 		break;
6610 	case PERF_EVENT_IOC_DISABLE:
6611 		func = _perf_event_disable;
6612 		break;
6613 	case PERF_EVENT_IOC_RESET:
6614 		func = _perf_event_reset;
6615 		break;
6616 
6617 	case PERF_EVENT_IOC_REFRESH:
6618 		return _perf_event_refresh(event, arg);
6619 
6620 	case PERF_EVENT_IOC_PERIOD:
6621 	{
6622 		u64 value;
6623 
6624 		if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
6625 			return -EFAULT;
6626 
6627 		return _perf_event_period(event, value);
6628 	}
6629 	case PERF_EVENT_IOC_ID:
6630 	{
6631 		u64 id = primary_event_id(event);
6632 
6633 		if (copy_to_user((void __user *)arg, &id, sizeof(id)))
6634 			return -EFAULT;
6635 		return 0;
6636 	}
6637 
6638 	case PERF_EVENT_IOC_SET_OUTPUT:
6639 	{
6640 		CLASS(fd, output)(arg);	     // arg == -1 => empty
6641 		struct perf_event *output_event = NULL;
6642 		if (arg != -1) {
6643 			if (!is_perf_file(output))
6644 				return -EBADF;
6645 			output_event = fd_file(output)->private_data;
6646 		}
6647 		return perf_event_set_output(event, output_event);
6648 	}
6649 
6650 	case PERF_EVENT_IOC_SET_FILTER:
6651 		return perf_event_set_filter(event, (void __user *)arg);
6652 
6653 	case PERF_EVENT_IOC_SET_BPF:
6654 	{
6655 		struct bpf_prog *prog;
6656 		int err;
6657 
6658 		prog = bpf_prog_get(arg);
6659 		if (IS_ERR(prog))
6660 			return PTR_ERR(prog);
6661 
6662 		err = __perf_event_set_bpf_prog(event, prog, 0);
6663 		if (err) {
6664 			bpf_prog_put(prog);
6665 			return err;
6666 		}
6667 
6668 		return 0;
6669 	}
6670 
6671 	case PERF_EVENT_IOC_PAUSE_OUTPUT: {
6672 		struct perf_buffer *rb;
6673 
6674 		rcu_read_lock();
6675 		rb = rcu_dereference(event->rb);
6676 		if (!rb || !rb->nr_pages) {
6677 			rcu_read_unlock();
6678 			return -EINVAL;
6679 		}
6680 		rb_toggle_paused(rb, !!arg);
6681 		rcu_read_unlock();
6682 		return 0;
6683 	}
6684 
6685 	case PERF_EVENT_IOC_QUERY_BPF:
6686 		return perf_event_query_prog_array(event, (void __user *)arg);
6687 
6688 	case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
6689 		struct perf_event_attr new_attr;
6690 		int err = perf_copy_attr((struct perf_event_attr __user *)arg,
6691 					 &new_attr);
6692 
6693 		if (err)
6694 			return err;
6695 
6696 		return perf_event_modify_attr(event,  &new_attr);
6697 	}
6698 	default:
6699 		return -ENOTTY;
6700 	}
6701 
6702 	if (flags & PERF_IOC_FLAG_GROUP)
6703 		perf_event_for_each(event, func);
6704 	else
6705 		perf_event_for_each_child(event, func);
6706 
6707 	return 0;
6708 }
6709 
perf_ioctl(struct file * file,unsigned int cmd,unsigned long arg)6710 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6711 {
6712 	struct perf_event *event = file->private_data;
6713 	struct perf_event_context *ctx;
6714 	long ret;
6715 
6716 	/* Treat ioctl like writes as it is likely a mutating operation. */
6717 	ret = security_perf_event_write(event);
6718 	if (ret)
6719 		return ret;
6720 
6721 	ctx = perf_event_ctx_lock(event);
6722 	ret = _perf_ioctl(event, cmd, arg);
6723 	perf_event_ctx_unlock(event, ctx);
6724 
6725 	return ret;
6726 }
6727 
6728 #ifdef CONFIG_COMPAT
perf_compat_ioctl(struct file * file,unsigned int cmd,unsigned long arg)6729 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
6730 				unsigned long arg)
6731 {
6732 	switch (_IOC_NR(cmd)) {
6733 	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
6734 	case _IOC_NR(PERF_EVENT_IOC_ID):
6735 	case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
6736 	case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
6737 		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
6738 		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
6739 			cmd &= ~IOCSIZE_MASK;
6740 			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
6741 		}
6742 		break;
6743 	}
6744 	return perf_ioctl(file, cmd, arg);
6745 }
6746 #else
6747 # define perf_compat_ioctl NULL
6748 #endif
6749 
perf_event_task_enable(void)6750 int perf_event_task_enable(void)
6751 {
6752 	struct perf_event_context *ctx;
6753 	struct perf_event *event;
6754 
6755 	mutex_lock(&current->perf_event_mutex);
6756 	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6757 		ctx = perf_event_ctx_lock(event);
6758 		perf_event_for_each_child(event, _perf_event_enable);
6759 		perf_event_ctx_unlock(event, ctx);
6760 	}
6761 	mutex_unlock(&current->perf_event_mutex);
6762 
6763 	return 0;
6764 }
6765 
perf_event_task_disable(void)6766 int perf_event_task_disable(void)
6767 {
6768 	struct perf_event_context *ctx;
6769 	struct perf_event *event;
6770 
6771 	mutex_lock(&current->perf_event_mutex);
6772 	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
6773 		ctx = perf_event_ctx_lock(event);
6774 		perf_event_for_each_child(event, _perf_event_disable);
6775 		perf_event_ctx_unlock(event, ctx);
6776 	}
6777 	mutex_unlock(&current->perf_event_mutex);
6778 
6779 	return 0;
6780 }
6781 
perf_event_index(struct perf_event * event)6782 static int perf_event_index(struct perf_event *event)
6783 {
6784 	if (event->hw.state & PERF_HES_STOPPED)
6785 		return 0;
6786 
6787 	if (event->state != PERF_EVENT_STATE_ACTIVE)
6788 		return 0;
6789 
6790 	return event->pmu->event_idx(event);
6791 }
6792 
perf_event_init_userpage(struct perf_event * event)6793 static void perf_event_init_userpage(struct perf_event *event)
6794 {
6795 	struct perf_event_mmap_page *userpg;
6796 	struct perf_buffer *rb;
6797 
6798 	rcu_read_lock();
6799 	rb = rcu_dereference(event->rb);
6800 	if (!rb)
6801 		goto unlock;
6802 
6803 	userpg = rb->user_page;
6804 
6805 	/* Allow new userspace to detect that bit 0 is deprecated */
6806 	userpg->cap_bit0_is_deprecated = 1;
6807 	userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
6808 	userpg->data_offset = PAGE_SIZE;
6809 	userpg->data_size = perf_data_size(rb);
6810 
6811 unlock:
6812 	rcu_read_unlock();
6813 }
6814 
arch_perf_update_userpage(struct perf_event * event,struct perf_event_mmap_page * userpg,u64 now)6815 void __weak arch_perf_update_userpage(
6816 	struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
6817 {
6818 }
6819 
6820 /*
6821  * Callers need to ensure there can be no nesting of this function, otherwise
6822  * the seqlock logic goes bad. We can not serialize this because the arch
6823  * code calls this from NMI context.
6824  */
perf_event_update_userpage(struct perf_event * event)6825 void perf_event_update_userpage(struct perf_event *event)
6826 {
6827 	struct perf_event_mmap_page *userpg;
6828 	struct perf_buffer *rb;
6829 	u64 enabled, running, now;
6830 
6831 	rcu_read_lock();
6832 	rb = rcu_dereference(event->rb);
6833 	if (!rb)
6834 		goto unlock;
6835 
6836 	/*
6837 	 * Disable preemption to guarantee consistent time stamps are stored to
6838 	 * the user page.
6839 	 */
6840 	preempt_disable();
6841 
6842 	/*
6843 	 * Compute total_time_enabled, total_time_running based on snapshot
6844 	 * values taken when the event was last scheduled in.
6845 	 *
6846 	 * We cannot simply call update_context_time() because doing so would
6847 	 * lead to deadlock when called from NMI context.
6848 	 */
6849 	calc_timer_values(event, &now, &enabled, &running);
6850 
6851 	userpg = rb->user_page;
6852 
6853 	++userpg->lock;
6854 	barrier();
6855 	userpg->index = perf_event_index(event);
6856 	userpg->offset = perf_event_count(event, false);
6857 	if (userpg->index)
6858 		userpg->offset -= local64_read(&event->hw.prev_count);
6859 
6860 	userpg->time_enabled = enabled +
6861 			atomic64_read(&event->child_total_time_enabled);
6862 
6863 	userpg->time_running = running +
6864 			atomic64_read(&event->child_total_time_running);
6865 
6866 	arch_perf_update_userpage(event, userpg, now);
6867 
6868 	barrier();
6869 	++userpg->lock;
6870 	preempt_enable();
6871 unlock:
6872 	rcu_read_unlock();
6873 }
6874 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
6875 
ring_buffer_attach(struct perf_event * event,struct perf_buffer * rb)6876 static void ring_buffer_attach(struct perf_event *event,
6877 			       struct perf_buffer *rb)
6878 {
6879 	struct perf_buffer *old_rb = NULL;
6880 	unsigned long flags;
6881 
6882 	WARN_ON_ONCE(event->parent);
6883 
6884 	if (event->rb) {
6885 		/*
6886 		 * Should be impossible, we set this when removing
6887 		 * event->rb_entry and wait/clear when adding event->rb_entry.
6888 		 */
6889 		WARN_ON_ONCE(event->rcu_pending);
6890 
6891 		old_rb = event->rb;
6892 		spin_lock_irqsave(&old_rb->event_lock, flags);
6893 		list_del_rcu(&event->rb_entry);
6894 		spin_unlock_irqrestore(&old_rb->event_lock, flags);
6895 
6896 		event->rcu_batches = get_state_synchronize_rcu();
6897 		event->rcu_pending = 1;
6898 	}
6899 
6900 	if (rb) {
6901 		if (event->rcu_pending) {
6902 			cond_synchronize_rcu(event->rcu_batches);
6903 			event->rcu_pending = 0;
6904 		}
6905 
6906 		spin_lock_irqsave(&rb->event_lock, flags);
6907 		list_add_rcu(&event->rb_entry, &rb->event_list);
6908 		spin_unlock_irqrestore(&rb->event_lock, flags);
6909 	}
6910 
6911 	/*
6912 	 * Avoid racing with perf_mmap_close(AUX): stop the event
6913 	 * before swizzling the event::rb pointer; if it's getting
6914 	 * unmapped, its aux_mmap_count will be 0 and it won't
6915 	 * restart. See the comment in __perf_pmu_output_stop().
6916 	 *
6917 	 * Data will inevitably be lost when set_output is done in
6918 	 * mid-air, but then again, whoever does it like this is
6919 	 * not in for the data anyway.
6920 	 */
6921 	if (has_aux(event))
6922 		perf_event_stop(event, 0);
6923 
6924 	rcu_assign_pointer(event->rb, rb);
6925 
6926 	if (old_rb) {
6927 		ring_buffer_put(old_rb);
6928 		/*
6929 		 * Since we detached before setting the new rb, so that we
6930 		 * could attach the new rb, we could have missed a wakeup.
6931 		 * Provide it now.
6932 		 */
6933 		wake_up_all(&event->waitq);
6934 	}
6935 }
6936 
ring_buffer_wakeup(struct perf_event * event)6937 static void ring_buffer_wakeup(struct perf_event *event)
6938 {
6939 	struct perf_buffer *rb;
6940 
6941 	if (event->parent)
6942 		event = event->parent;
6943 
6944 	rcu_read_lock();
6945 	rb = rcu_dereference(event->rb);
6946 	if (rb) {
6947 		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
6948 			wake_up_all(&event->waitq);
6949 	}
6950 	rcu_read_unlock();
6951 }
6952 
ring_buffer_get(struct perf_event * event)6953 struct perf_buffer *ring_buffer_get(struct perf_event *event)
6954 {
6955 	struct perf_buffer *rb;
6956 
6957 	if (event->parent)
6958 		event = event->parent;
6959 
6960 	rcu_read_lock();
6961 	rb = rcu_dereference(event->rb);
6962 	if (rb) {
6963 		if (!refcount_inc_not_zero(&rb->refcount))
6964 			rb = NULL;
6965 	}
6966 	rcu_read_unlock();
6967 
6968 	return rb;
6969 }
6970 
ring_buffer_put(struct perf_buffer * rb)6971 void ring_buffer_put(struct perf_buffer *rb)
6972 {
6973 	if (!refcount_dec_and_test(&rb->refcount))
6974 		return;
6975 
6976 	WARN_ON_ONCE(!list_empty(&rb->event_list));
6977 
6978 	call_rcu(&rb->rcu_head, rb_free_rcu);
6979 }
6980 
6981 typedef void (*mapped_f)(struct perf_event *event, struct mm_struct *mm);
6982 
6983 #define get_mapped(event, func)			\
6984 ({	struct pmu *pmu;			\
6985 	mapped_f f = NULL;			\
6986 	guard(rcu)();				\
6987 	pmu = READ_ONCE(event->pmu);		\
6988 	if (pmu)				\
6989 		f = pmu->func;			\
6990 	f;					\
6991 })
6992 
perf_mmap_open(struct vm_area_struct * vma)6993 static void perf_mmap_open(struct vm_area_struct *vma)
6994 {
6995 	struct perf_event *event = vma->vm_file->private_data;
6996 	mapped_f mapped = get_mapped(event, event_mapped);
6997 
6998 	refcount_inc(&event->mmap_count);
6999 	refcount_inc(&event->rb->mmap_count);
7000 
7001 	if (vma->vm_pgoff)
7002 		refcount_inc(&event->rb->aux_mmap_count);
7003 
7004 	if (mapped)
7005 		mapped(event, vma->vm_mm);
7006 }
7007 
7008 static void perf_pmu_output_stop(struct perf_event *event);
7009 
7010 /*
7011  * A buffer can be mmap()ed multiple times; either directly through the same
7012  * event, or through other events by use of perf_event_set_output().
7013  *
7014  * In order to undo the VM accounting done by perf_mmap() we need to destroy
7015  * the buffer here, where we still have a VM context. This means we need
7016  * to detach all events redirecting to us.
7017  */
perf_mmap_close(struct vm_area_struct * vma)7018 static void perf_mmap_close(struct vm_area_struct *vma)
7019 {
7020 	struct perf_event *event = vma->vm_file->private_data;
7021 	mapped_f unmapped = get_mapped(event, event_unmapped);
7022 	struct perf_buffer *rb = ring_buffer_get(event);
7023 	struct user_struct *mmap_user = rb->mmap_user;
7024 	int mmap_locked = rb->mmap_locked;
7025 	unsigned long size = perf_data_size(rb);
7026 	bool detach_rest = false;
7027 
7028 	/* FIXIES vs perf_pmu_unregister() */
7029 	if (unmapped)
7030 		unmapped(event, vma->vm_mm);
7031 
7032 	/*
7033 	 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
7034 	 * to avoid complications.
7035 	 */
7036 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
7037 	    refcount_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
7038 		/*
7039 		 * Stop all AUX events that are writing to this buffer,
7040 		 * so that we can free its AUX pages and corresponding PMU
7041 		 * data. Note that after rb::aux_mmap_count dropped to zero,
7042 		 * they won't start any more (see perf_aux_output_begin()).
7043 		 */
7044 		perf_pmu_output_stop(event);
7045 
7046 		/* now it's safe to free the pages */
7047 		atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
7048 		atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
7049 
7050 		/* this has to be the last one */
7051 		rb_free_aux(rb);
7052 		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
7053 
7054 		mutex_unlock(&rb->aux_mutex);
7055 	}
7056 
7057 	if (refcount_dec_and_test(&rb->mmap_count))
7058 		detach_rest = true;
7059 
7060 	if (!refcount_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
7061 		goto out_put;
7062 
7063 	ring_buffer_attach(event, NULL);
7064 	mutex_unlock(&event->mmap_mutex);
7065 
7066 	/* If there's still other mmap()s of this buffer, we're done. */
7067 	if (!detach_rest)
7068 		goto out_put;
7069 
7070 	/*
7071 	 * No other mmap()s, detach from all other events that might redirect
7072 	 * into the now unreachable buffer. Somewhat complicated by the
7073 	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
7074 	 */
7075 again:
7076 	rcu_read_lock();
7077 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
7078 		if (!atomic_long_inc_not_zero(&event->refcount)) {
7079 			/*
7080 			 * This event is en-route to free_event() which will
7081 			 * detach it and remove it from the list.
7082 			 */
7083 			continue;
7084 		}
7085 		rcu_read_unlock();
7086 
7087 		mutex_lock(&event->mmap_mutex);
7088 		/*
7089 		 * Check we didn't race with perf_event_set_output() which can
7090 		 * swizzle the rb from under us while we were waiting to
7091 		 * acquire mmap_mutex.
7092 		 *
7093 		 * If we find a different rb; ignore this event, a next
7094 		 * iteration will no longer find it on the list. We have to
7095 		 * still restart the iteration to make sure we're not now
7096 		 * iterating the wrong list.
7097 		 */
7098 		if (event->rb == rb)
7099 			ring_buffer_attach(event, NULL);
7100 
7101 		mutex_unlock(&event->mmap_mutex);
7102 		put_event(event);
7103 
7104 		/*
7105 		 * Restart the iteration; either we're on the wrong list or
7106 		 * destroyed its integrity by doing a deletion.
7107 		 */
7108 		goto again;
7109 	}
7110 	rcu_read_unlock();
7111 
7112 	/*
7113 	 * It could be there's still a few 0-ref events on the list; they'll
7114 	 * get cleaned up by free_event() -- they'll also still have their
7115 	 * ref on the rb and will free it whenever they are done with it.
7116 	 *
7117 	 * Aside from that, this buffer is 'fully' detached and unmapped,
7118 	 * undo the VM accounting.
7119 	 */
7120 
7121 	atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
7122 			&mmap_user->locked_vm);
7123 	atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
7124 	free_uid(mmap_user);
7125 
7126 out_put:
7127 	ring_buffer_put(rb); /* could be last */
7128 }
7129 
perf_mmap_pfn_mkwrite(struct vm_fault * vmf)7130 static vm_fault_t perf_mmap_pfn_mkwrite(struct vm_fault *vmf)
7131 {
7132 	/* The first page is the user control page, others are read-only. */
7133 	return vmf->pgoff == 0 ? 0 : VM_FAULT_SIGBUS;
7134 }
7135 
perf_mmap_may_split(struct vm_area_struct * vma,unsigned long addr)7136 static int perf_mmap_may_split(struct vm_area_struct *vma, unsigned long addr)
7137 {
7138 	/*
7139 	 * Forbid splitting perf mappings to prevent refcount leaks due to
7140 	 * the resulting non-matching offsets and sizes. See open()/close().
7141 	 */
7142 	return -EINVAL;
7143 }
7144 
7145 static const struct vm_operations_struct perf_mmap_vmops = {
7146 	.open		= perf_mmap_open,
7147 	.close		= perf_mmap_close, /* non mergeable */
7148 	.pfn_mkwrite	= perf_mmap_pfn_mkwrite,
7149 	.may_split	= perf_mmap_may_split,
7150 };
7151 
map_range(struct perf_buffer * rb,struct vm_area_struct * vma)7152 static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
7153 {
7154 	unsigned long nr_pages = vma_pages(vma);
7155 	int err = 0;
7156 	unsigned long pagenum;
7157 
7158 	/*
7159 	 * We map this as a VM_PFNMAP VMA.
7160 	 *
7161 	 * This is not ideal as this is designed broadly for mappings of PFNs
7162 	 * referencing memory-mapped I/O ranges or non-system RAM i.e. for which
7163 	 * !pfn_valid(pfn).
7164 	 *
7165 	 * We are mapping kernel-allocated memory (memory we manage ourselves)
7166 	 * which would more ideally be mapped using vm_insert_page() or a
7167 	 * similar mechanism, that is as a VM_MIXEDMAP mapping.
7168 	 *
7169 	 * However this won't work here, because:
7170 	 *
7171 	 * 1. It uses vma->vm_page_prot, but this field has not been completely
7172 	 *    setup at the point of the f_op->mmp() hook, so we are unable to
7173 	 *    indicate that this should be mapped CoW in order that the
7174 	 *    mkwrite() hook can be invoked to make the first page R/W and the
7175 	 *    rest R/O as desired.
7176 	 *
7177 	 * 2. Anything other than a VM_PFNMAP of valid PFNs will result in
7178 	 *    vm_normal_page() returning a struct page * pointer, which means
7179 	 *    vm_ops->page_mkwrite() will be invoked rather than
7180 	 *    vm_ops->pfn_mkwrite(), and this means we have to set page->mapping
7181 	 *    to work around retry logic in the fault handler, however this
7182 	 *    field is no longer allowed to be used within struct page.
7183 	 *
7184 	 * 3. Having a struct page * made available in the fault logic also
7185 	 *    means that the page gets put on the rmap and becomes
7186 	 *    inappropriately accessible and subject to map and ref counting.
7187 	 *
7188 	 * Ideally we would have a mechanism that could explicitly express our
7189 	 * desires, but this is not currently the case, so we instead use
7190 	 * VM_PFNMAP.
7191 	 *
7192 	 * We manage the lifetime of these mappings with internal refcounts (see
7193 	 * perf_mmap_open() and perf_mmap_close()) so we ensure the lifetime of
7194 	 * this mapping is maintained correctly.
7195 	 */
7196 	for (pagenum = 0; pagenum < nr_pages; pagenum++) {
7197 		unsigned long va = vma->vm_start + PAGE_SIZE * pagenum;
7198 		struct page *page = perf_mmap_to_page(rb, vma->vm_pgoff + pagenum);
7199 
7200 		if (page == NULL) {
7201 			err = -EINVAL;
7202 			break;
7203 		}
7204 
7205 		/* Map readonly, perf_mmap_pfn_mkwrite() called on write fault. */
7206 		err = remap_pfn_range(vma, va, page_to_pfn(page), PAGE_SIZE,
7207 				      vm_get_page_prot(vma->vm_flags & ~VM_SHARED));
7208 		if (err)
7209 			break;
7210 	}
7211 
7212 #ifdef CONFIG_MMU
7213 	/* Clear any partial mappings on error. */
7214 	if (err)
7215 		zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
7216 #endif
7217 
7218 	return err;
7219 }
7220 
perf_mmap_calc_limits(struct vm_area_struct * vma,long * user_extra,long * extra)7221 static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
7222 {
7223 	unsigned long user_locked, user_lock_limit, locked, lock_limit;
7224 	struct user_struct *user = current_user();
7225 
7226 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
7227 	/* Increase the limit linearly with more CPUs */
7228 	user_lock_limit *= num_online_cpus();
7229 
7230 	user_locked = atomic_long_read(&user->locked_vm);
7231 
7232 	/*
7233 	 * sysctl_perf_event_mlock may have changed, so that
7234 	 *     user->locked_vm > user_lock_limit
7235 	 */
7236 	if (user_locked > user_lock_limit)
7237 		user_locked = user_lock_limit;
7238 	user_locked += *user_extra;
7239 
7240 	if (user_locked > user_lock_limit) {
7241 		/*
7242 		 * charge locked_vm until it hits user_lock_limit;
7243 		 * charge the rest from pinned_vm
7244 		 */
7245 		*extra = user_locked - user_lock_limit;
7246 		*user_extra -= *extra;
7247 	}
7248 
7249 	lock_limit = rlimit(RLIMIT_MEMLOCK);
7250 	lock_limit >>= PAGE_SHIFT;
7251 	locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
7252 
7253 	return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
7254 }
7255 
perf_mmap_account(struct vm_area_struct * vma,long user_extra,long extra)7256 static void perf_mmap_account(struct vm_area_struct *vma, long user_extra, long extra)
7257 {
7258 	struct user_struct *user = current_user();
7259 
7260 	atomic_long_add(user_extra, &user->locked_vm);
7261 	atomic64_add(extra, &vma->vm_mm->pinned_vm);
7262 }
7263 
perf_mmap_rb(struct vm_area_struct * vma,struct perf_event * event,unsigned long nr_pages)7264 static int perf_mmap_rb(struct vm_area_struct *vma, struct perf_event *event,
7265 			unsigned long nr_pages)
7266 {
7267 	long extra = 0, user_extra = nr_pages;
7268 	struct perf_buffer *rb;
7269 	int rb_flags = 0;
7270 
7271 	nr_pages -= 1;
7272 
7273 	/*
7274 	 * If we have rb pages ensure they're a power-of-two number, so we
7275 	 * can do bitmasks instead of modulo.
7276 	 */
7277 	if (nr_pages != 0 && !is_power_of_2(nr_pages))
7278 		return -EINVAL;
7279 
7280 	WARN_ON_ONCE(event->ctx->parent_ctx);
7281 
7282 	if (event->rb) {
7283 		if (data_page_nr(event->rb) != nr_pages)
7284 			return -EINVAL;
7285 
7286 		/*
7287 		 * If this event doesn't have mmap_count, we're attempting to
7288 		 * create an alias of another event's mmap(); this would mean
7289 		 * both events will end up scribbling the same user_page;
7290 		 * which makes no sense.
7291 		 */
7292 		if (!refcount_read(&event->mmap_count))
7293 			return -EBUSY;
7294 
7295 		if (refcount_inc_not_zero(&event->rb->mmap_count)) {
7296 			/*
7297 			 * Success -- managed to mmap() the same buffer
7298 			 * multiple times.
7299 			 */
7300 			perf_mmap_account(vma, user_extra, extra);
7301 			refcount_inc(&event->mmap_count);
7302 			return 0;
7303 		}
7304 
7305 		/*
7306 		 * Raced against perf_mmap_close()'s
7307 		 * refcount_dec_and_mutex_lock() remove the
7308 		 * event and continue as if !event->rb
7309 		 */
7310 		ring_buffer_attach(event, NULL);
7311 	}
7312 
7313 	if (!perf_mmap_calc_limits(vma, &user_extra, &extra))
7314 		return -EPERM;
7315 
7316 	if (vma->vm_flags & VM_WRITE)
7317 		rb_flags |= RING_BUFFER_WRITABLE;
7318 
7319 	rb = rb_alloc(nr_pages,
7320 		      event->attr.watermark ? event->attr.wakeup_watermark : 0,
7321 		      event->cpu, rb_flags);
7322 
7323 	if (!rb)
7324 		return -ENOMEM;
7325 
7326 	refcount_set(&rb->mmap_count, 1);
7327 	rb->mmap_user = get_current_user();
7328 	rb->mmap_locked = extra;
7329 
7330 	ring_buffer_attach(event, rb);
7331 
7332 	perf_event_update_time(event);
7333 	perf_event_init_userpage(event);
7334 	perf_event_update_userpage(event);
7335 
7336 	perf_mmap_account(vma, user_extra, extra);
7337 	refcount_set(&event->mmap_count, 1);
7338 
7339 	return 0;
7340 }
7341 
perf_mmap_aux(struct vm_area_struct * vma,struct perf_event * event,unsigned long nr_pages)7342 static int perf_mmap_aux(struct vm_area_struct *vma, struct perf_event *event,
7343 			 unsigned long nr_pages)
7344 {
7345 	long extra = 0, user_extra = nr_pages;
7346 	u64 aux_offset, aux_size;
7347 	struct perf_buffer *rb;
7348 	int ret, rb_flags = 0;
7349 
7350 	rb = event->rb;
7351 	if (!rb)
7352 		return -EINVAL;
7353 
7354 	guard(mutex)(&rb->aux_mutex);
7355 
7356 	/*
7357 	 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
7358 	 * mapped, all subsequent mappings should have the same size
7359 	 * and offset. Must be above the normal perf buffer.
7360 	 */
7361 	aux_offset = READ_ONCE(rb->user_page->aux_offset);
7362 	aux_size = READ_ONCE(rb->user_page->aux_size);
7363 
7364 	if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
7365 		return -EINVAL;
7366 
7367 	if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
7368 		return -EINVAL;
7369 
7370 	/* already mapped with a different offset */
7371 	if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
7372 		return -EINVAL;
7373 
7374 	if (aux_size != nr_pages * PAGE_SIZE)
7375 		return -EINVAL;
7376 
7377 	/* already mapped with a different size */
7378 	if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
7379 		return -EINVAL;
7380 
7381 	if (!is_power_of_2(nr_pages))
7382 		return -EINVAL;
7383 
7384 	if (!refcount_inc_not_zero(&rb->mmap_count))
7385 		return -EINVAL;
7386 
7387 	if (rb_has_aux(rb)) {
7388 		refcount_inc(&rb->aux_mmap_count);
7389 
7390 	} else {
7391 		if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
7392 			refcount_dec(&rb->mmap_count);
7393 			return -EPERM;
7394 		}
7395 
7396 		WARN_ON(!rb && event->rb);
7397 
7398 		if (vma->vm_flags & VM_WRITE)
7399 			rb_flags |= RING_BUFFER_WRITABLE;
7400 
7401 		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
7402 				   event->attr.aux_watermark, rb_flags);
7403 		if (ret) {
7404 			refcount_dec(&rb->mmap_count);
7405 			return ret;
7406 		}
7407 
7408 		refcount_set(&rb->aux_mmap_count, 1);
7409 		rb->aux_mmap_locked = extra;
7410 	}
7411 
7412 	perf_mmap_account(vma, user_extra, extra);
7413 	refcount_inc(&event->mmap_count);
7414 
7415 	return 0;
7416 }
7417 
perf_mmap(struct file * file,struct vm_area_struct * vma)7418 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
7419 {
7420 	struct perf_event *event = file->private_data;
7421 	unsigned long vma_size, nr_pages;
7422 	mapped_f mapped;
7423 	int ret;
7424 
7425 	/*
7426 	 * Don't allow mmap() of inherited per-task counters. This would
7427 	 * create a performance issue due to all children writing to the
7428 	 * same rb.
7429 	 */
7430 	if (event->cpu == -1 && event->attr.inherit)
7431 		return -EINVAL;
7432 
7433 	if (!(vma->vm_flags & VM_SHARED))
7434 		return -EINVAL;
7435 
7436 	ret = security_perf_event_read(event);
7437 	if (ret)
7438 		return ret;
7439 
7440 	vma_size = vma->vm_end - vma->vm_start;
7441 	nr_pages = vma_size / PAGE_SIZE;
7442 
7443 	if (nr_pages > INT_MAX)
7444 		return -ENOMEM;
7445 
7446 	if (vma_size != PAGE_SIZE * nr_pages)
7447 		return -EINVAL;
7448 
7449 	scoped_guard (mutex, &event->mmap_mutex) {
7450 		/*
7451 		 * This relies on __pmu_detach_event() taking mmap_mutex after marking
7452 		 * the event REVOKED. Either we observe the state, or __pmu_detach_event()
7453 		 * will detach the rb created here.
7454 		 */
7455 		if (event->state <= PERF_EVENT_STATE_REVOKED)
7456 			return -ENODEV;
7457 
7458 		if (vma->vm_pgoff == 0)
7459 			ret = perf_mmap_rb(vma, event, nr_pages);
7460 		else
7461 			ret = perf_mmap_aux(vma, event, nr_pages);
7462 		if (ret)
7463 			return ret;
7464 
7465 		/*
7466 		 * Since pinned accounting is per vm we cannot allow fork() to copy our
7467 		 * vma.
7468 		 */
7469 		vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
7470 		vma->vm_ops = &perf_mmap_vmops;
7471 
7472 		mapped = get_mapped(event, event_mapped);
7473 		if (mapped)
7474 			mapped(event, vma->vm_mm);
7475 
7476 		/*
7477 		 * Try to map it into the page table. On fail, invoke
7478 		 * perf_mmap_close() to undo the above, as the callsite expects
7479 		 * full cleanup in this case and therefore does not invoke
7480 		 * vmops::close().
7481 		 */
7482 		ret = map_range(event->rb, vma);
7483 		if (ret)
7484 			perf_mmap_close(vma);
7485 	}
7486 
7487 	return ret;
7488 }
7489 
perf_fasync(int fd,struct file * filp,int on)7490 static int perf_fasync(int fd, struct file *filp, int on)
7491 {
7492 	struct inode *inode = file_inode(filp);
7493 	struct perf_event *event = filp->private_data;
7494 	int retval;
7495 
7496 	if (event->state <= PERF_EVENT_STATE_REVOKED)
7497 		return -ENODEV;
7498 
7499 	inode_lock(inode);
7500 	retval = fasync_helper(fd, filp, on, &event->fasync);
7501 	inode_unlock(inode);
7502 
7503 	if (retval < 0)
7504 		return retval;
7505 
7506 	return 0;
7507 }
7508 
7509 static const struct file_operations perf_fops = {
7510 	.release		= perf_release,
7511 	.read			= perf_read,
7512 	.poll			= perf_poll,
7513 	.unlocked_ioctl		= perf_ioctl,
7514 	.compat_ioctl		= perf_compat_ioctl,
7515 	.mmap			= perf_mmap,
7516 	.fasync			= perf_fasync,
7517 };
7518 
7519 /*
7520  * Perf event wakeup
7521  *
7522  * If there's data, ensure we set the poll() state and publish everything
7523  * to user-space before waking everybody up.
7524  */
7525 
perf_event_wakeup(struct perf_event * event)7526 void perf_event_wakeup(struct perf_event *event)
7527 {
7528 	ring_buffer_wakeup(event);
7529 
7530 	if (event->pending_kill) {
7531 		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
7532 		event->pending_kill = 0;
7533 	}
7534 }
7535 
perf_sigtrap(struct perf_event * event)7536 static void perf_sigtrap(struct perf_event *event)
7537 {
7538 	/*
7539 	 * Both perf_pending_task() and perf_pending_irq() can race with the
7540 	 * task exiting.
7541 	 */
7542 	if (current->flags & PF_EXITING)
7543 		return;
7544 
7545 	/*
7546 	 * We'd expect this to only occur if the irq_work is delayed and either
7547 	 * ctx->task or current has changed in the meantime. This can be the
7548 	 * case on architectures that do not implement arch_irq_work_raise().
7549 	 */
7550 	if (WARN_ON_ONCE(event->ctx->task != current))
7551 		return;
7552 
7553 	send_sig_perf((void __user *)event->pending_addr,
7554 		      event->orig_type, event->attr.sig_data);
7555 }
7556 
7557 /*
7558  * Deliver the pending work in-event-context or follow the context.
7559  */
__perf_pending_disable(struct perf_event * event)7560 static void __perf_pending_disable(struct perf_event *event)
7561 {
7562 	int cpu = READ_ONCE(event->oncpu);
7563 
7564 	/*
7565 	 * If the event isn't running; we done. event_sched_out() will have
7566 	 * taken care of things.
7567 	 */
7568 	if (cpu < 0)
7569 		return;
7570 
7571 	/*
7572 	 * Yay, we hit home and are in the context of the event.
7573 	 */
7574 	if (cpu == smp_processor_id()) {
7575 		if (event->pending_disable) {
7576 			event->pending_disable = 0;
7577 			perf_event_disable_local(event);
7578 		}
7579 		return;
7580 	}
7581 
7582 	/*
7583 	 *  CPU-A			CPU-B
7584 	 *
7585 	 *  perf_event_disable_inatomic()
7586 	 *    @pending_disable = 1;
7587 	 *    irq_work_queue();
7588 	 *
7589 	 *  sched-out
7590 	 *    @pending_disable = 0;
7591 	 *
7592 	 *				sched-in
7593 	 *				perf_event_disable_inatomic()
7594 	 *				  @pending_disable = 1;
7595 	 *				  irq_work_queue(); // FAILS
7596 	 *
7597 	 *  irq_work_run()
7598 	 *    perf_pending_disable()
7599 	 *
7600 	 * But the event runs on CPU-B and wants disabling there.
7601 	 */
7602 	irq_work_queue_on(&event->pending_disable_irq, cpu);
7603 }
7604 
perf_pending_disable(struct irq_work * entry)7605 static void perf_pending_disable(struct irq_work *entry)
7606 {
7607 	struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq);
7608 	int rctx;
7609 
7610 	/*
7611 	 * If we 'fail' here, that's OK, it means recursion is already disabled
7612 	 * and we won't recurse 'further'.
7613 	 */
7614 	rctx = perf_swevent_get_recursion_context();
7615 	__perf_pending_disable(event);
7616 	if (rctx >= 0)
7617 		perf_swevent_put_recursion_context(rctx);
7618 }
7619 
perf_pending_irq(struct irq_work * entry)7620 static void perf_pending_irq(struct irq_work *entry)
7621 {
7622 	struct perf_event *event = container_of(entry, struct perf_event, pending_irq);
7623 	int rctx;
7624 
7625 	/*
7626 	 * If we 'fail' here, that's OK, it means recursion is already disabled
7627 	 * and we won't recurse 'further'.
7628 	 */
7629 	rctx = perf_swevent_get_recursion_context();
7630 
7631 	/*
7632 	 * The wakeup isn't bound to the context of the event -- it can happen
7633 	 * irrespective of where the event is.
7634 	 */
7635 	if (event->pending_wakeup) {
7636 		event->pending_wakeup = 0;
7637 		perf_event_wakeup(event);
7638 	}
7639 
7640 	if (rctx >= 0)
7641 		perf_swevent_put_recursion_context(rctx);
7642 }
7643 
perf_pending_task(struct callback_head * head)7644 static void perf_pending_task(struct callback_head *head)
7645 {
7646 	struct perf_event *event = container_of(head, struct perf_event, pending_task);
7647 	int rctx;
7648 
7649 	/*
7650 	 * If we 'fail' here, that's OK, it means recursion is already disabled
7651 	 * and we won't recurse 'further'.
7652 	 */
7653 	rctx = perf_swevent_get_recursion_context();
7654 
7655 	if (event->pending_work) {
7656 		event->pending_work = 0;
7657 		perf_sigtrap(event);
7658 		local_dec(&event->ctx->nr_no_switch_fast);
7659 	}
7660 	put_event(event);
7661 
7662 	if (rctx >= 0)
7663 		perf_swevent_put_recursion_context(rctx);
7664 }
7665 
7666 #ifdef CONFIG_GUEST_PERF_EVENTS
7667 struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
7668 
7669 DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
7670 DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
7671 DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
7672 DEFINE_STATIC_CALL_RET0(__perf_guest_handle_mediated_pmi, *perf_guest_cbs->handle_mediated_pmi);
7673 
perf_register_guest_info_callbacks(struct perf_guest_info_callbacks * cbs)7674 void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
7675 {
7676 	if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
7677 		return;
7678 
7679 	rcu_assign_pointer(perf_guest_cbs, cbs);
7680 	static_call_update(__perf_guest_state, cbs->state);
7681 	static_call_update(__perf_guest_get_ip, cbs->get_ip);
7682 
7683 	/* Implementing ->handle_intel_pt_intr is optional. */
7684 	if (cbs->handle_intel_pt_intr)
7685 		static_call_update(__perf_guest_handle_intel_pt_intr,
7686 				   cbs->handle_intel_pt_intr);
7687 
7688 	if (cbs->handle_mediated_pmi)
7689 		static_call_update(__perf_guest_handle_mediated_pmi,
7690 				   cbs->handle_mediated_pmi);
7691 }
7692 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
7693 
perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks * cbs)7694 void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
7695 {
7696 	if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
7697 		return;
7698 
7699 	rcu_assign_pointer(perf_guest_cbs, NULL);
7700 	static_call_update(__perf_guest_state, (void *)&__static_call_return0);
7701 	static_call_update(__perf_guest_get_ip, (void *)&__static_call_return0);
7702 	static_call_update(__perf_guest_handle_intel_pt_intr, (void *)&__static_call_return0);
7703 	static_call_update(__perf_guest_handle_mediated_pmi, (void *)&__static_call_return0);
7704 	synchronize_rcu();
7705 }
7706 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
7707 #endif
7708 
should_sample_guest(struct perf_event * event)7709 static bool should_sample_guest(struct perf_event *event)
7710 {
7711 	return !event->attr.exclude_guest && perf_guest_state();
7712 }
7713 
perf_misc_flags(struct perf_event * event,struct pt_regs * regs)7714 unsigned long perf_misc_flags(struct perf_event *event,
7715 			      struct pt_regs *regs)
7716 {
7717 	if (should_sample_guest(event))
7718 		return perf_arch_guest_misc_flags(regs);
7719 
7720 	return perf_arch_misc_flags(regs);
7721 }
7722 
perf_instruction_pointer(struct perf_event * event,struct pt_regs * regs)7723 unsigned long perf_instruction_pointer(struct perf_event *event,
7724 				       struct pt_regs *regs)
7725 {
7726 	if (should_sample_guest(event))
7727 		return perf_guest_get_ip();
7728 
7729 	return perf_arch_instruction_pointer(regs);
7730 }
7731 
7732 static void
perf_output_sample_regs(struct perf_output_handle * handle,struct pt_regs * regs,u64 mask)7733 perf_output_sample_regs(struct perf_output_handle *handle,
7734 			struct pt_regs *regs, u64 mask)
7735 {
7736 	int bit;
7737 	DECLARE_BITMAP(_mask, 64);
7738 
7739 	bitmap_from_u64(_mask, mask);
7740 	for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
7741 		u64 val;
7742 
7743 		val = perf_reg_value(regs, bit);
7744 		perf_output_put(handle, val);
7745 	}
7746 }
7747 
perf_sample_regs_user(struct perf_regs * regs_user,struct pt_regs * regs)7748 static void perf_sample_regs_user(struct perf_regs *regs_user,
7749 				  struct pt_regs *regs)
7750 {
7751 	if (user_mode(regs)) {
7752 		regs_user->abi = perf_reg_abi(current);
7753 		regs_user->regs = regs;
7754 	} else if (is_user_task(current)) {
7755 		perf_get_regs_user(regs_user, regs);
7756 	} else {
7757 		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
7758 		regs_user->regs = NULL;
7759 	}
7760 }
7761 
perf_sample_regs_intr(struct perf_regs * regs_intr,struct pt_regs * regs)7762 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
7763 				  struct pt_regs *regs)
7764 {
7765 	regs_intr->regs = regs;
7766 	regs_intr->abi  = perf_reg_abi(current);
7767 }
7768 
7769 
7770 /*
7771  * Get remaining task size from user stack pointer.
7772  *
7773  * It'd be better to take stack vma map and limit this more
7774  * precisely, but there's no way to get it safely under interrupt,
7775  * so using TASK_SIZE as limit.
7776  */
perf_ustack_task_size(struct pt_regs * regs)7777 static u64 perf_ustack_task_size(struct pt_regs *regs)
7778 {
7779 	unsigned long addr = perf_user_stack_pointer(regs);
7780 
7781 	if (!addr || addr >= TASK_SIZE)
7782 		return 0;
7783 
7784 	return TASK_SIZE - addr;
7785 }
7786 
7787 static u16
perf_sample_ustack_size(u16 stack_size,u16 header_size,struct pt_regs * regs)7788 perf_sample_ustack_size(u16 stack_size, u16 header_size,
7789 			struct pt_regs *regs)
7790 {
7791 	u64 task_size;
7792 
7793 	/* No regs, no stack pointer, no dump. */
7794 	if (!regs)
7795 		return 0;
7796 
7797 	/* No mm, no stack, no dump. */
7798 	if (!current->mm)
7799 		return 0;
7800 
7801 	/*
7802 	 * Check if we fit in with the requested stack size into the:
7803 	 * - TASK_SIZE
7804 	 *   If we don't, we limit the size to the TASK_SIZE.
7805 	 *
7806 	 * - remaining sample size
7807 	 *   If we don't, we customize the stack size to
7808 	 *   fit in to the remaining sample size.
7809 	 */
7810 
7811 	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
7812 	stack_size = min(stack_size, (u16) task_size);
7813 
7814 	/* Current header size plus static size and dynamic size. */
7815 	header_size += 2 * sizeof(u64);
7816 
7817 	/* Do we fit in with the current stack dump size? */
7818 	if ((u16) (header_size + stack_size) < header_size) {
7819 		/*
7820 		 * If we overflow the maximum size for the sample,
7821 		 * we customize the stack dump size to fit in.
7822 		 */
7823 		stack_size = USHRT_MAX - header_size - sizeof(u64);
7824 		stack_size = round_up(stack_size, sizeof(u64));
7825 	}
7826 
7827 	return stack_size;
7828 }
7829 
7830 static void
perf_output_sample_ustack(struct perf_output_handle * handle,u64 dump_size,struct pt_regs * regs)7831 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
7832 			  struct pt_regs *regs)
7833 {
7834 	/* Case of a kernel thread, nothing to dump */
7835 	if (!regs) {
7836 		u64 size = 0;
7837 		perf_output_put(handle, size);
7838 	} else {
7839 		unsigned long sp;
7840 		unsigned int rem;
7841 		u64 dyn_size;
7842 
7843 		/*
7844 		 * We dump:
7845 		 * static size
7846 		 *   - the size requested by user or the best one we can fit
7847 		 *     in to the sample max size
7848 		 * data
7849 		 *   - user stack dump data
7850 		 * dynamic size
7851 		 *   - the actual dumped size
7852 		 */
7853 
7854 		/* Static size. */
7855 		perf_output_put(handle, dump_size);
7856 
7857 		/* Data. */
7858 		sp = perf_user_stack_pointer(regs);
7859 		rem = __output_copy_user(handle, (void *) sp, dump_size);
7860 		dyn_size = dump_size - rem;
7861 
7862 		perf_output_skip(handle, rem);
7863 
7864 		/* Dynamic size. */
7865 		perf_output_put(handle, dyn_size);
7866 	}
7867 }
7868 
perf_prepare_sample_aux(struct perf_event * event,struct perf_sample_data * data,size_t size)7869 static unsigned long perf_prepare_sample_aux(struct perf_event *event,
7870 					  struct perf_sample_data *data,
7871 					  size_t size)
7872 {
7873 	struct perf_event *sampler = event->aux_event;
7874 	struct perf_buffer *rb;
7875 
7876 	data->aux_size = 0;
7877 
7878 	if (!sampler)
7879 		goto out;
7880 
7881 	if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE))
7882 		goto out;
7883 
7884 	if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
7885 		goto out;
7886 
7887 	rb = ring_buffer_get(sampler);
7888 	if (!rb)
7889 		goto out;
7890 
7891 	/*
7892 	 * If this is an NMI hit inside sampling code, don't take
7893 	 * the sample. See also perf_aux_sample_output().
7894 	 */
7895 	if (READ_ONCE(rb->aux_in_sampling)) {
7896 		data->aux_size = 0;
7897 	} else {
7898 		size = min_t(size_t, size, perf_aux_size(rb));
7899 		data->aux_size = ALIGN(size, sizeof(u64));
7900 	}
7901 	ring_buffer_put(rb);
7902 
7903 out:
7904 	return data->aux_size;
7905 }
7906 
perf_pmu_snapshot_aux(struct perf_buffer * rb,struct perf_event * event,struct perf_output_handle * handle,unsigned long size)7907 static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
7908                                  struct perf_event *event,
7909                                  struct perf_output_handle *handle,
7910                                  unsigned long size)
7911 {
7912 	unsigned long flags;
7913 	long ret;
7914 
7915 	/*
7916 	 * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler
7917 	 * paths. If we start calling them in NMI context, they may race with
7918 	 * the IRQ ones, that is, for example, re-starting an event that's just
7919 	 * been stopped, which is why we're using a separate callback that
7920 	 * doesn't change the event state.
7921 	 *
7922 	 * IRQs need to be disabled to prevent IPIs from racing with us.
7923 	 */
7924 	local_irq_save(flags);
7925 	/*
7926 	 * Guard against NMI hits inside the critical section;
7927 	 * see also perf_prepare_sample_aux().
7928 	 */
7929 	WRITE_ONCE(rb->aux_in_sampling, 1);
7930 	barrier();
7931 
7932 	ret = event->pmu->snapshot_aux(event, handle, size);
7933 
7934 	barrier();
7935 	WRITE_ONCE(rb->aux_in_sampling, 0);
7936 	local_irq_restore(flags);
7937 
7938 	return ret;
7939 }
7940 
perf_aux_sample_output(struct perf_event * event,struct perf_output_handle * handle,struct perf_sample_data * data)7941 static void perf_aux_sample_output(struct perf_event *event,
7942 				   struct perf_output_handle *handle,
7943 				   struct perf_sample_data *data)
7944 {
7945 	struct perf_event *sampler = event->aux_event;
7946 	struct perf_buffer *rb;
7947 	unsigned long pad;
7948 	long size;
7949 
7950 	if (WARN_ON_ONCE(!sampler || !data->aux_size))
7951 		return;
7952 
7953 	rb = ring_buffer_get(sampler);
7954 	if (!rb)
7955 		return;
7956 
7957 	size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
7958 
7959 	/*
7960 	 * An error here means that perf_output_copy() failed (returned a
7961 	 * non-zero surplus that it didn't copy), which in its current
7962 	 * enlightened implementation is not possible. If that changes, we'd
7963 	 * like to know.
7964 	 */
7965 	if (WARN_ON_ONCE(size < 0))
7966 		goto out_put;
7967 
7968 	/*
7969 	 * The pad comes from ALIGN()ing data->aux_size up to u64 in
7970 	 * perf_prepare_sample_aux(), so should not be more than that.
7971 	 */
7972 	pad = data->aux_size - size;
7973 	if (WARN_ON_ONCE(pad >= sizeof(u64)))
7974 		pad = 8;
7975 
7976 	if (pad) {
7977 		u64 zero = 0;
7978 		perf_output_copy(handle, &zero, pad);
7979 	}
7980 
7981 out_put:
7982 	ring_buffer_put(rb);
7983 }
7984 
7985 /*
7986  * A set of common sample data types saved even for non-sample records
7987  * when event->attr.sample_id_all is set.
7988  */
7989 #define PERF_SAMPLE_ID_ALL  (PERF_SAMPLE_TID | PERF_SAMPLE_TIME |	\
7990 			     PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |	\
7991 			     PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
7992 
__perf_event_header__init_id(struct perf_sample_data * data,struct perf_event * event,u64 sample_type)7993 static void __perf_event_header__init_id(struct perf_sample_data *data,
7994 					 struct perf_event *event,
7995 					 u64 sample_type)
7996 {
7997 	data->type = event->attr.sample_type;
7998 	data->sample_flags |= data->type & PERF_SAMPLE_ID_ALL;
7999 
8000 	if (sample_type & PERF_SAMPLE_TID) {
8001 		/* namespace issues */
8002 		data->tid_entry.pid = perf_event_pid(event, current);
8003 		data->tid_entry.tid = perf_event_tid(event, current);
8004 	}
8005 
8006 	if (sample_type & PERF_SAMPLE_TIME)
8007 		data->time = perf_event_clock(event);
8008 
8009 	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
8010 		data->id = primary_event_id(event);
8011 
8012 	if (sample_type & PERF_SAMPLE_STREAM_ID)
8013 		data->stream_id = event->id;
8014 
8015 	if (sample_type & PERF_SAMPLE_CPU) {
8016 		data->cpu_entry.cpu	 = raw_smp_processor_id();
8017 		data->cpu_entry.reserved = 0;
8018 	}
8019 }
8020 
perf_event_header__init_id(struct perf_event_header * header,struct perf_sample_data * data,struct perf_event * event)8021 void perf_event_header__init_id(struct perf_event_header *header,
8022 				struct perf_sample_data *data,
8023 				struct perf_event *event)
8024 {
8025 	if (event->attr.sample_id_all) {
8026 		header->size += event->id_header_size;
8027 		__perf_event_header__init_id(data, event, event->attr.sample_type);
8028 	}
8029 }
8030 
__perf_event__output_id_sample(struct perf_output_handle * handle,struct perf_sample_data * data)8031 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
8032 					   struct perf_sample_data *data)
8033 {
8034 	u64 sample_type = data->type;
8035 
8036 	if (sample_type & PERF_SAMPLE_TID)
8037 		perf_output_put(handle, data->tid_entry);
8038 
8039 	if (sample_type & PERF_SAMPLE_TIME)
8040 		perf_output_put(handle, data->time);
8041 
8042 	if (sample_type & PERF_SAMPLE_ID)
8043 		perf_output_put(handle, data->id);
8044 
8045 	if (sample_type & PERF_SAMPLE_STREAM_ID)
8046 		perf_output_put(handle, data->stream_id);
8047 
8048 	if (sample_type & PERF_SAMPLE_CPU)
8049 		perf_output_put(handle, data->cpu_entry);
8050 
8051 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
8052 		perf_output_put(handle, data->id);
8053 }
8054 
perf_event__output_id_sample(struct perf_event * event,struct perf_output_handle * handle,struct perf_sample_data * sample)8055 void perf_event__output_id_sample(struct perf_event *event,
8056 				  struct perf_output_handle *handle,
8057 				  struct perf_sample_data *sample)
8058 {
8059 	if (event->attr.sample_id_all)
8060 		__perf_event__output_id_sample(handle, sample);
8061 }
8062 
perf_output_read_one(struct perf_output_handle * handle,struct perf_event * event,u64 enabled,u64 running)8063 static void perf_output_read_one(struct perf_output_handle *handle,
8064 				 struct perf_event *event,
8065 				 u64 enabled, u64 running)
8066 {
8067 	u64 read_format = event->attr.read_format;
8068 	u64 values[5];
8069 	int n = 0;
8070 
8071 	values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr));
8072 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
8073 		values[n++] = enabled +
8074 			atomic64_read(&event->child_total_time_enabled);
8075 	}
8076 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
8077 		values[n++] = running +
8078 			atomic64_read(&event->child_total_time_running);
8079 	}
8080 	if (read_format & PERF_FORMAT_ID)
8081 		values[n++] = primary_event_id(event);
8082 	if (read_format & PERF_FORMAT_LOST)
8083 		values[n++] = atomic64_read(&event->lost_samples);
8084 
8085 	__output_copy(handle, values, n * sizeof(u64));
8086 }
8087 
perf_output_read_group(struct perf_output_handle * handle,struct perf_event * event,u64 enabled,u64 running)8088 static void perf_output_read_group(struct perf_output_handle *handle,
8089 				   struct perf_event *event,
8090 				   u64 enabled, u64 running)
8091 {
8092 	struct perf_event *leader = event->group_leader, *sub;
8093 	u64 read_format = event->attr.read_format;
8094 	unsigned long flags;
8095 	u64 values[6];
8096 	int n = 0;
8097 	bool self = has_inherit_and_sample_read(&event->attr);
8098 
8099 	/*
8100 	 * Disabling interrupts avoids all counter scheduling
8101 	 * (context switches, timer based rotation and IPIs).
8102 	 */
8103 	local_irq_save(flags);
8104 
8105 	values[n++] = 1 + leader->nr_siblings;
8106 
8107 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
8108 		values[n++] = enabled;
8109 
8110 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
8111 		values[n++] = running;
8112 
8113 	if ((leader != event) && !handle->skip_read)
8114 		perf_pmu_read(leader);
8115 
8116 	values[n++] = perf_event_count(leader, self);
8117 	if (read_format & PERF_FORMAT_ID)
8118 		values[n++] = primary_event_id(leader);
8119 	if (read_format & PERF_FORMAT_LOST)
8120 		values[n++] = atomic64_read(&leader->lost_samples);
8121 
8122 	__output_copy(handle, values, n * sizeof(u64));
8123 
8124 	for_each_sibling_event(sub, leader) {
8125 		n = 0;
8126 
8127 		if ((sub != event) && !handle->skip_read)
8128 			perf_pmu_read(sub);
8129 
8130 		values[n++] = perf_event_count(sub, self);
8131 		if (read_format & PERF_FORMAT_ID)
8132 			values[n++] = primary_event_id(sub);
8133 		if (read_format & PERF_FORMAT_LOST)
8134 			values[n++] = atomic64_read(&sub->lost_samples);
8135 
8136 		__output_copy(handle, values, n * sizeof(u64));
8137 	}
8138 
8139 	local_irq_restore(flags);
8140 }
8141 
8142 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
8143 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
8144 
8145 /*
8146  * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
8147  *
8148  * The problem is that its both hard and excessively expensive to iterate the
8149  * child list, not to mention that its impossible to IPI the children running
8150  * on another CPU, from interrupt/NMI context.
8151  *
8152  * Instead the combination of PERF_SAMPLE_READ and inherit will track per-thread
8153  * counts rather than attempting to accumulate some value across all children on
8154  * all cores.
8155  */
perf_output_read(struct perf_output_handle * handle,struct perf_event * event)8156 static void perf_output_read(struct perf_output_handle *handle,
8157 			     struct perf_event *event)
8158 {
8159 	u64 enabled = 0, running = 0, now;
8160 	u64 read_format = event->attr.read_format;
8161 
8162 	/*
8163 	 * Compute total_time_enabled, total_time_running based on snapshot
8164 	 * values taken when the event was last scheduled in.
8165 	 *
8166 	 * We cannot simply call update_context_time() because doing so would
8167 	 * lead to deadlock when called from NMI context.
8168 	 */
8169 	if (read_format & PERF_FORMAT_TOTAL_TIMES)
8170 		calc_timer_values(event, &now, &enabled, &running);
8171 
8172 	if (event->attr.read_format & PERF_FORMAT_GROUP)
8173 		perf_output_read_group(handle, event, enabled, running);
8174 	else
8175 		perf_output_read_one(handle, event, enabled, running);
8176 }
8177 
perf_output_sample(struct perf_output_handle * handle,struct perf_event_header * header,struct perf_sample_data * data,struct perf_event * event)8178 void perf_output_sample(struct perf_output_handle *handle,
8179 			struct perf_event_header *header,
8180 			struct perf_sample_data *data,
8181 			struct perf_event *event)
8182 {
8183 	u64 sample_type = data->type;
8184 
8185 	if (data->sample_flags & PERF_SAMPLE_READ)
8186 		handle->skip_read = 1;
8187 
8188 	perf_output_put(handle, *header);
8189 
8190 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
8191 		perf_output_put(handle, data->id);
8192 
8193 	if (sample_type & PERF_SAMPLE_IP)
8194 		perf_output_put(handle, data->ip);
8195 
8196 	if (sample_type & PERF_SAMPLE_TID)
8197 		perf_output_put(handle, data->tid_entry);
8198 
8199 	if (sample_type & PERF_SAMPLE_TIME)
8200 		perf_output_put(handle, data->time);
8201 
8202 	if (sample_type & PERF_SAMPLE_ADDR)
8203 		perf_output_put(handle, data->addr);
8204 
8205 	if (sample_type & PERF_SAMPLE_ID)
8206 		perf_output_put(handle, data->id);
8207 
8208 	if (sample_type & PERF_SAMPLE_STREAM_ID)
8209 		perf_output_put(handle, data->stream_id);
8210 
8211 	if (sample_type & PERF_SAMPLE_CPU)
8212 		perf_output_put(handle, data->cpu_entry);
8213 
8214 	if (sample_type & PERF_SAMPLE_PERIOD)
8215 		perf_output_put(handle, data->period);
8216 
8217 	if (sample_type & PERF_SAMPLE_READ)
8218 		perf_output_read(handle, event);
8219 
8220 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
8221 		int size = 1;
8222 
8223 		size += data->callchain->nr;
8224 		size *= sizeof(u64);
8225 		__output_copy(handle, data->callchain, size);
8226 	}
8227 
8228 	if (sample_type & PERF_SAMPLE_RAW) {
8229 		struct perf_raw_record *raw = data->raw;
8230 
8231 		if (raw) {
8232 			struct perf_raw_frag *frag = &raw->frag;
8233 
8234 			perf_output_put(handle, raw->size);
8235 			do {
8236 				if (frag->copy) {
8237 					__output_custom(handle, frag->copy,
8238 							frag->data, frag->size);
8239 				} else {
8240 					__output_copy(handle, frag->data,
8241 						      frag->size);
8242 				}
8243 				if (perf_raw_frag_last(frag))
8244 					break;
8245 				frag = frag->next;
8246 			} while (1);
8247 			if (frag->pad)
8248 				__output_skip(handle, NULL, frag->pad);
8249 		} else {
8250 			struct {
8251 				u32	size;
8252 				u32	data;
8253 			} raw = {
8254 				.size = sizeof(u32),
8255 				.data = 0,
8256 			};
8257 			perf_output_put(handle, raw);
8258 		}
8259 	}
8260 
8261 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
8262 		if (data->br_stack) {
8263 			size_t size;
8264 
8265 			size = data->br_stack->nr
8266 			     * sizeof(struct perf_branch_entry);
8267 
8268 			perf_output_put(handle, data->br_stack->nr);
8269 			if (branch_sample_hw_index(event))
8270 				perf_output_put(handle, data->br_stack->hw_idx);
8271 			perf_output_copy(handle, data->br_stack->entries, size);
8272 			/*
8273 			 * Add the extension space which is appended
8274 			 * right after the struct perf_branch_stack.
8275 			 */
8276 			if (data->br_stack_cntr) {
8277 				size = data->br_stack->nr * sizeof(u64);
8278 				perf_output_copy(handle, data->br_stack_cntr, size);
8279 			}
8280 		} else {
8281 			/*
8282 			 * we always store at least the value of nr
8283 			 */
8284 			u64 nr = 0;
8285 			perf_output_put(handle, nr);
8286 		}
8287 	}
8288 
8289 	if (sample_type & PERF_SAMPLE_REGS_USER) {
8290 		u64 abi = data->regs_user.abi;
8291 
8292 		/*
8293 		 * If there are no regs to dump, notice it through
8294 		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
8295 		 */
8296 		perf_output_put(handle, abi);
8297 
8298 		if (abi) {
8299 			u64 mask = event->attr.sample_regs_user;
8300 			perf_output_sample_regs(handle,
8301 						data->regs_user.regs,
8302 						mask);
8303 		}
8304 	}
8305 
8306 	if (sample_type & PERF_SAMPLE_STACK_USER) {
8307 		perf_output_sample_ustack(handle,
8308 					  data->stack_user_size,
8309 					  data->regs_user.regs);
8310 	}
8311 
8312 	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE)
8313 		perf_output_put(handle, data->weight.full);
8314 
8315 	if (sample_type & PERF_SAMPLE_DATA_SRC)
8316 		perf_output_put(handle, data->data_src.val);
8317 
8318 	if (sample_type & PERF_SAMPLE_TRANSACTION)
8319 		perf_output_put(handle, data->txn);
8320 
8321 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
8322 		u64 abi = data->regs_intr.abi;
8323 		/*
8324 		 * If there are no regs to dump, notice it through
8325 		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
8326 		 */
8327 		perf_output_put(handle, abi);
8328 
8329 		if (abi) {
8330 			u64 mask = event->attr.sample_regs_intr;
8331 
8332 			perf_output_sample_regs(handle,
8333 						data->regs_intr.regs,
8334 						mask);
8335 		}
8336 	}
8337 
8338 	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
8339 		perf_output_put(handle, data->phys_addr);
8340 
8341 	if (sample_type & PERF_SAMPLE_CGROUP)
8342 		perf_output_put(handle, data->cgroup);
8343 
8344 	if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
8345 		perf_output_put(handle, data->data_page_size);
8346 
8347 	if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
8348 		perf_output_put(handle, data->code_page_size);
8349 
8350 	if (sample_type & PERF_SAMPLE_AUX) {
8351 		perf_output_put(handle, data->aux_size);
8352 
8353 		if (data->aux_size)
8354 			perf_aux_sample_output(event, handle, data);
8355 	}
8356 
8357 	if (!event->attr.watermark) {
8358 		int wakeup_events = event->attr.wakeup_events;
8359 
8360 		if (wakeup_events) {
8361 			struct perf_buffer *rb = handle->rb;
8362 			int events = local_inc_return(&rb->events);
8363 
8364 			if (events >= wakeup_events) {
8365 				local_sub(wakeup_events, &rb->events);
8366 				local_inc(&rb->wakeup);
8367 			}
8368 		}
8369 	}
8370 }
8371 
perf_virt_to_phys(u64 virt)8372 static u64 perf_virt_to_phys(u64 virt)
8373 {
8374 	u64 phys_addr = 0;
8375 
8376 	if (!virt)
8377 		return 0;
8378 
8379 	if (virt >= TASK_SIZE) {
8380 		/* If it's vmalloc()d memory, leave phys_addr as 0 */
8381 		if (virt_addr_valid((void *)(uintptr_t)virt) &&
8382 		    !(virt >= VMALLOC_START && virt < VMALLOC_END))
8383 			phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
8384 	} else {
8385 		/*
8386 		 * Walking the pages tables for user address.
8387 		 * Interrupts are disabled, so it prevents any tear down
8388 		 * of the page tables.
8389 		 * Try IRQ-safe get_user_page_fast_only first.
8390 		 * If failed, leave phys_addr as 0.
8391 		 */
8392 		if (is_user_task(current)) {
8393 			struct page *p;
8394 
8395 			pagefault_disable();
8396 			if (get_user_page_fast_only(virt, 0, &p)) {
8397 				phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
8398 				put_page(p);
8399 			}
8400 			pagefault_enable();
8401 		}
8402 	}
8403 
8404 	return phys_addr;
8405 }
8406 
8407 /*
8408  * Return the pagetable size of a given virtual address.
8409  */
perf_get_pgtable_size(struct mm_struct * mm,unsigned long addr)8410 static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
8411 {
8412 	u64 size = 0;
8413 
8414 #ifdef CONFIG_HAVE_GUP_FAST
8415 	pgd_t *pgdp, pgd;
8416 	p4d_t *p4dp, p4d;
8417 	pud_t *pudp, pud;
8418 	pmd_t *pmdp, pmd;
8419 	pte_t *ptep, pte;
8420 
8421 	pgdp = pgd_offset(mm, addr);
8422 	pgd = pgdp_get(pgdp);
8423 	if (pgd_none(pgd))
8424 		return 0;
8425 
8426 	if (pgd_leaf(pgd))
8427 		return pgd_leaf_size(pgd);
8428 
8429 	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
8430 	p4d = p4dp_get(p4dp);
8431 	if (!p4d_present(p4d))
8432 		return 0;
8433 
8434 	if (p4d_leaf(p4d))
8435 		return p4d_leaf_size(p4d);
8436 
8437 	pudp = pud_offset_lockless(p4dp, p4d, addr);
8438 	pud = pudp_get(pudp);
8439 	if (!pud_present(pud))
8440 		return 0;
8441 
8442 	if (pud_leaf(pud))
8443 		return pud_leaf_size(pud);
8444 
8445 	pmdp = pmd_offset_lockless(pudp, pud, addr);
8446 again:
8447 	pmd = pmdp_get_lockless(pmdp);
8448 	if (!pmd_present(pmd))
8449 		return 0;
8450 
8451 	if (pmd_leaf(pmd))
8452 		return pmd_leaf_size(pmd);
8453 
8454 	ptep = pte_offset_map(&pmd, addr);
8455 	if (!ptep)
8456 		goto again;
8457 
8458 	pte = ptep_get_lockless(ptep);
8459 	if (pte_present(pte))
8460 		size = __pte_leaf_size(pmd, pte);
8461 	pte_unmap(ptep);
8462 #endif /* CONFIG_HAVE_GUP_FAST */
8463 
8464 	return size;
8465 }
8466 
perf_get_page_size(unsigned long addr)8467 static u64 perf_get_page_size(unsigned long addr)
8468 {
8469 	struct mm_struct *mm;
8470 	unsigned long flags;
8471 	u64 size;
8472 
8473 	if (!addr)
8474 		return 0;
8475 
8476 	/*
8477 	 * Software page-table walkers must disable IRQs,
8478 	 * which prevents any tear down of the page tables.
8479 	 */
8480 	local_irq_save(flags);
8481 
8482 	mm = current->mm;
8483 	if (!mm) {
8484 		/*
8485 		 * For kernel threads and the like, use init_mm so that
8486 		 * we can find kernel memory.
8487 		 */
8488 		mm = &init_mm;
8489 	}
8490 
8491 	size = perf_get_pgtable_size(mm, addr);
8492 
8493 	local_irq_restore(flags);
8494 
8495 	return size;
8496 }
8497 
8498 static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
8499 
8500 static struct unwind_work perf_unwind_work;
8501 
8502 struct perf_callchain_entry *
perf_callchain(struct perf_event * event,struct pt_regs * regs)8503 perf_callchain(struct perf_event *event, struct pt_regs *regs)
8504 {
8505 	bool kernel = !event->attr.exclude_callchain_kernel;
8506 	bool user   = !event->attr.exclude_callchain_user &&
8507 		is_user_task(current);
8508 	/* Disallow cross-task user callchains. */
8509 	bool crosstask = event->ctx->task && event->ctx->task != current;
8510 	bool defer_user = IS_ENABLED(CONFIG_UNWIND_USER) && user &&
8511 			  event->attr.defer_callchain;
8512 	const u32 max_stack = event->attr.sample_max_stack;
8513 	struct perf_callchain_entry *callchain;
8514 	u64 defer_cookie;
8515 
8516 	if (!current->mm)
8517 		user = false;
8518 
8519 	if (!kernel && !user)
8520 		return &__empty_callchain;
8521 
8522 	if (!(user && defer_user && !crosstask &&
8523 	      unwind_deferred_request(&perf_unwind_work, &defer_cookie) >= 0))
8524 		defer_cookie = 0;
8525 
8526 	callchain = get_perf_callchain(regs, kernel, user, max_stack,
8527 				       crosstask, true, defer_cookie);
8528 
8529 	return callchain ?: &__empty_callchain;
8530 }
8531 
__cond_set(u64 flags,u64 s,u64 d)8532 static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
8533 {
8534 	return d * !!(flags & s);
8535 }
8536 
perf_prepare_sample(struct perf_sample_data * data,struct perf_event * event,struct pt_regs * regs)8537 void perf_prepare_sample(struct perf_sample_data *data,
8538 			 struct perf_event *event,
8539 			 struct pt_regs *regs)
8540 {
8541 	u64 sample_type = event->attr.sample_type;
8542 	u64 filtered_sample_type;
8543 
8544 	/*
8545 	 * Add the sample flags that are dependent to others.  And clear the
8546 	 * sample flags that have already been done by the PMU driver.
8547 	 */
8548 	filtered_sample_type = sample_type;
8549 	filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_CODE_PAGE_SIZE,
8550 					   PERF_SAMPLE_IP);
8551 	filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_DATA_PAGE_SIZE |
8552 					   PERF_SAMPLE_PHYS_ADDR, PERF_SAMPLE_ADDR);
8553 	filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
8554 					   PERF_SAMPLE_REGS_USER);
8555 	filtered_sample_type &= ~data->sample_flags;
8556 
8557 	if (filtered_sample_type == 0) {
8558 		/* Make sure it has the correct data->type for output */
8559 		data->type = event->attr.sample_type;
8560 		return;
8561 	}
8562 
8563 	__perf_event_header__init_id(data, event, filtered_sample_type);
8564 
8565 	if (filtered_sample_type & PERF_SAMPLE_IP) {
8566 		data->ip = perf_instruction_pointer(event, regs);
8567 		data->sample_flags |= PERF_SAMPLE_IP;
8568 	}
8569 
8570 	if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN)
8571 		perf_sample_save_callchain(data, event, regs);
8572 
8573 	if (filtered_sample_type & PERF_SAMPLE_RAW) {
8574 		data->raw = NULL;
8575 		data->dyn_size += sizeof(u64);
8576 		data->sample_flags |= PERF_SAMPLE_RAW;
8577 	}
8578 
8579 	if (filtered_sample_type & PERF_SAMPLE_BRANCH_STACK) {
8580 		data->br_stack = NULL;
8581 		data->dyn_size += sizeof(u64);
8582 		data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
8583 	}
8584 
8585 	if (filtered_sample_type & PERF_SAMPLE_REGS_USER)
8586 		perf_sample_regs_user(&data->regs_user, regs);
8587 
8588 	/*
8589 	 * It cannot use the filtered_sample_type here as REGS_USER can be set
8590 	 * by STACK_USER (using __cond_set() above) and we don't want to update
8591 	 * the dyn_size if it's not requested by users.
8592 	 */
8593 	if ((sample_type & ~data->sample_flags) & PERF_SAMPLE_REGS_USER) {
8594 		/* regs dump ABI info */
8595 		int size = sizeof(u64);
8596 
8597 		if (data->regs_user.regs) {
8598 			u64 mask = event->attr.sample_regs_user;
8599 			size += hweight64(mask) * sizeof(u64);
8600 		}
8601 
8602 		data->dyn_size += size;
8603 		data->sample_flags |= PERF_SAMPLE_REGS_USER;
8604 	}
8605 
8606 	if (filtered_sample_type & PERF_SAMPLE_STACK_USER) {
8607 		/*
8608 		 * Either we need PERF_SAMPLE_STACK_USER bit to be always
8609 		 * processed as the last one or have additional check added
8610 		 * in case new sample type is added, because we could eat
8611 		 * up the rest of the sample size.
8612 		 */
8613 		u16 stack_size = event->attr.sample_stack_user;
8614 		u16 header_size = perf_sample_data_size(data, event);
8615 		u16 size = sizeof(u64);
8616 
8617 		stack_size = perf_sample_ustack_size(stack_size, header_size,
8618 						     data->regs_user.regs);
8619 
8620 		/*
8621 		 * If there is something to dump, add space for the dump
8622 		 * itself and for the field that tells the dynamic size,
8623 		 * which is how many have been actually dumped.
8624 		 */
8625 		if (stack_size)
8626 			size += sizeof(u64) + stack_size;
8627 
8628 		data->stack_user_size = stack_size;
8629 		data->dyn_size += size;
8630 		data->sample_flags |= PERF_SAMPLE_STACK_USER;
8631 	}
8632 
8633 	if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
8634 		data->weight.full = 0;
8635 		data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
8636 	}
8637 
8638 	if (filtered_sample_type & PERF_SAMPLE_DATA_SRC) {
8639 		data->data_src.val = PERF_MEM_NA;
8640 		data->sample_flags |= PERF_SAMPLE_DATA_SRC;
8641 	}
8642 
8643 	if (filtered_sample_type & PERF_SAMPLE_TRANSACTION) {
8644 		data->txn = 0;
8645 		data->sample_flags |= PERF_SAMPLE_TRANSACTION;
8646 	}
8647 
8648 	if (filtered_sample_type & PERF_SAMPLE_ADDR) {
8649 		data->addr = 0;
8650 		data->sample_flags |= PERF_SAMPLE_ADDR;
8651 	}
8652 
8653 	if (filtered_sample_type & PERF_SAMPLE_REGS_INTR) {
8654 		/* regs dump ABI info */
8655 		int size = sizeof(u64);
8656 
8657 		perf_sample_regs_intr(&data->regs_intr, regs);
8658 
8659 		if (data->regs_intr.regs) {
8660 			u64 mask = event->attr.sample_regs_intr;
8661 
8662 			size += hweight64(mask) * sizeof(u64);
8663 		}
8664 
8665 		data->dyn_size += size;
8666 		data->sample_flags |= PERF_SAMPLE_REGS_INTR;
8667 	}
8668 
8669 	if (filtered_sample_type & PERF_SAMPLE_PHYS_ADDR) {
8670 		data->phys_addr = perf_virt_to_phys(data->addr);
8671 		data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
8672 	}
8673 
8674 #ifdef CONFIG_CGROUP_PERF
8675 	if (filtered_sample_type & PERF_SAMPLE_CGROUP) {
8676 		struct cgroup *cgrp;
8677 
8678 		/* protected by RCU */
8679 		cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
8680 		data->cgroup = cgroup_id(cgrp);
8681 		data->sample_flags |= PERF_SAMPLE_CGROUP;
8682 	}
8683 #endif
8684 
8685 	/*
8686 	 * PERF_DATA_PAGE_SIZE requires PERF_SAMPLE_ADDR. If the user doesn't
8687 	 * require PERF_SAMPLE_ADDR, kernel implicitly retrieve the data->addr,
8688 	 * but the value will not dump to the userspace.
8689 	 */
8690 	if (filtered_sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) {
8691 		data->data_page_size = perf_get_page_size(data->addr);
8692 		data->sample_flags |= PERF_SAMPLE_DATA_PAGE_SIZE;
8693 	}
8694 
8695 	if (filtered_sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) {
8696 		data->code_page_size = perf_get_page_size(data->ip);
8697 		data->sample_flags |= PERF_SAMPLE_CODE_PAGE_SIZE;
8698 	}
8699 
8700 	if (filtered_sample_type & PERF_SAMPLE_AUX) {
8701 		u64 size;
8702 		u16 header_size = perf_sample_data_size(data, event);
8703 
8704 		header_size += sizeof(u64); /* size */
8705 
8706 		/*
8707 		 * Given the 16bit nature of header::size, an AUX sample can
8708 		 * easily overflow it, what with all the preceding sample bits.
8709 		 * Make sure this doesn't happen by using up to U16_MAX bytes
8710 		 * per sample in total (rounded down to 8 byte boundary).
8711 		 */
8712 		size = min_t(size_t, U16_MAX - header_size,
8713 			     event->attr.aux_sample_size);
8714 		size = rounddown(size, 8);
8715 		size = perf_prepare_sample_aux(event, data, size);
8716 
8717 		WARN_ON_ONCE(size + header_size > U16_MAX);
8718 		data->dyn_size += size + sizeof(u64); /* size above */
8719 		data->sample_flags |= PERF_SAMPLE_AUX;
8720 	}
8721 }
8722 
perf_prepare_header(struct perf_event_header * header,struct perf_sample_data * data,struct perf_event * event,struct pt_regs * regs)8723 void perf_prepare_header(struct perf_event_header *header,
8724 			 struct perf_sample_data *data,
8725 			 struct perf_event *event,
8726 			 struct pt_regs *regs)
8727 {
8728 	header->type = PERF_RECORD_SAMPLE;
8729 	header->size = perf_sample_data_size(data, event);
8730 	header->misc = perf_misc_flags(event, regs);
8731 
8732 	/*
8733 	 * If you're adding more sample types here, you likely need to do
8734 	 * something about the overflowing header::size, like repurpose the
8735 	 * lowest 3 bits of size, which should be always zero at the moment.
8736 	 * This raises a more important question, do we really need 512k sized
8737 	 * samples and why, so good argumentation is in order for whatever you
8738 	 * do here next.
8739 	 */
8740 	WARN_ON_ONCE(header->size & 7);
8741 }
8742 
__perf_event_aux_pause(struct perf_event * event,bool pause)8743 static void __perf_event_aux_pause(struct perf_event *event, bool pause)
8744 {
8745 	if (pause) {
8746 		if (!event->hw.aux_paused) {
8747 			event->hw.aux_paused = 1;
8748 			event->pmu->stop(event, PERF_EF_PAUSE);
8749 		}
8750 	} else {
8751 		if (event->hw.aux_paused) {
8752 			event->hw.aux_paused = 0;
8753 			event->pmu->start(event, PERF_EF_RESUME);
8754 		}
8755 	}
8756 }
8757 
perf_event_aux_pause(struct perf_event * event,bool pause)8758 static void perf_event_aux_pause(struct perf_event *event, bool pause)
8759 {
8760 	struct perf_buffer *rb;
8761 
8762 	if (WARN_ON_ONCE(!event))
8763 		return;
8764 
8765 	rb = ring_buffer_get(event);
8766 	if (!rb)
8767 		return;
8768 
8769 	scoped_guard (irqsave) {
8770 		/*
8771 		 * Guard against self-recursion here. Another event could trip
8772 		 * this same from NMI context.
8773 		 */
8774 		if (READ_ONCE(rb->aux_in_pause_resume))
8775 			break;
8776 
8777 		WRITE_ONCE(rb->aux_in_pause_resume, 1);
8778 		barrier();
8779 		__perf_event_aux_pause(event, pause);
8780 		barrier();
8781 		WRITE_ONCE(rb->aux_in_pause_resume, 0);
8782 	}
8783 	ring_buffer_put(rb);
8784 }
8785 
8786 static __always_inline int
__perf_event_output(struct perf_event * event,struct perf_sample_data * data,struct pt_regs * regs,int (* output_begin)(struct perf_output_handle *,struct perf_sample_data *,struct perf_event *,unsigned int))8787 __perf_event_output(struct perf_event *event,
8788 		    struct perf_sample_data *data,
8789 		    struct pt_regs *regs,
8790 		    int (*output_begin)(struct perf_output_handle *,
8791 					struct perf_sample_data *,
8792 					struct perf_event *,
8793 					unsigned int))
8794 {
8795 	struct perf_output_handle handle;
8796 	struct perf_event_header header;
8797 	int err;
8798 
8799 	/* protect the callchain buffers */
8800 	rcu_read_lock();
8801 
8802 	perf_prepare_sample(data, event, regs);
8803 	perf_prepare_header(&header, data, event, regs);
8804 
8805 	err = output_begin(&handle, data, event, header.size);
8806 	if (err)
8807 		goto exit;
8808 
8809 	perf_output_sample(&handle, &header, data, event);
8810 
8811 	perf_output_end(&handle);
8812 
8813 exit:
8814 	rcu_read_unlock();
8815 	return err;
8816 }
8817 
8818 void
perf_event_output_forward(struct perf_event * event,struct perf_sample_data * data,struct pt_regs * regs)8819 perf_event_output_forward(struct perf_event *event,
8820 			 struct perf_sample_data *data,
8821 			 struct pt_regs *regs)
8822 {
8823 	__perf_event_output(event, data, regs, perf_output_begin_forward);
8824 }
8825 
8826 void
perf_event_output_backward(struct perf_event * event,struct perf_sample_data * data,struct pt_regs * regs)8827 perf_event_output_backward(struct perf_event *event,
8828 			   struct perf_sample_data *data,
8829 			   struct pt_regs *regs)
8830 {
8831 	__perf_event_output(event, data, regs, perf_output_begin_backward);
8832 }
8833 
8834 int
perf_event_output(struct perf_event * event,struct perf_sample_data * data,struct pt_regs * regs)8835 perf_event_output(struct perf_event *event,
8836 		  struct perf_sample_data *data,
8837 		  struct pt_regs *regs)
8838 {
8839 	return __perf_event_output(event, data, regs, perf_output_begin);
8840 }
8841 
8842 /*
8843  * read event_id
8844  */
8845 
8846 struct perf_read_event {
8847 	struct perf_event_header	header;
8848 
8849 	u32				pid;
8850 	u32				tid;
8851 };
8852 
8853 static void
perf_event_read_event(struct perf_event * event,struct task_struct * task)8854 perf_event_read_event(struct perf_event *event,
8855 			struct task_struct *task)
8856 {
8857 	struct perf_output_handle handle;
8858 	struct perf_sample_data sample;
8859 	struct perf_read_event read_event = {
8860 		.header = {
8861 			.type = PERF_RECORD_READ,
8862 			.misc = 0,
8863 			.size = sizeof(read_event) + event->read_size,
8864 		},
8865 		.pid = perf_event_pid(event, task),
8866 		.tid = perf_event_tid(event, task),
8867 	};
8868 	int ret;
8869 
8870 	perf_event_header__init_id(&read_event.header, &sample, event);
8871 	ret = perf_output_begin(&handle, &sample, event, read_event.header.size);
8872 	if (ret)
8873 		return;
8874 
8875 	perf_output_put(&handle, read_event);
8876 	perf_output_read(&handle, event);
8877 	perf_event__output_id_sample(event, &handle, &sample);
8878 
8879 	perf_output_end(&handle);
8880 }
8881 
8882 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8883 
8884 static void
perf_iterate_ctx(struct perf_event_context * ctx,perf_iterate_f output,void * data,bool all)8885 perf_iterate_ctx(struct perf_event_context *ctx,
8886 		   perf_iterate_f output,
8887 		   void *data, bool all)
8888 {
8889 	struct perf_event *event;
8890 
8891 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
8892 		if (!all) {
8893 			if (event->state < PERF_EVENT_STATE_INACTIVE)
8894 				continue;
8895 			if (!event_filter_match(event))
8896 				continue;
8897 		}
8898 
8899 		output(event, data);
8900 	}
8901 }
8902 
perf_iterate_sb_cpu(perf_iterate_f output,void * data)8903 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
8904 {
8905 	struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
8906 	struct perf_event *event;
8907 
8908 	list_for_each_entry_rcu(event, &pel->list, sb_list) {
8909 		/*
8910 		 * Skip events that are not fully formed yet; ensure that
8911 		 * if we observe event->ctx, both event and ctx will be
8912 		 * complete enough. See perf_install_in_context().
8913 		 */
8914 		if (!smp_load_acquire(&event->ctx))
8915 			continue;
8916 
8917 		if (event->state < PERF_EVENT_STATE_INACTIVE)
8918 			continue;
8919 		if (!event_filter_match(event))
8920 			continue;
8921 		output(event, data);
8922 	}
8923 }
8924 
8925 /*
8926  * Iterate all events that need to receive side-band events.
8927  *
8928  * For new callers; ensure that account_pmu_sb_event() includes
8929  * your event, otherwise it might not get delivered.
8930  */
8931 static void
perf_iterate_sb(perf_iterate_f output,void * data,struct perf_event_context * task_ctx)8932 perf_iterate_sb(perf_iterate_f output, void *data,
8933 	       struct perf_event_context *task_ctx)
8934 {
8935 	struct perf_event_context *ctx;
8936 
8937 	rcu_read_lock();
8938 	preempt_disable();
8939 
8940 	/*
8941 	 * If we have task_ctx != NULL we only notify the task context itself.
8942 	 * The task_ctx is set only for EXIT events before releasing task
8943 	 * context.
8944 	 */
8945 	if (task_ctx) {
8946 		perf_iterate_ctx(task_ctx, output, data, false);
8947 		goto done;
8948 	}
8949 
8950 	perf_iterate_sb_cpu(output, data);
8951 
8952 	ctx = rcu_dereference(current->perf_event_ctxp);
8953 	if (ctx)
8954 		perf_iterate_ctx(ctx, output, data, false);
8955 done:
8956 	preempt_enable();
8957 	rcu_read_unlock();
8958 }
8959 
8960 /*
8961  * Clear all file-based filters at exec, they'll have to be
8962  * re-instated when/if these objects are mmapped again.
8963  */
perf_event_addr_filters_exec(struct perf_event * event,void * data)8964 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
8965 {
8966 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8967 	struct perf_addr_filter *filter;
8968 	unsigned int restart = 0, count = 0;
8969 	unsigned long flags;
8970 
8971 	if (!has_addr_filter(event))
8972 		return;
8973 
8974 	raw_spin_lock_irqsave(&ifh->lock, flags);
8975 	list_for_each_entry(filter, &ifh->list, entry) {
8976 		if (filter->path.dentry) {
8977 			event->addr_filter_ranges[count].start = 0;
8978 			event->addr_filter_ranges[count].size = 0;
8979 			restart++;
8980 		}
8981 
8982 		count++;
8983 	}
8984 
8985 	if (restart)
8986 		event->addr_filters_gen++;
8987 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
8988 
8989 	if (restart)
8990 		perf_event_stop(event, 1);
8991 }
8992 
perf_event_exec(void)8993 void perf_event_exec(void)
8994 {
8995 	struct perf_event_context *ctx;
8996 
8997 	ctx = perf_pin_task_context(current);
8998 	if (!ctx)
8999 		return;
9000 
9001 	perf_event_enable_on_exec(ctx);
9002 	perf_event_remove_on_exec(ctx);
9003 	scoped_guard(rcu)
9004 		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
9005 
9006 	perf_unpin_context(ctx);
9007 	put_ctx(ctx);
9008 }
9009 
9010 struct remote_output {
9011 	struct perf_buffer	*rb;
9012 	int			err;
9013 };
9014 
__perf_event_output_stop(struct perf_event * event,void * data)9015 static void __perf_event_output_stop(struct perf_event *event, void *data)
9016 {
9017 	struct perf_event *parent = event->parent;
9018 	struct remote_output *ro = data;
9019 	struct perf_buffer *rb = ro->rb;
9020 	struct stop_event_data sd = {
9021 		.event	= event,
9022 	};
9023 
9024 	if (!has_aux(event))
9025 		return;
9026 
9027 	if (!parent)
9028 		parent = event;
9029 
9030 	/*
9031 	 * In case of inheritance, it will be the parent that links to the
9032 	 * ring-buffer, but it will be the child that's actually using it.
9033 	 *
9034 	 * We are using event::rb to determine if the event should be stopped,
9035 	 * however this may race with ring_buffer_attach() (through set_output),
9036 	 * which will make us skip the event that actually needs to be stopped.
9037 	 * So ring_buffer_attach() has to stop an aux event before re-assigning
9038 	 * its rb pointer.
9039 	 */
9040 	if (rcu_dereference(parent->rb) == rb)
9041 		ro->err = __perf_event_stop(&sd);
9042 }
9043 
__perf_pmu_output_stop(void * info)9044 static int __perf_pmu_output_stop(void *info)
9045 {
9046 	struct perf_event *event = info;
9047 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
9048 	struct remote_output ro = {
9049 		.rb	= event->rb,
9050 	};
9051 
9052 	rcu_read_lock();
9053 	perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
9054 	if (cpuctx->task_ctx)
9055 		perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
9056 				   &ro, false);
9057 	rcu_read_unlock();
9058 
9059 	return ro.err;
9060 }
9061 
perf_pmu_output_stop(struct perf_event * event)9062 static void perf_pmu_output_stop(struct perf_event *event)
9063 {
9064 	struct perf_event *iter;
9065 	int err, cpu;
9066 
9067 restart:
9068 	rcu_read_lock();
9069 	list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
9070 		/*
9071 		 * For per-CPU events, we need to make sure that neither they
9072 		 * nor their children are running; for cpu==-1 events it's
9073 		 * sufficient to stop the event itself if it's active, since
9074 		 * it can't have children.
9075 		 */
9076 		cpu = iter->cpu;
9077 		if (cpu == -1)
9078 			cpu = READ_ONCE(iter->oncpu);
9079 
9080 		if (cpu == -1)
9081 			continue;
9082 
9083 		err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
9084 		if (err == -EAGAIN) {
9085 			rcu_read_unlock();
9086 			goto restart;
9087 		}
9088 	}
9089 	rcu_read_unlock();
9090 }
9091 
9092 /*
9093  * task tracking -- fork/exit
9094  *
9095  * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
9096  */
9097 
9098 struct perf_task_event {
9099 	struct task_struct		*task;
9100 	struct perf_event_context	*task_ctx;
9101 
9102 	struct {
9103 		struct perf_event_header	header;
9104 
9105 		u32				pid;
9106 		u32				ppid;
9107 		u32				tid;
9108 		u32				ptid;
9109 		u64				time;
9110 	} event_id;
9111 };
9112 
perf_event_task_match(struct perf_event * event)9113 static int perf_event_task_match(struct perf_event *event)
9114 {
9115 	return event->attr.comm  || event->attr.mmap ||
9116 	       event->attr.mmap2 || event->attr.mmap_data ||
9117 	       event->attr.task;
9118 }
9119 
perf_event_task_output(struct perf_event * event,void * data)9120 static void perf_event_task_output(struct perf_event *event,
9121 				   void *data)
9122 {
9123 	struct perf_task_event *task_event = data;
9124 	struct perf_output_handle handle;
9125 	struct perf_sample_data	sample;
9126 	struct task_struct *task = task_event->task;
9127 	int ret, size = task_event->event_id.header.size;
9128 
9129 	if (!perf_event_task_match(event))
9130 		return;
9131 
9132 	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
9133 
9134 	ret = perf_output_begin(&handle, &sample, event,
9135 				task_event->event_id.header.size);
9136 	if (ret)
9137 		goto out;
9138 
9139 	task_event->event_id.pid = perf_event_pid(event, task);
9140 	task_event->event_id.tid = perf_event_tid(event, task);
9141 
9142 	if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
9143 		task_event->event_id.ppid = perf_event_pid(event,
9144 							task->real_parent);
9145 		task_event->event_id.ptid = perf_event_pid(event,
9146 							task->real_parent);
9147 	} else {  /* PERF_RECORD_FORK */
9148 		task_event->event_id.ppid = perf_event_pid(event, current);
9149 		task_event->event_id.ptid = perf_event_tid(event, current);
9150 	}
9151 
9152 	task_event->event_id.time = perf_event_clock(event);
9153 
9154 	perf_output_put(&handle, task_event->event_id);
9155 
9156 	perf_event__output_id_sample(event, &handle, &sample);
9157 
9158 	perf_output_end(&handle);
9159 out:
9160 	task_event->event_id.header.size = size;
9161 }
9162 
perf_event_task(struct task_struct * task,struct perf_event_context * task_ctx,int new)9163 static void perf_event_task(struct task_struct *task,
9164 			      struct perf_event_context *task_ctx,
9165 			      int new)
9166 {
9167 	struct perf_task_event task_event;
9168 
9169 	if (!atomic_read(&nr_comm_events) &&
9170 	    !atomic_read(&nr_mmap_events) &&
9171 	    !atomic_read(&nr_task_events))
9172 		return;
9173 
9174 	task_event = (struct perf_task_event){
9175 		.task	  = task,
9176 		.task_ctx = task_ctx,
9177 		.event_id    = {
9178 			.header = {
9179 				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
9180 				.misc = 0,
9181 				.size = sizeof(task_event.event_id),
9182 			},
9183 			/* .pid  */
9184 			/* .ppid */
9185 			/* .tid  */
9186 			/* .ptid */
9187 			/* .time */
9188 		},
9189 	};
9190 
9191 	perf_iterate_sb(perf_event_task_output,
9192 		       &task_event,
9193 		       task_ctx);
9194 }
9195 
9196 /*
9197  * Allocate data for a new task when profiling system-wide
9198  * events which require PMU specific data
9199  */
9200 static void
perf_event_alloc_task_data(struct task_struct * child,struct task_struct * parent)9201 perf_event_alloc_task_data(struct task_struct *child,
9202 			   struct task_struct *parent)
9203 {
9204 	struct kmem_cache *ctx_cache = NULL;
9205 	struct perf_ctx_data *cd;
9206 
9207 	if (!refcount_read(&global_ctx_data_ref))
9208 		return;
9209 
9210 	scoped_guard (rcu) {
9211 		cd = rcu_dereference(parent->perf_ctx_data);
9212 		if (cd)
9213 			ctx_cache = cd->ctx_cache;
9214 	}
9215 
9216 	if (!ctx_cache)
9217 		return;
9218 
9219 	guard(percpu_read)(&global_ctx_data_rwsem);
9220 	scoped_guard (rcu) {
9221 		cd = rcu_dereference(child->perf_ctx_data);
9222 		if (!cd) {
9223 			/*
9224 			 * A system-wide event may be unaccount,
9225 			 * when attaching the perf_ctx_data.
9226 			 */
9227 			if (!refcount_read(&global_ctx_data_ref))
9228 				return;
9229 			goto attach;
9230 		}
9231 
9232 		if (!cd->global) {
9233 			cd->global = 1;
9234 			refcount_inc(&cd->refcount);
9235 		}
9236 	}
9237 
9238 	return;
9239 attach:
9240 	attach_task_ctx_data(child, ctx_cache, true, GFP_KERNEL);
9241 }
9242 
perf_event_fork(struct task_struct * task)9243 void perf_event_fork(struct task_struct *task)
9244 {
9245 	perf_event_task(task, NULL, 1);
9246 	perf_event_namespaces(task);
9247 	perf_event_alloc_task_data(task, current);
9248 }
9249 
9250 /*
9251  * comm tracking
9252  */
9253 
9254 struct perf_comm_event {
9255 	struct task_struct	*task;
9256 	char			*comm;
9257 	int			comm_size;
9258 
9259 	struct {
9260 		struct perf_event_header	header;
9261 
9262 		u32				pid;
9263 		u32				tid;
9264 	} event_id;
9265 };
9266 
perf_event_comm_match(struct perf_event * event)9267 static int perf_event_comm_match(struct perf_event *event)
9268 {
9269 	return event->attr.comm;
9270 }
9271 
perf_event_comm_output(struct perf_event * event,void * data)9272 static void perf_event_comm_output(struct perf_event *event,
9273 				   void *data)
9274 {
9275 	struct perf_comm_event *comm_event = data;
9276 	struct perf_output_handle handle;
9277 	struct perf_sample_data sample;
9278 	int size = comm_event->event_id.header.size;
9279 	int ret;
9280 
9281 	if (!perf_event_comm_match(event))
9282 		return;
9283 
9284 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
9285 	ret = perf_output_begin(&handle, &sample, event,
9286 				comm_event->event_id.header.size);
9287 
9288 	if (ret)
9289 		goto out;
9290 
9291 	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
9292 	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
9293 
9294 	perf_output_put(&handle, comm_event->event_id);
9295 	__output_copy(&handle, comm_event->comm,
9296 				   comm_event->comm_size);
9297 
9298 	perf_event__output_id_sample(event, &handle, &sample);
9299 
9300 	perf_output_end(&handle);
9301 out:
9302 	comm_event->event_id.header.size = size;
9303 }
9304 
perf_event_comm_event(struct perf_comm_event * comm_event)9305 static void perf_event_comm_event(struct perf_comm_event *comm_event)
9306 {
9307 	char comm[TASK_COMM_LEN];
9308 	unsigned int size;
9309 
9310 	memset(comm, 0, sizeof(comm));
9311 	strscpy(comm, comm_event->task->comm);
9312 	size = ALIGN(strlen(comm)+1, sizeof(u64));
9313 
9314 	comm_event->comm = comm;
9315 	comm_event->comm_size = size;
9316 
9317 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
9318 
9319 	perf_iterate_sb(perf_event_comm_output,
9320 		       comm_event,
9321 		       NULL);
9322 }
9323 
perf_event_comm(struct task_struct * task,bool exec)9324 void perf_event_comm(struct task_struct *task, bool exec)
9325 {
9326 	struct perf_comm_event comm_event;
9327 
9328 	if (!atomic_read(&nr_comm_events))
9329 		return;
9330 
9331 	comm_event = (struct perf_comm_event){
9332 		.task	= task,
9333 		/* .comm      */
9334 		/* .comm_size */
9335 		.event_id  = {
9336 			.header = {
9337 				.type = PERF_RECORD_COMM,
9338 				.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
9339 				/* .size */
9340 			},
9341 			/* .pid */
9342 			/* .tid */
9343 		},
9344 	};
9345 
9346 	perf_event_comm_event(&comm_event);
9347 }
9348 
9349 /*
9350  * namespaces tracking
9351  */
9352 
9353 struct perf_namespaces_event {
9354 	struct task_struct		*task;
9355 
9356 	struct {
9357 		struct perf_event_header	header;
9358 
9359 		u32				pid;
9360 		u32				tid;
9361 		u64				nr_namespaces;
9362 		struct perf_ns_link_info	link_info[NR_NAMESPACES];
9363 	} event_id;
9364 };
9365 
perf_event_namespaces_match(struct perf_event * event)9366 static int perf_event_namespaces_match(struct perf_event *event)
9367 {
9368 	return event->attr.namespaces;
9369 }
9370 
perf_event_namespaces_output(struct perf_event * event,void * data)9371 static void perf_event_namespaces_output(struct perf_event *event,
9372 					 void *data)
9373 {
9374 	struct perf_namespaces_event *namespaces_event = data;
9375 	struct perf_output_handle handle;
9376 	struct perf_sample_data sample;
9377 	u16 header_size = namespaces_event->event_id.header.size;
9378 	int ret;
9379 
9380 	if (!perf_event_namespaces_match(event))
9381 		return;
9382 
9383 	perf_event_header__init_id(&namespaces_event->event_id.header,
9384 				   &sample, event);
9385 	ret = perf_output_begin(&handle, &sample, event,
9386 				namespaces_event->event_id.header.size);
9387 	if (ret)
9388 		goto out;
9389 
9390 	namespaces_event->event_id.pid = perf_event_pid(event,
9391 							namespaces_event->task);
9392 	namespaces_event->event_id.tid = perf_event_tid(event,
9393 							namespaces_event->task);
9394 
9395 	perf_output_put(&handle, namespaces_event->event_id);
9396 
9397 	perf_event__output_id_sample(event, &handle, &sample);
9398 
9399 	perf_output_end(&handle);
9400 out:
9401 	namespaces_event->event_id.header.size = header_size;
9402 }
9403 
perf_fill_ns_link_info(struct perf_ns_link_info * ns_link_info,struct task_struct * task,const struct proc_ns_operations * ns_ops)9404 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
9405 				   struct task_struct *task,
9406 				   const struct proc_ns_operations *ns_ops)
9407 {
9408 	struct path ns_path;
9409 	struct inode *ns_inode;
9410 	int error;
9411 
9412 	error = ns_get_path(&ns_path, task, ns_ops);
9413 	if (!error) {
9414 		ns_inode = ns_path.dentry->d_inode;
9415 		ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
9416 		ns_link_info->ino = ns_inode->i_ino;
9417 		path_put(&ns_path);
9418 	}
9419 }
9420 
perf_event_namespaces(struct task_struct * task)9421 void perf_event_namespaces(struct task_struct *task)
9422 {
9423 	struct perf_namespaces_event namespaces_event;
9424 	struct perf_ns_link_info *ns_link_info;
9425 
9426 	if (!atomic_read(&nr_namespaces_events))
9427 		return;
9428 
9429 	namespaces_event = (struct perf_namespaces_event){
9430 		.task	= task,
9431 		.event_id  = {
9432 			.header = {
9433 				.type = PERF_RECORD_NAMESPACES,
9434 				.misc = 0,
9435 				.size = sizeof(namespaces_event.event_id),
9436 			},
9437 			/* .pid */
9438 			/* .tid */
9439 			.nr_namespaces = NR_NAMESPACES,
9440 			/* .link_info[NR_NAMESPACES] */
9441 		},
9442 	};
9443 
9444 	ns_link_info = namespaces_event.event_id.link_info;
9445 
9446 	perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
9447 			       task, &mntns_operations);
9448 
9449 #ifdef CONFIG_USER_NS
9450 	perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
9451 			       task, &userns_operations);
9452 #endif
9453 #ifdef CONFIG_NET_NS
9454 	perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
9455 			       task, &netns_operations);
9456 #endif
9457 #ifdef CONFIG_UTS_NS
9458 	perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
9459 			       task, &utsns_operations);
9460 #endif
9461 #ifdef CONFIG_IPC_NS
9462 	perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
9463 			       task, &ipcns_operations);
9464 #endif
9465 #ifdef CONFIG_PID_NS
9466 	perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
9467 			       task, &pidns_operations);
9468 #endif
9469 #ifdef CONFIG_CGROUPS
9470 	perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
9471 			       task, &cgroupns_operations);
9472 #endif
9473 
9474 	perf_iterate_sb(perf_event_namespaces_output,
9475 			&namespaces_event,
9476 			NULL);
9477 }
9478 
9479 /*
9480  * cgroup tracking
9481  */
9482 #ifdef CONFIG_CGROUP_PERF
9483 
9484 struct perf_cgroup_event {
9485 	char				*path;
9486 	int				path_size;
9487 	struct {
9488 		struct perf_event_header	header;
9489 		u64				id;
9490 		char				path[];
9491 	} event_id;
9492 };
9493 
perf_event_cgroup_match(struct perf_event * event)9494 static int perf_event_cgroup_match(struct perf_event *event)
9495 {
9496 	return event->attr.cgroup;
9497 }
9498 
perf_event_cgroup_output(struct perf_event * event,void * data)9499 static void perf_event_cgroup_output(struct perf_event *event, void *data)
9500 {
9501 	struct perf_cgroup_event *cgroup_event = data;
9502 	struct perf_output_handle handle;
9503 	struct perf_sample_data sample;
9504 	u16 header_size = cgroup_event->event_id.header.size;
9505 	int ret;
9506 
9507 	if (!perf_event_cgroup_match(event))
9508 		return;
9509 
9510 	perf_event_header__init_id(&cgroup_event->event_id.header,
9511 				   &sample, event);
9512 	ret = perf_output_begin(&handle, &sample, event,
9513 				cgroup_event->event_id.header.size);
9514 	if (ret)
9515 		goto out;
9516 
9517 	perf_output_put(&handle, cgroup_event->event_id);
9518 	__output_copy(&handle, cgroup_event->path, cgroup_event->path_size);
9519 
9520 	perf_event__output_id_sample(event, &handle, &sample);
9521 
9522 	perf_output_end(&handle);
9523 out:
9524 	cgroup_event->event_id.header.size = header_size;
9525 }
9526 
perf_event_cgroup(struct cgroup * cgrp)9527 static void perf_event_cgroup(struct cgroup *cgrp)
9528 {
9529 	struct perf_cgroup_event cgroup_event;
9530 	char path_enomem[16] = "//enomem";
9531 	char *pathname;
9532 	size_t size;
9533 
9534 	if (!atomic_read(&nr_cgroup_events))
9535 		return;
9536 
9537 	cgroup_event = (struct perf_cgroup_event){
9538 		.event_id  = {
9539 			.header = {
9540 				.type = PERF_RECORD_CGROUP,
9541 				.misc = 0,
9542 				.size = sizeof(cgroup_event.event_id),
9543 			},
9544 			.id = cgroup_id(cgrp),
9545 		},
9546 	};
9547 
9548 	pathname = kmalloc(PATH_MAX, GFP_KERNEL);
9549 	if (pathname == NULL) {
9550 		cgroup_event.path = path_enomem;
9551 	} else {
9552 		/* just to be sure to have enough space for alignment */
9553 		cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64));
9554 		cgroup_event.path = pathname;
9555 	}
9556 
9557 	/*
9558 	 * Since our buffer works in 8 byte units we need to align our string
9559 	 * size to a multiple of 8. However, we must guarantee the tail end is
9560 	 * zero'd out to avoid leaking random bits to userspace.
9561 	 */
9562 	size = strlen(cgroup_event.path) + 1;
9563 	while (!IS_ALIGNED(size, sizeof(u64)))
9564 		cgroup_event.path[size++] = '\0';
9565 
9566 	cgroup_event.event_id.header.size += size;
9567 	cgroup_event.path_size = size;
9568 
9569 	perf_iterate_sb(perf_event_cgroup_output,
9570 			&cgroup_event,
9571 			NULL);
9572 
9573 	kfree(pathname);
9574 }
9575 
9576 #endif
9577 
9578 /*
9579  * mmap tracking
9580  */
9581 
9582 struct perf_mmap_event {
9583 	struct vm_area_struct	*vma;
9584 
9585 	const char		*file_name;
9586 	int			file_size;
9587 	int			maj, min;
9588 	u64			ino;
9589 	u64			ino_generation;
9590 	u32			prot, flags;
9591 	u8			build_id[BUILD_ID_SIZE_MAX];
9592 	u32			build_id_size;
9593 
9594 	struct {
9595 		struct perf_event_header	header;
9596 
9597 		u32				pid;
9598 		u32				tid;
9599 		u64				start;
9600 		u64				len;
9601 		u64				pgoff;
9602 	} event_id;
9603 };
9604 
perf_event_mmap_match(struct perf_event * event,void * data)9605 static int perf_event_mmap_match(struct perf_event *event,
9606 				 void *data)
9607 {
9608 	struct perf_mmap_event *mmap_event = data;
9609 	struct vm_area_struct *vma = mmap_event->vma;
9610 	int executable = vma->vm_flags & VM_EXEC;
9611 
9612 	return (!executable && event->attr.mmap_data) ||
9613 	       (executable && (event->attr.mmap || event->attr.mmap2));
9614 }
9615 
perf_event_mmap_output(struct perf_event * event,void * data)9616 static void perf_event_mmap_output(struct perf_event *event,
9617 				   void *data)
9618 {
9619 	struct perf_mmap_event *mmap_event = data;
9620 	struct perf_output_handle handle;
9621 	struct perf_sample_data sample;
9622 	int size = mmap_event->event_id.header.size;
9623 	u32 type = mmap_event->event_id.header.type;
9624 	bool use_build_id;
9625 	int ret;
9626 
9627 	if (!perf_event_mmap_match(event, data))
9628 		return;
9629 
9630 	if (event->attr.mmap2) {
9631 		mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
9632 		mmap_event->event_id.header.size += sizeof(mmap_event->maj);
9633 		mmap_event->event_id.header.size += sizeof(mmap_event->min);
9634 		mmap_event->event_id.header.size += sizeof(mmap_event->ino);
9635 		mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
9636 		mmap_event->event_id.header.size += sizeof(mmap_event->prot);
9637 		mmap_event->event_id.header.size += sizeof(mmap_event->flags);
9638 	}
9639 
9640 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
9641 	ret = perf_output_begin(&handle, &sample, event,
9642 				mmap_event->event_id.header.size);
9643 	if (ret)
9644 		goto out;
9645 
9646 	mmap_event->event_id.pid = perf_event_pid(event, current);
9647 	mmap_event->event_id.tid = perf_event_tid(event, current);
9648 
9649 	use_build_id = event->attr.build_id && mmap_event->build_id_size;
9650 
9651 	if (event->attr.mmap2 && use_build_id)
9652 		mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
9653 
9654 	perf_output_put(&handle, mmap_event->event_id);
9655 
9656 	if (event->attr.mmap2) {
9657 		if (use_build_id) {
9658 			u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 };
9659 
9660 			__output_copy(&handle, size, 4);
9661 			__output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX);
9662 		} else {
9663 			perf_output_put(&handle, mmap_event->maj);
9664 			perf_output_put(&handle, mmap_event->min);
9665 			perf_output_put(&handle, mmap_event->ino);
9666 			perf_output_put(&handle, mmap_event->ino_generation);
9667 		}
9668 		perf_output_put(&handle, mmap_event->prot);
9669 		perf_output_put(&handle, mmap_event->flags);
9670 	}
9671 
9672 	__output_copy(&handle, mmap_event->file_name,
9673 				   mmap_event->file_size);
9674 
9675 	perf_event__output_id_sample(event, &handle, &sample);
9676 
9677 	perf_output_end(&handle);
9678 out:
9679 	mmap_event->event_id.header.size = size;
9680 	mmap_event->event_id.header.type = type;
9681 }
9682 
perf_event_mmap_event(struct perf_mmap_event * mmap_event)9683 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
9684 {
9685 	struct vm_area_struct *vma = mmap_event->vma;
9686 	struct file *file = vma->vm_file;
9687 	int maj = 0, min = 0;
9688 	u64 ino = 0, gen = 0;
9689 	u32 prot = 0, flags = 0;
9690 	unsigned int size;
9691 	char tmp[16];
9692 	char *buf = NULL;
9693 	char *name = NULL;
9694 
9695 	if (vma->vm_flags & VM_READ)
9696 		prot |= PROT_READ;
9697 	if (vma->vm_flags & VM_WRITE)
9698 		prot |= PROT_WRITE;
9699 	if (vma->vm_flags & VM_EXEC)
9700 		prot |= PROT_EXEC;
9701 
9702 	if (vma->vm_flags & VM_MAYSHARE)
9703 		flags = MAP_SHARED;
9704 	else
9705 		flags = MAP_PRIVATE;
9706 
9707 	if (vma->vm_flags & VM_LOCKED)
9708 		flags |= MAP_LOCKED;
9709 	if (is_vm_hugetlb_page(vma))
9710 		flags |= MAP_HUGETLB;
9711 
9712 	if (file) {
9713 		const struct inode *inode;
9714 		dev_t dev;
9715 
9716 		buf = kmalloc(PATH_MAX, GFP_KERNEL);
9717 		if (!buf) {
9718 			name = "//enomem";
9719 			goto cpy_name;
9720 		}
9721 		/*
9722 		 * d_path() works from the end of the rb backwards, so we
9723 		 * need to add enough zero bytes after the string to handle
9724 		 * the 64bit alignment we do later.
9725 		 */
9726 		name = d_path(file_user_path(file), buf, PATH_MAX - sizeof(u64));
9727 		if (IS_ERR(name)) {
9728 			name = "//toolong";
9729 			goto cpy_name;
9730 		}
9731 		inode = file_user_inode(vma->vm_file);
9732 		dev = inode->i_sb->s_dev;
9733 		ino = inode->i_ino;
9734 		gen = inode->i_generation;
9735 		maj = MAJOR(dev);
9736 		min = MINOR(dev);
9737 
9738 		goto got_name;
9739 	} else {
9740 		if (vma->vm_ops && vma->vm_ops->name)
9741 			name = (char *) vma->vm_ops->name(vma);
9742 		if (!name)
9743 			name = (char *)arch_vma_name(vma);
9744 		if (!name) {
9745 			if (vma_is_initial_heap(vma))
9746 				name = "[heap]";
9747 			else if (vma_is_initial_stack(vma))
9748 				name = "[stack]";
9749 			else
9750 				name = "//anon";
9751 		}
9752 	}
9753 
9754 cpy_name:
9755 	strscpy(tmp, name);
9756 	name = tmp;
9757 got_name:
9758 	/*
9759 	 * Since our buffer works in 8 byte units we need to align our string
9760 	 * size to a multiple of 8. However, we must guarantee the tail end is
9761 	 * zero'd out to avoid leaking random bits to userspace.
9762 	 */
9763 	size = strlen(name)+1;
9764 	while (!IS_ALIGNED(size, sizeof(u64)))
9765 		name[size++] = '\0';
9766 
9767 	mmap_event->file_name = name;
9768 	mmap_event->file_size = size;
9769 	mmap_event->maj = maj;
9770 	mmap_event->min = min;
9771 	mmap_event->ino = ino;
9772 	mmap_event->ino_generation = gen;
9773 	mmap_event->prot = prot;
9774 	mmap_event->flags = flags;
9775 
9776 	if (!(vma->vm_flags & VM_EXEC))
9777 		mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
9778 
9779 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
9780 
9781 	if (atomic_read(&nr_build_id_events))
9782 		build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size);
9783 
9784 	perf_iterate_sb(perf_event_mmap_output,
9785 		       mmap_event,
9786 		       NULL);
9787 
9788 	kfree(buf);
9789 }
9790 
9791 /*
9792  * Check whether inode and address range match filter criteria.
9793  */
perf_addr_filter_match(struct perf_addr_filter * filter,struct file * file,unsigned long offset,unsigned long size)9794 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
9795 				     struct file *file, unsigned long offset,
9796 				     unsigned long size)
9797 {
9798 	/* d_inode(NULL) won't be equal to any mapped user-space file */
9799 	if (!filter->path.dentry)
9800 		return false;
9801 
9802 	if (d_inode(filter->path.dentry) != file_user_inode(file))
9803 		return false;
9804 
9805 	if (filter->offset > offset + size)
9806 		return false;
9807 
9808 	if (filter->offset + filter->size < offset)
9809 		return false;
9810 
9811 	return true;
9812 }
9813 
perf_addr_filter_vma_adjust(struct perf_addr_filter * filter,struct vm_area_struct * vma,struct perf_addr_filter_range * fr)9814 static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
9815 					struct vm_area_struct *vma,
9816 					struct perf_addr_filter_range *fr)
9817 {
9818 	unsigned long vma_size = vma->vm_end - vma->vm_start;
9819 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
9820 	struct file *file = vma->vm_file;
9821 
9822 	if (!perf_addr_filter_match(filter, file, off, vma_size))
9823 		return false;
9824 
9825 	if (filter->offset < off) {
9826 		fr->start = vma->vm_start;
9827 		fr->size = min(vma_size, filter->size - (off - filter->offset));
9828 	} else {
9829 		fr->start = vma->vm_start + filter->offset - off;
9830 		fr->size = min(vma->vm_end - fr->start, filter->size);
9831 	}
9832 
9833 	return true;
9834 }
9835 
__perf_addr_filters_adjust(struct perf_event * event,void * data)9836 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
9837 {
9838 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
9839 	struct vm_area_struct *vma = data;
9840 	struct perf_addr_filter *filter;
9841 	unsigned int restart = 0, count = 0;
9842 	unsigned long flags;
9843 
9844 	if (!has_addr_filter(event))
9845 		return;
9846 
9847 	if (!vma->vm_file)
9848 		return;
9849 
9850 	raw_spin_lock_irqsave(&ifh->lock, flags);
9851 	list_for_each_entry(filter, &ifh->list, entry) {
9852 		if (perf_addr_filter_vma_adjust(filter, vma,
9853 						&event->addr_filter_ranges[count]))
9854 			restart++;
9855 
9856 		count++;
9857 	}
9858 
9859 	if (restart)
9860 		event->addr_filters_gen++;
9861 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
9862 
9863 	if (restart)
9864 		perf_event_stop(event, 1);
9865 }
9866 
9867 /*
9868  * Adjust all task's events' filters to the new vma
9869  */
perf_addr_filters_adjust(struct vm_area_struct * vma)9870 static void perf_addr_filters_adjust(struct vm_area_struct *vma)
9871 {
9872 	struct perf_event_context *ctx;
9873 
9874 	/*
9875 	 * Data tracing isn't supported yet and as such there is no need
9876 	 * to keep track of anything that isn't related to executable code:
9877 	 */
9878 	if (!(vma->vm_flags & VM_EXEC))
9879 		return;
9880 
9881 	rcu_read_lock();
9882 	ctx = rcu_dereference(current->perf_event_ctxp);
9883 	if (ctx)
9884 		perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
9885 	rcu_read_unlock();
9886 }
9887 
perf_event_mmap(struct vm_area_struct * vma)9888 void perf_event_mmap(struct vm_area_struct *vma)
9889 {
9890 	struct perf_mmap_event mmap_event;
9891 
9892 	if (!atomic_read(&nr_mmap_events))
9893 		return;
9894 
9895 	mmap_event = (struct perf_mmap_event){
9896 		.vma	= vma,
9897 		/* .file_name */
9898 		/* .file_size */
9899 		.event_id  = {
9900 			.header = {
9901 				.type = PERF_RECORD_MMAP,
9902 				.misc = PERF_RECORD_MISC_USER,
9903 				/* .size */
9904 			},
9905 			/* .pid */
9906 			/* .tid */
9907 			.start  = vma->vm_start,
9908 			.len    = vma->vm_end - vma->vm_start,
9909 			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
9910 		},
9911 		/* .maj (attr_mmap2 only) */
9912 		/* .min (attr_mmap2 only) */
9913 		/* .ino (attr_mmap2 only) */
9914 		/* .ino_generation (attr_mmap2 only) */
9915 		/* .prot (attr_mmap2 only) */
9916 		/* .flags (attr_mmap2 only) */
9917 	};
9918 
9919 	perf_addr_filters_adjust(vma);
9920 	perf_event_mmap_event(&mmap_event);
9921 }
9922 
perf_event_aux_event(struct perf_event * event,unsigned long head,unsigned long size,u64 flags)9923 void perf_event_aux_event(struct perf_event *event, unsigned long head,
9924 			  unsigned long size, u64 flags)
9925 {
9926 	struct perf_output_handle handle;
9927 	struct perf_sample_data sample;
9928 	struct perf_aux_event {
9929 		struct perf_event_header	header;
9930 		u64				offset;
9931 		u64				size;
9932 		u64				flags;
9933 	} rec = {
9934 		.header = {
9935 			.type = PERF_RECORD_AUX,
9936 			.misc = 0,
9937 			.size = sizeof(rec),
9938 		},
9939 		.offset		= head,
9940 		.size		= size,
9941 		.flags		= flags,
9942 	};
9943 	int ret;
9944 
9945 	perf_event_header__init_id(&rec.header, &sample, event);
9946 	ret = perf_output_begin(&handle, &sample, event, rec.header.size);
9947 
9948 	if (ret)
9949 		return;
9950 
9951 	perf_output_put(&handle, rec);
9952 	perf_event__output_id_sample(event, &handle, &sample);
9953 
9954 	perf_output_end(&handle);
9955 }
9956 
9957 /*
9958  * Lost/dropped samples logging
9959  */
perf_log_lost_samples(struct perf_event * event,u64 lost)9960 void perf_log_lost_samples(struct perf_event *event, u64 lost)
9961 {
9962 	struct perf_output_handle handle;
9963 	struct perf_sample_data sample;
9964 	int ret;
9965 
9966 	struct {
9967 		struct perf_event_header	header;
9968 		u64				lost;
9969 	} lost_samples_event = {
9970 		.header = {
9971 			.type = PERF_RECORD_LOST_SAMPLES,
9972 			.misc = 0,
9973 			.size = sizeof(lost_samples_event),
9974 		},
9975 		.lost		= lost,
9976 	};
9977 
9978 	perf_event_header__init_id(&lost_samples_event.header, &sample, event);
9979 
9980 	ret = perf_output_begin(&handle, &sample, event,
9981 				lost_samples_event.header.size);
9982 	if (ret)
9983 		return;
9984 
9985 	perf_output_put(&handle, lost_samples_event);
9986 	perf_event__output_id_sample(event, &handle, &sample);
9987 	perf_output_end(&handle);
9988 }
9989 
9990 /*
9991  * context_switch tracking
9992  */
9993 
9994 struct perf_switch_event {
9995 	struct task_struct	*task;
9996 	struct task_struct	*next_prev;
9997 
9998 	struct {
9999 		struct perf_event_header	header;
10000 		u32				next_prev_pid;
10001 		u32				next_prev_tid;
10002 	} event_id;
10003 };
10004 
perf_event_switch_match(struct perf_event * event)10005 static int perf_event_switch_match(struct perf_event *event)
10006 {
10007 	return event->attr.context_switch;
10008 }
10009 
perf_event_switch_output(struct perf_event * event,void * data)10010 static void perf_event_switch_output(struct perf_event *event, void *data)
10011 {
10012 	struct perf_switch_event *se = data;
10013 	struct perf_output_handle handle;
10014 	struct perf_sample_data sample;
10015 	int ret;
10016 
10017 	if (!perf_event_switch_match(event))
10018 		return;
10019 
10020 	/* Only CPU-wide events are allowed to see next/prev pid/tid */
10021 	if (event->ctx->task) {
10022 		se->event_id.header.type = PERF_RECORD_SWITCH;
10023 		se->event_id.header.size = sizeof(se->event_id.header);
10024 	} else {
10025 		se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
10026 		se->event_id.header.size = sizeof(se->event_id);
10027 		se->event_id.next_prev_pid =
10028 					perf_event_pid(event, se->next_prev);
10029 		se->event_id.next_prev_tid =
10030 					perf_event_tid(event, se->next_prev);
10031 	}
10032 
10033 	perf_event_header__init_id(&se->event_id.header, &sample, event);
10034 
10035 	ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size);
10036 	if (ret)
10037 		return;
10038 
10039 	if (event->ctx->task)
10040 		perf_output_put(&handle, se->event_id.header);
10041 	else
10042 		perf_output_put(&handle, se->event_id);
10043 
10044 	perf_event__output_id_sample(event, &handle, &sample);
10045 
10046 	perf_output_end(&handle);
10047 }
10048 
perf_event_switch(struct task_struct * task,struct task_struct * next_prev,bool sched_in)10049 static void perf_event_switch(struct task_struct *task,
10050 			      struct task_struct *next_prev, bool sched_in)
10051 {
10052 	struct perf_switch_event switch_event;
10053 
10054 	/* N.B. caller checks nr_switch_events != 0 */
10055 
10056 	switch_event = (struct perf_switch_event){
10057 		.task		= task,
10058 		.next_prev	= next_prev,
10059 		.event_id	= {
10060 			.header = {
10061 				/* .type */
10062 				.misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
10063 				/* .size */
10064 			},
10065 			/* .next_prev_pid */
10066 			/* .next_prev_tid */
10067 		},
10068 	};
10069 
10070 	if (!sched_in && task_is_runnable(task)) {
10071 		switch_event.event_id.header.misc |=
10072 				PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
10073 	}
10074 
10075 	perf_iterate_sb(perf_event_switch_output, &switch_event, NULL);
10076 }
10077 
10078 /*
10079  * IRQ throttle logging
10080  */
10081 
perf_log_throttle(struct perf_event * event,int enable)10082 static void perf_log_throttle(struct perf_event *event, int enable)
10083 {
10084 	struct perf_output_handle handle;
10085 	struct perf_sample_data sample;
10086 	int ret;
10087 
10088 	struct {
10089 		struct perf_event_header	header;
10090 		u64				time;
10091 		u64				id;
10092 		u64				stream_id;
10093 	} throttle_event = {
10094 		.header = {
10095 			.type = PERF_RECORD_THROTTLE,
10096 			.misc = 0,
10097 			.size = sizeof(throttle_event),
10098 		},
10099 		.time		= perf_event_clock(event),
10100 		.id		= primary_event_id(event),
10101 		.stream_id	= event->id,
10102 	};
10103 
10104 	if (enable)
10105 		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
10106 
10107 	perf_event_header__init_id(&throttle_event.header, &sample, event);
10108 
10109 	ret = perf_output_begin(&handle, &sample, event,
10110 				throttle_event.header.size);
10111 	if (ret)
10112 		return;
10113 
10114 	perf_output_put(&handle, throttle_event);
10115 	perf_event__output_id_sample(event, &handle, &sample);
10116 	perf_output_end(&handle);
10117 }
10118 
10119 /*
10120  * ksymbol register/unregister tracking
10121  */
10122 
10123 struct perf_ksymbol_event {
10124 	const char	*name;
10125 	int		name_len;
10126 	struct {
10127 		struct perf_event_header        header;
10128 		u64				addr;
10129 		u32				len;
10130 		u16				ksym_type;
10131 		u16				flags;
10132 	} event_id;
10133 };
10134 
perf_event_ksymbol_match(struct perf_event * event)10135 static int perf_event_ksymbol_match(struct perf_event *event)
10136 {
10137 	return event->attr.ksymbol;
10138 }
10139 
perf_event_ksymbol_output(struct perf_event * event,void * data)10140 static void perf_event_ksymbol_output(struct perf_event *event, void *data)
10141 {
10142 	struct perf_ksymbol_event *ksymbol_event = data;
10143 	struct perf_output_handle handle;
10144 	struct perf_sample_data sample;
10145 	int ret;
10146 
10147 	if (!perf_event_ksymbol_match(event))
10148 		return;
10149 
10150 	perf_event_header__init_id(&ksymbol_event->event_id.header,
10151 				   &sample, event);
10152 	ret = perf_output_begin(&handle, &sample, event,
10153 				ksymbol_event->event_id.header.size);
10154 	if (ret)
10155 		return;
10156 
10157 	perf_output_put(&handle, ksymbol_event->event_id);
10158 	__output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
10159 	perf_event__output_id_sample(event, &handle, &sample);
10160 
10161 	perf_output_end(&handle);
10162 }
10163 
perf_event_ksymbol(u16 ksym_type,u64 addr,u32 len,bool unregister,const char * sym)10164 void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
10165 			const char *sym)
10166 {
10167 	struct perf_ksymbol_event ksymbol_event;
10168 	char name[KSYM_NAME_LEN];
10169 	u16 flags = 0;
10170 	int name_len;
10171 
10172 	if (!atomic_read(&nr_ksymbol_events))
10173 		return;
10174 
10175 	if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
10176 	    ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
10177 		goto err;
10178 
10179 	strscpy(name, sym);
10180 	name_len = strlen(name) + 1;
10181 	while (!IS_ALIGNED(name_len, sizeof(u64)))
10182 		name[name_len++] = '\0';
10183 	BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));
10184 
10185 	if (unregister)
10186 		flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;
10187 
10188 	ksymbol_event = (struct perf_ksymbol_event){
10189 		.name = name,
10190 		.name_len = name_len,
10191 		.event_id = {
10192 			.header = {
10193 				.type = PERF_RECORD_KSYMBOL,
10194 				.size = sizeof(ksymbol_event.event_id) +
10195 					name_len,
10196 			},
10197 			.addr = addr,
10198 			.len = len,
10199 			.ksym_type = ksym_type,
10200 			.flags = flags,
10201 		},
10202 	};
10203 
10204 	perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
10205 	return;
10206 err:
10207 	WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
10208 }
10209 
10210 /*
10211  * bpf program load/unload tracking
10212  */
10213 
10214 struct perf_bpf_event {
10215 	struct bpf_prog	*prog;
10216 	struct {
10217 		struct perf_event_header        header;
10218 		u16				type;
10219 		u16				flags;
10220 		u32				id;
10221 		u8				tag[BPF_TAG_SIZE];
10222 	} event_id;
10223 };
10224 
perf_event_bpf_match(struct perf_event * event)10225 static int perf_event_bpf_match(struct perf_event *event)
10226 {
10227 	return event->attr.bpf_event;
10228 }
10229 
perf_event_bpf_output(struct perf_event * event,void * data)10230 static void perf_event_bpf_output(struct perf_event *event, void *data)
10231 {
10232 	struct perf_bpf_event *bpf_event = data;
10233 	struct perf_output_handle handle;
10234 	struct perf_sample_data sample;
10235 	int ret;
10236 
10237 	if (!perf_event_bpf_match(event))
10238 		return;
10239 
10240 	perf_event_header__init_id(&bpf_event->event_id.header,
10241 				   &sample, event);
10242 	ret = perf_output_begin(&handle, &sample, event,
10243 				bpf_event->event_id.header.size);
10244 	if (ret)
10245 		return;
10246 
10247 	perf_output_put(&handle, bpf_event->event_id);
10248 	perf_event__output_id_sample(event, &handle, &sample);
10249 
10250 	perf_output_end(&handle);
10251 }
10252 
perf_event_bpf_emit_ksymbols(struct bpf_prog * prog,enum perf_bpf_event_type type)10253 static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
10254 					 enum perf_bpf_event_type type)
10255 {
10256 	bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
10257 	int i;
10258 
10259 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
10260 			   (u64)(unsigned long)prog->bpf_func,
10261 			   prog->jited_len, unregister,
10262 			   prog->aux->ksym.name);
10263 
10264 	for (i = 1; i < prog->aux->func_cnt; i++) {
10265 		struct bpf_prog *subprog = prog->aux->func[i];
10266 
10267 		perf_event_ksymbol(
10268 			PERF_RECORD_KSYMBOL_TYPE_BPF,
10269 			(u64)(unsigned long)subprog->bpf_func,
10270 			subprog->jited_len, unregister,
10271 			subprog->aux->ksym.name);
10272 	}
10273 }
10274 
perf_event_bpf_event(struct bpf_prog * prog,enum perf_bpf_event_type type,u16 flags)10275 void perf_event_bpf_event(struct bpf_prog *prog,
10276 			  enum perf_bpf_event_type type,
10277 			  u16 flags)
10278 {
10279 	struct perf_bpf_event bpf_event;
10280 
10281 	switch (type) {
10282 	case PERF_BPF_EVENT_PROG_LOAD:
10283 	case PERF_BPF_EVENT_PROG_UNLOAD:
10284 		if (atomic_read(&nr_ksymbol_events))
10285 			perf_event_bpf_emit_ksymbols(prog, type);
10286 		break;
10287 	default:
10288 		return;
10289 	}
10290 
10291 	if (!atomic_read(&nr_bpf_events))
10292 		return;
10293 
10294 	bpf_event = (struct perf_bpf_event){
10295 		.prog = prog,
10296 		.event_id = {
10297 			.header = {
10298 				.type = PERF_RECORD_BPF_EVENT,
10299 				.size = sizeof(bpf_event.event_id),
10300 			},
10301 			.type = type,
10302 			.flags = flags,
10303 			.id = prog->aux->id,
10304 		},
10305 	};
10306 
10307 	BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
10308 
10309 	memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
10310 	perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
10311 }
10312 
10313 struct perf_callchain_deferred_event {
10314 	struct unwind_stacktrace *trace;
10315 	struct {
10316 		struct perf_event_header	header;
10317 		u64				cookie;
10318 		u64				nr;
10319 		u64				ips[];
10320 	} event;
10321 };
10322 
perf_callchain_deferred_output(struct perf_event * event,void * data)10323 static void perf_callchain_deferred_output(struct perf_event *event, void *data)
10324 {
10325 	struct perf_callchain_deferred_event *deferred_event = data;
10326 	struct perf_output_handle handle;
10327 	struct perf_sample_data sample;
10328 	int ret, size = deferred_event->event.header.size;
10329 
10330 	if (!event->attr.defer_output)
10331 		return;
10332 
10333 	/* XXX do we really need sample_id_all for this ??? */
10334 	perf_event_header__init_id(&deferred_event->event.header, &sample, event);
10335 
10336 	ret = perf_output_begin(&handle, &sample, event,
10337 				deferred_event->event.header.size);
10338 	if (ret)
10339 		goto out;
10340 
10341 	perf_output_put(&handle, deferred_event->event);
10342 	for (int i = 0; i < deferred_event->trace->nr; i++) {
10343 		u64 entry = deferred_event->trace->entries[i];
10344 		perf_output_put(&handle, entry);
10345 	}
10346 	perf_event__output_id_sample(event, &handle, &sample);
10347 
10348 	perf_output_end(&handle);
10349 out:
10350 	deferred_event->event.header.size = size;
10351 }
10352 
perf_unwind_deferred_callback(struct unwind_work * work,struct unwind_stacktrace * trace,u64 cookie)10353 static void perf_unwind_deferred_callback(struct unwind_work *work,
10354 					 struct unwind_stacktrace *trace, u64 cookie)
10355 {
10356 	struct perf_callchain_deferred_event deferred_event = {
10357 		.trace = trace,
10358 		.event = {
10359 			.header = {
10360 				.type = PERF_RECORD_CALLCHAIN_DEFERRED,
10361 				.misc = PERF_RECORD_MISC_USER,
10362 				.size = sizeof(deferred_event.event) +
10363 					(trace->nr * sizeof(u64)),
10364 			},
10365 			.cookie = cookie,
10366 			.nr = trace->nr,
10367 		},
10368 	};
10369 
10370 	perf_iterate_sb(perf_callchain_deferred_output, &deferred_event, NULL);
10371 }
10372 
10373 struct perf_text_poke_event {
10374 	const void		*old_bytes;
10375 	const void		*new_bytes;
10376 	size_t			pad;
10377 	u16			old_len;
10378 	u16			new_len;
10379 
10380 	struct {
10381 		struct perf_event_header	header;
10382 
10383 		u64				addr;
10384 	} event_id;
10385 };
10386 
perf_event_text_poke_match(struct perf_event * event)10387 static int perf_event_text_poke_match(struct perf_event *event)
10388 {
10389 	return event->attr.text_poke;
10390 }
10391 
perf_event_text_poke_output(struct perf_event * event,void * data)10392 static void perf_event_text_poke_output(struct perf_event *event, void *data)
10393 {
10394 	struct perf_text_poke_event *text_poke_event = data;
10395 	struct perf_output_handle handle;
10396 	struct perf_sample_data sample;
10397 	u64 padding = 0;
10398 	int ret;
10399 
10400 	if (!perf_event_text_poke_match(event))
10401 		return;
10402 
10403 	perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event);
10404 
10405 	ret = perf_output_begin(&handle, &sample, event,
10406 				text_poke_event->event_id.header.size);
10407 	if (ret)
10408 		return;
10409 
10410 	perf_output_put(&handle, text_poke_event->event_id);
10411 	perf_output_put(&handle, text_poke_event->old_len);
10412 	perf_output_put(&handle, text_poke_event->new_len);
10413 
10414 	__output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len);
10415 	__output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len);
10416 
10417 	if (text_poke_event->pad)
10418 		__output_copy(&handle, &padding, text_poke_event->pad);
10419 
10420 	perf_event__output_id_sample(event, &handle, &sample);
10421 
10422 	perf_output_end(&handle);
10423 }
10424 
perf_event_text_poke(const void * addr,const void * old_bytes,size_t old_len,const void * new_bytes,size_t new_len)10425 void perf_event_text_poke(const void *addr, const void *old_bytes,
10426 			  size_t old_len, const void *new_bytes, size_t new_len)
10427 {
10428 	struct perf_text_poke_event text_poke_event;
10429 	size_t tot, pad;
10430 
10431 	if (!atomic_read(&nr_text_poke_events))
10432 		return;
10433 
10434 	tot  = sizeof(text_poke_event.old_len) + old_len;
10435 	tot += sizeof(text_poke_event.new_len) + new_len;
10436 	pad  = ALIGN(tot, sizeof(u64)) - tot;
10437 
10438 	text_poke_event = (struct perf_text_poke_event){
10439 		.old_bytes    = old_bytes,
10440 		.new_bytes    = new_bytes,
10441 		.pad          = pad,
10442 		.old_len      = old_len,
10443 		.new_len      = new_len,
10444 		.event_id  = {
10445 			.header = {
10446 				.type = PERF_RECORD_TEXT_POKE,
10447 				.misc = PERF_RECORD_MISC_KERNEL,
10448 				.size = sizeof(text_poke_event.event_id) + tot + pad,
10449 			},
10450 			.addr = (unsigned long)addr,
10451 		},
10452 	};
10453 
10454 	perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL);
10455 }
10456 
perf_event_itrace_started(struct perf_event * event)10457 void perf_event_itrace_started(struct perf_event *event)
10458 {
10459 	WRITE_ONCE(event->attach_state, event->attach_state | PERF_ATTACH_ITRACE);
10460 }
10461 
perf_log_itrace_start(struct perf_event * event)10462 static void perf_log_itrace_start(struct perf_event *event)
10463 {
10464 	struct perf_output_handle handle;
10465 	struct perf_sample_data sample;
10466 	struct perf_aux_event {
10467 		struct perf_event_header        header;
10468 		u32				pid;
10469 		u32				tid;
10470 	} rec;
10471 	int ret;
10472 
10473 	if (event->parent)
10474 		event = event->parent;
10475 
10476 	if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
10477 	    event->attach_state & PERF_ATTACH_ITRACE)
10478 		return;
10479 
10480 	rec.header.type	= PERF_RECORD_ITRACE_START;
10481 	rec.header.misc	= 0;
10482 	rec.header.size	= sizeof(rec);
10483 	rec.pid	= perf_event_pid(event, current);
10484 	rec.tid	= perf_event_tid(event, current);
10485 
10486 	perf_event_header__init_id(&rec.header, &sample, event);
10487 	ret = perf_output_begin(&handle, &sample, event, rec.header.size);
10488 
10489 	if (ret)
10490 		return;
10491 
10492 	perf_output_put(&handle, rec);
10493 	perf_event__output_id_sample(event, &handle, &sample);
10494 
10495 	perf_output_end(&handle);
10496 }
10497 
perf_report_aux_output_id(struct perf_event * event,u64 hw_id)10498 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id)
10499 {
10500 	struct perf_output_handle handle;
10501 	struct perf_sample_data sample;
10502 	struct perf_aux_event {
10503 		struct perf_event_header        header;
10504 		u64				hw_id;
10505 	} rec;
10506 	int ret;
10507 
10508 	if (event->parent)
10509 		event = event->parent;
10510 
10511 	rec.header.type	= PERF_RECORD_AUX_OUTPUT_HW_ID;
10512 	rec.header.misc	= 0;
10513 	rec.header.size	= sizeof(rec);
10514 	rec.hw_id	= hw_id;
10515 
10516 	perf_event_header__init_id(&rec.header, &sample, event);
10517 	ret = perf_output_begin(&handle, &sample, event, rec.header.size);
10518 
10519 	if (ret)
10520 		return;
10521 
10522 	perf_output_put(&handle, rec);
10523 	perf_event__output_id_sample(event, &handle, &sample);
10524 
10525 	perf_output_end(&handle);
10526 }
10527 EXPORT_SYMBOL_GPL(perf_report_aux_output_id);
10528 
10529 static int
__perf_event_account_interrupt(struct perf_event * event,int throttle)10530 __perf_event_account_interrupt(struct perf_event *event, int throttle)
10531 {
10532 	struct hw_perf_event *hwc = &event->hw;
10533 	int ret = 0;
10534 	u64 seq;
10535 
10536 	seq = __this_cpu_read(perf_throttled_seq);
10537 	if (seq != hwc->interrupts_seq) {
10538 		hwc->interrupts_seq = seq;
10539 		hwc->interrupts = 1;
10540 	} else {
10541 		hwc->interrupts++;
10542 	}
10543 
10544 	if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) {
10545 		__this_cpu_inc(perf_throttled_count);
10546 		tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
10547 		perf_event_throttle_group(event);
10548 		ret = 1;
10549 	}
10550 
10551 	if (event->attr.freq) {
10552 		u64 now = perf_clock();
10553 		s64 delta = now - hwc->freq_time_stamp;
10554 
10555 		hwc->freq_time_stamp = now;
10556 
10557 		if (delta > 0 && delta < 2*TICK_NSEC)
10558 			perf_adjust_period(event, delta, hwc->last_period, true);
10559 	}
10560 
10561 	return ret;
10562 }
10563 
perf_event_account_interrupt(struct perf_event * event)10564 int perf_event_account_interrupt(struct perf_event *event)
10565 {
10566 	return __perf_event_account_interrupt(event, 1);
10567 }
10568 
sample_is_allowed(struct perf_event * event,struct pt_regs * regs)10569 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
10570 {
10571 	/*
10572 	 * Due to interrupt latency (AKA "skid"), we may enter the
10573 	 * kernel before taking an overflow, even if the PMU is only
10574 	 * counting user events.
10575 	 */
10576 	if (event->attr.exclude_kernel && !user_mode(regs))
10577 		return false;
10578 
10579 	return true;
10580 }
10581 
10582 #ifdef CONFIG_BPF_SYSCALL
bpf_overflow_handler(struct perf_event * event,struct perf_sample_data * data,struct pt_regs * regs)10583 static int bpf_overflow_handler(struct perf_event *event,
10584 				struct perf_sample_data *data,
10585 				struct pt_regs *regs)
10586 {
10587 	struct bpf_perf_event_data_kern ctx = {
10588 		.data = data,
10589 		.event = event,
10590 	};
10591 	struct bpf_prog *prog;
10592 	int ret = 0;
10593 
10594 	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
10595 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
10596 		goto out;
10597 	rcu_read_lock();
10598 	prog = READ_ONCE(event->prog);
10599 	if (prog) {
10600 		perf_prepare_sample(data, event, regs);
10601 		ret = bpf_prog_run(prog, &ctx);
10602 	}
10603 	rcu_read_unlock();
10604 out:
10605 	__this_cpu_dec(bpf_prog_active);
10606 
10607 	return ret;
10608 }
10609 
perf_event_set_bpf_handler(struct perf_event * event,struct bpf_prog * prog,u64 bpf_cookie)10610 static inline int perf_event_set_bpf_handler(struct perf_event *event,
10611 					     struct bpf_prog *prog,
10612 					     u64 bpf_cookie)
10613 {
10614 	if (event->overflow_handler_context)
10615 		/* hw breakpoint or kernel counter */
10616 		return -EINVAL;
10617 
10618 	if (event->prog)
10619 		return -EEXIST;
10620 
10621 	if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
10622 		return -EINVAL;
10623 
10624 	if (event->attr.precise_ip &&
10625 	    prog->call_get_stack &&
10626 	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
10627 	     event->attr.exclude_callchain_kernel ||
10628 	     event->attr.exclude_callchain_user)) {
10629 		/*
10630 		 * On perf_event with precise_ip, calling bpf_get_stack()
10631 		 * may trigger unwinder warnings and occasional crashes.
10632 		 * bpf_get_[stack|stackid] works around this issue by using
10633 		 * callchain attached to perf_sample_data. If the
10634 		 * perf_event does not full (kernel and user) callchain
10635 		 * attached to perf_sample_data, do not allow attaching BPF
10636 		 * program that calls bpf_get_[stack|stackid].
10637 		 */
10638 		return -EPROTO;
10639 	}
10640 
10641 	event->prog = prog;
10642 	event->bpf_cookie = bpf_cookie;
10643 	return 0;
10644 }
10645 
perf_event_free_bpf_handler(struct perf_event * event)10646 static inline void perf_event_free_bpf_handler(struct perf_event *event)
10647 {
10648 	struct bpf_prog *prog = event->prog;
10649 
10650 	if (!prog)
10651 		return;
10652 
10653 	event->prog = NULL;
10654 	bpf_prog_put(prog);
10655 }
10656 #else
bpf_overflow_handler(struct perf_event * event,struct perf_sample_data * data,struct pt_regs * regs)10657 static inline int bpf_overflow_handler(struct perf_event *event,
10658 				       struct perf_sample_data *data,
10659 				       struct pt_regs *regs)
10660 {
10661 	return 1;
10662 }
10663 
perf_event_set_bpf_handler(struct perf_event * event,struct bpf_prog * prog,u64 bpf_cookie)10664 static inline int perf_event_set_bpf_handler(struct perf_event *event,
10665 					     struct bpf_prog *prog,
10666 					     u64 bpf_cookie)
10667 {
10668 	return -EOPNOTSUPP;
10669 }
10670 
perf_event_free_bpf_handler(struct perf_event * event)10671 static inline void perf_event_free_bpf_handler(struct perf_event *event)
10672 {
10673 }
10674 #endif
10675 
10676 /*
10677  * Generic event overflow handling, sampling.
10678  */
10679 
__perf_event_overflow(struct perf_event * event,int throttle,struct perf_sample_data * data,struct pt_regs * regs)10680 static int __perf_event_overflow(struct perf_event *event,
10681 				 int throttle, struct perf_sample_data *data,
10682 				 struct pt_regs *regs)
10683 {
10684 	int events = atomic_read(&event->event_limit);
10685 	int ret = 0;
10686 
10687 	/*
10688 	 * Non-sampling counters might still use the PMI to fold short
10689 	 * hardware counters, ignore those.
10690 	 */
10691 	if (unlikely(!is_sampling_event(event)))
10692 		return 0;
10693 
10694 	ret = __perf_event_account_interrupt(event, throttle);
10695 
10696 	if (event->attr.aux_pause)
10697 		perf_event_aux_pause(event->aux_event, true);
10698 
10699 	if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT &&
10700 	    !bpf_overflow_handler(event, data, regs))
10701 		goto out;
10702 
10703 	/*
10704 	 * XXX event_limit might not quite work as expected on inherited
10705 	 * events
10706 	 */
10707 
10708 	event->pending_kill = POLL_IN;
10709 	if (events && atomic_dec_and_test(&event->event_limit)) {
10710 		ret = 1;
10711 		event->pending_kill = POLL_HUP;
10712 		perf_event_disable_inatomic(event);
10713 		event->pmu->stop(event, 0);
10714 	}
10715 
10716 	if (event->attr.sigtrap) {
10717 		/*
10718 		 * The desired behaviour of sigtrap vs invalid samples is a bit
10719 		 * tricky; on the one hand, one should not loose the SIGTRAP if
10720 		 * it is the first event, on the other hand, we should also not
10721 		 * trigger the WARN or override the data address.
10722 		 */
10723 		bool valid_sample = sample_is_allowed(event, regs);
10724 		unsigned int pending_id = 1;
10725 		enum task_work_notify_mode notify_mode;
10726 
10727 		if (regs)
10728 			pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
10729 
10730 		notify_mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;
10731 
10732 		if (!event->pending_work &&
10733 		    !task_work_add(current, &event->pending_task, notify_mode)) {
10734 			event->pending_work = pending_id;
10735 			local_inc(&event->ctx->nr_no_switch_fast);
10736 			WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
10737 
10738 			event->pending_addr = 0;
10739 			if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
10740 				event->pending_addr = data->addr;
10741 
10742 		} else if (event->attr.exclude_kernel && valid_sample) {
10743 			/*
10744 			 * Should not be able to return to user space without
10745 			 * consuming pending_work; with exceptions:
10746 			 *
10747 			 *  1. Where !exclude_kernel, events can overflow again
10748 			 *     in the kernel without returning to user space.
10749 			 *
10750 			 *  2. Events that can overflow again before the IRQ-
10751 			 *     work without user space progress (e.g. hrtimer).
10752 			 *     To approximate progress (with false negatives),
10753 			 *     check 32-bit hash of the current IP.
10754 			 */
10755 			WARN_ON_ONCE(event->pending_work != pending_id);
10756 		}
10757 	}
10758 
10759 	READ_ONCE(event->overflow_handler)(event, data, regs);
10760 
10761 	if (*perf_event_fasync(event) && event->pending_kill) {
10762 		event->pending_wakeup = 1;
10763 		irq_work_queue(&event->pending_irq);
10764 	}
10765 out:
10766 	if (event->attr.aux_resume)
10767 		perf_event_aux_pause(event->aux_event, false);
10768 
10769 	return ret;
10770 }
10771 
perf_event_overflow(struct perf_event * event,struct perf_sample_data * data,struct pt_regs * regs)10772 int perf_event_overflow(struct perf_event *event,
10773 			struct perf_sample_data *data,
10774 			struct pt_regs *regs)
10775 {
10776 	/*
10777 	 * Entry point from hardware PMI, interrupts should be disabled here.
10778 	 * This serializes us against perf_event_remove_from_context() in
10779 	 * things like perf_event_release_kernel().
10780 	 */
10781 	lockdep_assert_irqs_disabled();
10782 
10783 	return __perf_event_overflow(event, 1, data, regs);
10784 }
10785 
10786 /*
10787  * Generic software event infrastructure
10788  */
10789 
10790 struct swevent_htable {
10791 	struct swevent_hlist		*swevent_hlist;
10792 	struct mutex			hlist_mutex;
10793 	int				hlist_refcount;
10794 };
10795 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
10796 
10797 /*
10798  * We directly increment event->count and keep a second value in
10799  * event->hw.period_left to count intervals. This period event
10800  * is kept in the range [-sample_period, 0] so that we can use the
10801  * sign as trigger.
10802  */
10803 
perf_swevent_set_period(struct perf_event * event)10804 u64 perf_swevent_set_period(struct perf_event *event)
10805 {
10806 	struct hw_perf_event *hwc = &event->hw;
10807 	u64 period = hwc->last_period;
10808 	u64 nr, offset;
10809 	s64 old, val;
10810 
10811 	hwc->last_period = hwc->sample_period;
10812 
10813 	old = local64_read(&hwc->period_left);
10814 	do {
10815 		val = old;
10816 		if (val < 0)
10817 			return 0;
10818 
10819 		nr = div64_u64(period + val, period);
10820 		offset = nr * period;
10821 		val -= offset;
10822 	} while (!local64_try_cmpxchg(&hwc->period_left, &old, val));
10823 
10824 	return nr;
10825 }
10826 
perf_swevent_overflow(struct perf_event * event,u64 overflow,struct perf_sample_data * data,struct pt_regs * regs)10827 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
10828 				    struct perf_sample_data *data,
10829 				    struct pt_regs *regs)
10830 {
10831 	struct hw_perf_event *hwc = &event->hw;
10832 	int throttle = 0;
10833 
10834 	if (!overflow)
10835 		overflow = perf_swevent_set_period(event);
10836 
10837 	if (hwc->interrupts == MAX_INTERRUPTS)
10838 		return;
10839 
10840 	for (; overflow; overflow--) {
10841 		if (__perf_event_overflow(event, throttle,
10842 					    data, regs)) {
10843 			/*
10844 			 * We inhibit the overflow from happening when
10845 			 * hwc->interrupts == MAX_INTERRUPTS.
10846 			 */
10847 			break;
10848 		}
10849 		throttle = 1;
10850 	}
10851 }
10852 
perf_swevent_event(struct perf_event * event,u64 nr,struct perf_sample_data * data,struct pt_regs * regs)10853 static void perf_swevent_event(struct perf_event *event, u64 nr,
10854 			       struct perf_sample_data *data,
10855 			       struct pt_regs *regs)
10856 {
10857 	struct hw_perf_event *hwc = &event->hw;
10858 
10859 	/*
10860 	 * This is:
10861 	 *   - software		preempt
10862 	 *   - tracepoint	preempt
10863 	 *   -   tp_target_task	irq (ctx->lock)
10864 	 *   - uprobes		preempt/irq
10865 	 *   - kprobes		preempt/irq
10866 	 *   - hw_breakpoint	irq
10867 	 *
10868 	 * Any of these are sufficient to hold off RCU and thus ensure @event
10869 	 * exists.
10870 	 */
10871 	lockdep_assert_preemption_disabled();
10872 	local64_add(nr, &event->count);
10873 
10874 	if (!regs)
10875 		return;
10876 
10877 	if (!is_sampling_event(event))
10878 		return;
10879 
10880 	/*
10881 	 * Serialize against event_function_call() IPIs like normal overflow
10882 	 * event handling. Specifically, must not allow
10883 	 * perf_event_release_kernel() -> perf_remove_from_context() to make
10884 	 * progress and 'release' the event from under us.
10885 	 */
10886 	guard(irqsave)();
10887 	if (event->state != PERF_EVENT_STATE_ACTIVE)
10888 		return;
10889 
10890 	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
10891 		data->period = nr;
10892 		return perf_swevent_overflow(event, 1, data, regs);
10893 	} else
10894 		data->period = event->hw.last_period;
10895 
10896 	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
10897 		return perf_swevent_overflow(event, 1, data, regs);
10898 
10899 	if (local64_add_negative(nr, &hwc->period_left))
10900 		return;
10901 
10902 	perf_swevent_overflow(event, 0, data, regs);
10903 }
10904 
perf_exclude_event(struct perf_event * event,struct pt_regs * regs)10905 int perf_exclude_event(struct perf_event *event, struct pt_regs *regs)
10906 {
10907 	if (event->hw.state & PERF_HES_STOPPED)
10908 		return 1;
10909 
10910 	if (regs) {
10911 		if (event->attr.exclude_user && user_mode(regs))
10912 			return 1;
10913 
10914 		if (event->attr.exclude_kernel && !user_mode(regs))
10915 			return 1;
10916 	}
10917 
10918 	return 0;
10919 }
10920 
perf_swevent_match(struct perf_event * event,enum perf_type_id type,u32 event_id,struct perf_sample_data * data,struct pt_regs * regs)10921 static int perf_swevent_match(struct perf_event *event,
10922 				enum perf_type_id type,
10923 				u32 event_id,
10924 				struct perf_sample_data *data,
10925 				struct pt_regs *regs)
10926 {
10927 	if (event->attr.type != type)
10928 		return 0;
10929 
10930 	if (event->attr.config != event_id)
10931 		return 0;
10932 
10933 	if (perf_exclude_event(event, regs))
10934 		return 0;
10935 
10936 	return 1;
10937 }
10938 
swevent_hash(u64 type,u32 event_id)10939 static inline u64 swevent_hash(u64 type, u32 event_id)
10940 {
10941 	u64 val = event_id | (type << 32);
10942 
10943 	return hash_64(val, SWEVENT_HLIST_BITS);
10944 }
10945 
10946 static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist * hlist,u64 type,u32 event_id)10947 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
10948 {
10949 	u64 hash = swevent_hash(type, event_id);
10950 
10951 	return &hlist->heads[hash];
10952 }
10953 
10954 /* For the read side: events when they trigger */
10955 static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable * swhash,u64 type,u32 event_id)10956 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
10957 {
10958 	struct swevent_hlist *hlist;
10959 
10960 	hlist = rcu_dereference(swhash->swevent_hlist);
10961 	if (!hlist)
10962 		return NULL;
10963 
10964 	return __find_swevent_head(hlist, type, event_id);
10965 }
10966 
10967 /* For the event head insertion and removal in the hlist */
10968 static inline struct hlist_head *
find_swevent_head(struct swevent_htable * swhash,struct perf_event * event)10969 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
10970 {
10971 	struct swevent_hlist *hlist;
10972 	u32 event_id = event->attr.config;
10973 	u64 type = event->attr.type;
10974 
10975 	/*
10976 	 * Event scheduling is always serialized against hlist allocation
10977 	 * and release. Which makes the protected version suitable here.
10978 	 * The context lock guarantees that.
10979 	 */
10980 	hlist = rcu_dereference_protected(swhash->swevent_hlist,
10981 					  lockdep_is_held(&event->ctx->lock));
10982 	if (!hlist)
10983 		return NULL;
10984 
10985 	return __find_swevent_head(hlist, type, event_id);
10986 }
10987 
do_perf_sw_event(enum perf_type_id type,u32 event_id,u64 nr,struct perf_sample_data * data,struct pt_regs * regs)10988 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
10989 				    u64 nr,
10990 				    struct perf_sample_data *data,
10991 				    struct pt_regs *regs)
10992 {
10993 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
10994 	struct perf_event *event;
10995 	struct hlist_head *head;
10996 
10997 	rcu_read_lock();
10998 	head = find_swevent_head_rcu(swhash, type, event_id);
10999 	if (!head)
11000 		goto end;
11001 
11002 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
11003 		if (perf_swevent_match(event, type, event_id, data, regs))
11004 			perf_swevent_event(event, nr, data, regs);
11005 	}
11006 end:
11007 	rcu_read_unlock();
11008 }
11009 
11010 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
11011 
perf_swevent_get_recursion_context(void)11012 int perf_swevent_get_recursion_context(void)
11013 {
11014 	return get_recursion_context(current->perf_recursion);
11015 }
11016 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
11017 
perf_swevent_put_recursion_context(int rctx)11018 void perf_swevent_put_recursion_context(int rctx)
11019 {
11020 	put_recursion_context(current->perf_recursion, rctx);
11021 }
11022 
___perf_sw_event(u32 event_id,u64 nr,struct pt_regs * regs,u64 addr)11023 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
11024 {
11025 	struct perf_sample_data data;
11026 
11027 	if (WARN_ON_ONCE(!regs))
11028 		return;
11029 
11030 	perf_sample_data_init(&data, addr, 0);
11031 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
11032 }
11033 
__perf_sw_event(u32 event_id,u64 nr,struct pt_regs * regs,u64 addr)11034 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
11035 {
11036 	int rctx;
11037 
11038 	preempt_disable_notrace();
11039 	rctx = perf_swevent_get_recursion_context();
11040 	if (unlikely(rctx < 0))
11041 		goto fail;
11042 
11043 	___perf_sw_event(event_id, nr, regs, addr);
11044 
11045 	perf_swevent_put_recursion_context(rctx);
11046 fail:
11047 	preempt_enable_notrace();
11048 }
11049 
perf_swevent_read(struct perf_event * event)11050 static void perf_swevent_read(struct perf_event *event)
11051 {
11052 }
11053 
perf_swevent_add(struct perf_event * event,int flags)11054 static int perf_swevent_add(struct perf_event *event, int flags)
11055 {
11056 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
11057 	struct hw_perf_event *hwc = &event->hw;
11058 	struct hlist_head *head;
11059 
11060 	if (is_sampling_event(event)) {
11061 		hwc->last_period = hwc->sample_period;
11062 		perf_swevent_set_period(event);
11063 	}
11064 
11065 	hwc->state = !(flags & PERF_EF_START);
11066 
11067 	head = find_swevent_head(swhash, event);
11068 	if (WARN_ON_ONCE(!head))
11069 		return -EINVAL;
11070 
11071 	hlist_add_head_rcu(&event->hlist_entry, head);
11072 	perf_event_update_userpage(event);
11073 
11074 	return 0;
11075 }
11076 
perf_swevent_del(struct perf_event * event,int flags)11077 static void perf_swevent_del(struct perf_event *event, int flags)
11078 {
11079 	hlist_del_rcu(&event->hlist_entry);
11080 }
11081 
perf_swevent_start(struct perf_event * event,int flags)11082 static void perf_swevent_start(struct perf_event *event, int flags)
11083 {
11084 	event->hw.state = 0;
11085 }
11086 
perf_swevent_stop(struct perf_event * event,int flags)11087 static void perf_swevent_stop(struct perf_event *event, int flags)
11088 {
11089 	event->hw.state = PERF_HES_STOPPED;
11090 }
11091 
11092 /* Deref the hlist from the update side */
11093 static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable * swhash)11094 swevent_hlist_deref(struct swevent_htable *swhash)
11095 {
11096 	return rcu_dereference_protected(swhash->swevent_hlist,
11097 					 lockdep_is_held(&swhash->hlist_mutex));
11098 }
11099 
swevent_hlist_release(struct swevent_htable * swhash)11100 static void swevent_hlist_release(struct swevent_htable *swhash)
11101 {
11102 	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
11103 
11104 	if (!hlist)
11105 		return;
11106 
11107 	RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
11108 	kfree_rcu(hlist, rcu_head);
11109 }
11110 
swevent_hlist_put_cpu(int cpu)11111 static void swevent_hlist_put_cpu(int cpu)
11112 {
11113 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
11114 
11115 	mutex_lock(&swhash->hlist_mutex);
11116 
11117 	if (!--swhash->hlist_refcount)
11118 		swevent_hlist_release(swhash);
11119 
11120 	mutex_unlock(&swhash->hlist_mutex);
11121 }
11122 
swevent_hlist_put(void)11123 static void swevent_hlist_put(void)
11124 {
11125 	int cpu;
11126 
11127 	for_each_possible_cpu(cpu)
11128 		swevent_hlist_put_cpu(cpu);
11129 }
11130 
swevent_hlist_get_cpu(int cpu)11131 static int swevent_hlist_get_cpu(int cpu)
11132 {
11133 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
11134 	int err = 0;
11135 
11136 	mutex_lock(&swhash->hlist_mutex);
11137 	if (!swevent_hlist_deref(swhash) &&
11138 	    cpumask_test_cpu(cpu, perf_online_mask)) {
11139 		struct swevent_hlist *hlist;
11140 
11141 		hlist = kzalloc_obj(*hlist);
11142 		if (!hlist) {
11143 			err = -ENOMEM;
11144 			goto exit;
11145 		}
11146 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
11147 	}
11148 	swhash->hlist_refcount++;
11149 exit:
11150 	mutex_unlock(&swhash->hlist_mutex);
11151 
11152 	return err;
11153 }
11154 
swevent_hlist_get(void)11155 static int swevent_hlist_get(void)
11156 {
11157 	int err, cpu, failed_cpu;
11158 
11159 	mutex_lock(&pmus_lock);
11160 	for_each_possible_cpu(cpu) {
11161 		err = swevent_hlist_get_cpu(cpu);
11162 		if (err) {
11163 			failed_cpu = cpu;
11164 			goto fail;
11165 		}
11166 	}
11167 	mutex_unlock(&pmus_lock);
11168 	return 0;
11169 fail:
11170 	for_each_possible_cpu(cpu) {
11171 		if (cpu == failed_cpu)
11172 			break;
11173 		swevent_hlist_put_cpu(cpu);
11174 	}
11175 	mutex_unlock(&pmus_lock);
11176 	return err;
11177 }
11178 
11179 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
11180 
sw_perf_event_destroy(struct perf_event * event)11181 static void sw_perf_event_destroy(struct perf_event *event)
11182 {
11183 	u64 event_id = event->attr.config;
11184 
11185 	WARN_ON(event->parent);
11186 
11187 	static_key_slow_dec(&perf_swevent_enabled[event_id]);
11188 	swevent_hlist_put();
11189 }
11190 
11191 static struct pmu perf_cpu_clock; /* fwd declaration */
11192 static struct pmu perf_task_clock;
11193 
perf_swevent_init(struct perf_event * event)11194 static int perf_swevent_init(struct perf_event *event)
11195 {
11196 	u64 event_id = event->attr.config;
11197 
11198 	if (event->attr.type != PERF_TYPE_SOFTWARE)
11199 		return -ENOENT;
11200 
11201 	/*
11202 	 * no branch sampling for software events
11203 	 */
11204 	if (has_branch_stack(event))
11205 		return -EOPNOTSUPP;
11206 
11207 	switch (event_id) {
11208 	case PERF_COUNT_SW_CPU_CLOCK:
11209 		event->attr.type = perf_cpu_clock.type;
11210 		return -ENOENT;
11211 	case PERF_COUNT_SW_TASK_CLOCK:
11212 		event->attr.type = perf_task_clock.type;
11213 		return -ENOENT;
11214 
11215 	default:
11216 		break;
11217 	}
11218 
11219 	if (event_id >= PERF_COUNT_SW_MAX)
11220 		return -ENOENT;
11221 
11222 	if (!event->parent) {
11223 		int err;
11224 
11225 		err = swevent_hlist_get();
11226 		if (err)
11227 			return err;
11228 
11229 		static_key_slow_inc(&perf_swevent_enabled[event_id]);
11230 		event->destroy = sw_perf_event_destroy;
11231 	}
11232 
11233 	return 0;
11234 }
11235 
11236 static struct pmu perf_swevent = {
11237 	.task_ctx_nr	= perf_sw_context,
11238 
11239 	.capabilities	= PERF_PMU_CAP_NO_NMI,
11240 
11241 	.event_init	= perf_swevent_init,
11242 	.add		= perf_swevent_add,
11243 	.del		= perf_swevent_del,
11244 	.start		= perf_swevent_start,
11245 	.stop		= perf_swevent_stop,
11246 	.read		= perf_swevent_read,
11247 };
11248 
11249 #ifdef CONFIG_EVENT_TRACING
11250 
tp_perf_event_destroy(struct perf_event * event)11251 static void tp_perf_event_destroy(struct perf_event *event)
11252 {
11253 	perf_trace_destroy(event);
11254 }
11255 
perf_tp_event_init(struct perf_event * event)11256 static int perf_tp_event_init(struct perf_event *event)
11257 {
11258 	int err;
11259 
11260 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
11261 		return -ENOENT;
11262 
11263 	/*
11264 	 * no branch sampling for tracepoint events
11265 	 */
11266 	if (has_branch_stack(event))
11267 		return -EOPNOTSUPP;
11268 
11269 	err = perf_trace_init(event);
11270 	if (err)
11271 		return err;
11272 
11273 	event->destroy = tp_perf_event_destroy;
11274 
11275 	return 0;
11276 }
11277 
11278 static struct pmu perf_tracepoint = {
11279 	.task_ctx_nr	= perf_sw_context,
11280 
11281 	.event_init	= perf_tp_event_init,
11282 	.add		= perf_trace_add,
11283 	.del		= perf_trace_del,
11284 	.start		= perf_swevent_start,
11285 	.stop		= perf_swevent_stop,
11286 	.read		= perf_swevent_read,
11287 };
11288 
perf_tp_filter_match(struct perf_event * event,struct perf_raw_record * raw)11289 static int perf_tp_filter_match(struct perf_event *event,
11290 				struct perf_raw_record *raw)
11291 {
11292 	void *record = raw->frag.data;
11293 
11294 	/* only top level events have filters set */
11295 	if (event->parent)
11296 		event = event->parent;
11297 
11298 	if (likely(!event->filter) || filter_match_preds(event->filter, record))
11299 		return 1;
11300 	return 0;
11301 }
11302 
perf_tp_event_match(struct perf_event * event,struct perf_raw_record * raw,struct pt_regs * regs)11303 static int perf_tp_event_match(struct perf_event *event,
11304 				struct perf_raw_record *raw,
11305 				struct pt_regs *regs)
11306 {
11307 	if (event->hw.state & PERF_HES_STOPPED)
11308 		return 0;
11309 	/*
11310 	 * If exclude_kernel, only trace user-space tracepoints (uprobes)
11311 	 */
11312 	if (event->attr.exclude_kernel && !user_mode(regs))
11313 		return 0;
11314 
11315 	if (!perf_tp_filter_match(event, raw))
11316 		return 0;
11317 
11318 	return 1;
11319 }
11320 
perf_trace_run_bpf_submit(void * raw_data,int size,int rctx,struct trace_event_call * call,u64 count,struct pt_regs * regs,struct hlist_head * head,struct task_struct * task)11321 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
11322 			       struct trace_event_call *call, u64 count,
11323 			       struct pt_regs *regs, struct hlist_head *head,
11324 			       struct task_struct *task)
11325 {
11326 	if (bpf_prog_array_valid(call)) {
11327 		*(struct pt_regs **)raw_data = regs;
11328 		if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
11329 			perf_swevent_put_recursion_context(rctx);
11330 			return;
11331 		}
11332 	}
11333 	perf_tp_event(call->event.type, count, raw_data, size, regs, head,
11334 		      rctx, task);
11335 }
11336 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
11337 
__perf_tp_event_target_task(u64 count,void * record,struct pt_regs * regs,struct perf_sample_data * data,struct perf_raw_record * raw,struct perf_event * event)11338 static void __perf_tp_event_target_task(u64 count, void *record,
11339 					struct pt_regs *regs,
11340 					struct perf_sample_data *data,
11341 					struct perf_raw_record *raw,
11342 					struct perf_event *event)
11343 {
11344 	struct trace_entry *entry = record;
11345 
11346 	if (event->attr.config != entry->type)
11347 		return;
11348 	/* Cannot deliver synchronous signal to other task. */
11349 	if (event->attr.sigtrap)
11350 		return;
11351 	if (perf_tp_event_match(event, raw, regs)) {
11352 		perf_sample_data_init(data, 0, 0);
11353 		perf_sample_save_raw_data(data, event, raw);
11354 		perf_swevent_event(event, count, data, regs);
11355 	}
11356 }
11357 
perf_tp_event_target_task(u64 count,void * record,struct pt_regs * regs,struct perf_sample_data * data,struct perf_raw_record * raw,struct perf_event_context * ctx)11358 static void perf_tp_event_target_task(u64 count, void *record,
11359 				      struct pt_regs *regs,
11360 				      struct perf_sample_data *data,
11361 				      struct perf_raw_record *raw,
11362 				      struct perf_event_context *ctx)
11363 {
11364 	unsigned int cpu = smp_processor_id();
11365 	struct pmu *pmu = &perf_tracepoint;
11366 	struct perf_event *event, *sibling;
11367 
11368 	perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) {
11369 		__perf_tp_event_target_task(count, record, regs, data, raw, event);
11370 		for_each_sibling_event(sibling, event)
11371 			__perf_tp_event_target_task(count, record, regs, data, raw, sibling);
11372 	}
11373 
11374 	perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
11375 		__perf_tp_event_target_task(count, record, regs, data, raw, event);
11376 		for_each_sibling_event(sibling, event)
11377 			__perf_tp_event_target_task(count, record, regs, data, raw, sibling);
11378 	}
11379 }
11380 
perf_tp_event(u16 event_type,u64 count,void * record,int entry_size,struct pt_regs * regs,struct hlist_head * head,int rctx,struct task_struct * task)11381 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
11382 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
11383 		   struct task_struct *task)
11384 {
11385 	struct perf_sample_data data;
11386 	struct perf_event *event;
11387 
11388 	/*
11389 	 * Per being a tracepoint, this runs with preemption disabled.
11390 	 */
11391 	lockdep_assert_preemption_disabled();
11392 
11393 	struct perf_raw_record raw = {
11394 		.frag = {
11395 			.size = entry_size,
11396 			.data = record,
11397 		},
11398 	};
11399 
11400 	perf_trace_buf_update(record, event_type);
11401 
11402 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
11403 		if (perf_tp_event_match(event, &raw, regs)) {
11404 			/*
11405 			 * Here use the same on-stack perf_sample_data,
11406 			 * some members in data are event-specific and
11407 			 * need to be re-computed for different sweveents.
11408 			 * Re-initialize data->sample_flags safely to avoid
11409 			 * the problem that next event skips preparing data
11410 			 * because data->sample_flags is set.
11411 			 */
11412 			perf_sample_data_init(&data, 0, 0);
11413 			perf_sample_save_raw_data(&data, event, &raw);
11414 			perf_swevent_event(event, count, &data, regs);
11415 		}
11416 	}
11417 
11418 	/*
11419 	 * If we got specified a target task, also iterate its context and
11420 	 * deliver this event there too.
11421 	 */
11422 	if (task && task != current) {
11423 		struct perf_event_context *ctx;
11424 
11425 		rcu_read_lock();
11426 		ctx = rcu_dereference(task->perf_event_ctxp);
11427 		if (!ctx)
11428 			goto unlock;
11429 
11430 		raw_spin_lock(&ctx->lock);
11431 		perf_tp_event_target_task(count, record, regs, &data, &raw, ctx);
11432 		raw_spin_unlock(&ctx->lock);
11433 unlock:
11434 		rcu_read_unlock();
11435 	}
11436 
11437 	perf_swevent_put_recursion_context(rctx);
11438 }
11439 EXPORT_SYMBOL_GPL(perf_tp_event);
11440 
11441 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
11442 /*
11443  * Flags in config, used by dynamic PMU kprobe and uprobe
11444  * The flags should match following PMU_FORMAT_ATTR().
11445  *
11446  * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
11447  *                               if not set, create kprobe/uprobe
11448  *
11449  * The following values specify a reference counter (or semaphore in the
11450  * terminology of tools like dtrace, systemtap, etc.) Userspace Statically
11451  * Defined Tracepoints (USDT). Currently, we use 40 bit for the offset.
11452  *
11453  * PERF_UPROBE_REF_CTR_OFFSET_BITS	# of bits in config as th offset
11454  * PERF_UPROBE_REF_CTR_OFFSET_SHIFT	# of bits to shift left
11455  */
11456 enum perf_probe_config {
11457 	PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0,  /* [k,u]retprobe */
11458 	PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
11459 	PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
11460 };
11461 
11462 PMU_FORMAT_ATTR(retprobe, "config:0");
11463 #endif
11464 
11465 #ifdef CONFIG_KPROBE_EVENTS
11466 static struct attribute *kprobe_attrs[] = {
11467 	&format_attr_retprobe.attr,
11468 	NULL,
11469 };
11470 
11471 static struct attribute_group kprobe_format_group = {
11472 	.name = "format",
11473 	.attrs = kprobe_attrs,
11474 };
11475 
11476 static const struct attribute_group *kprobe_attr_groups[] = {
11477 	&kprobe_format_group,
11478 	NULL,
11479 };
11480 
11481 static int perf_kprobe_event_init(struct perf_event *event);
11482 static struct pmu perf_kprobe = {
11483 	.task_ctx_nr	= perf_sw_context,
11484 	.event_init	= perf_kprobe_event_init,
11485 	.add		= perf_trace_add,
11486 	.del		= perf_trace_del,
11487 	.start		= perf_swevent_start,
11488 	.stop		= perf_swevent_stop,
11489 	.read		= perf_swevent_read,
11490 	.attr_groups	= kprobe_attr_groups,
11491 };
11492 
perf_kprobe_event_init(struct perf_event * event)11493 static int perf_kprobe_event_init(struct perf_event *event)
11494 {
11495 	int err;
11496 	bool is_retprobe;
11497 
11498 	if (event->attr.type != perf_kprobe.type)
11499 		return -ENOENT;
11500 
11501 	if (!perfmon_capable())
11502 		return -EACCES;
11503 
11504 	/*
11505 	 * no branch sampling for probe events
11506 	 */
11507 	if (has_branch_stack(event))
11508 		return -EOPNOTSUPP;
11509 
11510 	is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
11511 	err = perf_kprobe_init(event, is_retprobe);
11512 	if (err)
11513 		return err;
11514 
11515 	event->destroy = perf_kprobe_destroy;
11516 
11517 	return 0;
11518 }
11519 #endif /* CONFIG_KPROBE_EVENTS */
11520 
11521 #ifdef CONFIG_UPROBE_EVENTS
11522 PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
11523 
11524 static struct attribute *uprobe_attrs[] = {
11525 	&format_attr_retprobe.attr,
11526 	&format_attr_ref_ctr_offset.attr,
11527 	NULL,
11528 };
11529 
11530 static struct attribute_group uprobe_format_group = {
11531 	.name = "format",
11532 	.attrs = uprobe_attrs,
11533 };
11534 
11535 static const struct attribute_group *uprobe_attr_groups[] = {
11536 	&uprobe_format_group,
11537 	NULL,
11538 };
11539 
11540 static int perf_uprobe_event_init(struct perf_event *event);
11541 static struct pmu perf_uprobe = {
11542 	.task_ctx_nr	= perf_sw_context,
11543 	.event_init	= perf_uprobe_event_init,
11544 	.add		= perf_trace_add,
11545 	.del		= perf_trace_del,
11546 	.start		= perf_swevent_start,
11547 	.stop		= perf_swevent_stop,
11548 	.read		= perf_swevent_read,
11549 	.attr_groups	= uprobe_attr_groups,
11550 };
11551 
perf_uprobe_event_init(struct perf_event * event)11552 static int perf_uprobe_event_init(struct perf_event *event)
11553 {
11554 	int err;
11555 	unsigned long ref_ctr_offset;
11556 	bool is_retprobe;
11557 
11558 	if (event->attr.type != perf_uprobe.type)
11559 		return -ENOENT;
11560 
11561 	if (!capable(CAP_SYS_ADMIN))
11562 		return -EACCES;
11563 
11564 	/*
11565 	 * no branch sampling for probe events
11566 	 */
11567 	if (has_branch_stack(event))
11568 		return -EOPNOTSUPP;
11569 
11570 	is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
11571 	ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
11572 	err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
11573 	if (err)
11574 		return err;
11575 
11576 	event->destroy = perf_uprobe_destroy;
11577 
11578 	return 0;
11579 }
11580 #endif /* CONFIG_UPROBE_EVENTS */
11581 
perf_tp_register(void)11582 static inline void perf_tp_register(void)
11583 {
11584 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
11585 #ifdef CONFIG_KPROBE_EVENTS
11586 	perf_pmu_register(&perf_kprobe, "kprobe", -1);
11587 #endif
11588 #ifdef CONFIG_UPROBE_EVENTS
11589 	perf_pmu_register(&perf_uprobe, "uprobe", -1);
11590 #endif
11591 }
11592 
perf_event_free_filter(struct perf_event * event)11593 static void perf_event_free_filter(struct perf_event *event)
11594 {
11595 	ftrace_profile_free_filter(event);
11596 }
11597 
11598 /*
11599  * returns true if the event is a tracepoint, or a kprobe/upprobe created
11600  * with perf_event_open()
11601  */
perf_event_is_tracing(struct perf_event * event)11602 static inline bool perf_event_is_tracing(struct perf_event *event)
11603 {
11604 	if (event->pmu == &perf_tracepoint)
11605 		return true;
11606 #ifdef CONFIG_KPROBE_EVENTS
11607 	if (event->pmu == &perf_kprobe)
11608 		return true;
11609 #endif
11610 #ifdef CONFIG_UPROBE_EVENTS
11611 	if (event->pmu == &perf_uprobe)
11612 		return true;
11613 #endif
11614 	return false;
11615 }
11616 
__perf_event_set_bpf_prog(struct perf_event * event,struct bpf_prog * prog,u64 bpf_cookie)11617 static int __perf_event_set_bpf_prog(struct perf_event *event,
11618 				     struct bpf_prog *prog,
11619 				     u64 bpf_cookie)
11620 {
11621 	bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
11622 
11623 	if (event->state <= PERF_EVENT_STATE_REVOKED)
11624 		return -ENODEV;
11625 
11626 	if (!perf_event_is_tracing(event))
11627 		return perf_event_set_bpf_handler(event, prog, bpf_cookie);
11628 
11629 	is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE;
11630 	is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE;
11631 	is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
11632 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
11633 	if (!is_kprobe && !is_uprobe && !is_tracepoint && !is_syscall_tp)
11634 		/* bpf programs can only be attached to u/kprobe or tracepoint */
11635 		return -EINVAL;
11636 
11637 	if (((is_kprobe || is_uprobe) && prog->type != BPF_PROG_TYPE_KPROBE) ||
11638 	    (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
11639 	    (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
11640 		return -EINVAL;
11641 
11642 	if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe)
11643 		/* only uprobe programs are allowed to be sleepable */
11644 		return -EINVAL;
11645 
11646 	/* Kprobe override only works for kprobes, not uprobes. */
11647 	if (prog->kprobe_override && !is_kprobe)
11648 		return -EINVAL;
11649 
11650 	/* Writing to context allowed only for uprobes. */
11651 	if (prog->aux->kprobe_write_ctx && !is_uprobe)
11652 		return -EINVAL;
11653 
11654 	if (is_tracepoint || is_syscall_tp) {
11655 		int off = trace_event_get_offsets(event->tp_event);
11656 
11657 		if (prog->aux->max_ctx_offset > off)
11658 			return -EACCES;
11659 	}
11660 
11661 	return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
11662 }
11663 
perf_event_set_bpf_prog(struct perf_event * event,struct bpf_prog * prog,u64 bpf_cookie)11664 int perf_event_set_bpf_prog(struct perf_event *event,
11665 			    struct bpf_prog *prog,
11666 			    u64 bpf_cookie)
11667 {
11668 	struct perf_event_context *ctx;
11669 	int ret;
11670 
11671 	ctx = perf_event_ctx_lock(event);
11672 	ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
11673 	perf_event_ctx_unlock(event, ctx);
11674 
11675 	return ret;
11676 }
11677 
perf_event_free_bpf_prog(struct perf_event * event)11678 void perf_event_free_bpf_prog(struct perf_event *event)
11679 {
11680 	if (!event->prog)
11681 		return;
11682 
11683 	if (!perf_event_is_tracing(event)) {
11684 		perf_event_free_bpf_handler(event);
11685 		return;
11686 	}
11687 	perf_event_detach_bpf_prog(event);
11688 }
11689 
11690 #else
11691 
perf_tp_register(void)11692 static inline void perf_tp_register(void)
11693 {
11694 }
11695 
perf_event_free_filter(struct perf_event * event)11696 static void perf_event_free_filter(struct perf_event *event)
11697 {
11698 }
11699 
__perf_event_set_bpf_prog(struct perf_event * event,struct bpf_prog * prog,u64 bpf_cookie)11700 static int __perf_event_set_bpf_prog(struct perf_event *event,
11701 				     struct bpf_prog *prog,
11702 				     u64 bpf_cookie)
11703 {
11704 	return -ENOENT;
11705 }
11706 
perf_event_set_bpf_prog(struct perf_event * event,struct bpf_prog * prog,u64 bpf_cookie)11707 int perf_event_set_bpf_prog(struct perf_event *event,
11708 			    struct bpf_prog *prog,
11709 			    u64 bpf_cookie)
11710 {
11711 	return -ENOENT;
11712 }
11713 
perf_event_free_bpf_prog(struct perf_event * event)11714 void perf_event_free_bpf_prog(struct perf_event *event)
11715 {
11716 }
11717 #endif /* CONFIG_EVENT_TRACING */
11718 
11719 #ifdef CONFIG_HAVE_HW_BREAKPOINT
perf_bp_event(struct perf_event * bp,void * data)11720 void perf_bp_event(struct perf_event *bp, void *data)
11721 {
11722 	struct perf_sample_data sample;
11723 	struct pt_regs *regs = data;
11724 
11725 	/*
11726 	 * Exception context, will have interrupts disabled.
11727 	 */
11728 	lockdep_assert_irqs_disabled();
11729 
11730 	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
11731 
11732 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
11733 		perf_swevent_event(bp, 1, &sample, regs);
11734 }
11735 #endif
11736 
11737 /*
11738  * Allocate a new address filter
11739  */
11740 static struct perf_addr_filter *
perf_addr_filter_new(struct perf_event * event,struct list_head * filters)11741 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
11742 {
11743 	int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
11744 	struct perf_addr_filter *filter;
11745 
11746 	filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
11747 	if (!filter)
11748 		return NULL;
11749 
11750 	INIT_LIST_HEAD(&filter->entry);
11751 	list_add_tail(&filter->entry, filters);
11752 
11753 	return filter;
11754 }
11755 
free_filters_list(struct list_head * filters)11756 static void free_filters_list(struct list_head *filters)
11757 {
11758 	struct perf_addr_filter *filter, *iter;
11759 
11760 	list_for_each_entry_safe(filter, iter, filters, entry) {
11761 		path_put(&filter->path);
11762 		list_del(&filter->entry);
11763 		kfree(filter);
11764 	}
11765 }
11766 
11767 /*
11768  * Free existing address filters and optionally install new ones
11769  */
perf_addr_filters_splice(struct perf_event * event,struct list_head * head)11770 static void perf_addr_filters_splice(struct perf_event *event,
11771 				     struct list_head *head)
11772 {
11773 	unsigned long flags;
11774 	LIST_HEAD(list);
11775 
11776 	if (!has_addr_filter(event))
11777 		return;
11778 
11779 	/* don't bother with children, they don't have their own filters */
11780 	if (event->parent)
11781 		return;
11782 
11783 	raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
11784 
11785 	list_splice_init(&event->addr_filters.list, &list);
11786 	if (head)
11787 		list_splice(head, &event->addr_filters.list);
11788 
11789 	raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
11790 
11791 	free_filters_list(&list);
11792 }
11793 
perf_free_addr_filters(struct perf_event * event)11794 static void perf_free_addr_filters(struct perf_event *event)
11795 {
11796 	/*
11797 	 * Used during free paths, there is no concurrency.
11798 	 */
11799 	if (list_empty(&event->addr_filters.list))
11800 		return;
11801 
11802 	perf_addr_filters_splice(event, NULL);
11803 }
11804 
11805 /*
11806  * Scan through mm's vmas and see if one of them matches the
11807  * @filter; if so, adjust filter's address range.
11808  * Called with mm::mmap_lock down for reading.
11809  */
perf_addr_filter_apply(struct perf_addr_filter * filter,struct mm_struct * mm,struct perf_addr_filter_range * fr)11810 static void perf_addr_filter_apply(struct perf_addr_filter *filter,
11811 				   struct mm_struct *mm,
11812 				   struct perf_addr_filter_range *fr)
11813 {
11814 	struct vm_area_struct *vma;
11815 	VMA_ITERATOR(vmi, mm, 0);
11816 
11817 	for_each_vma(vmi, vma) {
11818 		if (!vma->vm_file)
11819 			continue;
11820 
11821 		if (perf_addr_filter_vma_adjust(filter, vma, fr))
11822 			return;
11823 	}
11824 }
11825 
11826 /*
11827  * Update event's address range filters based on the
11828  * task's existing mappings, if any.
11829  */
perf_event_addr_filters_apply(struct perf_event * event)11830 static void perf_event_addr_filters_apply(struct perf_event *event)
11831 {
11832 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
11833 	struct task_struct *task = READ_ONCE(event->ctx->task);
11834 	struct perf_addr_filter *filter;
11835 	struct mm_struct *mm = NULL;
11836 	unsigned int count = 0;
11837 	unsigned long flags;
11838 
11839 	/*
11840 	 * We may observe TASK_TOMBSTONE, which means that the event tear-down
11841 	 * will stop on the parent's child_mutex that our caller is also holding
11842 	 */
11843 	if (task == TASK_TOMBSTONE)
11844 		return;
11845 
11846 	if (ifh->nr_file_filters) {
11847 		mm = get_task_mm(task);
11848 		if (!mm)
11849 			goto restart;
11850 
11851 		mmap_read_lock(mm);
11852 	}
11853 
11854 	raw_spin_lock_irqsave(&ifh->lock, flags);
11855 	list_for_each_entry(filter, &ifh->list, entry) {
11856 		if (filter->path.dentry) {
11857 			/*
11858 			 * Adjust base offset if the filter is associated to a
11859 			 * binary that needs to be mapped:
11860 			 */
11861 			event->addr_filter_ranges[count].start = 0;
11862 			event->addr_filter_ranges[count].size = 0;
11863 
11864 			perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
11865 		} else {
11866 			event->addr_filter_ranges[count].start = filter->offset;
11867 			event->addr_filter_ranges[count].size  = filter->size;
11868 		}
11869 
11870 		count++;
11871 	}
11872 
11873 	event->addr_filters_gen++;
11874 	raw_spin_unlock_irqrestore(&ifh->lock, flags);
11875 
11876 	if (ifh->nr_file_filters) {
11877 		mmap_read_unlock(mm);
11878 
11879 		mmput(mm);
11880 	}
11881 
11882 restart:
11883 	perf_event_stop(event, 1);
11884 }
11885 
11886 /*
11887  * Address range filtering: limiting the data to certain
11888  * instruction address ranges. Filters are ioctl()ed to us from
11889  * userspace as ascii strings.
11890  *
11891  * Filter string format:
11892  *
11893  * ACTION RANGE_SPEC
11894  * where ACTION is one of the
11895  *  * "filter": limit the trace to this region
11896  *  * "start": start tracing from this address
11897  *  * "stop": stop tracing at this address/region;
11898  * RANGE_SPEC is
11899  *  * for kernel addresses: <start address>[/<size>]
11900  *  * for object files:     <start address>[/<size>]@</path/to/object/file>
11901  *
11902  * if <size> is not specified or is zero, the range is treated as a single
11903  * address; not valid for ACTION=="filter".
11904  */
11905 enum {
11906 	IF_ACT_NONE = -1,
11907 	IF_ACT_FILTER,
11908 	IF_ACT_START,
11909 	IF_ACT_STOP,
11910 	IF_SRC_FILE,
11911 	IF_SRC_KERNEL,
11912 	IF_SRC_FILEADDR,
11913 	IF_SRC_KERNELADDR,
11914 };
11915 
11916 enum {
11917 	IF_STATE_ACTION = 0,
11918 	IF_STATE_SOURCE,
11919 	IF_STATE_END,
11920 };
11921 
11922 static const match_table_t if_tokens = {
11923 	{ IF_ACT_FILTER,	"filter" },
11924 	{ IF_ACT_START,		"start" },
11925 	{ IF_ACT_STOP,		"stop" },
11926 	{ IF_SRC_FILE,		"%u/%u@%s" },
11927 	{ IF_SRC_KERNEL,	"%u/%u" },
11928 	{ IF_SRC_FILEADDR,	"%u@%s" },
11929 	{ IF_SRC_KERNELADDR,	"%u" },
11930 	{ IF_ACT_NONE,		NULL },
11931 };
11932 
11933 /*
11934  * Address filter string parser
11935  */
11936 static int
perf_event_parse_addr_filter(struct perf_event * event,char * fstr,struct list_head * filters)11937 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
11938 			     struct list_head *filters)
11939 {
11940 	struct perf_addr_filter *filter = NULL;
11941 	char *start, *orig, *filename = NULL;
11942 	substring_t args[MAX_OPT_ARGS];
11943 	int state = IF_STATE_ACTION, token;
11944 	unsigned int kernel = 0;
11945 	int ret = -EINVAL;
11946 
11947 	orig = fstr = kstrdup(fstr, GFP_KERNEL);
11948 	if (!fstr)
11949 		return -ENOMEM;
11950 
11951 	while ((start = strsep(&fstr, " ,\n")) != NULL) {
11952 		static const enum perf_addr_filter_action_t actions[] = {
11953 			[IF_ACT_FILTER]	= PERF_ADDR_FILTER_ACTION_FILTER,
11954 			[IF_ACT_START]	= PERF_ADDR_FILTER_ACTION_START,
11955 			[IF_ACT_STOP]	= PERF_ADDR_FILTER_ACTION_STOP,
11956 		};
11957 		ret = -EINVAL;
11958 
11959 		if (!*start)
11960 			continue;
11961 
11962 		/* filter definition begins */
11963 		if (state == IF_STATE_ACTION) {
11964 			filter = perf_addr_filter_new(event, filters);
11965 			if (!filter)
11966 				goto fail;
11967 		}
11968 
11969 		token = match_token(start, if_tokens, args);
11970 		switch (token) {
11971 		case IF_ACT_FILTER:
11972 		case IF_ACT_START:
11973 		case IF_ACT_STOP:
11974 			if (state != IF_STATE_ACTION)
11975 				goto fail;
11976 
11977 			filter->action = actions[token];
11978 			state = IF_STATE_SOURCE;
11979 			break;
11980 
11981 		case IF_SRC_KERNELADDR:
11982 		case IF_SRC_KERNEL:
11983 			kernel = 1;
11984 			fallthrough;
11985 
11986 		case IF_SRC_FILEADDR:
11987 		case IF_SRC_FILE:
11988 			if (state != IF_STATE_SOURCE)
11989 				goto fail;
11990 
11991 			*args[0].to = 0;
11992 			ret = kstrtoul(args[0].from, 0, &filter->offset);
11993 			if (ret)
11994 				goto fail;
11995 
11996 			if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) {
11997 				*args[1].to = 0;
11998 				ret = kstrtoul(args[1].from, 0, &filter->size);
11999 				if (ret)
12000 					goto fail;
12001 			}
12002 
12003 			if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
12004 				int fpos = token == IF_SRC_FILE ? 2 : 1;
12005 
12006 				kfree(filename);
12007 				filename = match_strdup(&args[fpos]);
12008 				if (!filename) {
12009 					ret = -ENOMEM;
12010 					goto fail;
12011 				}
12012 			}
12013 
12014 			state = IF_STATE_END;
12015 			break;
12016 
12017 		default:
12018 			goto fail;
12019 		}
12020 
12021 		/*
12022 		 * Filter definition is fully parsed, validate and install it.
12023 		 * Make sure that it doesn't contradict itself or the event's
12024 		 * attribute.
12025 		 */
12026 		if (state == IF_STATE_END) {
12027 			ret = -EINVAL;
12028 
12029 			/*
12030 			 * ACTION "filter" must have a non-zero length region
12031 			 * specified.
12032 			 */
12033 			if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER &&
12034 			    !filter->size)
12035 				goto fail;
12036 
12037 			if (!kernel) {
12038 				if (!filename)
12039 					goto fail;
12040 
12041 				/*
12042 				 * For now, we only support file-based filters
12043 				 * in per-task events; doing so for CPU-wide
12044 				 * events requires additional context switching
12045 				 * trickery, since same object code will be
12046 				 * mapped at different virtual addresses in
12047 				 * different processes.
12048 				 */
12049 				ret = -EOPNOTSUPP;
12050 				if (!event->ctx->task)
12051 					goto fail;
12052 
12053 				/* look up the path and grab its inode */
12054 				ret = kern_path(filename, LOOKUP_FOLLOW,
12055 						&filter->path);
12056 				if (ret)
12057 					goto fail;
12058 
12059 				ret = -EINVAL;
12060 				if (!filter->path.dentry ||
12061 				    !S_ISREG(d_inode(filter->path.dentry)
12062 					     ->i_mode))
12063 					goto fail;
12064 
12065 				event->addr_filters.nr_file_filters++;
12066 			}
12067 
12068 			/* ready to consume more filters */
12069 			kfree(filename);
12070 			filename = NULL;
12071 			state = IF_STATE_ACTION;
12072 			filter = NULL;
12073 			kernel = 0;
12074 		}
12075 	}
12076 
12077 	if (state != IF_STATE_ACTION)
12078 		goto fail;
12079 
12080 	kfree(filename);
12081 	kfree(orig);
12082 
12083 	return 0;
12084 
12085 fail:
12086 	kfree(filename);
12087 	free_filters_list(filters);
12088 	kfree(orig);
12089 
12090 	return ret;
12091 }
12092 
12093 static int
perf_event_set_addr_filter(struct perf_event * event,char * filter_str)12094 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
12095 {
12096 	LIST_HEAD(filters);
12097 	int ret;
12098 
12099 	/*
12100 	 * Since this is called in perf_ioctl() path, we're already holding
12101 	 * ctx::mutex.
12102 	 */
12103 	lockdep_assert_held(&event->ctx->mutex);
12104 
12105 	if (WARN_ON_ONCE(event->parent))
12106 		return -EINVAL;
12107 
12108 	ret = perf_event_parse_addr_filter(event, filter_str, &filters);
12109 	if (ret)
12110 		goto fail_clear_files;
12111 
12112 	ret = event->pmu->addr_filters_validate(&filters);
12113 	if (ret)
12114 		goto fail_free_filters;
12115 
12116 	/* remove existing filters, if any */
12117 	perf_addr_filters_splice(event, &filters);
12118 
12119 	/* install new filters */
12120 	perf_event_for_each_child(event, perf_event_addr_filters_apply);
12121 
12122 	return ret;
12123 
12124 fail_free_filters:
12125 	free_filters_list(&filters);
12126 
12127 fail_clear_files:
12128 	event->addr_filters.nr_file_filters = 0;
12129 
12130 	return ret;
12131 }
12132 
perf_event_set_filter(struct perf_event * event,void __user * arg)12133 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
12134 {
12135 	int ret = -EINVAL;
12136 	char *filter_str;
12137 
12138 	filter_str = strndup_user(arg, PAGE_SIZE);
12139 	if (IS_ERR(filter_str))
12140 		return PTR_ERR(filter_str);
12141 
12142 #ifdef CONFIG_EVENT_TRACING
12143 	if (perf_event_is_tracing(event)) {
12144 		struct perf_event_context *ctx = event->ctx;
12145 
12146 		/*
12147 		 * Beware, here be dragons!!
12148 		 *
12149 		 * the tracepoint muck will deadlock against ctx->mutex, but
12150 		 * the tracepoint stuff does not actually need it. So
12151 		 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
12152 		 * already have a reference on ctx.
12153 		 *
12154 		 * This can result in event getting moved to a different ctx,
12155 		 * but that does not affect the tracepoint state.
12156 		 */
12157 		mutex_unlock(&ctx->mutex);
12158 		ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
12159 		mutex_lock(&ctx->mutex);
12160 	} else
12161 #endif
12162 	if (has_addr_filter(event))
12163 		ret = perf_event_set_addr_filter(event, filter_str);
12164 
12165 	kfree(filter_str);
12166 	return ret;
12167 }
12168 
12169 /*
12170  * hrtimer based swevent callback
12171  */
12172 
perf_swevent_hrtimer(struct hrtimer * hrtimer)12173 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
12174 {
12175 	enum hrtimer_restart ret = HRTIMER_RESTART;
12176 	struct perf_sample_data data;
12177 	struct pt_regs *regs;
12178 	struct perf_event *event;
12179 	u64 period;
12180 
12181 	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
12182 
12183 	if (event->state != PERF_EVENT_STATE_ACTIVE ||
12184 	    event->hw.state & PERF_HES_STOPPED)
12185 		return HRTIMER_NORESTART;
12186 
12187 	event->pmu->read(event);
12188 
12189 	perf_sample_data_init(&data, 0, event->hw.last_period);
12190 	regs = get_irq_regs();
12191 
12192 	if (regs && !perf_exclude_event(event, regs)) {
12193 		if (!(event->attr.exclude_idle && is_idle_task(current)))
12194 			if (perf_event_overflow(event, &data, regs))
12195 				ret = HRTIMER_NORESTART;
12196 	}
12197 
12198 	period = max_t(u64, 10000, event->hw.sample_period);
12199 	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
12200 
12201 	return ret;
12202 }
12203 
perf_swevent_start_hrtimer(struct perf_event * event)12204 static void perf_swevent_start_hrtimer(struct perf_event *event)
12205 {
12206 	struct hw_perf_event *hwc = &event->hw;
12207 	s64 period;
12208 
12209 	if (!is_sampling_event(event))
12210 		return;
12211 
12212 	period = local64_read(&hwc->period_left);
12213 	if (period) {
12214 		if (period < 0)
12215 			period = 10000;
12216 
12217 		local64_set(&hwc->period_left, 0);
12218 	} else {
12219 		period = max_t(u64, 10000, hwc->sample_period);
12220 	}
12221 	hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
12222 		      HRTIMER_MODE_REL_PINNED_HARD);
12223 }
12224 
perf_swevent_cancel_hrtimer(struct perf_event * event)12225 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
12226 {
12227 	struct hw_perf_event *hwc = &event->hw;
12228 
12229 	/*
12230 	 * Careful: this function can be triggered in the hrtimer handler,
12231 	 * for cpu-clock events, so hrtimer_cancel() would cause a
12232 	 * deadlock.
12233 	 *
12234 	 * So use hrtimer_try_to_cancel() to try to stop the hrtimer,
12235 	 * and the cpu-clock handler also sets the PERF_HES_STOPPED flag,
12236 	 * which guarantees that perf_swevent_hrtimer() will stop the
12237 	 * hrtimer once it sees the PERF_HES_STOPPED flag.
12238 	 */
12239 	if (is_sampling_event(event) && (hwc->interrupts != MAX_INTERRUPTS)) {
12240 		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
12241 		local64_set(&hwc->period_left, ktime_to_ns(remaining));
12242 
12243 		hrtimer_try_to_cancel(&hwc->hrtimer);
12244 	}
12245 }
12246 
perf_swevent_destroy_hrtimer(struct perf_event * event)12247 static void perf_swevent_destroy_hrtimer(struct perf_event *event)
12248 {
12249 	hrtimer_cancel(&event->hw.hrtimer);
12250 }
12251 
perf_swevent_init_hrtimer(struct perf_event * event)12252 static void perf_swevent_init_hrtimer(struct perf_event *event)
12253 {
12254 	struct hw_perf_event *hwc = &event->hw;
12255 
12256 	if (!is_sampling_event(event))
12257 		return;
12258 
12259 	hrtimer_setup(&hwc->hrtimer, perf_swevent_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
12260 	event->destroy = perf_swevent_destroy_hrtimer;
12261 
12262 	/*
12263 	 * Since hrtimers have a fixed rate, we can do a static freq->period
12264 	 * mapping and avoid the whole period adjust feedback stuff.
12265 	 */
12266 	if (event->attr.freq) {
12267 		long freq = event->attr.sample_freq;
12268 
12269 		event->attr.sample_period = NSEC_PER_SEC / freq;
12270 		hwc->sample_period = event->attr.sample_period;
12271 		local64_set(&hwc->period_left, hwc->sample_period);
12272 		hwc->last_period = hwc->sample_period;
12273 		event->attr.freq = 0;
12274 	}
12275 }
12276 
12277 /*
12278  * Software event: cpu wall time clock
12279  */
12280 
cpu_clock_event_update(struct perf_event * event)12281 static void cpu_clock_event_update(struct perf_event *event)
12282 {
12283 	s64 prev;
12284 	u64 now;
12285 
12286 	now = local_clock();
12287 	prev = local64_xchg(&event->hw.prev_count, now);
12288 	local64_add(now - prev, &event->count);
12289 }
12290 
cpu_clock_event_start(struct perf_event * event,int flags)12291 static void cpu_clock_event_start(struct perf_event *event, int flags)
12292 {
12293 	event->hw.state = 0;
12294 	local64_set(&event->hw.prev_count, local_clock());
12295 	perf_swevent_start_hrtimer(event);
12296 }
12297 
cpu_clock_event_stop(struct perf_event * event,int flags)12298 static void cpu_clock_event_stop(struct perf_event *event, int flags)
12299 {
12300 	event->hw.state = PERF_HES_STOPPED;
12301 	perf_swevent_cancel_hrtimer(event);
12302 	if (flags & PERF_EF_UPDATE)
12303 		cpu_clock_event_update(event);
12304 }
12305 
cpu_clock_event_add(struct perf_event * event,int flags)12306 static int cpu_clock_event_add(struct perf_event *event, int flags)
12307 {
12308 	if (flags & PERF_EF_START)
12309 		cpu_clock_event_start(event, flags);
12310 	perf_event_update_userpage(event);
12311 
12312 	return 0;
12313 }
12314 
cpu_clock_event_del(struct perf_event * event,int flags)12315 static void cpu_clock_event_del(struct perf_event *event, int flags)
12316 {
12317 	cpu_clock_event_stop(event, PERF_EF_UPDATE);
12318 }
12319 
cpu_clock_event_read(struct perf_event * event)12320 static void cpu_clock_event_read(struct perf_event *event)
12321 {
12322 	cpu_clock_event_update(event);
12323 }
12324 
cpu_clock_event_init(struct perf_event * event)12325 static int cpu_clock_event_init(struct perf_event *event)
12326 {
12327 	if (event->attr.type != perf_cpu_clock.type)
12328 		return -ENOENT;
12329 
12330 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
12331 		return -ENOENT;
12332 
12333 	/*
12334 	 * no branch sampling for software events
12335 	 */
12336 	if (has_branch_stack(event))
12337 		return -EOPNOTSUPP;
12338 
12339 	perf_swevent_init_hrtimer(event);
12340 
12341 	return 0;
12342 }
12343 
12344 static struct pmu perf_cpu_clock = {
12345 	.task_ctx_nr	= perf_sw_context,
12346 
12347 	.capabilities	= PERF_PMU_CAP_NO_NMI,
12348 	.dev		= PMU_NULL_DEV,
12349 
12350 	.event_init	= cpu_clock_event_init,
12351 	.add		= cpu_clock_event_add,
12352 	.del		= cpu_clock_event_del,
12353 	.start		= cpu_clock_event_start,
12354 	.stop		= cpu_clock_event_stop,
12355 	.read		= cpu_clock_event_read,
12356 };
12357 
12358 /*
12359  * Software event: task time clock
12360  */
12361 
task_clock_event_update(struct perf_event * event,u64 now)12362 static void task_clock_event_update(struct perf_event *event, u64 now)
12363 {
12364 	u64 prev;
12365 	s64 delta;
12366 
12367 	prev = local64_xchg(&event->hw.prev_count, now);
12368 	delta = now - prev;
12369 	local64_add(delta, &event->count);
12370 }
12371 
task_clock_event_start(struct perf_event * event,int flags)12372 static void task_clock_event_start(struct perf_event *event, int flags)
12373 {
12374 	event->hw.state = 0;
12375 	local64_set(&event->hw.prev_count, event->ctx->time.time);
12376 	perf_swevent_start_hrtimer(event);
12377 }
12378 
task_clock_event_stop(struct perf_event * event,int flags)12379 static void task_clock_event_stop(struct perf_event *event, int flags)
12380 {
12381 	event->hw.state = PERF_HES_STOPPED;
12382 	perf_swevent_cancel_hrtimer(event);
12383 	if (flags & PERF_EF_UPDATE)
12384 		task_clock_event_update(event, event->ctx->time.time);
12385 }
12386 
task_clock_event_add(struct perf_event * event,int flags)12387 static int task_clock_event_add(struct perf_event *event, int flags)
12388 {
12389 	if (flags & PERF_EF_START)
12390 		task_clock_event_start(event, flags);
12391 	perf_event_update_userpage(event);
12392 
12393 	return 0;
12394 }
12395 
task_clock_event_del(struct perf_event * event,int flags)12396 static void task_clock_event_del(struct perf_event *event, int flags)
12397 {
12398 	task_clock_event_stop(event, PERF_EF_UPDATE);
12399 }
12400 
task_clock_event_read(struct perf_event * event)12401 static void task_clock_event_read(struct perf_event *event)
12402 {
12403 	u64 now = perf_clock();
12404 	u64 delta = now - event->ctx->time.stamp;
12405 	u64 time = event->ctx->time.time + delta;
12406 
12407 	task_clock_event_update(event, time);
12408 }
12409 
task_clock_event_init(struct perf_event * event)12410 static int task_clock_event_init(struct perf_event *event)
12411 {
12412 	if (event->attr.type != perf_task_clock.type)
12413 		return -ENOENT;
12414 
12415 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
12416 		return -ENOENT;
12417 
12418 	/*
12419 	 * no branch sampling for software events
12420 	 */
12421 	if (has_branch_stack(event))
12422 		return -EOPNOTSUPP;
12423 
12424 	perf_swevent_init_hrtimer(event);
12425 
12426 	return 0;
12427 }
12428 
12429 static struct pmu perf_task_clock = {
12430 	.task_ctx_nr	= perf_sw_context,
12431 
12432 	.capabilities	= PERF_PMU_CAP_NO_NMI,
12433 	.dev		= PMU_NULL_DEV,
12434 
12435 	.event_init	= task_clock_event_init,
12436 	.add		= task_clock_event_add,
12437 	.del		= task_clock_event_del,
12438 	.start		= task_clock_event_start,
12439 	.stop		= task_clock_event_stop,
12440 	.read		= task_clock_event_read,
12441 };
12442 
perf_pmu_nop_void(struct pmu * pmu)12443 static void perf_pmu_nop_void(struct pmu *pmu)
12444 {
12445 }
12446 
perf_pmu_nop_txn(struct pmu * pmu,unsigned int flags)12447 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
12448 {
12449 }
12450 
perf_pmu_nop_int(struct pmu * pmu)12451 static int perf_pmu_nop_int(struct pmu *pmu)
12452 {
12453 	return 0;
12454 }
12455 
perf_event_nop_int(struct perf_event * event,u64 value)12456 static int perf_event_nop_int(struct perf_event *event, u64 value)
12457 {
12458 	return 0;
12459 }
12460 
12461 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
12462 
perf_pmu_start_txn(struct pmu * pmu,unsigned int flags)12463 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
12464 {
12465 	__this_cpu_write(nop_txn_flags, flags);
12466 
12467 	if (flags & ~PERF_PMU_TXN_ADD)
12468 		return;
12469 
12470 	perf_pmu_disable(pmu);
12471 }
12472 
perf_pmu_commit_txn(struct pmu * pmu)12473 static int perf_pmu_commit_txn(struct pmu *pmu)
12474 {
12475 	unsigned int flags = __this_cpu_read(nop_txn_flags);
12476 
12477 	__this_cpu_write(nop_txn_flags, 0);
12478 
12479 	if (flags & ~PERF_PMU_TXN_ADD)
12480 		return 0;
12481 
12482 	perf_pmu_enable(pmu);
12483 	return 0;
12484 }
12485 
perf_pmu_cancel_txn(struct pmu * pmu)12486 static void perf_pmu_cancel_txn(struct pmu *pmu)
12487 {
12488 	unsigned int flags =  __this_cpu_read(nop_txn_flags);
12489 
12490 	__this_cpu_write(nop_txn_flags, 0);
12491 
12492 	if (flags & ~PERF_PMU_TXN_ADD)
12493 		return;
12494 
12495 	perf_pmu_enable(pmu);
12496 }
12497 
perf_event_idx_default(struct perf_event * event)12498 static int perf_event_idx_default(struct perf_event *event)
12499 {
12500 	return 0;
12501 }
12502 
12503 /*
12504  * Let userspace know that this PMU supports address range filtering:
12505  */
nr_addr_filters_show(struct device * dev,struct device_attribute * attr,char * page)12506 static ssize_t nr_addr_filters_show(struct device *dev,
12507 				    struct device_attribute *attr,
12508 				    char *page)
12509 {
12510 	struct pmu *pmu = dev_get_drvdata(dev);
12511 
12512 	return sysfs_emit(page, "%d\n", pmu->nr_addr_filters);
12513 }
12514 DEVICE_ATTR_RO(nr_addr_filters);
12515 
12516 static struct idr pmu_idr;
12517 
12518 static ssize_t
type_show(struct device * dev,struct device_attribute * attr,char * page)12519 type_show(struct device *dev, struct device_attribute *attr, char *page)
12520 {
12521 	struct pmu *pmu = dev_get_drvdata(dev);
12522 
12523 	return sysfs_emit(page, "%d\n", pmu->type);
12524 }
12525 static DEVICE_ATTR_RO(type);
12526 
12527 static ssize_t
perf_event_mux_interval_ms_show(struct device * dev,struct device_attribute * attr,char * page)12528 perf_event_mux_interval_ms_show(struct device *dev,
12529 				struct device_attribute *attr,
12530 				char *page)
12531 {
12532 	struct pmu *pmu = dev_get_drvdata(dev);
12533 
12534 	return sysfs_emit(page, "%d\n", pmu->hrtimer_interval_ms);
12535 }
12536 
12537 static DEFINE_MUTEX(mux_interval_mutex);
12538 
12539 static ssize_t
perf_event_mux_interval_ms_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)12540 perf_event_mux_interval_ms_store(struct device *dev,
12541 				 struct device_attribute *attr,
12542 				 const char *buf, size_t count)
12543 {
12544 	struct pmu *pmu = dev_get_drvdata(dev);
12545 	int timer, cpu, ret;
12546 
12547 	ret = kstrtoint(buf, 0, &timer);
12548 	if (ret)
12549 		return ret;
12550 
12551 	if (timer < 1)
12552 		return -EINVAL;
12553 
12554 	/* same value, noting to do */
12555 	if (timer == pmu->hrtimer_interval_ms)
12556 		return count;
12557 
12558 	mutex_lock(&mux_interval_mutex);
12559 	pmu->hrtimer_interval_ms = timer;
12560 
12561 	/* update all cpuctx for this PMU */
12562 	cpus_read_lock();
12563 	for_each_online_cpu(cpu) {
12564 		struct perf_cpu_pmu_context *cpc;
12565 		cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
12566 		cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
12567 
12568 		cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
12569 	}
12570 	cpus_read_unlock();
12571 	mutex_unlock(&mux_interval_mutex);
12572 
12573 	return count;
12574 }
12575 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
12576 
perf_scope_cpu_topology_cpumask(unsigned int scope,int cpu)12577 static inline const struct cpumask *perf_scope_cpu_topology_cpumask(unsigned int scope, int cpu)
12578 {
12579 	switch (scope) {
12580 	case PERF_PMU_SCOPE_CORE:
12581 		return topology_sibling_cpumask(cpu);
12582 	case PERF_PMU_SCOPE_DIE:
12583 		return topology_die_cpumask(cpu);
12584 	case PERF_PMU_SCOPE_CLUSTER:
12585 		return topology_cluster_cpumask(cpu);
12586 	case PERF_PMU_SCOPE_PKG:
12587 		return topology_core_cpumask(cpu);
12588 	case PERF_PMU_SCOPE_SYS_WIDE:
12589 		return cpu_online_mask;
12590 	}
12591 
12592 	return NULL;
12593 }
12594 
perf_scope_cpumask(unsigned int scope)12595 static inline struct cpumask *perf_scope_cpumask(unsigned int scope)
12596 {
12597 	switch (scope) {
12598 	case PERF_PMU_SCOPE_CORE:
12599 		return perf_online_core_mask;
12600 	case PERF_PMU_SCOPE_DIE:
12601 		return perf_online_die_mask;
12602 	case PERF_PMU_SCOPE_CLUSTER:
12603 		return perf_online_cluster_mask;
12604 	case PERF_PMU_SCOPE_PKG:
12605 		return perf_online_pkg_mask;
12606 	case PERF_PMU_SCOPE_SYS_WIDE:
12607 		return perf_online_sys_mask;
12608 	}
12609 
12610 	return NULL;
12611 }
12612 
cpumask_show(struct device * dev,struct device_attribute * attr,char * buf)12613 static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
12614 			    char *buf)
12615 {
12616 	struct pmu *pmu = dev_get_drvdata(dev);
12617 	struct cpumask *mask = perf_scope_cpumask(pmu->scope);
12618 
12619 	if (mask)
12620 		return cpumap_print_to_pagebuf(true, buf, mask);
12621 	return 0;
12622 }
12623 
12624 static DEVICE_ATTR_RO(cpumask);
12625 
12626 static struct attribute *pmu_dev_attrs[] = {
12627 	&dev_attr_type.attr,
12628 	&dev_attr_perf_event_mux_interval_ms.attr,
12629 	&dev_attr_nr_addr_filters.attr,
12630 	&dev_attr_cpumask.attr,
12631 	NULL,
12632 };
12633 
pmu_dev_is_visible(struct kobject * kobj,struct attribute * a,int n)12634 static umode_t pmu_dev_is_visible(struct kobject *kobj, struct attribute *a, int n)
12635 {
12636 	struct device *dev = kobj_to_dev(kobj);
12637 	struct pmu *pmu = dev_get_drvdata(dev);
12638 
12639 	if (n == 2 && !pmu->nr_addr_filters)
12640 		return 0;
12641 
12642 	/* cpumask */
12643 	if (n == 3 && pmu->scope == PERF_PMU_SCOPE_NONE)
12644 		return 0;
12645 
12646 	return a->mode;
12647 }
12648 
12649 static struct attribute_group pmu_dev_attr_group = {
12650 	.is_visible = pmu_dev_is_visible,
12651 	.attrs = pmu_dev_attrs,
12652 };
12653 
12654 static const struct attribute_group *pmu_dev_groups[] = {
12655 	&pmu_dev_attr_group,
12656 	NULL,
12657 };
12658 
12659 static int pmu_bus_running;
12660 static const struct bus_type pmu_bus = {
12661 	.name		= "event_source",
12662 	.dev_groups	= pmu_dev_groups,
12663 };
12664 
pmu_dev_release(struct device * dev)12665 static void pmu_dev_release(struct device *dev)
12666 {
12667 	kfree(dev);
12668 }
12669 
pmu_dev_alloc(struct pmu * pmu)12670 static int pmu_dev_alloc(struct pmu *pmu)
12671 {
12672 	int ret = -ENOMEM;
12673 
12674 	pmu->dev = kzalloc_obj(struct device);
12675 	if (!pmu->dev)
12676 		goto out;
12677 
12678 	pmu->dev->groups = pmu->attr_groups;
12679 	device_initialize(pmu->dev);
12680 
12681 	dev_set_drvdata(pmu->dev, pmu);
12682 	pmu->dev->bus = &pmu_bus;
12683 	pmu->dev->parent = pmu->parent;
12684 	pmu->dev->release = pmu_dev_release;
12685 
12686 	ret = dev_set_name(pmu->dev, "%s", pmu->name);
12687 	if (ret)
12688 		goto free_dev;
12689 
12690 	ret = device_add(pmu->dev);
12691 	if (ret)
12692 		goto free_dev;
12693 
12694 	if (pmu->attr_update) {
12695 		ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
12696 		if (ret)
12697 			goto del_dev;
12698 	}
12699 
12700 out:
12701 	return ret;
12702 
12703 del_dev:
12704 	device_del(pmu->dev);
12705 
12706 free_dev:
12707 	put_device(pmu->dev);
12708 	pmu->dev = NULL;
12709 	goto out;
12710 }
12711 
12712 static struct lock_class_key cpuctx_mutex;
12713 static struct lock_class_key cpuctx_lock;
12714 
idr_cmpxchg(struct idr * idr,unsigned long id,void * old,void * new)12715 static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new)
12716 {
12717 	void *tmp, *val = idr_find(idr, id);
12718 
12719 	if (val != old)
12720 		return false;
12721 
12722 	tmp = idr_replace(idr, new, id);
12723 	if (IS_ERR(tmp))
12724 		return false;
12725 
12726 	WARN_ON_ONCE(tmp != val);
12727 	return true;
12728 }
12729 
perf_pmu_free(struct pmu * pmu)12730 static void perf_pmu_free(struct pmu *pmu)
12731 {
12732 	if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
12733 		if (pmu->nr_addr_filters)
12734 			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
12735 		device_del(pmu->dev);
12736 		put_device(pmu->dev);
12737 	}
12738 
12739 	if (pmu->cpu_pmu_context) {
12740 		int cpu;
12741 
12742 		for_each_possible_cpu(cpu) {
12743 			struct perf_cpu_pmu_context *cpc;
12744 
12745 			cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
12746 			if (!cpc)
12747 				continue;
12748 			if (cpc->epc.embedded) {
12749 				/* refcount managed */
12750 				put_pmu_ctx(&cpc->epc);
12751 				continue;
12752 			}
12753 			kfree(cpc);
12754 		}
12755 		free_percpu(pmu->cpu_pmu_context);
12756 	}
12757 }
12758 
DEFINE_FREE(pmu_unregister,struct pmu *,if (_T)perf_pmu_free (_T))12759 DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
12760 
12761 int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
12762 {
12763 	int cpu, max = PERF_TYPE_MAX;
12764 
12765 	struct pmu *pmu __free(pmu_unregister) = _pmu;
12766 	guard(mutex)(&pmus_lock);
12767 
12768 	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
12769 		return -EINVAL;
12770 
12771 	if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE,
12772 		      "Can not register a pmu with an invalid scope.\n"))
12773 		return -EINVAL;
12774 
12775 	pmu->name = name;
12776 
12777 	if (type >= 0)
12778 		max = type;
12779 
12780 	CLASS(idr_alloc, pmu_type)(&pmu_idr, NULL, max, 0, GFP_KERNEL);
12781 	if (pmu_type.id < 0)
12782 		return pmu_type.id;
12783 
12784 	WARN_ON(type >= 0 && pmu_type.id != type);
12785 
12786 	pmu->type = pmu_type.id;
12787 	atomic_set(&pmu->exclusive_cnt, 0);
12788 
12789 	if (pmu_bus_running && !pmu->dev) {
12790 		int ret = pmu_dev_alloc(pmu);
12791 		if (ret)
12792 			return ret;
12793 	}
12794 
12795 	pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context *);
12796 	if (!pmu->cpu_pmu_context)
12797 		return -ENOMEM;
12798 
12799 	for_each_possible_cpu(cpu) {
12800 		struct perf_cpu_pmu_context *cpc =
12801 			kmalloc_node(sizeof(struct perf_cpu_pmu_context),
12802 				     GFP_KERNEL | __GFP_ZERO,
12803 				     cpu_to_node(cpu));
12804 
12805 		if (!cpc)
12806 			return -ENOMEM;
12807 
12808 		*per_cpu_ptr(pmu->cpu_pmu_context, cpu) = cpc;
12809 		__perf_init_event_pmu_context(&cpc->epc, pmu);
12810 		__perf_mux_hrtimer_init(cpc, cpu);
12811 	}
12812 
12813 	if (!pmu->start_txn) {
12814 		if (pmu->pmu_enable) {
12815 			/*
12816 			 * If we have pmu_enable/pmu_disable calls, install
12817 			 * transaction stubs that use that to try and batch
12818 			 * hardware accesses.
12819 			 */
12820 			pmu->start_txn  = perf_pmu_start_txn;
12821 			pmu->commit_txn = perf_pmu_commit_txn;
12822 			pmu->cancel_txn = perf_pmu_cancel_txn;
12823 		} else {
12824 			pmu->start_txn  = perf_pmu_nop_txn;
12825 			pmu->commit_txn = perf_pmu_nop_int;
12826 			pmu->cancel_txn = perf_pmu_nop_void;
12827 		}
12828 	}
12829 
12830 	if (!pmu->pmu_enable) {
12831 		pmu->pmu_enable  = perf_pmu_nop_void;
12832 		pmu->pmu_disable = perf_pmu_nop_void;
12833 	}
12834 
12835 	if (!pmu->check_period)
12836 		pmu->check_period = perf_event_nop_int;
12837 
12838 	if (!pmu->event_idx)
12839 		pmu->event_idx = perf_event_idx_default;
12840 
12841 	INIT_LIST_HEAD(&pmu->events);
12842 	spin_lock_init(&pmu->events_lock);
12843 
12844 	/*
12845 	 * Now that the PMU is complete, make it visible to perf_try_init_event().
12846 	 */
12847 	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
12848 		return -EINVAL;
12849 	list_add_rcu(&pmu->entry, &pmus);
12850 
12851 	take_idr_id(pmu_type);
12852 	_pmu = no_free_ptr(pmu); // let it rip
12853 	return 0;
12854 }
12855 EXPORT_SYMBOL_GPL(perf_pmu_register);
12856 
__pmu_detach_event(struct pmu * pmu,struct perf_event * event,struct perf_event_context * ctx)12857 static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event,
12858 			       struct perf_event_context *ctx)
12859 {
12860 	/*
12861 	 * De-schedule the event and mark it REVOKED.
12862 	 */
12863 	perf_event_exit_event(event, ctx, ctx->task, true);
12864 
12865 	/*
12866 	 * All _free_event() bits that rely on event->pmu:
12867 	 *
12868 	 * Notably, perf_mmap() relies on the ordering here.
12869 	 */
12870 	scoped_guard (mutex, &event->mmap_mutex) {
12871 		WARN_ON_ONCE(pmu->event_unmapped);
12872 		/*
12873 		 * Mostly an empty lock sequence, such that perf_mmap(), which
12874 		 * relies on mmap_mutex, is sure to observe the state change.
12875 		 */
12876 	}
12877 
12878 	perf_event_free_bpf_prog(event);
12879 	perf_free_addr_filters(event);
12880 
12881 	if (event->destroy) {
12882 		event->destroy(event);
12883 		event->destroy = NULL;
12884 	}
12885 
12886 	if (event->pmu_ctx) {
12887 		put_pmu_ctx(event->pmu_ctx);
12888 		event->pmu_ctx = NULL;
12889 	}
12890 
12891 	exclusive_event_destroy(event);
12892 	module_put(pmu->module);
12893 
12894 	event->pmu = NULL; /* force fault instead of UAF */
12895 }
12896 
pmu_detach_event(struct pmu * pmu,struct perf_event * event)12897 static void pmu_detach_event(struct pmu *pmu, struct perf_event *event)
12898 {
12899 	struct perf_event_context *ctx;
12900 
12901 	ctx = perf_event_ctx_lock(event);
12902 	__pmu_detach_event(pmu, event, ctx);
12903 	perf_event_ctx_unlock(event, ctx);
12904 
12905 	scoped_guard (spinlock, &pmu->events_lock)
12906 		list_del(&event->pmu_list);
12907 }
12908 
pmu_get_event(struct pmu * pmu)12909 static struct perf_event *pmu_get_event(struct pmu *pmu)
12910 {
12911 	struct perf_event *event;
12912 
12913 	guard(spinlock)(&pmu->events_lock);
12914 	list_for_each_entry(event, &pmu->events, pmu_list) {
12915 		if (atomic_long_inc_not_zero(&event->refcount))
12916 			return event;
12917 	}
12918 
12919 	return NULL;
12920 }
12921 
pmu_empty(struct pmu * pmu)12922 static bool pmu_empty(struct pmu *pmu)
12923 {
12924 	guard(spinlock)(&pmu->events_lock);
12925 	return list_empty(&pmu->events);
12926 }
12927 
pmu_detach_events(struct pmu * pmu)12928 static void pmu_detach_events(struct pmu *pmu)
12929 {
12930 	struct perf_event *event;
12931 
12932 	for (;;) {
12933 		event = pmu_get_event(pmu);
12934 		if (!event)
12935 			break;
12936 
12937 		pmu_detach_event(pmu, event);
12938 		put_event(event);
12939 	}
12940 
12941 	/*
12942 	 * wait for pending _free_event()s
12943 	 */
12944 	wait_var_event(pmu, pmu_empty(pmu));
12945 }
12946 
perf_pmu_unregister(struct pmu * pmu)12947 int perf_pmu_unregister(struct pmu *pmu)
12948 {
12949 	scoped_guard (mutex, &pmus_lock) {
12950 		if (!idr_cmpxchg(&pmu_idr, pmu->type, pmu, NULL))
12951 			return -EINVAL;
12952 
12953 		list_del_rcu(&pmu->entry);
12954 	}
12955 
12956 	/*
12957 	 * We dereference the pmu list under both SRCU and regular RCU, so
12958 	 * synchronize against both of those.
12959 	 *
12960 	 * Notably, the entirety of event creation, from perf_init_event()
12961 	 * (which will now fail, because of the above) until
12962 	 * perf_install_in_context() should be under SRCU such that
12963 	 * this synchronizes against event creation. This avoids trying to
12964 	 * detach events that are not fully formed.
12965 	 */
12966 	synchronize_srcu(&pmus_srcu);
12967 	synchronize_rcu();
12968 
12969 	if (pmu->event_unmapped && !pmu_empty(pmu)) {
12970 		/*
12971 		 * Can't force remove events when pmu::event_unmapped()
12972 		 * is used in perf_mmap_close().
12973 		 */
12974 		guard(mutex)(&pmus_lock);
12975 		idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu);
12976 		list_add_rcu(&pmu->entry, &pmus);
12977 		return -EBUSY;
12978 	}
12979 
12980 	scoped_guard (mutex, &pmus_lock)
12981 		idr_remove(&pmu_idr, pmu->type);
12982 
12983 	/*
12984 	 * PMU is removed from the pmus list, so no new events will
12985 	 * be created, now take care of the existing ones.
12986 	 */
12987 	pmu_detach_events(pmu);
12988 
12989 	/*
12990 	 * PMU is unused, make it go away.
12991 	 */
12992 	perf_pmu_free(pmu);
12993 	return 0;
12994 }
12995 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
12996 
has_extended_regs(struct perf_event * event)12997 static inline bool has_extended_regs(struct perf_event *event)
12998 {
12999 	return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
13000 	       (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
13001 }
13002 
perf_try_init_event(struct pmu * pmu,struct perf_event * event)13003 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
13004 {
13005 	struct perf_event_context *ctx = NULL;
13006 	int ret;
13007 
13008 	if (!try_module_get(pmu->module))
13009 		return -ENODEV;
13010 
13011 	/*
13012 	 * A number of pmu->event_init() methods iterate the sibling_list to,
13013 	 * for example, validate if the group fits on the PMU. Therefore,
13014 	 * if this is a sibling event, acquire the ctx->mutex to protect
13015 	 * the sibling_list.
13016 	 */
13017 	if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
13018 		/*
13019 		 * This ctx->mutex can nest when we're called through
13020 		 * inheritance. See the perf_event_ctx_lock_nested() comment.
13021 		 */
13022 		ctx = perf_event_ctx_lock_nested(event->group_leader,
13023 						 SINGLE_DEPTH_NESTING);
13024 		BUG_ON(!ctx);
13025 	}
13026 
13027 	event->pmu = pmu;
13028 	ret = pmu->event_init(event);
13029 
13030 	if (ctx)
13031 		perf_event_ctx_unlock(event->group_leader, ctx);
13032 
13033 	if (ret)
13034 		goto err_pmu;
13035 
13036 	if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
13037 	    has_extended_regs(event)) {
13038 		ret = -EOPNOTSUPP;
13039 		goto err_destroy;
13040 	}
13041 
13042 	if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
13043 	    event_has_any_exclude_flag(event)) {
13044 		ret = -EINVAL;
13045 		goto err_destroy;
13046 	}
13047 
13048 	if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) {
13049 		const struct cpumask *cpumask;
13050 		struct cpumask *pmu_cpumask;
13051 		int cpu;
13052 
13053 		cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu);
13054 		pmu_cpumask = perf_scope_cpumask(pmu->scope);
13055 
13056 		ret = -ENODEV;
13057 		if (!pmu_cpumask || !cpumask)
13058 			goto err_destroy;
13059 
13060 		cpu = cpumask_any_and(pmu_cpumask, cpumask);
13061 		if (cpu >= nr_cpu_ids)
13062 			goto err_destroy;
13063 
13064 		event->event_caps |= PERF_EV_CAP_READ_SCOPE;
13065 	}
13066 
13067 	return 0;
13068 
13069 err_destroy:
13070 	if (event->destroy) {
13071 		event->destroy(event);
13072 		event->destroy = NULL;
13073 	}
13074 
13075 err_pmu:
13076 	event->pmu = NULL;
13077 	module_put(pmu->module);
13078 	return ret;
13079 }
13080 
perf_init_event(struct perf_event * event)13081 static struct pmu *perf_init_event(struct perf_event *event)
13082 {
13083 	bool extended_type = false;
13084 	struct pmu *pmu;
13085 	int type, ret;
13086 
13087 	guard(srcu)(&pmus_srcu); /* pmu idr/list access */
13088 
13089 	/*
13090 	 * Save original type before calling pmu->event_init() since certain
13091 	 * pmus overwrites event->attr.type to forward event to another pmu.
13092 	 */
13093 	event->orig_type = event->attr.type;
13094 
13095 	/* Try parent's PMU first: */
13096 	if (event->parent && event->parent->pmu) {
13097 		pmu = event->parent->pmu;
13098 		ret = perf_try_init_event(pmu, event);
13099 		if (!ret)
13100 			return pmu;
13101 	}
13102 
13103 	/*
13104 	 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
13105 	 * are often aliases for PERF_TYPE_RAW.
13106 	 */
13107 	type = event->attr.type;
13108 	if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) {
13109 		type = event->attr.config >> PERF_PMU_TYPE_SHIFT;
13110 		if (!type) {
13111 			type = PERF_TYPE_RAW;
13112 		} else {
13113 			extended_type = true;
13114 			event->attr.config &= PERF_HW_EVENT_MASK;
13115 		}
13116 	}
13117 
13118 again:
13119 	scoped_guard (rcu)
13120 		pmu = idr_find(&pmu_idr, type);
13121 	if (pmu) {
13122 		if (event->attr.type != type && type != PERF_TYPE_RAW &&
13123 		    !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE))
13124 			return ERR_PTR(-ENOENT);
13125 
13126 		ret = perf_try_init_event(pmu, event);
13127 		if (ret == -ENOENT && event->attr.type != type && !extended_type) {
13128 			type = event->attr.type;
13129 			goto again;
13130 		}
13131 
13132 		if (ret)
13133 			return ERR_PTR(ret);
13134 
13135 		return pmu;
13136 	}
13137 
13138 	list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
13139 		ret = perf_try_init_event(pmu, event);
13140 		if (!ret)
13141 			return pmu;
13142 
13143 		if (ret != -ENOENT)
13144 			return ERR_PTR(ret);
13145 	}
13146 
13147 	return ERR_PTR(-ENOENT);
13148 }
13149 
attach_sb_event(struct perf_event * event)13150 static void attach_sb_event(struct perf_event *event)
13151 {
13152 	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
13153 
13154 	raw_spin_lock(&pel->lock);
13155 	list_add_rcu(&event->sb_list, &pel->list);
13156 	raw_spin_unlock(&pel->lock);
13157 }
13158 
13159 /*
13160  * We keep a list of all !task (and therefore per-cpu) events
13161  * that need to receive side-band records.
13162  *
13163  * This avoids having to scan all the various PMU per-cpu contexts
13164  * looking for them.
13165  */
account_pmu_sb_event(struct perf_event * event)13166 static void account_pmu_sb_event(struct perf_event *event)
13167 {
13168 	if (is_sb_event(event))
13169 		attach_sb_event(event);
13170 }
13171 
13172 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
account_freq_event_nohz(void)13173 static void account_freq_event_nohz(void)
13174 {
13175 #ifdef CONFIG_NO_HZ_FULL
13176 	/* Lock so we don't race with concurrent unaccount */
13177 	spin_lock(&nr_freq_lock);
13178 	if (atomic_inc_return(&nr_freq_events) == 1)
13179 		tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
13180 	spin_unlock(&nr_freq_lock);
13181 #endif
13182 }
13183 
account_freq_event(void)13184 static void account_freq_event(void)
13185 {
13186 	if (tick_nohz_full_enabled())
13187 		account_freq_event_nohz();
13188 	else
13189 		atomic_inc(&nr_freq_events);
13190 }
13191 
13192 
account_event(struct perf_event * event)13193 static void account_event(struct perf_event *event)
13194 {
13195 	bool inc = false;
13196 
13197 	if (event->parent)
13198 		return;
13199 
13200 	if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
13201 		inc = true;
13202 	if (event->attr.mmap || event->attr.mmap_data)
13203 		atomic_inc(&nr_mmap_events);
13204 	if (event->attr.build_id)
13205 		atomic_inc(&nr_build_id_events);
13206 	if (event->attr.comm)
13207 		atomic_inc(&nr_comm_events);
13208 	if (event->attr.namespaces)
13209 		atomic_inc(&nr_namespaces_events);
13210 	if (event->attr.cgroup)
13211 		atomic_inc(&nr_cgroup_events);
13212 	if (event->attr.task)
13213 		atomic_inc(&nr_task_events);
13214 	if (event->attr.freq)
13215 		account_freq_event();
13216 	if (event->attr.context_switch) {
13217 		atomic_inc(&nr_switch_events);
13218 		inc = true;
13219 	}
13220 	if (has_branch_stack(event))
13221 		inc = true;
13222 	if (is_cgroup_event(event))
13223 		inc = true;
13224 	if (event->attr.ksymbol)
13225 		atomic_inc(&nr_ksymbol_events);
13226 	if (event->attr.bpf_event)
13227 		atomic_inc(&nr_bpf_events);
13228 	if (event->attr.text_poke)
13229 		atomic_inc(&nr_text_poke_events);
13230 
13231 	if (inc) {
13232 		/*
13233 		 * We need the mutex here because static_branch_enable()
13234 		 * must complete *before* the perf_sched_count increment
13235 		 * becomes visible.
13236 		 */
13237 		if (atomic_inc_not_zero(&perf_sched_count))
13238 			goto enabled;
13239 
13240 		mutex_lock(&perf_sched_mutex);
13241 		if (!atomic_read(&perf_sched_count)) {
13242 			static_branch_enable(&perf_sched_events);
13243 			/*
13244 			 * Guarantee that all CPUs observe they key change and
13245 			 * call the perf scheduling hooks before proceeding to
13246 			 * install events that need them.
13247 			 */
13248 			synchronize_rcu();
13249 		}
13250 		/*
13251 		 * Now that we have waited for the sync_sched(), allow further
13252 		 * increments to by-pass the mutex.
13253 		 */
13254 		atomic_inc(&perf_sched_count);
13255 		mutex_unlock(&perf_sched_mutex);
13256 	}
13257 enabled:
13258 
13259 	account_pmu_sb_event(event);
13260 }
13261 
13262 /*
13263  * Allocate and initialize an event structure
13264  */
13265 static struct perf_event *
perf_event_alloc(struct perf_event_attr * attr,int cpu,struct task_struct * task,struct perf_event * group_leader,struct perf_event * parent_event,perf_overflow_handler_t overflow_handler,void * context,int cgroup_fd)13266 perf_event_alloc(struct perf_event_attr *attr, int cpu,
13267 		 struct task_struct *task,
13268 		 struct perf_event *group_leader,
13269 		 struct perf_event *parent_event,
13270 		 perf_overflow_handler_t overflow_handler,
13271 		 void *context, int cgroup_fd)
13272 {
13273 	struct pmu *pmu;
13274 	struct hw_perf_event *hwc;
13275 	long err = -EINVAL;
13276 	int node;
13277 
13278 	if ((unsigned)cpu >= nr_cpu_ids) {
13279 		if (!task || cpu != -1)
13280 			return ERR_PTR(-EINVAL);
13281 	}
13282 	if (attr->sigtrap && !task) {
13283 		/* Requires a task: avoid signalling random tasks. */
13284 		return ERR_PTR(-EINVAL);
13285 	}
13286 
13287 	node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
13288 	struct perf_event *event __free(__free_event) =
13289 		kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, node);
13290 	if (!event)
13291 		return ERR_PTR(-ENOMEM);
13292 
13293 	/*
13294 	 * Single events are their own group leaders, with an
13295 	 * empty sibling list:
13296 	 */
13297 	if (!group_leader)
13298 		group_leader = event;
13299 
13300 	mutex_init(&event->child_mutex);
13301 	INIT_LIST_HEAD(&event->child_list);
13302 
13303 	INIT_LIST_HEAD(&event->event_entry);
13304 	INIT_LIST_HEAD(&event->sibling_list);
13305 	INIT_LIST_HEAD(&event->active_list);
13306 	init_event_group(event);
13307 	INIT_LIST_HEAD(&event->rb_entry);
13308 	INIT_LIST_HEAD(&event->active_entry);
13309 	INIT_LIST_HEAD(&event->addr_filters.list);
13310 	INIT_HLIST_NODE(&event->hlist_entry);
13311 	INIT_LIST_HEAD(&event->pmu_list);
13312 
13313 
13314 	init_waitqueue_head(&event->waitq);
13315 	init_irq_work(&event->pending_irq, perf_pending_irq);
13316 	event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
13317 	init_task_work(&event->pending_task, perf_pending_task);
13318 
13319 	mutex_init(&event->mmap_mutex);
13320 	raw_spin_lock_init(&event->addr_filters.lock);
13321 
13322 	atomic_long_set(&event->refcount, 1);
13323 	event->cpu		= cpu;
13324 	event->attr		= *attr;
13325 	event->group_leader	= group_leader;
13326 	event->pmu		= NULL;
13327 	event->oncpu		= -1;
13328 
13329 	event->parent		= parent_event;
13330 
13331 	event->ns		= get_pid_ns(task_active_pid_ns(current));
13332 	event->id		= atomic64_inc_return(&perf_event_id);
13333 
13334 	event->state		= PERF_EVENT_STATE_INACTIVE;
13335 
13336 	if (parent_event)
13337 		event->event_caps = parent_event->event_caps;
13338 
13339 	if (task) {
13340 		event->attach_state = PERF_ATTACH_TASK;
13341 		/*
13342 		 * XXX pmu::event_init needs to know what task to account to
13343 		 * and we cannot use the ctx information because we need the
13344 		 * pmu before we get a ctx.
13345 		 */
13346 		event->hw.target = get_task_struct(task);
13347 	}
13348 
13349 	event->clock = &local_clock;
13350 	if (parent_event)
13351 		event->clock = parent_event->clock;
13352 
13353 	if (!overflow_handler && parent_event) {
13354 		overflow_handler = parent_event->overflow_handler;
13355 		context = parent_event->overflow_handler_context;
13356 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
13357 		if (parent_event->prog) {
13358 			struct bpf_prog *prog = parent_event->prog;
13359 
13360 			bpf_prog_inc(prog);
13361 			event->prog = prog;
13362 		}
13363 #endif
13364 	}
13365 
13366 	if (overflow_handler) {
13367 		event->overflow_handler	= overflow_handler;
13368 		event->overflow_handler_context = context;
13369 	} else if (is_write_backward(event)){
13370 		event->overflow_handler = perf_event_output_backward;
13371 		event->overflow_handler_context = NULL;
13372 	} else {
13373 		event->overflow_handler = perf_event_output_forward;
13374 		event->overflow_handler_context = NULL;
13375 	}
13376 
13377 	perf_event__state_init(event);
13378 
13379 	pmu = NULL;
13380 
13381 	hwc = &event->hw;
13382 	hwc->sample_period = attr->sample_period;
13383 	if (is_event_in_freq_mode(event))
13384 		hwc->sample_period = 1;
13385 	hwc->last_period = hwc->sample_period;
13386 
13387 	local64_set(&hwc->period_left, hwc->sample_period);
13388 
13389 	/*
13390 	 * We do not support PERF_SAMPLE_READ on inherited events unless
13391 	 * PERF_SAMPLE_TID is also selected, which allows inherited events to
13392 	 * collect per-thread samples.
13393 	 * See perf_output_read().
13394 	 */
13395 	if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
13396 		return ERR_PTR(-EINVAL);
13397 
13398 	if (!has_branch_stack(event))
13399 		event->attr.branch_sample_type = 0;
13400 
13401 	pmu = perf_init_event(event);
13402 	if (IS_ERR(pmu))
13403 		return (void*)pmu;
13404 
13405 	/*
13406 	 * The PERF_ATTACH_TASK_DATA is set in the event_init()->hw_config().
13407 	 * The attach should be right after the perf_init_event().
13408 	 * Otherwise, the __free_event() would mistakenly detach the non-exist
13409 	 * perf_ctx_data because of the other errors between them.
13410 	 */
13411 	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
13412 		err = attach_perf_ctx_data(event);
13413 		if (err)
13414 			return ERR_PTR(err);
13415 	}
13416 
13417 	/*
13418 	 * Disallow uncore-task events. Similarly, disallow uncore-cgroup
13419 	 * events (they don't make sense as the cgroup will be different
13420 	 * on other CPUs in the uncore mask).
13421 	 */
13422 	if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1))
13423 		return ERR_PTR(-EINVAL);
13424 
13425 	if (event->attr.aux_output &&
13426 	    (!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
13427 	     event->attr.aux_pause || event->attr.aux_resume))
13428 		return ERR_PTR(-EOPNOTSUPP);
13429 
13430 	if (event->attr.aux_pause && event->attr.aux_resume)
13431 		return ERR_PTR(-EINVAL);
13432 
13433 	if (event->attr.aux_start_paused) {
13434 		if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
13435 			return ERR_PTR(-EOPNOTSUPP);
13436 		event->hw.aux_paused = 1;
13437 	}
13438 
13439 	if (cgroup_fd != -1) {
13440 		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
13441 		if (err)
13442 			return ERR_PTR(err);
13443 	}
13444 
13445 	err = exclusive_event_init(event);
13446 	if (err)
13447 		return ERR_PTR(err);
13448 
13449 	if (has_addr_filter(event)) {
13450 		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
13451 						    sizeof(struct perf_addr_filter_range),
13452 						    GFP_KERNEL);
13453 		if (!event->addr_filter_ranges)
13454 			return ERR_PTR(-ENOMEM);
13455 
13456 		/*
13457 		 * Clone the parent's vma offsets: they are valid until exec()
13458 		 * even if the mm is not shared with the parent.
13459 		 */
13460 		if (event->parent) {
13461 			struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
13462 
13463 			raw_spin_lock_irq(&ifh->lock);
13464 			memcpy(event->addr_filter_ranges,
13465 			       event->parent->addr_filter_ranges,
13466 			       pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
13467 			raw_spin_unlock_irq(&ifh->lock);
13468 		}
13469 
13470 		/* force hw sync on the address filters */
13471 		event->addr_filters_gen = 1;
13472 	}
13473 
13474 	if (!event->parent) {
13475 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
13476 			err = get_callchain_buffers(attr->sample_max_stack);
13477 			if (err)
13478 				return ERR_PTR(err);
13479 			event->attach_state |= PERF_ATTACH_CALLCHAIN;
13480 		}
13481 	}
13482 
13483 	err = security_perf_event_alloc(event);
13484 	if (err)
13485 		return ERR_PTR(err);
13486 
13487 	err = mediated_pmu_account_event(event);
13488 	if (err)
13489 		return ERR_PTR(err);
13490 
13491 	/* symmetric to unaccount_event() in _free_event() */
13492 	account_event(event);
13493 
13494 	/*
13495 	 * Event creation should be under SRCU, see perf_pmu_unregister().
13496 	 */
13497 	lockdep_assert_held(&pmus_srcu);
13498 	scoped_guard (spinlock, &pmu->events_lock)
13499 		list_add(&event->pmu_list, &pmu->events);
13500 
13501 	return_ptr(event);
13502 }
13503 
perf_copy_attr(struct perf_event_attr __user * uattr,struct perf_event_attr * attr)13504 static int perf_copy_attr(struct perf_event_attr __user *uattr,
13505 			  struct perf_event_attr *attr)
13506 {
13507 	u32 size;
13508 	int ret;
13509 
13510 	/* Zero the full structure, so that a short copy will be nice. */
13511 	memset(attr, 0, sizeof(*attr));
13512 
13513 	ret = get_user(size, &uattr->size);
13514 	if (ret)
13515 		return ret;
13516 
13517 	/* ABI compatibility quirk: */
13518 	if (!size)
13519 		size = PERF_ATTR_SIZE_VER0;
13520 	if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
13521 		goto err_size;
13522 
13523 	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
13524 	if (ret) {
13525 		if (ret == -E2BIG)
13526 			goto err_size;
13527 		return ret;
13528 	}
13529 
13530 	attr->size = size;
13531 
13532 	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
13533 		return -EINVAL;
13534 
13535 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
13536 		return -EINVAL;
13537 
13538 	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
13539 		return -EINVAL;
13540 
13541 	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
13542 		u64 mask = attr->branch_sample_type;
13543 
13544 		/* only using defined bits */
13545 		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
13546 			return -EINVAL;
13547 
13548 		/* at least one branch bit must be set */
13549 		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
13550 			return -EINVAL;
13551 
13552 		/* propagate priv level, when not set for branch */
13553 		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
13554 
13555 			/* exclude_kernel checked on syscall entry */
13556 			if (!attr->exclude_kernel)
13557 				mask |= PERF_SAMPLE_BRANCH_KERNEL;
13558 
13559 			if (!attr->exclude_user)
13560 				mask |= PERF_SAMPLE_BRANCH_USER;
13561 
13562 			if (!attr->exclude_hv)
13563 				mask |= PERF_SAMPLE_BRANCH_HV;
13564 			/*
13565 			 * adjust user setting (for HW filter setup)
13566 			 */
13567 			attr->branch_sample_type = mask;
13568 		}
13569 		/* privileged levels capture (kernel, hv): check permissions */
13570 		if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) {
13571 			ret = perf_allow_kernel();
13572 			if (ret)
13573 				return ret;
13574 		}
13575 	}
13576 
13577 	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
13578 		ret = perf_reg_validate(attr->sample_regs_user);
13579 		if (ret)
13580 			return ret;
13581 	}
13582 
13583 	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
13584 		if (!arch_perf_have_user_stack_dump())
13585 			return -ENOSYS;
13586 
13587 		/*
13588 		 * We have __u32 type for the size, but so far
13589 		 * we can only use __u16 as maximum due to the
13590 		 * __u16 sample size limit.
13591 		 */
13592 		if (attr->sample_stack_user >= USHRT_MAX)
13593 			return -EINVAL;
13594 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
13595 			return -EINVAL;
13596 	}
13597 
13598 	if (!attr->sample_max_stack)
13599 		attr->sample_max_stack = sysctl_perf_event_max_stack;
13600 
13601 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
13602 		ret = perf_reg_validate(attr->sample_regs_intr);
13603 
13604 #ifndef CONFIG_CGROUP_PERF
13605 	if (attr->sample_type & PERF_SAMPLE_CGROUP)
13606 		return -EINVAL;
13607 #endif
13608 	if ((attr->sample_type & PERF_SAMPLE_WEIGHT) &&
13609 	    (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT))
13610 		return -EINVAL;
13611 
13612 	if (!attr->inherit && attr->inherit_thread)
13613 		return -EINVAL;
13614 
13615 	if (attr->remove_on_exec && attr->enable_on_exec)
13616 		return -EINVAL;
13617 
13618 	if (attr->sigtrap && !attr->remove_on_exec)
13619 		return -EINVAL;
13620 
13621 out:
13622 	return ret;
13623 
13624 err_size:
13625 	put_user(sizeof(*attr), &uattr->size);
13626 	ret = -E2BIG;
13627 	goto out;
13628 }
13629 
mutex_lock_double(struct mutex * a,struct mutex * b)13630 static void mutex_lock_double(struct mutex *a, struct mutex *b)
13631 {
13632 	if (b < a)
13633 		swap(a, b);
13634 
13635 	mutex_lock(a);
13636 	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
13637 }
13638 
13639 static int
perf_event_set_output(struct perf_event * event,struct perf_event * output_event)13640 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
13641 {
13642 	struct perf_buffer *rb = NULL;
13643 	int ret = -EINVAL;
13644 
13645 	if (!output_event) {
13646 		mutex_lock(&event->mmap_mutex);
13647 		goto set;
13648 	}
13649 
13650 	/* don't allow circular references */
13651 	if (event == output_event)
13652 		goto out;
13653 
13654 	/*
13655 	 * Don't allow cross-cpu buffers
13656 	 */
13657 	if (output_event->cpu != event->cpu)
13658 		goto out;
13659 
13660 	/*
13661 	 * If its not a per-cpu rb, it must be the same task.
13662 	 */
13663 	if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
13664 		goto out;
13665 
13666 	/*
13667 	 * Mixing clocks in the same buffer is trouble you don't need.
13668 	 */
13669 	if (output_event->clock != event->clock)
13670 		goto out;
13671 
13672 	/*
13673 	 * Either writing ring buffer from beginning or from end.
13674 	 * Mixing is not allowed.
13675 	 */
13676 	if (is_write_backward(output_event) != is_write_backward(event))
13677 		goto out;
13678 
13679 	/*
13680 	 * If both events generate aux data, they must be on the same PMU
13681 	 */
13682 	if (has_aux(event) && has_aux(output_event) &&
13683 	    event->pmu != output_event->pmu)
13684 		goto out;
13685 
13686 	/*
13687 	 * Hold both mmap_mutex to serialize against perf_mmap_close().  Since
13688 	 * output_event is already on rb->event_list, and the list iteration
13689 	 * restarts after every removal, it is guaranteed this new event is
13690 	 * observed *OR* if output_event is already removed, it's guaranteed we
13691 	 * observe !rb->mmap_count.
13692 	 */
13693 	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
13694 set:
13695 	/* Can't redirect output if we've got an active mmap() */
13696 	if (refcount_read(&event->mmap_count))
13697 		goto unlock;
13698 
13699 	if (output_event) {
13700 		if (output_event->state <= PERF_EVENT_STATE_REVOKED)
13701 			goto unlock;
13702 
13703 		/* get the rb we want to redirect to */
13704 		rb = ring_buffer_get(output_event);
13705 		if (!rb)
13706 			goto unlock;
13707 
13708 		/* did we race against perf_mmap_close() */
13709 		if (!refcount_read(&rb->mmap_count)) {
13710 			ring_buffer_put(rb);
13711 			goto unlock;
13712 		}
13713 	}
13714 
13715 	ring_buffer_attach(event, rb);
13716 
13717 	ret = 0;
13718 unlock:
13719 	mutex_unlock(&event->mmap_mutex);
13720 	if (output_event)
13721 		mutex_unlock(&output_event->mmap_mutex);
13722 
13723 out:
13724 	return ret;
13725 }
13726 
perf_event_set_clock(struct perf_event * event,clockid_t clk_id)13727 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
13728 {
13729 	bool nmi_safe = false;
13730 
13731 	switch (clk_id) {
13732 	case CLOCK_MONOTONIC:
13733 		event->clock = &ktime_get_mono_fast_ns;
13734 		nmi_safe = true;
13735 		break;
13736 
13737 	case CLOCK_MONOTONIC_RAW:
13738 		event->clock = &ktime_get_raw_fast_ns;
13739 		nmi_safe = true;
13740 		break;
13741 
13742 	case CLOCK_REALTIME:
13743 		event->clock = &ktime_get_real_ns;
13744 		break;
13745 
13746 	case CLOCK_BOOTTIME:
13747 		event->clock = &ktime_get_boottime_ns;
13748 		break;
13749 
13750 	case CLOCK_TAI:
13751 		event->clock = &ktime_get_clocktai_ns;
13752 		break;
13753 
13754 	default:
13755 		return -EINVAL;
13756 	}
13757 
13758 	if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
13759 		return -EINVAL;
13760 
13761 	return 0;
13762 }
13763 
13764 static bool
perf_check_permission(struct perf_event_attr * attr,struct task_struct * task)13765 perf_check_permission(struct perf_event_attr *attr, struct task_struct *task)
13766 {
13767 	unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS;
13768 	bool is_capable = perfmon_capable();
13769 
13770 	if (attr->sigtrap) {
13771 		/*
13772 		 * perf_event_attr::sigtrap sends signals to the other task.
13773 		 * Require the current task to also have CAP_KILL.
13774 		 */
13775 		rcu_read_lock();
13776 		is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL);
13777 		rcu_read_unlock();
13778 
13779 		/*
13780 		 * If the required capabilities aren't available, checks for
13781 		 * ptrace permissions: upgrade to ATTACH, since sending signals
13782 		 * can effectively change the target task.
13783 		 */
13784 		ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS;
13785 	}
13786 
13787 	/*
13788 	 * Preserve ptrace permission check for backwards compatibility. The
13789 	 * ptrace check also includes checks that the current task and other
13790 	 * task have matching uids, and is therefore not done here explicitly.
13791 	 */
13792 	return is_capable || ptrace_may_access(task, ptrace_mode);
13793 }
13794 
13795 /**
13796  * sys_perf_event_open - open a performance event, associate it to a task/cpu
13797  *
13798  * @attr_uptr:	event_id type attributes for monitoring/sampling
13799  * @pid:		target pid
13800  * @cpu:		target cpu
13801  * @group_fd:		group leader event fd
13802  * @flags:		perf event open flags
13803  */
SYSCALL_DEFINE5(perf_event_open,struct perf_event_attr __user *,attr_uptr,pid_t,pid,int,cpu,int,group_fd,unsigned long,flags)13804 SYSCALL_DEFINE5(perf_event_open,
13805 		struct perf_event_attr __user *, attr_uptr,
13806 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
13807 {
13808 	struct perf_event *group_leader = NULL, *output_event = NULL;
13809 	struct perf_event_pmu_context *pmu_ctx;
13810 	struct perf_event *event, *sibling;
13811 	struct perf_event_attr attr;
13812 	struct perf_event_context *ctx;
13813 	struct file *event_file = NULL;
13814 	struct task_struct *task = NULL;
13815 	struct pmu *pmu;
13816 	int event_fd;
13817 	int move_group = 0;
13818 	int err;
13819 	int f_flags = O_RDWR;
13820 	int cgroup_fd = -1;
13821 
13822 	/* for future expandability... */
13823 	if (flags & ~PERF_FLAG_ALL)
13824 		return -EINVAL;
13825 
13826 	err = perf_copy_attr(attr_uptr, &attr);
13827 	if (err)
13828 		return err;
13829 
13830 	/* Do we allow access to perf_event_open(2) ? */
13831 	err = security_perf_event_open(PERF_SECURITY_OPEN);
13832 	if (err)
13833 		return err;
13834 
13835 	if (!attr.exclude_kernel) {
13836 		err = perf_allow_kernel();
13837 		if (err)
13838 			return err;
13839 	}
13840 
13841 	if (attr.namespaces) {
13842 		if (!perfmon_capable())
13843 			return -EACCES;
13844 	}
13845 
13846 	if (attr.freq) {
13847 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
13848 			return -EINVAL;
13849 	} else {
13850 		if (attr.sample_period & (1ULL << 63))
13851 			return -EINVAL;
13852 	}
13853 
13854 	/* Only privileged users can get physical addresses */
13855 	if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) {
13856 		err = perf_allow_kernel();
13857 		if (err)
13858 			return err;
13859 	}
13860 
13861 	/* REGS_INTR can leak data, lockdown must prevent this */
13862 	if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
13863 		err = security_locked_down(LOCKDOWN_PERF);
13864 		if (err)
13865 			return err;
13866 	}
13867 
13868 	/*
13869 	 * In cgroup mode, the pid argument is used to pass the fd
13870 	 * opened to the cgroup directory in cgroupfs. The cpu argument
13871 	 * designates the cpu on which to monitor threads from that
13872 	 * cgroup.
13873 	 */
13874 	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
13875 		return -EINVAL;
13876 
13877 	if (flags & PERF_FLAG_FD_CLOEXEC)
13878 		f_flags |= O_CLOEXEC;
13879 
13880 	event_fd = get_unused_fd_flags(f_flags);
13881 	if (event_fd < 0)
13882 		return event_fd;
13883 
13884 	/*
13885 	 * Event creation should be under SRCU, see perf_pmu_unregister().
13886 	 */
13887 	guard(srcu)(&pmus_srcu);
13888 
13889 	CLASS(fd, group)(group_fd);     // group_fd == -1 => empty
13890 	if (group_fd != -1) {
13891 		if (!is_perf_file(group)) {
13892 			err = -EBADF;
13893 			goto err_fd;
13894 		}
13895 		group_leader = fd_file(group)->private_data;
13896 		if (group_leader->state <= PERF_EVENT_STATE_REVOKED) {
13897 			err = -ENODEV;
13898 			goto err_fd;
13899 		}
13900 		if (flags & PERF_FLAG_FD_OUTPUT)
13901 			output_event = group_leader;
13902 		if (flags & PERF_FLAG_FD_NO_GROUP)
13903 			group_leader = NULL;
13904 	}
13905 
13906 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
13907 		task = find_lively_task_by_vpid(pid);
13908 		if (IS_ERR(task)) {
13909 			err = PTR_ERR(task);
13910 			goto err_fd;
13911 		}
13912 	}
13913 
13914 	if (task && group_leader &&
13915 	    group_leader->attr.inherit != attr.inherit) {
13916 		err = -EINVAL;
13917 		goto err_task;
13918 	}
13919 
13920 	if (flags & PERF_FLAG_PID_CGROUP)
13921 		cgroup_fd = pid;
13922 
13923 	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
13924 				 NULL, NULL, cgroup_fd);
13925 	if (IS_ERR(event)) {
13926 		err = PTR_ERR(event);
13927 		goto err_task;
13928 	}
13929 
13930 	if (is_sampling_event(event)) {
13931 		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
13932 			err = -EOPNOTSUPP;
13933 			goto err_alloc;
13934 		}
13935 	}
13936 
13937 	/*
13938 	 * Special case software events and allow them to be part of
13939 	 * any hardware group.
13940 	 */
13941 	pmu = event->pmu;
13942 
13943 	if (attr.use_clockid) {
13944 		err = perf_event_set_clock(event, attr.clockid);
13945 		if (err)
13946 			goto err_alloc;
13947 	}
13948 
13949 	if (pmu->task_ctx_nr == perf_sw_context)
13950 		event->event_caps |= PERF_EV_CAP_SOFTWARE;
13951 
13952 	if (task) {
13953 		err = down_read_interruptible(&task->signal->exec_update_lock);
13954 		if (err)
13955 			goto err_alloc;
13956 
13957 		/*
13958 		 * We must hold exec_update_lock across this and any potential
13959 		 * perf_install_in_context() call for this new event to
13960 		 * serialize against exec() altering our credentials (and the
13961 		 * perf_event_exit_task() that could imply).
13962 		 */
13963 		err = -EACCES;
13964 		if (!perf_check_permission(&attr, task))
13965 			goto err_cred;
13966 	}
13967 
13968 	/*
13969 	 * Get the target context (task or percpu):
13970 	 */
13971 	ctx = find_get_context(task, event);
13972 	if (IS_ERR(ctx)) {
13973 		err = PTR_ERR(ctx);
13974 		goto err_cred;
13975 	}
13976 
13977 	mutex_lock(&ctx->mutex);
13978 
13979 	if (ctx->task == TASK_TOMBSTONE) {
13980 		err = -ESRCH;
13981 		goto err_locked;
13982 	}
13983 
13984 	if (!task) {
13985 		/*
13986 		 * Check if the @cpu we're creating an event for is online.
13987 		 *
13988 		 * We use the perf_cpu_context::ctx::mutex to serialize against
13989 		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
13990 		 */
13991 		struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu);
13992 
13993 		if (!cpuctx->online) {
13994 			err = -ENODEV;
13995 			goto err_locked;
13996 		}
13997 	}
13998 
13999 	if (group_leader) {
14000 		err = -EINVAL;
14001 
14002 		/*
14003 		 * Do not allow a recursive hierarchy (this new sibling
14004 		 * becoming part of another group-sibling):
14005 		 */
14006 		if (group_leader->group_leader != group_leader)
14007 			goto err_locked;
14008 
14009 		/* All events in a group should have the same clock */
14010 		if (group_leader->clock != event->clock)
14011 			goto err_locked;
14012 
14013 		/*
14014 		 * Make sure we're both events for the same CPU;
14015 		 * grouping events for different CPUs is broken; since
14016 		 * you can never concurrently schedule them anyhow.
14017 		 */
14018 		if (group_leader->cpu != event->cpu)
14019 			goto err_locked;
14020 
14021 		/*
14022 		 * Make sure we're both on the same context; either task or cpu.
14023 		 */
14024 		if (group_leader->ctx != ctx)
14025 			goto err_locked;
14026 
14027 		/*
14028 		 * Only a group leader can be exclusive or pinned
14029 		 */
14030 		if (attr.exclusive || attr.pinned)
14031 			goto err_locked;
14032 
14033 		if (is_software_event(event) &&
14034 		    !in_software_context(group_leader)) {
14035 			/*
14036 			 * If the event is a sw event, but the group_leader
14037 			 * is on hw context.
14038 			 *
14039 			 * Allow the addition of software events to hw
14040 			 * groups, this is safe because software events
14041 			 * never fail to schedule.
14042 			 *
14043 			 * Note the comment that goes with struct
14044 			 * perf_event_pmu_context.
14045 			 */
14046 			pmu = group_leader->pmu_ctx->pmu;
14047 		} else if (!is_software_event(event)) {
14048 			if (is_software_event(group_leader) &&
14049 			    (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
14050 				/*
14051 				 * In case the group is a pure software group, and we
14052 				 * try to add a hardware event, move the whole group to
14053 				 * the hardware context.
14054 				 */
14055 				move_group = 1;
14056 			}
14057 
14058 			/* Don't allow group of multiple hw events from different pmus */
14059 			if (!in_software_context(group_leader) &&
14060 			    group_leader->pmu_ctx->pmu != pmu)
14061 				goto err_locked;
14062 		}
14063 	}
14064 
14065 	/*
14066 	 * Now that we're certain of the pmu; find the pmu_ctx.
14067 	 */
14068 	pmu_ctx = find_get_pmu_context(pmu, ctx, event);
14069 	if (IS_ERR(pmu_ctx)) {
14070 		err = PTR_ERR(pmu_ctx);
14071 		goto err_locked;
14072 	}
14073 	event->pmu_ctx = pmu_ctx;
14074 
14075 	if (output_event) {
14076 		err = perf_event_set_output(event, output_event);
14077 		if (err)
14078 			goto err_context;
14079 	}
14080 
14081 	if (!perf_event_validate_size(event)) {
14082 		err = -E2BIG;
14083 		goto err_context;
14084 	}
14085 
14086 	if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
14087 		err = -EINVAL;
14088 		goto err_context;
14089 	}
14090 
14091 	/*
14092 	 * Must be under the same ctx::mutex as perf_install_in_context(),
14093 	 * because we need to serialize with concurrent event creation.
14094 	 */
14095 	if (!exclusive_event_installable(event, ctx)) {
14096 		err = -EBUSY;
14097 		goto err_context;
14098 	}
14099 
14100 	WARN_ON_ONCE(ctx->parent_ctx);
14101 
14102 	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags);
14103 	if (IS_ERR(event_file)) {
14104 		err = PTR_ERR(event_file);
14105 		event_file = NULL;
14106 		goto err_context;
14107 	}
14108 
14109 	/*
14110 	 * This is the point on no return; we cannot fail hereafter. This is
14111 	 * where we start modifying current state.
14112 	 */
14113 
14114 	if (move_group) {
14115 		perf_remove_from_context(group_leader, 0);
14116 		put_pmu_ctx(group_leader->pmu_ctx);
14117 
14118 		for_each_sibling_event(sibling, group_leader) {
14119 			perf_remove_from_context(sibling, 0);
14120 			put_pmu_ctx(sibling->pmu_ctx);
14121 		}
14122 
14123 		/*
14124 		 * Install the group siblings before the group leader.
14125 		 *
14126 		 * Because a group leader will try and install the entire group
14127 		 * (through the sibling list, which is still in-tact), we can
14128 		 * end up with siblings installed in the wrong context.
14129 		 *
14130 		 * By installing siblings first we NO-OP because they're not
14131 		 * reachable through the group lists.
14132 		 */
14133 		for_each_sibling_event(sibling, group_leader) {
14134 			sibling->pmu_ctx = pmu_ctx;
14135 			get_pmu_ctx(pmu_ctx);
14136 			perf_event__state_init(sibling);
14137 			perf_install_in_context(ctx, sibling, sibling->cpu);
14138 		}
14139 
14140 		/*
14141 		 * Removing from the context ends up with disabled
14142 		 * event. What we want here is event in the initial
14143 		 * startup state, ready to be add into new context.
14144 		 */
14145 		group_leader->pmu_ctx = pmu_ctx;
14146 		get_pmu_ctx(pmu_ctx);
14147 		perf_event__state_init(group_leader);
14148 		perf_install_in_context(ctx, group_leader, group_leader->cpu);
14149 	}
14150 
14151 	/*
14152 	 * Precalculate sample_data sizes; do while holding ctx::mutex such
14153 	 * that we're serialized against further additions and before
14154 	 * perf_install_in_context() which is the point the event is active and
14155 	 * can use these values.
14156 	 */
14157 	perf_event__header_size(event);
14158 	perf_event__id_header_size(event);
14159 
14160 	event->owner = current;
14161 
14162 	perf_install_in_context(ctx, event, event->cpu);
14163 	perf_unpin_context(ctx);
14164 
14165 	mutex_unlock(&ctx->mutex);
14166 
14167 	if (task) {
14168 		up_read(&task->signal->exec_update_lock);
14169 		put_task_struct(task);
14170 	}
14171 
14172 	mutex_lock(&current->perf_event_mutex);
14173 	list_add_tail(&event->owner_entry, &current->perf_event_list);
14174 	mutex_unlock(&current->perf_event_mutex);
14175 
14176 	/*
14177 	 * File reference in group guarantees that group_leader has been
14178 	 * kept alive until we place the new event on the sibling_list.
14179 	 * This ensures destruction of the group leader will find
14180 	 * the pointer to itself in perf_group_detach().
14181 	 */
14182 	fd_install(event_fd, event_file);
14183 	return event_fd;
14184 
14185 err_context:
14186 	put_pmu_ctx(event->pmu_ctx);
14187 	event->pmu_ctx = NULL; /* _free_event() */
14188 err_locked:
14189 	mutex_unlock(&ctx->mutex);
14190 	perf_unpin_context(ctx);
14191 	put_ctx(ctx);
14192 err_cred:
14193 	if (task)
14194 		up_read(&task->signal->exec_update_lock);
14195 err_alloc:
14196 	put_event(event);
14197 err_task:
14198 	if (task)
14199 		put_task_struct(task);
14200 err_fd:
14201 	put_unused_fd(event_fd);
14202 	return err;
14203 }
14204 
14205 /**
14206  * perf_event_create_kernel_counter
14207  *
14208  * @attr: attributes of the counter to create
14209  * @cpu: cpu in which the counter is bound
14210  * @task: task to profile (NULL for percpu)
14211  * @overflow_handler: callback to trigger when we hit the event
14212  * @context: context data could be used in overflow_handler callback
14213  */
14214 struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr * attr,int cpu,struct task_struct * task,perf_overflow_handler_t overflow_handler,void * context)14215 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
14216 				 struct task_struct *task,
14217 				 perf_overflow_handler_t overflow_handler,
14218 				 void *context)
14219 {
14220 	struct perf_event_pmu_context *pmu_ctx;
14221 	struct perf_event_context *ctx;
14222 	struct perf_event *event;
14223 	struct pmu *pmu;
14224 	int err;
14225 
14226 	/*
14227 	 * Grouping is not supported for kernel events, neither is 'AUX',
14228 	 * make sure the caller's intentions are adjusted.
14229 	 */
14230 	if (attr->aux_output || attr->aux_action)
14231 		return ERR_PTR(-EINVAL);
14232 
14233 	/*
14234 	 * Event creation should be under SRCU, see perf_pmu_unregister().
14235 	 */
14236 	guard(srcu)(&pmus_srcu);
14237 
14238 	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
14239 				 overflow_handler, context, -1);
14240 	if (IS_ERR(event)) {
14241 		err = PTR_ERR(event);
14242 		goto err;
14243 	}
14244 
14245 	/* Mark owner so we could distinguish it from user events. */
14246 	event->owner = TASK_TOMBSTONE;
14247 	pmu = event->pmu;
14248 
14249 	if (pmu->task_ctx_nr == perf_sw_context)
14250 		event->event_caps |= PERF_EV_CAP_SOFTWARE;
14251 
14252 	/*
14253 	 * Get the target context (task or percpu):
14254 	 */
14255 	ctx = find_get_context(task, event);
14256 	if (IS_ERR(ctx)) {
14257 		err = PTR_ERR(ctx);
14258 		goto err_alloc;
14259 	}
14260 
14261 	WARN_ON_ONCE(ctx->parent_ctx);
14262 	mutex_lock(&ctx->mutex);
14263 	if (ctx->task == TASK_TOMBSTONE) {
14264 		err = -ESRCH;
14265 		goto err_unlock;
14266 	}
14267 
14268 	pmu_ctx = find_get_pmu_context(pmu, ctx, event);
14269 	if (IS_ERR(pmu_ctx)) {
14270 		err = PTR_ERR(pmu_ctx);
14271 		goto err_unlock;
14272 	}
14273 	event->pmu_ctx = pmu_ctx;
14274 
14275 	if (!task) {
14276 		/*
14277 		 * Check if the @cpu we're creating an event for is online.
14278 		 *
14279 		 * We use the perf_cpu_context::ctx::mutex to serialize against
14280 		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
14281 		 */
14282 		struct perf_cpu_context *cpuctx =
14283 			container_of(ctx, struct perf_cpu_context, ctx);
14284 		if (!cpuctx->online) {
14285 			err = -ENODEV;
14286 			goto err_pmu_ctx;
14287 		}
14288 	}
14289 
14290 	if (!exclusive_event_installable(event, ctx)) {
14291 		err = -EBUSY;
14292 		goto err_pmu_ctx;
14293 	}
14294 
14295 	perf_install_in_context(ctx, event, event->cpu);
14296 	perf_unpin_context(ctx);
14297 	mutex_unlock(&ctx->mutex);
14298 
14299 	return event;
14300 
14301 err_pmu_ctx:
14302 	put_pmu_ctx(pmu_ctx);
14303 	event->pmu_ctx = NULL; /* _free_event() */
14304 err_unlock:
14305 	mutex_unlock(&ctx->mutex);
14306 	perf_unpin_context(ctx);
14307 	put_ctx(ctx);
14308 err_alloc:
14309 	put_event(event);
14310 err:
14311 	return ERR_PTR(err);
14312 }
14313 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
14314 
__perf_pmu_remove(struct perf_event_context * ctx,int cpu,struct pmu * pmu,struct perf_event_groups * groups,struct list_head * events)14315 static void __perf_pmu_remove(struct perf_event_context *ctx,
14316 			      int cpu, struct pmu *pmu,
14317 			      struct perf_event_groups *groups,
14318 			      struct list_head *events)
14319 {
14320 	struct perf_event *event, *sibling;
14321 
14322 	perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
14323 		perf_remove_from_context(event, 0);
14324 		put_pmu_ctx(event->pmu_ctx);
14325 		list_add(&event->migrate_entry, events);
14326 
14327 		for_each_sibling_event(sibling, event) {
14328 			perf_remove_from_context(sibling, 0);
14329 			put_pmu_ctx(sibling->pmu_ctx);
14330 			list_add(&sibling->migrate_entry, events);
14331 		}
14332 	}
14333 }
14334 
__perf_pmu_install_event(struct pmu * pmu,struct perf_event_context * ctx,int cpu,struct perf_event * event)14335 static void __perf_pmu_install_event(struct pmu *pmu,
14336 				     struct perf_event_context *ctx,
14337 				     int cpu, struct perf_event *event)
14338 {
14339 	struct perf_event_pmu_context *epc;
14340 	struct perf_event_context *old_ctx = event->ctx;
14341 
14342 	get_ctx(ctx); /* normally find_get_context() */
14343 
14344 	event->cpu = cpu;
14345 	epc = find_get_pmu_context(pmu, ctx, event);
14346 	event->pmu_ctx = epc;
14347 
14348 	if (event->state >= PERF_EVENT_STATE_OFF)
14349 		event->state = PERF_EVENT_STATE_INACTIVE;
14350 	perf_install_in_context(ctx, event, cpu);
14351 
14352 	/*
14353 	 * Now that event->ctx is updated and visible, put the old ctx.
14354 	 */
14355 	put_ctx(old_ctx);
14356 }
14357 
__perf_pmu_install(struct perf_event_context * ctx,int cpu,struct pmu * pmu,struct list_head * events)14358 static void __perf_pmu_install(struct perf_event_context *ctx,
14359 			       int cpu, struct pmu *pmu, struct list_head *events)
14360 {
14361 	struct perf_event *event, *tmp;
14362 
14363 	/*
14364 	 * Re-instate events in 2 passes.
14365 	 *
14366 	 * Skip over group leaders and only install siblings on this first
14367 	 * pass, siblings will not get enabled without a leader, however a
14368 	 * leader will enable its siblings, even if those are still on the old
14369 	 * context.
14370 	 */
14371 	list_for_each_entry_safe(event, tmp, events, migrate_entry) {
14372 		if (event->group_leader == event)
14373 			continue;
14374 
14375 		list_del(&event->migrate_entry);
14376 		__perf_pmu_install_event(pmu, ctx, cpu, event);
14377 	}
14378 
14379 	/*
14380 	 * Once all the siblings are setup properly, install the group leaders
14381 	 * to make it go.
14382 	 */
14383 	list_for_each_entry_safe(event, tmp, events, migrate_entry) {
14384 		list_del(&event->migrate_entry);
14385 		__perf_pmu_install_event(pmu, ctx, cpu, event);
14386 	}
14387 }
14388 
perf_pmu_migrate_context(struct pmu * pmu,int src_cpu,int dst_cpu)14389 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
14390 {
14391 	struct perf_event_context *src_ctx, *dst_ctx;
14392 	LIST_HEAD(events);
14393 
14394 	/*
14395 	 * Since per-cpu context is persistent, no need to grab an extra
14396 	 * reference.
14397 	 */
14398 	src_ctx = &per_cpu_ptr(&perf_cpu_context, src_cpu)->ctx;
14399 	dst_ctx = &per_cpu_ptr(&perf_cpu_context, dst_cpu)->ctx;
14400 
14401 	/*
14402 	 * See perf_event_ctx_lock() for comments on the details
14403 	 * of swizzling perf_event::ctx.
14404 	 */
14405 	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
14406 
14407 	__perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
14408 	__perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
14409 
14410 	if (!list_empty(&events)) {
14411 		/*
14412 		 * Wait for the events to quiesce before re-instating them.
14413 		 */
14414 		synchronize_rcu();
14415 
14416 		__perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
14417 	}
14418 
14419 	mutex_unlock(&dst_ctx->mutex);
14420 	mutex_unlock(&src_ctx->mutex);
14421 }
14422 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
14423 
sync_child_event(struct perf_event * child_event,struct task_struct * task)14424 static void sync_child_event(struct perf_event *child_event,
14425 			     struct task_struct *task)
14426 {
14427 	struct perf_event *parent_event = child_event->parent;
14428 	u64 child_val;
14429 
14430 	if (child_event->attr.inherit_stat) {
14431 		if (task && task != TASK_TOMBSTONE)
14432 			perf_event_read_event(child_event, task);
14433 	}
14434 
14435 	child_val = perf_event_count(child_event, false);
14436 
14437 	/*
14438 	 * Add back the child's count to the parent's count:
14439 	 */
14440 	atomic64_add(child_val, &parent_event->child_count);
14441 	atomic64_add(child_event->total_time_enabled,
14442 		     &parent_event->child_total_time_enabled);
14443 	atomic64_add(child_event->total_time_running,
14444 		     &parent_event->child_total_time_running);
14445 }
14446 
14447 static void
perf_event_exit_event(struct perf_event * event,struct perf_event_context * ctx,struct task_struct * task,bool revoke)14448 perf_event_exit_event(struct perf_event *event,
14449 		      struct perf_event_context *ctx,
14450 		      struct task_struct *task,
14451 		      bool revoke)
14452 {
14453 	struct perf_event *parent_event = event->parent;
14454 	unsigned long detach_flags = DETACH_EXIT;
14455 	unsigned int attach_state;
14456 
14457 	if (parent_event) {
14458 		/*
14459 		 * Do not destroy the 'original' grouping; because of the
14460 		 * context switch optimization the original events could've
14461 		 * ended up in a random child task.
14462 		 *
14463 		 * If we were to destroy the original group, all group related
14464 		 * operations would cease to function properly after this
14465 		 * random child dies.
14466 		 *
14467 		 * Do destroy all inherited groups, we don't care about those
14468 		 * and being thorough is better.
14469 		 */
14470 		detach_flags |= DETACH_GROUP | DETACH_CHILD;
14471 		mutex_lock(&parent_event->child_mutex);
14472 		/* PERF_ATTACH_ITRACE might be set concurrently */
14473 		attach_state = READ_ONCE(event->attach_state);
14474 
14475 		if (attach_state & PERF_ATTACH_CHILD)
14476 			sync_child_event(event, task);
14477 	}
14478 
14479 	if (revoke)
14480 		detach_flags |= DETACH_GROUP | DETACH_REVOKE;
14481 
14482 	perf_remove_from_context(event, detach_flags);
14483 	/*
14484 	 * Child events can be freed.
14485 	 */
14486 	if (parent_event) {
14487 		mutex_unlock(&parent_event->child_mutex);
14488 
14489 		/*
14490 		 * Match the refcount initialization. Make sure it doesn't happen
14491 		 * twice if pmu_detach_event() calls it on an already exited task.
14492 		 */
14493 		if (attach_state & PERF_ATTACH_CHILD) {
14494 			/*
14495 			 * Kick perf_poll() for is_event_hup();
14496 			 */
14497 			perf_event_wakeup(parent_event);
14498 			/*
14499 			 * pmu_detach_event() will have an extra refcount.
14500 			 * perf_pending_task() might have one too.
14501 			 */
14502 			put_event(event);
14503 		}
14504 
14505 		return;
14506 	}
14507 
14508 	/*
14509 	 * Parent events are governed by their filedesc, retain them.
14510 	 */
14511 	perf_event_wakeup(event);
14512 }
14513 
perf_event_exit_task_context(struct task_struct * task,bool exit)14514 static void perf_event_exit_task_context(struct task_struct *task, bool exit)
14515 {
14516 	struct perf_event_context *ctx, *clone_ctx = NULL;
14517 	struct perf_event *child_event, *next;
14518 
14519 	ctx = perf_pin_task_context(task);
14520 	if (!ctx)
14521 		return;
14522 
14523 	/*
14524 	 * In order to reduce the amount of tricky in ctx tear-down, we hold
14525 	 * ctx::mutex over the entire thing. This serializes against almost
14526 	 * everything that wants to access the ctx.
14527 	 *
14528 	 * The exception is sys_perf_event_open() /
14529 	 * perf_event_create_kernel_count() which does find_get_context()
14530 	 * without ctx::mutex (it cannot because of the move_group double mutex
14531 	 * lock thing). See the comments in perf_install_in_context().
14532 	 */
14533 	mutex_lock(&ctx->mutex);
14534 
14535 	/*
14536 	 * In a single ctx::lock section, de-schedule the events and detach the
14537 	 * context from the task such that we cannot ever get it scheduled back
14538 	 * in.
14539 	 */
14540 	raw_spin_lock_irq(&ctx->lock);
14541 	if (exit)
14542 		task_ctx_sched_out(ctx, NULL, EVENT_ALL);
14543 
14544 	/*
14545 	 * Now that the context is inactive, destroy the task <-> ctx relation
14546 	 * and mark the context dead.
14547 	 */
14548 	RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
14549 	put_ctx(ctx); /* cannot be last */
14550 	WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
14551 	put_task_struct(task); /* cannot be last */
14552 
14553 	clone_ctx = unclone_ctx(ctx);
14554 	raw_spin_unlock_irq(&ctx->lock);
14555 
14556 	if (clone_ctx)
14557 		put_ctx(clone_ctx);
14558 
14559 	/*
14560 	 * Report the task dead after unscheduling the events so that we
14561 	 * won't get any samples after PERF_RECORD_EXIT. We can however still
14562 	 * get a few PERF_RECORD_READ events.
14563 	 */
14564 	if (exit)
14565 		perf_event_task(task, ctx, 0);
14566 
14567 	list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
14568 		perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);
14569 
14570 	mutex_unlock(&ctx->mutex);
14571 
14572 	if (!exit) {
14573 		/*
14574 		 * perf_event_release_kernel() could still have a reference on
14575 		 * this context. In that case we must wait for these events to
14576 		 * have been freed (in particular all their references to this
14577 		 * task must've been dropped).
14578 		 *
14579 		 * Without this copy_process() will unconditionally free this
14580 		 * task (irrespective of its reference count) and
14581 		 * _free_event()'s put_task_struct(event->hw.target) will be a
14582 		 * use-after-free.
14583 		 *
14584 		 * Wait for all events to drop their context reference.
14585 		 */
14586 		wait_var_event(&ctx->refcount,
14587 			       refcount_read(&ctx->refcount) == 1);
14588 	}
14589 	put_ctx(ctx);
14590 }
14591 
14592 /*
14593  * When a task exits, feed back event values to parent events.
14594  *
14595  * Can be called with exec_update_lock held when called from
14596  * setup_new_exec().
14597  */
perf_event_exit_task(struct task_struct * task)14598 void perf_event_exit_task(struct task_struct *task)
14599 {
14600 	struct perf_event *event, *tmp;
14601 
14602 	WARN_ON_ONCE(task != current);
14603 
14604 	mutex_lock(&task->perf_event_mutex);
14605 	list_for_each_entry_safe(event, tmp, &task->perf_event_list,
14606 				 owner_entry) {
14607 		list_del_init(&event->owner_entry);
14608 
14609 		/*
14610 		 * Ensure the list deletion is visible before we clear
14611 		 * the owner, closes a race against perf_release() where
14612 		 * we need to serialize on the owner->perf_event_mutex.
14613 		 */
14614 		smp_store_release(&event->owner, NULL);
14615 	}
14616 	mutex_unlock(&task->perf_event_mutex);
14617 
14618 	perf_event_exit_task_context(task, true);
14619 
14620 	/*
14621 	 * The perf_event_exit_task_context calls perf_event_task
14622 	 * with task's task_ctx, which generates EXIT events for
14623 	 * task contexts and sets task->perf_event_ctxp[] to NULL.
14624 	 * At this point we need to send EXIT events to cpu contexts.
14625 	 */
14626 	perf_event_task(task, NULL, 0);
14627 
14628 	/*
14629 	 * Detach the perf_ctx_data for the system-wide event.
14630 	 *
14631 	 * Done without holding global_ctx_data_rwsem; typically
14632 	 * attach_global_ctx_data() will skip over this task, but otherwise
14633 	 * attach_task_ctx_data() will observe PF_EXITING.
14634 	 */
14635 	detach_task_ctx_data(task);
14636 }
14637 
14638 /*
14639  * Free a context as created by inheritance by perf_event_init_task() below,
14640  * used by fork() in case of fail.
14641  *
14642  * Even though the task has never lived, the context and events have been
14643  * exposed through the child_list, so we must take care tearing it all down.
14644  */
perf_event_free_task(struct task_struct * task)14645 void perf_event_free_task(struct task_struct *task)
14646 {
14647 	perf_event_exit_task_context(task, false);
14648 }
14649 
perf_event_delayed_put(struct task_struct * task)14650 void perf_event_delayed_put(struct task_struct *task)
14651 {
14652 	WARN_ON_ONCE(task->perf_event_ctxp);
14653 }
14654 
perf_event_get(unsigned int fd)14655 struct file *perf_event_get(unsigned int fd)
14656 {
14657 	struct file *file = fget(fd);
14658 	if (!file)
14659 		return ERR_PTR(-EBADF);
14660 
14661 	if (file->f_op != &perf_fops) {
14662 		fput(file);
14663 		return ERR_PTR(-EBADF);
14664 	}
14665 
14666 	return file;
14667 }
14668 
perf_get_event(struct file * file)14669 const struct perf_event *perf_get_event(struct file *file)
14670 {
14671 	if (file->f_op != &perf_fops)
14672 		return ERR_PTR(-EINVAL);
14673 
14674 	return file->private_data;
14675 }
14676 
perf_event_attrs(struct perf_event * event)14677 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
14678 {
14679 	if (!event)
14680 		return ERR_PTR(-EINVAL);
14681 
14682 	return &event->attr;
14683 }
14684 
perf_allow_kernel(void)14685 int perf_allow_kernel(void)
14686 {
14687 	if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
14688 		return -EACCES;
14689 
14690 	return security_perf_event_open(PERF_SECURITY_KERNEL);
14691 }
14692 EXPORT_SYMBOL_GPL(perf_allow_kernel);
14693 
14694 /*
14695  * Inherit an event from parent task to child task.
14696  *
14697  * Returns:
14698  *  - valid pointer on success
14699  *  - NULL for orphaned events
14700  *  - IS_ERR() on error
14701  */
14702 static struct perf_event *
inherit_event(struct perf_event * parent_event,struct task_struct * parent,struct perf_event_context * parent_ctx,struct task_struct * child,struct perf_event * group_leader,struct perf_event_context * child_ctx)14703 inherit_event(struct perf_event *parent_event,
14704 	      struct task_struct *parent,
14705 	      struct perf_event_context *parent_ctx,
14706 	      struct task_struct *child,
14707 	      struct perf_event *group_leader,
14708 	      struct perf_event_context *child_ctx)
14709 {
14710 	enum perf_event_state parent_state = parent_event->state;
14711 	struct perf_event_pmu_context *pmu_ctx;
14712 	struct perf_event *child_event;
14713 	unsigned long flags;
14714 
14715 	/*
14716 	 * Instead of creating recursive hierarchies of events,
14717 	 * we link inherited events back to the original parent,
14718 	 * which has a filp for sure, which we use as the reference
14719 	 * count:
14720 	 */
14721 	if (parent_event->parent)
14722 		parent_event = parent_event->parent;
14723 
14724 	if (parent_event->state <= PERF_EVENT_STATE_REVOKED)
14725 		return NULL;
14726 
14727 	/*
14728 	 * Event creation should be under SRCU, see perf_pmu_unregister().
14729 	 */
14730 	guard(srcu)(&pmus_srcu);
14731 
14732 	child_event = perf_event_alloc(&parent_event->attr,
14733 					   parent_event->cpu,
14734 					   child,
14735 					   group_leader, parent_event,
14736 					   NULL, NULL, -1);
14737 	if (IS_ERR(child_event))
14738 		return child_event;
14739 
14740 	get_ctx(child_ctx);
14741 	child_event->ctx = child_ctx;
14742 
14743 	pmu_ctx = find_get_pmu_context(parent_event->pmu_ctx->pmu, child_ctx, child_event);
14744 	if (IS_ERR(pmu_ctx)) {
14745 		free_event(child_event);
14746 		return ERR_CAST(pmu_ctx);
14747 	}
14748 	child_event->pmu_ctx = pmu_ctx;
14749 
14750 	/*
14751 	 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
14752 	 * must be under the same lock in order to serialize against
14753 	 * perf_event_release_kernel(), such that either we must observe
14754 	 * is_orphaned_event() or they will observe us on the child_list.
14755 	 */
14756 	mutex_lock(&parent_event->child_mutex);
14757 	if (is_orphaned_event(parent_event) ||
14758 	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
14759 		mutex_unlock(&parent_event->child_mutex);
14760 		free_event(child_event);
14761 		return NULL;
14762 	}
14763 
14764 	/*
14765 	 * Make the child state follow the state of the parent event,
14766 	 * not its attr.disabled bit.  We hold the parent's mutex,
14767 	 * so we won't race with perf_event_{en, dis}able_family.
14768 	 */
14769 	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
14770 		child_event->state = PERF_EVENT_STATE_INACTIVE;
14771 	else
14772 		child_event->state = PERF_EVENT_STATE_OFF;
14773 
14774 	if (parent_event->attr.freq) {
14775 		u64 sample_period = parent_event->hw.sample_period;
14776 		struct hw_perf_event *hwc = &child_event->hw;
14777 
14778 		hwc->sample_period = sample_period;
14779 		hwc->last_period   = sample_period;
14780 
14781 		local64_set(&hwc->period_left, sample_period);
14782 	}
14783 
14784 	child_event->overflow_handler = parent_event->overflow_handler;
14785 	child_event->overflow_handler_context
14786 		= parent_event->overflow_handler_context;
14787 
14788 	/*
14789 	 * Precalculate sample_data sizes
14790 	 */
14791 	perf_event__header_size(child_event);
14792 	perf_event__id_header_size(child_event);
14793 
14794 	/*
14795 	 * Link it up in the child's context:
14796 	 */
14797 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
14798 	add_event_to_ctx(child_event, child_ctx);
14799 	child_event->attach_state |= PERF_ATTACH_CHILD;
14800 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
14801 
14802 	/*
14803 	 * Link this into the parent event's child list
14804 	 */
14805 	list_add_tail(&child_event->child_list, &parent_event->child_list);
14806 	mutex_unlock(&parent_event->child_mutex);
14807 
14808 	return child_event;
14809 }
14810 
14811 /*
14812  * Inherits an event group.
14813  *
14814  * This will quietly suppress orphaned events; !inherit_event() is not an error.
14815  * This matches with perf_event_release_kernel() removing all child events.
14816  *
14817  * Returns:
14818  *  - 0 on success
14819  *  - <0 on error
14820  */
inherit_group(struct perf_event * parent_event,struct task_struct * parent,struct perf_event_context * parent_ctx,struct task_struct * child,struct perf_event_context * child_ctx)14821 static int inherit_group(struct perf_event *parent_event,
14822 	      struct task_struct *parent,
14823 	      struct perf_event_context *parent_ctx,
14824 	      struct task_struct *child,
14825 	      struct perf_event_context *child_ctx)
14826 {
14827 	struct perf_event *leader;
14828 	struct perf_event *sub;
14829 	struct perf_event *child_ctr;
14830 
14831 	leader = inherit_event(parent_event, parent, parent_ctx,
14832 				 child, NULL, child_ctx);
14833 	if (IS_ERR(leader))
14834 		return PTR_ERR(leader);
14835 	/*
14836 	 * @leader can be NULL here because of is_orphaned_event(). In this
14837 	 * case inherit_event() will create individual events, similar to what
14838 	 * perf_group_detach() would do anyway.
14839 	 */
14840 	for_each_sibling_event(sub, parent_event) {
14841 		child_ctr = inherit_event(sub, parent, parent_ctx,
14842 					    child, leader, child_ctx);
14843 		if (IS_ERR(child_ctr))
14844 			return PTR_ERR(child_ctr);
14845 
14846 		if (sub->aux_event == parent_event && child_ctr &&
14847 		    !perf_get_aux_event(child_ctr, leader))
14848 			return -EINVAL;
14849 	}
14850 	if (leader)
14851 		leader->group_generation = parent_event->group_generation;
14852 	return 0;
14853 }
14854 
14855 /*
14856  * Creates the child task context and tries to inherit the event-group.
14857  *
14858  * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
14859  * inherited_all set when we 'fail' to inherit an orphaned event; this is
14860  * consistent with perf_event_release_kernel() removing all child events.
14861  *
14862  * Returns:
14863  *  - 0 on success
14864  *  - <0 on error
14865  */
14866 static int
inherit_task_group(struct perf_event * event,struct task_struct * parent,struct perf_event_context * parent_ctx,struct task_struct * child,u64 clone_flags,int * inherited_all)14867 inherit_task_group(struct perf_event *event, struct task_struct *parent,
14868 		   struct perf_event_context *parent_ctx,
14869 		   struct task_struct *child,
14870 		   u64 clone_flags, int *inherited_all)
14871 {
14872 	struct perf_event_context *child_ctx;
14873 	int ret;
14874 
14875 	if (!event->attr.inherit ||
14876 	    (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
14877 	    /* Do not inherit if sigtrap and signal handlers were cleared. */
14878 	    (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
14879 		*inherited_all = 0;
14880 		return 0;
14881 	}
14882 
14883 	child_ctx = child->perf_event_ctxp;
14884 	if (!child_ctx) {
14885 		/*
14886 		 * This is executed from the parent task context, so
14887 		 * inherit events that have been marked for cloning.
14888 		 * First allocate and initialize a context for the
14889 		 * child.
14890 		 */
14891 		child_ctx = alloc_perf_context(child);
14892 		if (!child_ctx)
14893 			return -ENOMEM;
14894 
14895 		child->perf_event_ctxp = child_ctx;
14896 	}
14897 
14898 	ret = inherit_group(event, parent, parent_ctx, child, child_ctx);
14899 	if (ret)
14900 		*inherited_all = 0;
14901 
14902 	return ret;
14903 }
14904 
14905 /*
14906  * Initialize the perf_event context in task_struct
14907  */
perf_event_init_context(struct task_struct * child,u64 clone_flags)14908 static int perf_event_init_context(struct task_struct *child, u64 clone_flags)
14909 {
14910 	struct perf_event_context *child_ctx, *parent_ctx;
14911 	struct perf_event_context *cloned_ctx;
14912 	struct perf_event *event;
14913 	struct task_struct *parent = current;
14914 	int inherited_all = 1;
14915 	unsigned long flags;
14916 	int ret = 0;
14917 
14918 	if (likely(!parent->perf_event_ctxp))
14919 		return 0;
14920 
14921 	/*
14922 	 * If the parent's context is a clone, pin it so it won't get
14923 	 * swapped under us.
14924 	 */
14925 	parent_ctx = perf_pin_task_context(parent);
14926 	if (!parent_ctx)
14927 		return 0;
14928 
14929 	/*
14930 	 * No need to check if parent_ctx != NULL here; since we saw
14931 	 * it non-NULL earlier, the only reason for it to become NULL
14932 	 * is if we exit, and since we're currently in the middle of
14933 	 * a fork we can't be exiting at the same time.
14934 	 */
14935 
14936 	/*
14937 	 * Lock the parent list. No need to lock the child - not PID
14938 	 * hashed yet and not running, so nobody can access it.
14939 	 */
14940 	mutex_lock(&parent_ctx->mutex);
14941 
14942 	/*
14943 	 * We dont have to disable NMIs - we are only looking at
14944 	 * the list, not manipulating it:
14945 	 */
14946 	perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
14947 		ret = inherit_task_group(event, parent, parent_ctx,
14948 					 child, clone_flags, &inherited_all);
14949 		if (ret)
14950 			goto out_unlock;
14951 	}
14952 
14953 	/*
14954 	 * We can't hold ctx->lock when iterating the ->flexible_group list due
14955 	 * to allocations, but we need to prevent rotation because
14956 	 * rotate_ctx() will change the list from interrupt context.
14957 	 */
14958 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
14959 	parent_ctx->rotate_disable = 1;
14960 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
14961 
14962 	perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
14963 		ret = inherit_task_group(event, parent, parent_ctx,
14964 					 child, clone_flags, &inherited_all);
14965 		if (ret)
14966 			goto out_unlock;
14967 	}
14968 
14969 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
14970 	parent_ctx->rotate_disable = 0;
14971 
14972 	child_ctx = child->perf_event_ctxp;
14973 
14974 	if (child_ctx && inherited_all) {
14975 		/*
14976 		 * Mark the child context as a clone of the parent
14977 		 * context, or of whatever the parent is a clone of.
14978 		 *
14979 		 * Note that if the parent is a clone, the holding of
14980 		 * parent_ctx->lock avoids it from being uncloned.
14981 		 */
14982 		cloned_ctx = parent_ctx->parent_ctx;
14983 		if (cloned_ctx) {
14984 			child_ctx->parent_ctx = cloned_ctx;
14985 			child_ctx->parent_gen = parent_ctx->parent_gen;
14986 		} else {
14987 			child_ctx->parent_ctx = parent_ctx;
14988 			child_ctx->parent_gen = parent_ctx->generation;
14989 		}
14990 		get_ctx(child_ctx->parent_ctx);
14991 	}
14992 
14993 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
14994 out_unlock:
14995 	mutex_unlock(&parent_ctx->mutex);
14996 
14997 	perf_unpin_context(parent_ctx);
14998 	put_ctx(parent_ctx);
14999 
15000 	return ret;
15001 }
15002 
15003 /*
15004  * Initialize the perf_event context in task_struct
15005  */
perf_event_init_task(struct task_struct * child,u64 clone_flags)15006 int perf_event_init_task(struct task_struct *child, u64 clone_flags)
15007 {
15008 	int ret;
15009 
15010 	memset(child->perf_recursion, 0, sizeof(child->perf_recursion));
15011 	child->perf_event_ctxp = NULL;
15012 	mutex_init(&child->perf_event_mutex);
15013 	INIT_LIST_HEAD(&child->perf_event_list);
15014 	child->perf_ctx_data = NULL;
15015 
15016 	ret = perf_event_init_context(child, clone_flags);
15017 	if (ret) {
15018 		perf_event_free_task(child);
15019 		return ret;
15020 	}
15021 
15022 	return 0;
15023 }
15024 
perf_event_init_all_cpus(void)15025 static void __init perf_event_init_all_cpus(void)
15026 {
15027 	struct swevent_htable *swhash;
15028 	struct perf_cpu_context *cpuctx;
15029 	int cpu;
15030 
15031 	zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
15032 	zalloc_cpumask_var(&perf_online_core_mask, GFP_KERNEL);
15033 	zalloc_cpumask_var(&perf_online_die_mask, GFP_KERNEL);
15034 	zalloc_cpumask_var(&perf_online_cluster_mask, GFP_KERNEL);
15035 	zalloc_cpumask_var(&perf_online_pkg_mask, GFP_KERNEL);
15036 	zalloc_cpumask_var(&perf_online_sys_mask, GFP_KERNEL);
15037 
15038 
15039 	for_each_possible_cpu(cpu) {
15040 		swhash = &per_cpu(swevent_htable, cpu);
15041 		mutex_init(&swhash->hlist_mutex);
15042 
15043 		INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
15044 		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
15045 
15046 		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
15047 
15048 		cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
15049 		__perf_event_init_context(&cpuctx->ctx);
15050 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
15051 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
15052 		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
15053 		cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
15054 		cpuctx->heap = cpuctx->heap_default;
15055 	}
15056 }
15057 
perf_swevent_init_cpu(unsigned int cpu)15058 static void perf_swevent_init_cpu(unsigned int cpu)
15059 {
15060 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
15061 
15062 	mutex_lock(&swhash->hlist_mutex);
15063 	if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
15064 		struct swevent_hlist *hlist;
15065 
15066 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
15067 		WARN_ON(!hlist);
15068 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
15069 	}
15070 	mutex_unlock(&swhash->hlist_mutex);
15071 }
15072 
15073 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
__perf_event_exit_context(void * __info)15074 static void __perf_event_exit_context(void *__info)
15075 {
15076 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
15077 	struct perf_event_context *ctx = __info;
15078 	struct perf_event *event;
15079 
15080 	raw_spin_lock(&ctx->lock);
15081 	ctx_sched_out(ctx, NULL, EVENT_TIME);
15082 	list_for_each_entry(event, &ctx->event_list, event_entry)
15083 		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
15084 	raw_spin_unlock(&ctx->lock);
15085 }
15086 
perf_event_clear_cpumask(unsigned int cpu)15087 static void perf_event_clear_cpumask(unsigned int cpu)
15088 {
15089 	int target[PERF_PMU_MAX_SCOPE];
15090 	unsigned int scope;
15091 	struct pmu *pmu;
15092 
15093 	cpumask_clear_cpu(cpu, perf_online_mask);
15094 
15095 	for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
15096 		const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
15097 		struct cpumask *pmu_cpumask = perf_scope_cpumask(scope);
15098 
15099 		target[scope] = -1;
15100 		if (WARN_ON_ONCE(!pmu_cpumask || !cpumask))
15101 			continue;
15102 
15103 		if (!cpumask_test_and_clear_cpu(cpu, pmu_cpumask))
15104 			continue;
15105 		target[scope] = cpumask_any_but(cpumask, cpu);
15106 		if (target[scope] < nr_cpu_ids)
15107 			cpumask_set_cpu(target[scope], pmu_cpumask);
15108 	}
15109 
15110 	/* migrate */
15111 	list_for_each_entry(pmu, &pmus, entry) {
15112 		if (pmu->scope == PERF_PMU_SCOPE_NONE ||
15113 		    WARN_ON_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE))
15114 			continue;
15115 
15116 		if (target[pmu->scope] >= 0 && target[pmu->scope] < nr_cpu_ids)
15117 			perf_pmu_migrate_context(pmu, cpu, target[pmu->scope]);
15118 	}
15119 }
15120 
perf_event_exit_cpu_context(int cpu)15121 static void perf_event_exit_cpu_context(int cpu)
15122 {
15123 	struct perf_cpu_context *cpuctx;
15124 	struct perf_event_context *ctx;
15125 
15126 	// XXX simplify cpuctx->online
15127 	mutex_lock(&pmus_lock);
15128 	/*
15129 	 * Clear the cpumasks, and migrate to other CPUs if possible.
15130 	 * Must be invoked before the __perf_event_exit_context.
15131 	 */
15132 	perf_event_clear_cpumask(cpu);
15133 	cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
15134 	ctx = &cpuctx->ctx;
15135 
15136 	mutex_lock(&ctx->mutex);
15137 	if (ctx->nr_events)
15138 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
15139 	cpuctx->online = 0;
15140 	mutex_unlock(&ctx->mutex);
15141 	mutex_unlock(&pmus_lock);
15142 }
15143 #else
15144 
perf_event_exit_cpu_context(int cpu)15145 static void perf_event_exit_cpu_context(int cpu) { }
15146 
15147 #endif
15148 
perf_event_setup_cpumask(unsigned int cpu)15149 static void perf_event_setup_cpumask(unsigned int cpu)
15150 {
15151 	struct cpumask *pmu_cpumask;
15152 	unsigned int scope;
15153 
15154 	/*
15155 	 * Early boot stage, the cpumask hasn't been set yet.
15156 	 * The perf_online_<domain>_masks includes the first CPU of each domain.
15157 	 * Always unconditionally set the boot CPU for the perf_online_<domain>_masks.
15158 	 */
15159 	if (cpumask_empty(perf_online_mask)) {
15160 		for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
15161 			pmu_cpumask = perf_scope_cpumask(scope);
15162 			if (WARN_ON_ONCE(!pmu_cpumask))
15163 				continue;
15164 			cpumask_set_cpu(cpu, pmu_cpumask);
15165 		}
15166 		goto end;
15167 	}
15168 
15169 	for (scope = PERF_PMU_SCOPE_NONE + 1; scope < PERF_PMU_MAX_SCOPE; scope++) {
15170 		const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(scope, cpu);
15171 
15172 		pmu_cpumask = perf_scope_cpumask(scope);
15173 
15174 		if (WARN_ON_ONCE(!pmu_cpumask || !cpumask))
15175 			continue;
15176 
15177 		if (!cpumask_empty(cpumask) &&
15178 		    cpumask_any_and(pmu_cpumask, cpumask) >= nr_cpu_ids)
15179 			cpumask_set_cpu(cpu, pmu_cpumask);
15180 	}
15181 end:
15182 	cpumask_set_cpu(cpu, perf_online_mask);
15183 }
15184 
perf_event_init_cpu(unsigned int cpu)15185 int perf_event_init_cpu(unsigned int cpu)
15186 {
15187 	struct perf_cpu_context *cpuctx;
15188 	struct perf_event_context *ctx;
15189 
15190 	perf_swevent_init_cpu(cpu);
15191 
15192 	mutex_lock(&pmus_lock);
15193 	perf_event_setup_cpumask(cpu);
15194 	cpuctx = per_cpu_ptr(&perf_cpu_context, cpu);
15195 	ctx = &cpuctx->ctx;
15196 
15197 	mutex_lock(&ctx->mutex);
15198 	cpuctx->online = 1;
15199 	mutex_unlock(&ctx->mutex);
15200 	mutex_unlock(&pmus_lock);
15201 
15202 	return 0;
15203 }
15204 
perf_event_exit_cpu(unsigned int cpu)15205 int perf_event_exit_cpu(unsigned int cpu)
15206 {
15207 	perf_event_exit_cpu_context(cpu);
15208 	return 0;
15209 }
15210 
15211 static int
perf_reboot(struct notifier_block * notifier,unsigned long val,void * v)15212 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
15213 {
15214 	int cpu;
15215 
15216 	for_each_online_cpu(cpu)
15217 		perf_event_exit_cpu(cpu);
15218 
15219 	return NOTIFY_OK;
15220 }
15221 
15222 /*
15223  * Run the perf reboot notifier at the very last possible moment so that
15224  * the generic watchdog code runs as long as possible.
15225  */
15226 static struct notifier_block perf_reboot_notifier = {
15227 	.notifier_call = perf_reboot,
15228 	.priority = INT_MIN,
15229 };
15230 
perf_event_init(void)15231 void __init perf_event_init(void)
15232 {
15233 	int ret;
15234 
15235 	idr_init(&pmu_idr);
15236 
15237 	unwind_deferred_init(&perf_unwind_work,
15238 			     perf_unwind_deferred_callback);
15239 
15240 	perf_event_init_all_cpus();
15241 	init_srcu_struct(&pmus_srcu);
15242 	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
15243 	perf_pmu_register(&perf_cpu_clock, "cpu_clock", -1);
15244 	perf_pmu_register(&perf_task_clock, "task_clock", -1);
15245 	perf_tp_register();
15246 	perf_event_init_cpu(smp_processor_id());
15247 	register_reboot_notifier(&perf_reboot_notifier);
15248 
15249 	ret = init_hw_breakpoint();
15250 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
15251 
15252 	perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);
15253 
15254 	/*
15255 	 * Build time assertion that we keep the data_head at the intended
15256 	 * location.  IOW, validation we got the __reserved[] size right.
15257 	 */
15258 	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
15259 		     != 1024);
15260 }
15261 
perf_event_sysfs_show(struct device * dev,struct device_attribute * attr,char * page)15262 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
15263 			      char *page)
15264 {
15265 	struct perf_pmu_events_attr *pmu_attr =
15266 		container_of(attr, struct perf_pmu_events_attr, attr);
15267 
15268 	if (pmu_attr->event_str)
15269 		return sprintf(page, "%s\n", pmu_attr->event_str);
15270 
15271 	return 0;
15272 }
15273 EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
15274 
perf_event_sysfs_init(void)15275 static int __init perf_event_sysfs_init(void)
15276 {
15277 	struct pmu *pmu;
15278 	int ret;
15279 
15280 	mutex_lock(&pmus_lock);
15281 
15282 	ret = bus_register(&pmu_bus);
15283 	if (ret)
15284 		goto unlock;
15285 
15286 	list_for_each_entry(pmu, &pmus, entry) {
15287 		if (pmu->dev)
15288 			continue;
15289 
15290 		ret = pmu_dev_alloc(pmu);
15291 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
15292 	}
15293 	pmu_bus_running = 1;
15294 	ret = 0;
15295 
15296 unlock:
15297 	mutex_unlock(&pmus_lock);
15298 
15299 	return ret;
15300 }
15301 device_initcall(perf_event_sysfs_init);
15302 
15303 #ifdef CONFIG_CGROUP_PERF
15304 static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state * parent_css)15305 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
15306 {
15307 	struct perf_cgroup *jc;
15308 
15309 	jc = kzalloc_obj(*jc);
15310 	if (!jc)
15311 		return ERR_PTR(-ENOMEM);
15312 
15313 	jc->info = alloc_percpu(struct perf_cgroup_info);
15314 	if (!jc->info) {
15315 		kfree(jc);
15316 		return ERR_PTR(-ENOMEM);
15317 	}
15318 
15319 	return &jc->css;
15320 }
15321 
perf_cgroup_css_free(struct cgroup_subsys_state * css)15322 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
15323 {
15324 	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
15325 
15326 	free_percpu(jc->info);
15327 	kfree(jc);
15328 }
15329 
perf_cgroup_css_online(struct cgroup_subsys_state * css)15330 static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
15331 {
15332 	perf_event_cgroup(css->cgroup);
15333 	return 0;
15334 }
15335 
__perf_cgroup_move(void * info)15336 static int __perf_cgroup_move(void *info)
15337 {
15338 	struct task_struct *task = info;
15339 
15340 	preempt_disable();
15341 	perf_cgroup_switch(task);
15342 	preempt_enable();
15343 
15344 	return 0;
15345 }
15346 
perf_cgroup_attach(struct cgroup_taskset * tset)15347 static void perf_cgroup_attach(struct cgroup_taskset *tset)
15348 {
15349 	struct task_struct *task;
15350 	struct cgroup_subsys_state *css;
15351 
15352 	cgroup_taskset_for_each(task, css, tset)
15353 		task_function_call(task, __perf_cgroup_move, task);
15354 }
15355 
15356 struct cgroup_subsys perf_event_cgrp_subsys = {
15357 	.css_alloc	= perf_cgroup_css_alloc,
15358 	.css_free	= perf_cgroup_css_free,
15359 	.css_online	= perf_cgroup_css_online,
15360 	.attach		= perf_cgroup_attach,
15361 	/*
15362 	 * Implicitly enable on dfl hierarchy so that perf events can
15363 	 * always be filtered by cgroup2 path as long as perf_event
15364 	 * controller is not mounted on a legacy hierarchy.
15365 	 */
15366 	.implicit_on_dfl = true,
15367 	.threaded	= true,
15368 };
15369 #endif /* CONFIG_CGROUP_PERF */
15370 
15371 DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
15372