Lines Matching refs:event
225 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
227 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
261 struct perf_event *event; member
269 struct perf_event *event = efs->event; in event_function() local
270 struct perf_event_context *ctx = event->ctx; in event_function()
305 efs->func(event, cpuctx, ctx, efs->data); in event_function()
312 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
314 struct perf_event_context *ctx = event->ctx; in event_function_call()
318 .event = event, in event_function_call()
323 if (!event->parent) { in event_function_call()
333 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
359 func(event, NULL, ctx, data); in event_function_call()
369 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
371 struct perf_event_context *ctx = event->ctx; in event_function_local()
408 func(event, cpuctx, ctx, data); in event_function_local()
668 static u64 perf_event_time(struct perf_event *event);
677 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
679 return event->clock(); in perf_event_clock()
705 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
707 struct perf_event *leader = event->group_leader; in __perf_effective_state()
712 return event->state; in __perf_effective_state()
716 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
718 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
719 u64 delta = now - event->tstamp; in __perf_update_times()
721 *enabled = event->total_time_enabled; in __perf_update_times()
725 *running = event->total_time_running; in __perf_update_times()
730 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
732 u64 now = perf_event_time(event); in perf_event_update_time()
734 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
735 &event->total_time_running); in perf_event_update_time()
736 event->tstamp = now; in perf_event_update_time()
748 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
750 if (event->state == state) in perf_event_set_state()
753 perf_event_update_time(event); in perf_event_set_state()
758 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
759 perf_event_update_sibling_time(event); in perf_event_set_state()
761 WRITE_ONCE(event->state, state); in perf_event_set_state()
811 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
816 if (!event->cgrp) in perf_cgroup_match()
830 event->cgrp->css.cgroup); in perf_cgroup_match()
833 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
835 css_put(&event->cgrp->css); in perf_detach_cgroup()
836 event->cgrp = NULL; in perf_detach_cgroup()
839 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
841 return event->cgrp != NULL; in is_cgroup_event()
844 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
848 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
852 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
856 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
894 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
902 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
905 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
986 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
1027 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
1044 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
1049 event->cgrp = cgrp; in perf_cgroup_connect()
1057 perf_detach_cgroup(event); in perf_cgroup_connect()
1064 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1068 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1071 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
1086 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1090 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1093 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1110 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1115 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1118 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1123 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1132 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1144 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1149 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1155 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1160 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1264 static inline void perf_pmu_read(struct perf_event *event) in perf_pmu_read() argument
1266 if (event->state == PERF_EVENT_STATE_ACTIVE) in perf_pmu_read()
1267 event->pmu->read(event); in perf_pmu_read()
1366 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1372 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1380 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1390 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1392 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1395 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1421 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1428 if (event->parent) in perf_event_pid_type()
1429 event = event->parent; in perf_event_pid_type()
1431 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1438 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1440 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1443 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1445 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1452 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1454 u64 id = event->id; in primary_event_id()
1456 if (event->parent) in primary_event_id()
1457 id = event->parent->id; in primary_event_id()
1577 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1579 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1584 if (is_cgroup_event(event)) in perf_event_time()
1585 return perf_cgroup_event_time(event); in perf_event_time()
1590 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1592 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1597 if (is_cgroup_event(event)) in perf_event_time_now()
1598 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1607 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1609 struct perf_event_context *ctx = event->ctx; in get_event_type()
1618 if (event->group_leader != event) in get_event_type()
1619 event = event->group_leader; in get_event_type()
1621 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1631 static void init_event_group(struct perf_event *event) in init_event_group() argument
1633 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1634 event->group_index = 0; in init_event_group()
1642 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1644 if (event->attr.pinned) in get_event_groups()
1659 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1664 if (event->cgrp) in event_cgroup()
1665 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1773 struct perf_event *event) in perf_event_groups_insert() argument
1775 event->group_index = ++groups->index; in perf_event_groups_insert()
1777 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1784 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1788 groups = get_event_groups(event, ctx); in add_event_to_groups()
1789 perf_event_groups_insert(groups, event); in add_event_to_groups()
1797 struct perf_event *event) in perf_event_groups_delete() argument
1799 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1802 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1803 init_event_group(event); in perf_event_groups_delete()
1810 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1814 groups = get_event_groups(event, ctx); in del_event_from_groups()
1815 perf_event_groups_delete(groups, event); in del_event_from_groups()
1840 perf_event_groups_next(struct perf_event *event, struct pmu *pmu) in perf_event_groups_next() argument
1843 .cpu = event->cpu, in perf_event_groups_next()
1845 .cgroup = event_cgroup(event), in perf_event_groups_next()
1849 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1856 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \ argument
1857 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1858 event; event = perf_event_groups_next(event, pmu))
1863 #define perf_event_groups_for_each(event, groups) \ argument
1864 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1865 typeof(*event), group_node); event; \
1866 event = rb_entry_safe(rb_next(&event->group_node), \
1867 typeof(*event), group_node))
1882 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1886 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1887 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1889 event->tstamp = perf_event_time(event); in list_add_event()
1896 if (event->group_leader == event) { in list_add_event()
1897 event->group_caps = event->event_caps; in list_add_event()
1898 add_event_to_groups(event, ctx); in list_add_event()
1901 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1903 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1905 if (event->attr.inherit_stat) in list_add_event()
1907 if (has_inherit_and_sample_read(&event->attr)) in list_add_event()
1910 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1911 perf_cgroup_event_enable(event, ctx); in list_add_event()
1914 event->pmu_ctx->nr_events++; in list_add_event()
1920 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1922 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1956 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1974 size += event->read_size; in __perf_event_header_size()
1994 event->header_size = size; in __perf_event_header_size()
2001 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
2003 event->read_size = in perf_event__header_size()
2004 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
2005 event->group_leader->nr_siblings); in perf_event__header_size()
2006 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
2009 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
2012 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
2033 event->id_header_size = size; in perf_event__id_header_size()
2047 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
2049 struct perf_event *sibling, *group_leader = event->group_leader; in perf_event_validate_size()
2051 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
2066 if (event == group_leader) in perf_event_validate_size()
2078 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
2080 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
2082 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
2088 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
2091 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
2093 if (group_leader == event) in perf_group_attach()
2096 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
2098 group_leader->group_caps &= event->event_caps; in perf_group_attach()
2100 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
2115 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2117 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2123 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2126 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2129 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
2131 if (event->attr.inherit_stat) in list_del_event()
2133 if (has_inherit_and_sample_read(&event->attr)) in list_del_event()
2136 list_del_rcu(&event->event_entry); in list_del_event()
2138 if (event->group_leader == event) in list_del_event()
2139 del_event_from_groups(event, ctx); in list_del_event()
2142 event->pmu_ctx->nr_events--; in list_del_event()
2146 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2151 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2154 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2157 static void put_event(struct perf_event *event);
2158 static void __event_disable(struct perf_event *event,
2162 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2164 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2170 if (event->aux_event) { in perf_put_aux_event()
2171 iter = event->aux_event; in perf_put_aux_event()
2172 event->aux_event = NULL; in perf_put_aux_event()
2181 for_each_sibling_event(iter, event) { in perf_put_aux_event()
2182 if (iter->aux_event != event) in perf_put_aux_event()
2186 put_event(event); in perf_put_aux_event()
2197 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2199 return event->attr.aux_output || has_aux_action(event); in perf_need_aux_event()
2202 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2217 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2220 if (event->attr.aux_output && in perf_get_aux_event()
2221 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2224 if ((event->attr.aux_pause || event->attr.aux_resume) && in perf_get_aux_event()
2228 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2240 event->aux_event = group_leader; in perf_get_aux_event()
2245 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2247 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2248 &event->pmu_ctx->flexible_active; in get_event_list()
2251 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2253 struct perf_event *leader = event->group_leader; in perf_group_detach()
2255 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2262 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2265 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2267 perf_put_aux_event(event); in perf_group_detach()
2272 if (leader != event) { in perf_group_detach()
2273 list_del_init(&event->sibling_list); in perf_group_detach()
2274 event->group_leader->nr_siblings--; in perf_group_detach()
2275 event->group_leader->group_generation++; in perf_group_detach()
2284 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2300 sibling->group_caps = event->group_caps; in perf_group_detach()
2303 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2309 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2321 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2323 struct perf_event *parent_event = event->parent; in perf_child_detach()
2325 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2328 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2339 sync_child_event(event); in perf_child_detach()
2340 list_del_init(&event->child_list); in perf_child_detach()
2343 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2345 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2349 event_filter_match(struct perf_event *event) in event_filter_match() argument
2351 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2352 perf_cgroup_match(event); in event_filter_match()
2355 static inline bool is_event_in_freq_mode(struct perf_event *event) in is_event_in_freq_mode() argument
2357 return event->attr.freq && event->attr.sample_freq; in is_event_in_freq_mode()
2361 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) in event_sched_out() argument
2363 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2369 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2372 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2380 list_del_init(&event->active_list); in event_sched_out()
2382 perf_pmu_disable(event->pmu); in event_sched_out()
2384 event->pmu->del(event, 0); in event_sched_out()
2385 event->oncpu = -1; in event_sched_out()
2387 if (event->pending_disable) { in event_sched_out()
2388 event->pending_disable = 0; in event_sched_out()
2389 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2393 perf_event_set_state(event, state); in event_sched_out()
2395 if (!is_software_event(event)) in event_sched_out()
2397 if (is_event_in_freq_mode(event)) { in event_sched_out()
2401 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2404 perf_pmu_enable(event->pmu); in event_sched_out()
2410 struct perf_event *event; in group_sched_out() local
2422 for_each_sibling_event(event, group_event) in group_sched_out()
2423 event_sched_out(event, ctx); in group_sched_out()
2455 ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) in ctx_time_update_event() argument
2461 update_cgrp_time_from_event(event); in ctx_time_update_event()
2478 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2483 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context()
2500 event_sched_out(event, ctx); in __perf_remove_from_context()
2502 if (event->state > PERF_EVENT_STATE_OFF) in __perf_remove_from_context()
2503 perf_cgroup_event_disable(event, ctx); in __perf_remove_from_context()
2505 perf_event_set_state(event, min(event->state, state)); in __perf_remove_from_context()
2508 perf_group_detach(event); in __perf_remove_from_context()
2510 perf_child_detach(event); in __perf_remove_from_context()
2511 list_del_event(event, ctx); in __perf_remove_from_context()
2546 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2548 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2559 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context), in perf_remove_from_context()
2566 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2569 static void __event_disable(struct perf_event *event, in __event_disable() argument
2573 event_sched_out(event, ctx); in __event_disable()
2574 perf_cgroup_event_disable(event, ctx); in __event_disable()
2575 perf_event_set_state(event, state); in __event_disable()
2581 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2586 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2589 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2590 ctx_time_update_event(ctx, event); in __perf_event_disable()
2596 if (event == event->group_leader) in __perf_event_disable()
2597 group_sched_out(event, ctx); in __perf_event_disable()
2603 __event_disable(event, ctx, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2605 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2622 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2624 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2627 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2633 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2636 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2638 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2645 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2649 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2650 _perf_event_disable(event); in perf_event_disable()
2651 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2655 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2657 event->pending_disable = 1; in perf_event_disable_inatomic()
2658 irq_work_queue(&event->pending_disable_irq); in perf_event_disable_inatomic()
2663 static void perf_log_throttle(struct perf_event *event, int enable);
2664 static void perf_log_itrace_start(struct perf_event *event);
2666 static void perf_event_unthrottle(struct perf_event *event, bool start) in perf_event_unthrottle() argument
2668 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_unthrottle()
2671 event->hw.interrupts = 0; in perf_event_unthrottle()
2673 event->pmu->start(event, 0); in perf_event_unthrottle()
2674 if (event == event->group_leader) in perf_event_unthrottle()
2675 perf_log_throttle(event, 1); in perf_event_unthrottle()
2678 static void perf_event_throttle(struct perf_event *event) in perf_event_throttle() argument
2680 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_throttle()
2683 event->hw.interrupts = MAX_INTERRUPTS; in perf_event_throttle()
2684 event->pmu->stop(event, 0); in perf_event_throttle()
2685 if (event == event->group_leader) in perf_event_throttle()
2686 perf_log_throttle(event, 0); in perf_event_throttle()
2689 static void perf_event_unthrottle_group(struct perf_event *event, bool skip_start_event) in perf_event_unthrottle_group() argument
2691 struct perf_event *sibling, *leader = event->group_leader; in perf_event_unthrottle_group()
2693 perf_event_unthrottle(leader, skip_start_event ? leader != event : true); in perf_event_unthrottle_group()
2695 perf_event_unthrottle(sibling, skip_start_event ? sibling != event : true); in perf_event_unthrottle_group()
2698 static void perf_event_throttle_group(struct perf_event *event) in perf_event_throttle_group() argument
2700 struct perf_event *sibling, *leader = event->group_leader; in perf_event_throttle_group()
2708 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) in event_sched_in() argument
2710 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2714 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2718 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2721 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2728 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2735 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) in event_sched_in()
2736 perf_event_unthrottle(event, false); in event_sched_in()
2738 perf_pmu_disable(event->pmu); in event_sched_in()
2740 perf_log_itrace_start(event); in event_sched_in()
2742 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2743 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2744 event->oncpu = -1; in event_sched_in()
2749 if (!is_software_event(event)) in event_sched_in()
2751 if (is_event_in_freq_mode(event)) { in event_sched_in()
2755 if (event->attr.exclusive) in event_sched_in()
2759 perf_pmu_enable(event->pmu); in event_sched_in()
2767 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2781 for_each_sibling_event(event, group_event) { in group_sched_in()
2782 if (event_sched_in(event, ctx)) { in group_sched_in()
2783 partial_group = event; in group_sched_in()
2797 for_each_sibling_event(event, group_event) { in group_sched_in()
2798 if (event == partial_group) in group_sched_in()
2801 event_sched_out(event, ctx); in group_sched_in()
2813 static int group_can_go_on(struct perf_event *event, int can_add_hw) in group_can_go_on() argument
2815 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
2821 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2833 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2842 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2845 list_add_event(event, ctx); in add_event_to_ctx()
2846 perf_group_attach(event); in add_event_to_ctx()
2958 struct perf_event *event = info; in __perf_install_in_context() local
2959 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2990 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2997 event->cgrp->css.cgroup); in __perf_install_in_context()
3003 add_event_to_ctx(event, ctx); in __perf_install_in_context()
3004 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, in __perf_install_in_context()
3005 get_event_type(event)); in __perf_install_in_context()
3007 add_event_to_ctx(event, ctx); in __perf_install_in_context()
3016 static bool exclusive_event_installable(struct perf_event *event,
3026 struct perf_event *event, in perf_install_in_context() argument
3033 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
3035 if (event->cpu != -1) in perf_install_in_context()
3036 WARN_ON_ONCE(event->cpu != cpu); in perf_install_in_context()
3042 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
3052 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
3053 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
3059 add_event_to_ctx(event, ctx); in perf_install_in_context()
3065 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
3107 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
3129 add_event_to_ctx(event, ctx); in perf_install_in_context()
3136 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
3141 struct perf_event *leader = event->group_leader; in __perf_event_enable()
3144 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
3145 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
3150 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
3151 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
3156 if (!event_filter_match(event)) in __perf_event_enable()
3163 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_enable()
3170 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event)); in __perf_event_enable()
3182 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
3184 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
3187 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
3188 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3201 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3205 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3206 event->group_leader == event) in _perf_event_enable()
3209 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3213 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3219 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3223 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3224 _perf_event_enable(event); in perf_event_enable()
3225 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
3230 struct perf_event *event; member
3237 struct perf_event *event = sd->event; in __perf_event_stop() local
3240 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3250 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3253 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3265 event->pmu->start(event, 0); in __perf_event_stop()
3270 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3273 .event = event, in perf_event_stop()
3279 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3290 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3319 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3321 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3323 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3327 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3328 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3329 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3335 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3340 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3343 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3344 _perf_event_enable(event); in _perf_event_refresh()
3352 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3357 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3358 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3359 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
3389 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3396 if (event->attr.type != attr->type) in perf_event_modify_attr()
3399 switch (event->attr.type) { in perf_event_modify_attr()
3408 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3410 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3416 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3417 err = func(event, attr); in perf_event_modify_attr()
3420 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3427 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3435 struct perf_event *event, *tmp; in __pmu_ctx_sched_out() local
3450 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3453 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3457 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3460 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3582 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3587 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3597 perf_pmu_read(event); in __perf_event_sync_stat()
3599 perf_event_update_time(event); in __perf_event_sync_stat()
3606 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3609 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3610 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3615 perf_event_update_userpage(event); in __perf_event_sync_stat()
3622 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3629 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3635 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3638 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3640 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3873 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event) in __heap_add() argument
3877 if (event) { in __heap_add()
3878 itrs[heap->nr] = event; in __heap_add()
3975 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3977 if (likely(!refcount_read(&event->mmap_count))) in event_update_userpage()
3980 perf_event_update_time(event); in event_update_userpage()
3981 perf_event_update_userpage(event); in event_update_userpage()
3988 struct perf_event *event; in group_update_userpage() local
3993 for_each_sibling_event(event, group_event) in group_update_userpage()
3994 event_update_userpage(event); in group_update_userpage()
3997 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3999 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
4002 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
4005 if (!event_filter_match(event)) in merge_sched_in()
4008 if (group_can_go_on(event, *can_add_hw)) { in merge_sched_in()
4009 if (!group_sched_in(event, ctx)) in merge_sched_in()
4010 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
4013 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
4015 if (event->attr.pinned) { in merge_sched_in()
4016 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
4017 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
4019 if (*perf_event_fasync(event)) in merge_sched_in()
4020 event->pending_kill = POLL_ERR; in merge_sched_in()
4022 perf_event_wakeup(event); in merge_sched_in()
4024 struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu); in merge_sched_in()
4026 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
4028 group_update_userpage(event); in merge_sched_in()
4188 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
4190 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4264 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4266 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4270 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4288 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4293 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
4299 struct perf_event *event; in perf_adjust_freq_unthr_events() local
4304 list_for_each_entry(event, event_list, active_list) { in perf_adjust_freq_unthr_events()
4305 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_events()
4309 if (!event_filter_match(event)) in perf_adjust_freq_unthr_events()
4312 hwc = &event->hw; in perf_adjust_freq_unthr_events()
4315 perf_event_unthrottle_group(event, is_event_in_freq_mode(event)); in perf_adjust_freq_unthr_events()
4317 if (!is_event_in_freq_mode(event)) in perf_adjust_freq_unthr_events()
4323 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_events()
4325 now = local64_read(&event->count); in perf_adjust_freq_unthr_events()
4337 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_events()
4339 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_events()
4383 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4392 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4393 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4400 struct perf_event *event; in ctx_event_to_rotate() local
4408 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4410 if (event) in ctx_event_to_rotate()
4421 event = __node_2_pe(node); in ctx_event_to_rotate()
4428 event = __node_2_pe(node); in ctx_event_to_rotate()
4435 event = __node_2_pe(node); in ctx_event_to_rotate()
4444 return event; in ctx_event_to_rotate()
4527 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4530 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4533 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4534 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4537 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4551 struct perf_event *event; in perf_event_enable_on_exec() local
4566 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4567 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4568 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4587 static void perf_remove_from_owner(struct perf_event *event);
4588 static void perf_event_exit_event(struct perf_event *event,
4599 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4608 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4609 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4612 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4613 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4617 perf_event_exit_event(event, ctx, false); in perf_event_remove_on_exec()
4633 struct perf_event *event; member
4640 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4648 if (event->group_caps & PERF_EV_CAP_READ_SCOPE) { in __perf_event_read_cpu()
4649 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu); in __perf_event_read_cpu()
4655 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4672 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4673 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4675 struct pmu *pmu = event->pmu; in __perf_event_read()
4688 ctx_time_update_event(ctx, event); in __perf_event_read()
4690 perf_event_update_time(event); in __perf_event_read()
4692 perf_event_update_sibling_time(event); in __perf_event_read()
4694 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4698 pmu->read(event); in __perf_event_read()
4705 pmu->read(event); in __perf_event_read()
4707 for_each_sibling_event(sub, event) in __perf_event_read()
4716 static inline u64 perf_event_count(struct perf_event *event, bool self) in perf_event_count() argument
4719 return local64_read(&event->count); in perf_event_count()
4721 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4724 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4732 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4733 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4744 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4762 if (event->attr.inherit) { in perf_event_read_local()
4768 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4769 event->hw.target != current) { in perf_event_read_local()
4778 event_oncpu = __perf_event_read_cpu(event, event->oncpu); in perf_event_read_local()
4779 event_cpu = __perf_event_read_cpu(event, event->cpu); in perf_event_read_local()
4782 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4789 if (event->attr.pinned && event_oncpu != smp_processor_id()) { in perf_event_read_local()
4800 event->pmu->read(event); in perf_event_read_local()
4802 *value = local64_read(&event->count); in perf_event_read_local()
4806 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
4818 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4820 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4839 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4844 .event = event, in perf_event_read()
4850 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4867 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4871 state = event->state; in perf_event_read()
4881 ctx_time_update_event(ctx, event); in perf_event_read()
4883 perf_event_update_time(event); in perf_event_read()
4885 perf_event_update_sibling_time(event); in perf_event_read()
4956 find_get_context(struct task_struct *task, struct perf_event *event) in find_get_context() argument
4969 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in find_get_context()
5030 struct perf_event *event) in find_get_pmu_context() argument
5042 cpc = *per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
5159 static void perf_event_free_filter(struct perf_event *event);
5163 struct perf_event *event = container_of(head, typeof(*event), rcu_head); in free_event_rcu() local
5165 if (event->ns) in free_event_rcu()
5166 put_pid_ns(event->ns); in free_event_rcu()
5167 perf_event_free_filter(event); in free_event_rcu()
5168 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
5171 static void ring_buffer_attach(struct perf_event *event,
5174 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
5176 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
5179 list_del_rcu(&event->sb_list); in detach_sb_event()
5183 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
5185 struct perf_event_attr *attr = &event->attr; in is_sb_event()
5187 if (event->parent) in is_sb_event()
5190 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
5203 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
5205 if (is_sb_event(event)) in unaccount_pmu_sb_event()
5206 detach_sb_event(event); in unaccount_pmu_sb_event()
5359 attach_perf_ctx_data(struct perf_event *event) in attach_perf_ctx_data() argument
5361 struct task_struct *task = event->hw.target; in attach_perf_ctx_data()
5362 struct kmem_cache *ctx_cache = event->pmu->task_ctx_cache; in attach_perf_ctx_data()
5375 event->attach_state |= PERF_ATTACH_GLOBAL_DATA; in attach_perf_ctx_data()
5435 static void detach_perf_ctx_data(struct perf_event *event) in detach_perf_ctx_data() argument
5437 struct task_struct *task = event->hw.target; in detach_perf_ctx_data()
5439 event->attach_state &= ~PERF_ATTACH_TASK_DATA; in detach_perf_ctx_data()
5444 if (event->attach_state & PERF_ATTACH_GLOBAL_DATA) { in detach_perf_ctx_data()
5446 event->attach_state &= ~PERF_ATTACH_GLOBAL_DATA; in detach_perf_ctx_data()
5450 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
5454 if (event->parent) in unaccount_event()
5457 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
5459 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
5461 if (event->attr.build_id) in unaccount_event()
5463 if (event->attr.comm) in unaccount_event()
5465 if (event->attr.namespaces) in unaccount_event()
5467 if (event->attr.cgroup) in unaccount_event()
5469 if (event->attr.task) in unaccount_event()
5471 if (event->attr.freq) in unaccount_event()
5473 if (event->attr.context_switch) { in unaccount_event()
5477 if (is_cgroup_event(event)) in unaccount_event()
5479 if (has_branch_stack(event)) in unaccount_event()
5481 if (event->attr.ksymbol) in unaccount_event()
5483 if (event->attr.bpf_event) in unaccount_event()
5485 if (event->attr.text_poke) in unaccount_event()
5493 unaccount_pmu_sb_event(event); in unaccount_event()
5516 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
5518 struct pmu *pmu = event->pmu; in exclusive_event_init()
5536 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
5544 event->attach_state |= PERF_ATTACH_EXCLUSIVE; in exclusive_event_init()
5549 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
5551 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
5554 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
5559 event->attach_state &= ~PERF_ATTACH_EXCLUSIVE; in exclusive_event_destroy()
5572 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
5576 struct pmu *pmu = event->pmu; in exclusive_event_installable()
5584 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
5591 static void perf_free_addr_filters(struct perf_event *event);
5594 static void __free_event(struct perf_event *event) in __free_event() argument
5596 struct pmu *pmu = event->pmu; in __free_event()
5598 if (event->attach_state & PERF_ATTACH_CALLCHAIN) in __free_event()
5601 kfree(event->addr_filter_ranges); in __free_event()
5603 if (event->attach_state & PERF_ATTACH_EXCLUSIVE) in __free_event()
5604 exclusive_event_destroy(event); in __free_event()
5606 if (is_cgroup_event(event)) in __free_event()
5607 perf_detach_cgroup(event); in __free_event()
5609 if (event->attach_state & PERF_ATTACH_TASK_DATA) in __free_event()
5610 detach_perf_ctx_data(event); in __free_event()
5612 if (event->destroy) in __free_event()
5613 event->destroy(event); in __free_event()
5619 if (event->hw.target) in __free_event()
5620 put_task_struct(event->hw.target); in __free_event()
5622 if (event->pmu_ctx) { in __free_event()
5628 WARN_ON_ONCE(!event->ctx); in __free_event()
5629 WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx); in __free_event()
5630 put_pmu_ctx(event->pmu_ctx); in __free_event()
5637 if (event->ctx) in __free_event()
5638 put_ctx(event->ctx); in __free_event()
5643 list_del(&event->pmu_list); in __free_event()
5648 call_rcu(&event->rcu_head, free_event_rcu); in __free_event()
5654 static void _free_event(struct perf_event *event) in DEFINE_FREE()
5656 irq_work_sync(&event->pending_irq); in DEFINE_FREE()
5657 irq_work_sync(&event->pending_disable_irq); in DEFINE_FREE()
5659 unaccount_event(event); in DEFINE_FREE()
5661 security_perf_event_free(event); in DEFINE_FREE()
5663 if (event->rb) { in DEFINE_FREE()
5670 mutex_lock(&event->mmap_mutex); in DEFINE_FREE()
5671 ring_buffer_attach(event, NULL); in DEFINE_FREE()
5672 mutex_unlock(&event->mmap_mutex); in DEFINE_FREE()
5675 perf_event_free_bpf_prog(event); in DEFINE_FREE()
5676 perf_free_addr_filters(event); in DEFINE_FREE()
5678 __free_event(event); in DEFINE_FREE()
5685 static void free_event(struct perf_event *event) in free_event() argument
5687 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5689 atomic_long_read(&event->refcount), event)) { in free_event()
5694 _free_event(event); in free_event()
5700 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5711 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5739 if (event->owner) { in perf_remove_from_owner()
5740 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5741 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5748 static void put_event(struct perf_event *event) in put_event() argument
5752 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5755 parent = event->parent; in put_event()
5756 _free_event(event); in put_event()
5768 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5770 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5778 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5783 if (!is_kernel_event(event)) in perf_event_release_kernel()
5784 perf_remove_from_owner(event); in perf_event_release_kernel()
5786 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5800 if (event->state > PERF_EVENT_STATE_REVOKED) { in perf_event_release_kernel()
5801 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5803 event->state = PERF_EVENT_STATE_DEAD; in perf_event_release_kernel()
5806 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5809 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5810 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5831 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5833 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5840 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5848 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5859 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5866 put_event(event); in perf_event_release_kernel()
5880 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5888 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5890 (void)perf_event_read(event, false); in __perf_event_read_value()
5891 total += perf_event_count(event, false); in __perf_event_read_value()
5893 *enabled += event->total_time_enabled + in __perf_event_read_value()
5894 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5895 *running += event->total_time_running + in __perf_event_read_value()
5896 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5898 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5904 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5909 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5914 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5915 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5916 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
6001 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
6004 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
6011 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
6031 ret = event->read_size; in perf_read_group()
6032 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
6043 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
6050 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
6056 values[n++] = primary_event_id(event); in perf_read_one()
6058 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
6066 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
6070 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
6073 mutex_lock(&event->child_mutex); in is_event_hup()
6074 no_children = list_empty(&event->child_list); in is_event_hup()
6075 mutex_unlock(&event->child_mutex); in is_event_hup()
6083 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
6085 u64 read_format = event->attr.read_format; in __perf_read()
6093 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
6096 if (count < event->read_size) in __perf_read()
6099 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
6101 ret = perf_read_group(event, read_format, buf); in __perf_read()
6103 ret = perf_read_one(event, read_format, buf); in __perf_read()
6111 struct perf_event *event = file->private_data; in perf_read() local
6115 ret = security_perf_event_read(event); in perf_read()
6119 ctx = perf_event_ctx_lock(event); in perf_read()
6120 ret = __perf_read(event, buf, count); in perf_read()
6121 perf_event_ctx_unlock(event, ctx); in perf_read()
6128 struct perf_event *event = file->private_data; in perf_poll() local
6132 if (event->state <= PERF_EVENT_STATE_REVOKED) in perf_poll()
6135 poll_wait(file, &event->waitq, wait); in perf_poll()
6137 if (event->state <= PERF_EVENT_STATE_REVOKED) in perf_poll()
6140 if (is_event_hup(event)) in perf_poll()
6143 if (unlikely(READ_ONCE(event->state) == PERF_EVENT_STATE_ERROR && in perf_poll()
6144 event->attr.pinned)) in perf_poll()
6151 mutex_lock(&event->mmap_mutex); in perf_poll()
6152 rb = event->rb; in perf_poll()
6155 mutex_unlock(&event->mmap_mutex); in perf_poll()
6159 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
6161 (void)perf_event_read(event, false); in _perf_event_reset()
6162 local64_set(&event->count, 0); in _perf_event_reset()
6163 perf_event_update_userpage(event); in _perf_event_reset()
6167 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
6172 ctx = perf_event_ctx_lock(event); in perf_event_pause()
6173 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
6174 _perf_event_disable(event); in perf_event_pause()
6175 count = local64_read(&event->count); in perf_event_pause()
6177 local64_set(&event->count, 0); in perf_event_pause()
6178 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
6190 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
6195 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
6197 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
6198 func(event); in perf_event_for_each_child()
6199 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
6201 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
6204 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
6207 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
6212 event = event->group_leader; in perf_event_for_each()
6214 perf_event_for_each_child(event, func); in perf_event_for_each()
6215 for_each_sibling_event(sibling, event) in perf_event_for_each()
6219 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
6227 if (event->attr.freq) { in __perf_event_period()
6228 event->attr.sample_freq = value; in __perf_event_period()
6230 event->attr.sample_period = value; in __perf_event_period()
6231 event->hw.sample_period = value; in __perf_event_period()
6234 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
6236 perf_pmu_disable(event->pmu); in __perf_event_period()
6237 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
6240 local64_set(&event->hw.period_left, 0); in __perf_event_period()
6243 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
6250 if (event->hw.interrupts == MAX_INTERRUPTS) in __perf_event_period()
6251 perf_event_unthrottle_group(event, true); in __perf_event_period()
6252 perf_pmu_enable(event->pmu); in __perf_event_period()
6256 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
6258 return event->pmu->check_period(event, value); in perf_event_check_period()
6261 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
6263 if (!is_sampling_event(event)) in _perf_event_period()
6269 if (event->attr.freq) { in _perf_event_period()
6273 if (perf_event_check_period(event, value)) in _perf_event_period()
6279 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
6284 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
6289 ctx = perf_event_ctx_lock(event); in perf_event_period()
6290 ret = _perf_event_period(event, value); in perf_event_period()
6291 perf_event_ctx_unlock(event, ctx); in perf_event_period()
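perf_event_period()/_perf_event_period() are reached from userspace through the PERF_EVENT_IOC_PERIOD ioctl, which takes a pointer to a u64 and only applies to sampling events. A hedged sketch, assuming fd is such a sampling event:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int set_sample_period(int fd, uint64_t period)
{
	/* For attr.freq events the same ioctl updates sample_freq instead. */
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}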
6304 static int perf_event_set_output(struct perf_event *event,
6306 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
6309 static int __perf_event_set_bpf_prog(struct perf_event *event,
6313 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
6318 if (event->state <= PERF_EVENT_STATE_REVOKED) in _perf_ioctl()
6333 return _perf_event_refresh(event, arg); in _perf_ioctl()
6342 return _perf_event_period(event, value); in _perf_ioctl()
6346 u64 id = primary_event_id(event); in _perf_ioctl()
6362 return perf_event_set_output(event, output_event); in _perf_ioctl()
6366 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
6377 err = __perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
6390 rb = rcu_dereference(event->rb); in _perf_ioctl()
6401 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
6411 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
6418 perf_event_for_each(event, func); in _perf_ioctl()
6420 perf_event_for_each_child(event, func); in _perf_ioctl()
6427 struct perf_event *event = file->private_data; in perf_ioctl() local
6432 ret = security_perf_event_write(event); in perf_ioctl()
6436 ctx = perf_event_ctx_lock(event); in perf_ioctl()
6437 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
6438 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
6468 struct perf_event *event; in perf_event_task_enable() local
6471 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
6472 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
6473 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
6474 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
6484 struct perf_event *event; in perf_event_task_disable() local
6487 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
6488 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
6489 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
6490 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
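perf_event_task_enable() and perf_event_task_disable() are invoked from the prctl() syscall, letting a task toggle every counter it owns in one call. Sketch of the userspace side:

#include <sys/prctl.h>

static void toggle_own_counters(int enable)
{
	prctl(enable ? PR_TASK_PERF_EVENTS_ENABLE
		     : PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
}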
6497 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
6499 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
6502 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
6505 return event->pmu->event_idx(event); in perf_event_index()
6508 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
6514 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
6531 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
6540 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
6547 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
6560 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
6570 userpg->index = perf_event_index(event); in perf_event_update_userpage()
6571 userpg->offset = perf_event_count(event, false); in perf_event_update_userpage()
6573 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
6576 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
6579 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
6581 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
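perf_event_update_userpage() publishes the fields that self-monitoring userspace reads through the mmap'ed control page, guarded by the lock sequence counter. A sketch of the read side, assuming fd is a perf event descriptor; the arch-specific rdpmc step behind cap_user_rdpmc is deliberately omitted:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <stdint.h>
#include <unistd.h>

static int read_userpage(int fd, uint64_t *enabled, uint64_t *running)
{
	long psize = sysconf(_SC_PAGESIZE);
	struct perf_event_mmap_page *pc;
	uint32_t seq;

	/* Page 0 of the mapping is the control/status page. */
	pc = mmap(NULL, psize, PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED)
		return -1;

	do {
		seq = pc->lock;
		__sync_synchronize();	/* pairs with the kernel's update barriers */
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		__sync_synchronize();
	} while ((seq & 1) || pc->lock != seq);	/* retry if an update raced */

	munmap(pc, psize);
	return 0;
}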
6591 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
6597 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
6599 if (event->rb) { in ring_buffer_attach()
6604 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
6606 old_rb = event->rb; in ring_buffer_attach()
6608 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
6611 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
6612 event->rcu_pending = 1; in ring_buffer_attach()
6616 if (event->rcu_pending) { in ring_buffer_attach()
6617 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
6618 event->rcu_pending = 0; in ring_buffer_attach()
6622 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
6636 if (has_aux(event)) in ring_buffer_attach()
6637 perf_event_stop(event, 0); in ring_buffer_attach()
6639 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6648 wake_up_all(&event->waitq); in ring_buffer_attach()
6652 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6656 if (event->parent) in ring_buffer_wakeup()
6657 event = event->parent; in ring_buffer_wakeup()
6660 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6662 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6663 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6668 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6672 if (event->parent) in ring_buffer_get()
6673 event = event->parent; in ring_buffer_get()
6676 rb = rcu_dereference(event->rb); in ring_buffer_get()
6696 typedef void (*mapped_f)(struct perf_event *event, struct mm_struct *mm);
6698 #define get_mapped(event, func) \ argument
6702 pmu = READ_ONCE(event->pmu); \
6710 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6711 mapped_f mapped = get_mapped(event, event_mapped); in perf_mmap_open()
6713 refcount_inc(&event->mmap_count); in perf_mmap_open()
6714 refcount_inc(&event->rb->mmap_count); in perf_mmap_open()
6717 refcount_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6720 mapped(event, vma->vm_mm); in perf_mmap_open()
6723 static void perf_pmu_output_stop(struct perf_event *event);
6735 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6736 mapped_f unmapped = get_mapped(event, event_unmapped); in perf_mmap_close()
6737 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6745 unmapped(event, vma->vm_mm); in perf_mmap_close()
6759 perf_pmu_output_stop(event); in perf_mmap_close()
6775 if (!refcount_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6778 ring_buffer_attach(event, NULL); in perf_mmap_close()
6779 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6792 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6793 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6802 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6813 if (event->rb == rb) in perf_mmap_close()
6814 ring_buffer_attach(event, NULL); in perf_mmap_close()
6816 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6817 put_event(event); in perf_mmap_close()
6979 static int perf_mmap_rb(struct vm_area_struct *vma, struct perf_event *event, in perf_mmap_rb() argument
6995 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap_rb()
6997 if (event->rb) { in perf_mmap_rb()
6998 if (data_page_nr(event->rb) != nr_pages) in perf_mmap_rb()
7001 if (refcount_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap_rb()
7007 refcount_inc(&event->mmap_count); in perf_mmap_rb()
7016 ring_buffer_attach(event, NULL); in perf_mmap_rb()
7026 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap_rb()
7027 event->cpu, rb_flags); in perf_mmap_rb()
7036 ring_buffer_attach(event, rb); in perf_mmap_rb()
7038 perf_event_update_time(event); in perf_mmap_rb()
7039 perf_event_init_userpage(event); in perf_mmap_rb()
7040 perf_event_update_userpage(event); in perf_mmap_rb()
7043 refcount_set(&event->mmap_count, 1); in perf_mmap_rb()
7048 static int perf_mmap_aux(struct vm_area_struct *vma, struct perf_event *event, in perf_mmap_aux() argument
7056 rb = event->rb; in perf_mmap_aux()
7102 WARN_ON(!rb && event->rb); in perf_mmap_aux()
7107 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap_aux()
7108 event->attr.aux_watermark, rb_flags); in perf_mmap_aux()
7119 refcount_inc(&event->mmap_count); in perf_mmap_aux()
7126 struct perf_event *event = file->private_data; in perf_mmap() local
7136 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
7142 ret = security_perf_event_read(event); in perf_mmap()
7155 scoped_guard (mutex, &event->mmap_mutex) { in perf_mmap()
7161 if (event->state <= PERF_EVENT_STATE_REVOKED) in perf_mmap()
7165 ret = perf_mmap_rb(vma, event, nr_pages); in perf_mmap()
7167 ret = perf_mmap_aux(vma, event, nr_pages); in perf_mmap()
7179 mapped = get_mapped(event, event_mapped); in perf_mmap()
7181 mapped(event, vma->vm_mm); in perf_mmap()
7189 ret = map_range(event->rb, vma); in perf_mmap()
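perf_mmap_rb() above expects the data mapping to be one control page plus a power-of-two number of data pages; the AUX area is a separate mapping at aux_offset handled by perf_mmap_aux(). A sketch of the corresponding userspace mmap, with DATA_PAGES as an illustrative choice:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <stddef.h>
#include <unistd.h>

#define DATA_PAGES 8	/* must be a power of two */

static struct perf_event_mmap_page *map_ring(int fd, size_t *len)
{
	long psize = sysconf(_SC_PAGESIZE);
	void *base;

	*len = (size_t)(1 + DATA_PAGES) * psize;	/* control page + data pages */
	base = mmap(NULL, *len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		return NULL;

	/* Records are consumed by reading up to data_head and then storing
	 * the new read position back into data_tail on the control page. */
	return base;
}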
7199 struct perf_event *event = filp->private_data; in perf_fasync() local
7202 if (event->state <= PERF_EVENT_STATE_REVOKED) in perf_fasync()
7206 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
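perf_fasync() wires the event into the usual O_ASYNC machinery, so overflow wakeups can be delivered as SIGIO instead of being collected via poll(). A sketch of arming that from userspace:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int arm_sigio(int fd)
{
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		return -1;
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}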
7232 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
7234 ring_buffer_wakeup(event); in perf_event_wakeup()
7236 if (event->pending_kill) { in perf_event_wakeup()
7237 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
7238 event->pending_kill = 0; in perf_event_wakeup()
7242 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
7256 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
7259 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
7260 event->orig_type, event->attr.sig_data); in perf_sigtrap()
7266 static void __perf_pending_disable(struct perf_event *event) in __perf_pending_disable() argument
7268 int cpu = READ_ONCE(event->oncpu); in __perf_pending_disable()
7281 if (event->pending_disable) { in __perf_pending_disable()
7282 event->pending_disable = 0; in __perf_pending_disable()
7283 perf_event_disable_local(event); in __perf_pending_disable()
7308 irq_work_queue_on(&event->pending_disable_irq, cpu); in __perf_pending_disable()
7313 struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq); in perf_pending_disable() local
7321 __perf_pending_disable(event); in perf_pending_disable()
7328 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
7341 if (event->pending_wakeup) { in perf_pending_irq()
7342 event->pending_wakeup = 0; in perf_pending_irq()
7343 perf_event_wakeup(event); in perf_pending_irq()
7352 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
7361 if (event->pending_work) { in perf_pending_task()
7362 event->pending_work = 0; in perf_pending_task()
7363 perf_sigtrap(event); in perf_pending_task()
7364 local_dec(&event->ctx->nr_no_switch_fast); in perf_pending_task()
7366 put_event(event); in perf_pending_task()
7410 static bool should_sample_guest(struct perf_event *event) in should_sample_guest() argument
7412 return !event->attr.exclude_guest && perf_guest_state(); in should_sample_guest()
7415 unsigned long perf_misc_flags(struct perf_event *event, in perf_misc_flags() argument
7418 if (should_sample_guest(event)) in perf_misc_flags()
7424 unsigned long perf_instruction_pointer(struct perf_event *event, in perf_instruction_pointer() argument
7427 if (should_sample_guest(event)) in perf_instruction_pointer()
7570 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
7574 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
7609 struct perf_event *event, in perf_pmu_snapshot_aux() argument
7633 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7642 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
7646 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
7695 struct perf_event *event, in __perf_event_header__init_id() argument
7698 data->type = event->attr.sample_type; in __perf_event_header__init_id()
7703 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
7704 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
7708 data->time = perf_event_clock(event); in __perf_event_header__init_id()
7711 data->id = primary_event_id(event); in __perf_event_header__init_id()
7714 data->stream_id = event->id; in __perf_event_header__init_id()
7724 struct perf_event *event) in perf_event_header__init_id() argument
7726 if (event->attr.sample_id_all) { in perf_event_header__init_id()
7727 header->size += event->id_header_size; in perf_event_header__init_id()
7728 __perf_event_header__init_id(data, event, event->attr.sample_type); in perf_event_header__init_id()
7756 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
7760 if (event->attr.sample_id_all) in perf_event__output_id_sample()
7765 struct perf_event *event, in perf_output_read_one() argument
7768 u64 read_format = event->attr.read_format; in perf_output_read_one()
7772 values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr)); in perf_output_read_one()
7775 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
7779 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
7782 values[n++] = primary_event_id(event); in perf_output_read_one()
7784 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
7790 struct perf_event *event, in perf_output_read_group() argument
7793 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
7794 u64 read_format = event->attr.read_format; in perf_output_read_group()
7798 bool self = has_inherit_and_sample_read(&event->attr); in perf_output_read_group()
7814 if ((leader != event) && !handle->skip_read) in perf_output_read_group()
7828 if ((sub != event) && !handle->skip_read) in perf_output_read_group()
7858 struct perf_event *event) in perf_output_read() argument
7861 u64 read_format = event->attr.read_format; in perf_output_read()
7873 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7875 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7876 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7878 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
7884 struct perf_event *event) in perf_output_sample() argument
7921 perf_output_read(handle, event); in perf_output_sample()
7972 if (branch_sample_hw_index(event)) in perf_output_sample()
8002 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
8033 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
8057 perf_aux_sample_output(event, handle, data); in perf_output_sample()
8060 if (!event->attr.watermark) { in perf_output_sample()
8061 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
8204 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
8206 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
8207 bool user = !event->attr.exclude_callchain_user && in perf_callchain()
8210 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
8211 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
8231 struct perf_event *event, in perf_prepare_sample() argument
8234 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
8252 data->type = event->attr.sample_type; in perf_prepare_sample()
8256 __perf_event_header__init_id(data, event, filtered_sample_type); in perf_prepare_sample()
8259 data->ip = perf_instruction_pointer(event, regs); in perf_prepare_sample()
8264 perf_sample_save_callchain(data, event, regs); in perf_prepare_sample()
8291 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
8306 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
8307 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
8353 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
8395 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
8406 event->attr.aux_sample_size); in perf_prepare_sample()
8408 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
8418 struct perf_event *event, in perf_prepare_header() argument
8422 header->size = perf_sample_data_size(data, event); in perf_prepare_header()
8423 header->misc = perf_misc_flags(event, regs); in perf_prepare_header()
8436 static void __perf_event_aux_pause(struct perf_event *event, bool pause) in __perf_event_aux_pause() argument
8439 if (!event->hw.aux_paused) { in __perf_event_aux_pause()
8440 event->hw.aux_paused = 1; in __perf_event_aux_pause()
8441 event->pmu->stop(event, PERF_EF_PAUSE); in __perf_event_aux_pause()
8444 if (event->hw.aux_paused) { in __perf_event_aux_pause()
8445 event->hw.aux_paused = 0; in __perf_event_aux_pause()
8446 event->pmu->start(event, PERF_EF_RESUME); in __perf_event_aux_pause()
8451 static void perf_event_aux_pause(struct perf_event *event, bool pause) in perf_event_aux_pause() argument
8455 if (WARN_ON_ONCE(!event)) in perf_event_aux_pause()
8458 rb = ring_buffer_get(event); in perf_event_aux_pause()
8472 __perf_event_aux_pause(event, pause); in perf_event_aux_pause()
8480 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
8495 perf_prepare_sample(data, event, regs); in __perf_event_output()
8496 perf_prepare_header(&header, data, event, regs); in __perf_event_output()
8498 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
8502 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
8512 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
8516 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
8520 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
8524 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
8528 perf_event_output(struct perf_event *event, in perf_event_output() argument
8532 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
8547 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
8556 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
8558 .pid = perf_event_pid(event, task), in perf_event_read_event()
8559 .tid = perf_event_tid(event, task), in perf_event_read_event()
8563 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
8564 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
8569 perf_output_read(&handle, event); in perf_event_read_event()
8570 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
8575 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8582 struct perf_event *event; in perf_iterate_ctx() local
8584 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
8586 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
8588 if (!event_filter_match(event)) in perf_iterate_ctx()
8592 output(event, data); in perf_iterate_ctx()
8599 struct perf_event *event; in perf_iterate_sb_cpu() local
8601 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
8607 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
8610 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
8612 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
8614 output(event, data); in perf_iterate_sb_cpu()
8657 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
8659 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
8664 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
8670 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
8671 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
8679 event->addr_filters_gen++; in perf_event_addr_filters_exec()
8683 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
8708 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
8710 struct perf_event *parent = event->parent; in __perf_event_output_stop()
8714 .event = event, in __perf_event_output_stop()
8717 if (!has_aux(event)) in __perf_event_output_stop()
8721 parent = event; in __perf_event_output_stop()
8739 struct perf_event *event = info; in __perf_pmu_output_stop() local
8742 .rb = event->rb, in __perf_pmu_output_stop()
8755 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
8762 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
8776 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
8806 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
8808 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
8809 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
8810 event->attr.task; in perf_event_task_match()
8813 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
8822 if (!perf_event_task_match(event)) in perf_event_task_output()
8825 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
8827 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
8832 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
8833 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
8836 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
8838 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
8841 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
8842 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
8845 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
8849 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8960 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8962 return event->attr.comm; in perf_event_comm_match()
8965 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8974 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8977 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8978 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8984 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8985 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8991 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
9059 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
9061 return event->attr.namespaces; in perf_event_namespaces_match()
9064 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
9073 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
9077 &sample, event); in perf_event_namespaces_output()
9078 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
9083 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
9085 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
9090 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
9187 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
9189 return event->attr.cgroup; in perf_event_cgroup_match()
9192 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
9200 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
9204 &sample, event); in perf_event_cgroup_output()
9205 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
9213 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
9298 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
9305 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
9306 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
9309 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
9320 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
9323 if (event->attr.mmap2) { in perf_event_mmap_output()
9333 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
9334 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
9339 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
9340 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
9342 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
9344 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
9349 if (event->attr.mmap2) { in perf_event_mmap_output()
9368 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
9529 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
9531 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
9537 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
9546 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
9553 event->addr_filters_gen++; in __perf_addr_filters_adjust()
9557 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
9616 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
9638 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
9639 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
9645 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
9653 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
9671 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
9673 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
9679 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
9698 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
9700 return event->attr.context_switch; in perf_event_switch_match()
9703 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
9710 if (!perf_event_switch_match(event)) in perf_event_switch_output()
9714 if (event->ctx->task) { in perf_event_switch_output()
9721 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
9723 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
9726 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
9728 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
9732 if (event->ctx->task) in perf_event_switch_output()
9737 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
9775 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
9792 .time = perf_event_clock(event), in perf_log_throttle()
9793 .id = primary_event_id(event), in perf_log_throttle()
9794 .stream_id = event->id, in perf_log_throttle()
9800 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
9802 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
9808 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
9828 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
9830 return event->attr.ksymbol; in perf_event_ksymbol_match()
9833 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
9840 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
9844 &sample, event); in perf_event_ksymbol_output()
9845 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
9852 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
9918 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
9920 return event->attr.bpf_event; in perf_event_bpf_match()
9923 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9930 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9934 &sample, event); in perf_event_bpf_output()
9935 ret = perf_output_begin(&handle, &sample, event, in perf_event_bpf_output()
9941 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
10020 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
10022 return event->attr.text_poke; in perf_event_text_poke_match()
10025 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
10033 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
10036 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
10038 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
10053 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
10090 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
10092 WRITE_ONCE(event->attach_state, event->attach_state | PERF_ATTACH_ITRACE); in perf_event_itrace_started()
10095 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
10106 if (event->parent) in perf_log_itrace_start()
10107 event = event->parent; in perf_log_itrace_start()
10109 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
10110 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
10116 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
10117 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
10119 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
10120 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
10126 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
10131 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
10141 if (event->parent) in perf_report_aux_output_id()
10142 event = event->parent; in perf_report_aux_output_id()
10149 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
10150 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
10156 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
10163 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
10165 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
10180 perf_event_throttle_group(event); in __perf_event_account_interrupt()
10184 if (event->attr.freq) { in __perf_event_account_interrupt()
10191 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
10197 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
10199 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
10202 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
10209 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
10216 static int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
10222 .event = event, in bpf_overflow_handler()
10231 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
10233 perf_prepare_sample(data, event, regs); in bpf_overflow_handler()
10243 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10247 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
10251 if (event->prog) in perf_event_set_bpf_handler()
10257 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
10259 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
10260 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
10261 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
10274 event->prog = prog; in perf_event_set_bpf_handler()
10275 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
10279 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10281 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
10286 event->prog = NULL; in perf_event_free_bpf_handler()
10290 static inline int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
10297 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10304 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10313 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
10317 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
10324 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
10327 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
10329 if (event->attr.aux_pause) in __perf_event_overflow()
10330 perf_event_aux_pause(event->aux_event, true); in __perf_event_overflow()
10332 if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && in __perf_event_overflow()
10333 !bpf_overflow_handler(event, data, regs)) in __perf_event_overflow()
10341 event->pending_kill = POLL_IN; in __perf_event_overflow()
10342 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
10344 event->pending_kill = POLL_HUP; in __perf_event_overflow()
10345 perf_event_disable_inatomic(event); in __perf_event_overflow()
10346 event->pmu->stop(event, 0); in __perf_event_overflow()
10349 if (event->attr.sigtrap) { in __perf_event_overflow()
10356 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
10365 if (!event->pending_work && in __perf_event_overflow()
10366 !task_work_add(current, &event->pending_task, notify_mode)) { in __perf_event_overflow()
10367 event->pending_work = pending_id; in __perf_event_overflow()
10368 local_inc(&event->ctx->nr_no_switch_fast); in __perf_event_overflow()
10369 WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount)); in __perf_event_overflow()
10371 event->pending_addr = 0; in __perf_event_overflow()
10373 event->pending_addr = data->addr; in __perf_event_overflow()
10375 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
10388 WARN_ON_ONCE(event->pending_work != pending_id); in __perf_event_overflow()
10392 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
10394 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
10395 event->pending_wakeup = 1; in __perf_event_overflow()
10396 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
10399 if (event->attr.aux_resume) in __perf_event_overflow()
10400 perf_event_aux_pause(event->aux_event, false); in __perf_event_overflow()
10405 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
10409 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
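For events created with attr.sigtrap, __perf_event_overflow() defers perf_sigtrap() via task_work, and the sample address plus attr.sig_data arrive in the SIGTRAP siginfo (si_perf_data/si_perf_type on sufficiently recent kernels and C libraries). A minimal sketch of the receiving side; it only counts traps and assumes the event attr was set up elsewhere with sigtrap = 1:

#include <signal.h>
#include <string.h>

static volatile sig_atomic_t traps;

static void on_sigtrap(int sig, siginfo_t *info, void *uctx)
{
	(void)sig; (void)info; (void)uctx;
	traps++;	/* info->si_perf_data carries attr.sig_data on recent libcs */
}

static void install_sigtrap_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigtrap;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGTRAP, &sa, NULL);
}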
10430 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
10432 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
10453 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
10457 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
10461 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
10467 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
10479 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
10483 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
10485 local64_add(nr, &event->count); in perf_swevent_event()
10490 if (!is_sampling_event(event)) in perf_swevent_event()
10493 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
10495 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10497 data->period = event->hw.last_period; in perf_swevent_event()
10499 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
10500 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10505 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
10508 int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) in perf_exclude_event() argument
10510 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
10514 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
10517 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
10524 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
10530 if (event->attr.type != type) in perf_swevent_match()
10533 if (event->attr.config != event_id) in perf_swevent_match()
10536 if (perf_exclude_event(event, regs)) in perf_swevent_match()
10572 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
10575 u32 event_id = event->attr.config; in find_swevent_head()
10576 u64 type = event->attr.type; in find_swevent_head()
10584 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
10597 struct perf_event *event; in do_perf_sw_event() local
10605 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
10606 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
10607 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
10653 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
10657 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
10660 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
10663 if (is_sampling_event(event)) { in perf_swevent_add()
10665 perf_swevent_set_period(event); in perf_swevent_add()
10670 head = find_swevent_head(swhash, event); in perf_swevent_add()
10674 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
10675 perf_event_update_userpage(event); in perf_swevent_add()
10680 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
10682 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
10685 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
10687 event->hw.state = 0; in perf_swevent_start()
10690 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
10692 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
10784 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
10786 u64 event_id = event->attr.config; in sw_perf_event_destroy()
10788 WARN_ON(event->parent); in sw_perf_event_destroy()
10797 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
10799 u64 event_id = event->attr.config; in perf_swevent_init()
10801 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
10807 if (has_branch_stack(event)) in perf_swevent_init()
10812 event->attr.type = perf_cpu_clock.type; in perf_swevent_init()
10815 event->attr.type = perf_task_clock.type; in perf_swevent_init()
10825 if (!event->parent) { in perf_swevent_init()
10833 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
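perf_swevent_init() serves PERF_TYPE_SOFTWARE events, which need no PMU hardware and are commonly usable without extra privileges. A sketch of opening one such counter for the calling thread; the config value is just an example:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_page_fault_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;
	attr.disabled = 1;

	/* Monitor the calling thread on any CPU. */
	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}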
10854 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
10856 perf_trace_destroy(event); in tp_perf_event_destroy()
10859 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
10863 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
10869 if (has_branch_stack(event)) in perf_tp_event_init()
10872 err = perf_trace_init(event); in perf_tp_event_init()
10876 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
10892 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
10898 if (event->parent) in perf_tp_filter_match()
10899 event = event->parent; in perf_tp_filter_match()
10901 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
10906 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
10910 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
10915 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
10918 if (!perf_tp_filter_match(event, raw)) in perf_tp_event_match()
10936 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
10945 struct perf_event *event) in __perf_tp_event_target_task() argument
10949 if (event->attr.config != entry->type) in __perf_tp_event_target_task()
10952 if (event->attr.sigtrap) in __perf_tp_event_target_task()
10954 if (perf_tp_event_match(event, raw, regs)) { in __perf_tp_event_target_task()
10956 perf_sample_save_raw_data(data, event, raw); in __perf_tp_event_target_task()
10957 perf_swevent_event(event, count, data, regs); in __perf_tp_event_target_task()
10969 struct perf_event *event, *sibling; in perf_tp_event_target_task() local
10971 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) { in perf_tp_event_target_task()
10972 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10973 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10977 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) { in perf_tp_event_target_task()
10978 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10979 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10989 struct perf_event *event; in perf_tp_event() local
11000 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
11001 if (perf_tp_event_match(event, &raw, regs)) { in perf_tp_event()
11011 perf_sample_save_raw_data(&data, event, &raw); in perf_tp_event()
11012 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
11079 static int perf_kprobe_event_init(struct perf_event *event);
11091 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
11096 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
11105 if (has_branch_stack(event)) in perf_kprobe_event_init()
11108 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
11109 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
11113 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
11138 static int perf_uprobe_event_init(struct perf_event *event);
11150 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
11156 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
11165 if (has_branch_stack(event)) in perf_uprobe_event_init()
11168 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
11169 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
11170 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
11174 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
11191 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
11193 ftrace_profile_free_filter(event); in perf_event_free_filter()
11200 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
11202 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
11205 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
11209 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
11215 static int __perf_event_set_bpf_prog(struct perf_event *event, in __perf_event_set_bpf_prog() argument
11221 if (event->state <= PERF_EVENT_STATE_REVOKED) in __perf_event_set_bpf_prog()
11224 if (!perf_event_is_tracing(event)) in __perf_event_set_bpf_prog()
11225 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in __perf_event_set_bpf_prog()
11227 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in __perf_event_set_bpf_prog()
11228 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in __perf_event_set_bpf_prog()
11229 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in __perf_event_set_bpf_prog()
11230 is_syscall_tp = is_syscall_trace_event(event->tp_event); in __perf_event_set_bpf_prog()
11253 int off = trace_event_get_offsets(event->tp_event); in __perf_event_set_bpf_prog()
11259 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in __perf_event_set_bpf_prog()
11262 int perf_event_set_bpf_prog(struct perf_event *event, in perf_event_set_bpf_prog() argument
11269 ctx = perf_event_ctx_lock(event); in perf_event_set_bpf_prog()
11270 ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
11271 perf_event_ctx_unlock(event, ctx); in perf_event_set_bpf_prog()
11276 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
11278 if (!event->prog) in perf_event_free_bpf_prog()
11281 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
11282 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
11285 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
11294 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
11298 static int __perf_event_set_bpf_prog(struct perf_event *event, in __perf_event_set_bpf_prog() argument
11305 int perf_event_set_bpf_prog(struct perf_event *event, in perf_event_set_bpf_prog() argument
11312 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
11334 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
11336 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
11363 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
11369 if (!has_addr_filter(event)) in perf_addr_filters_splice()
11373 if (event->parent) in perf_addr_filters_splice()
11376 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
11378 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
11380 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
11382 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
11387 static void perf_free_addr_filters(struct perf_event *event) in perf_free_addr_filters() argument
11392 if (list_empty(&event->addr_filters.list)) in perf_free_addr_filters()
11395 perf_addr_filters_splice(event, NULL); in perf_free_addr_filters()
11423 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
11425 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
11426 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
11454 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
11455 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
11457 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
11459 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
11460 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
11466 event->addr_filters_gen++; in perf_event_addr_filters_apply()
11476 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
11530 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
11557 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
11643 if (!event->ctx->task) in perf_event_parse_addr_filter()
11658 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
11687 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
11696 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
11698 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
11701 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
11705 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
11710 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
11713 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
11721 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
11726 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
11736 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
11737 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
11751 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
11755 if (has_addr_filter(event)) in perf_event_set_filter()
11756 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
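perf_event_set_filter() receives the string passed to PERF_EVENT_IOC_SET_FILTER: an ftrace-style predicate for tracepoint events, or the address-filter syntax ("filter|start|stop <addr>/<size>[@object]") for PMUs that support it. A sketch of the tracepoint case, with a purely illustrative predicate; tracepoint_fd is assumed to be a PERF_TYPE_TRACEPOINT event:

#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int filter_out_pid0(int tracepoint_fd)
{
	return ioctl(tracepoint_fd, PERF_EVENT_IOC_SET_FILTER,
		     "common_pid != 0");
}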
11771 struct perf_event *event; in perf_swevent_hrtimer() local
11774 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
11776 if (event->state != PERF_EVENT_STATE_ACTIVE || in perf_swevent_hrtimer()
11777 event->hw.state & PERF_HES_STOPPED) in perf_swevent_hrtimer()
11780 event->pmu->read(event); in perf_swevent_hrtimer()
11782 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
11785 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
11786 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
11787 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
11791 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
11797 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
11799 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
11802 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
11818 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
11820 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
11832 if (is_sampling_event(event) && (hwc->interrupts != MAX_INTERRUPTS)) { in perf_swevent_cancel_hrtimer()
11840 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
11842 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
11844 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
11853 if (event->attr.freq) { in perf_swevent_init_hrtimer()
11854 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
11856 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
11857 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
11860 event->attr.freq = 0; in perf_swevent_init_hrtimer()
11868 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
11874 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
11875 local64_add(now - prev, &event->count); in cpu_clock_event_update()
11878 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
11880 event->hw.state = 0; in cpu_clock_event_start()
11881 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
11882 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
11885 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
11887 event->hw.state = PERF_HES_STOPPED; in cpu_clock_event_stop()
11888 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
11890 cpu_clock_event_update(event); in cpu_clock_event_stop()
11893 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
11896 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
11897 perf_event_update_userpage(event); in cpu_clock_event_add()
11902 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
11904 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
11907 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
11909 cpu_clock_event_update(event); in cpu_clock_event_read()
11912 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
11914 if (event->attr.type != perf_cpu_clock.type) in cpu_clock_event_init()
11917 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
11923 if (has_branch_stack(event)) in cpu_clock_event_init()
11926 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
11949 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
11954 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
11956 local64_add(delta, &event->count); in task_clock_event_update()
11959 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
11961 event->hw.state = 0; in task_clock_event_start()
11962 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
11963 perf_swevent_start_hrtimer(event); in task_clock_event_start()
11966 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
11968 event->hw.state = PERF_HES_STOPPED; in task_clock_event_stop()
11969 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
11971 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
11974 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
11977 task_clock_event_start(event, flags); in task_clock_event_add()
11978 perf_event_update_userpage(event); in task_clock_event_add()
11983 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
11985 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
11988 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
11991 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
11992 u64 time = event->ctx->time + delta; in task_clock_event_read()
11994 task_clock_event_update(event, time); in task_clock_event_read()
11997 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
11999 if (event->attr.type != perf_task_clock.type) in task_clock_event_init()
12002 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
12008 if (has_branch_stack(event)) in task_clock_event_init()
12011 perf_swevent_init_hrtimer(event); in task_clock_event_init()
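
Both clock events accumulate time with the same lock-free pattern: atomically swap the new timestamp into hw.prev_count and add the delta to the count. The cpu-clock event feeds it local_clock(), the task-clock event feeds it the context time. A sketch with a hypothetical helper name:

	static void clock_event_accumulate(struct perf_event *event, u64 now)
	{
		u64 prev = local64_xchg(&event->hw.prev_count, now);

		local64_add(now - prev, &event->count);
	}
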
12043 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
12085 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
12444 static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event, in __pmu_detach_event() argument
12450 perf_event_exit_event(event, ctx, true); in __pmu_detach_event()
12457 scoped_guard (mutex, &event->mmap_mutex) { in __pmu_detach_event()
12465 perf_event_free_bpf_prog(event); in __pmu_detach_event()
12466 perf_free_addr_filters(event); in __pmu_detach_event()
12468 if (event->destroy) { in __pmu_detach_event()
12469 event->destroy(event); in __pmu_detach_event()
12470 event->destroy = NULL; in __pmu_detach_event()
12473 if (event->pmu_ctx) { in __pmu_detach_event()
12474 put_pmu_ctx(event->pmu_ctx); in __pmu_detach_event()
12475 event->pmu_ctx = NULL; in __pmu_detach_event()
12478 exclusive_event_destroy(event); in __pmu_detach_event()
12481 event->pmu = NULL; /* force fault instead of UAF */ in __pmu_detach_event()
12484 static void pmu_detach_event(struct pmu *pmu, struct perf_event *event) in pmu_detach_event() argument
12488 ctx = perf_event_ctx_lock(event); in pmu_detach_event()
12489 __pmu_detach_event(pmu, event, ctx); in pmu_detach_event()
12490 perf_event_ctx_unlock(event, ctx); in pmu_detach_event()
12493 list_del(&event->pmu_list); in pmu_detach_event()
12498 struct perf_event *event; in pmu_get_event() local
12501 list_for_each_entry(event, &pmu->events, pmu_list) { in pmu_get_event()
12502 if (atomic_long_inc_not_zero(&event->refcount)) in pmu_get_event()
12503 return event; in pmu_get_event()
12517 struct perf_event *event; in pmu_detach_events() local
12520 event = pmu_get_event(pmu); in pmu_detach_events()
12521 if (!event) in pmu_detach_events()
12524 pmu_detach_event(pmu, event); in pmu_detach_events()
12525 put_event(event); in pmu_detach_events()
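
Reconstructed from the lines above, the detach loop grabs one still-referenced event at a time, unhooks it from its context, and drops the temporary reference until the departing PMU has no users left. A sketch:

	static void pmu_detach_events(struct pmu *pmu)
	{
		struct perf_event *event;

		for (;;) {
			event = pmu_get_event(pmu);	/* takes a reference, or NULL */
			if (!event)
				break;
			pmu_detach_event(pmu, event);	/* unlink from its context */
			put_event(event);		/* drop the temporary reference */
		}
	}
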
12584 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
12586 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
12587 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
12590 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
12604 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
12609 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
12614 event->pmu = pmu; in perf_try_init_event()
12615 ret = pmu->event_init(event); in perf_try_init_event()
12618 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
12624 has_extended_regs(event)) { in perf_try_init_event()
12630 event_has_any_exclude_flag(event)) { in perf_try_init_event()
12635 if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) { in perf_try_init_event()
12640 cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu); in perf_try_init_event()
12651 event->event_caps |= PERF_EV_CAP_READ_SCOPE; in perf_try_init_event()
12657 if (event->destroy) { in perf_try_init_event()
12658 event->destroy(event); in perf_try_init_event()
12659 event->destroy = NULL; in perf_try_init_event()
12663 event->pmu = NULL; in perf_try_init_event()
12668 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
12680 event->orig_type = event->attr.type; in perf_init_event()
12683 if (event->parent && event->parent->pmu) { in perf_init_event()
12684 pmu = event->parent->pmu; in perf_init_event()
12685 ret = perf_try_init_event(pmu, event); in perf_init_event()
12694 type = event->attr.type; in perf_init_event()
12696 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
12701 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
12709 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
12713 ret = perf_try_init_event(pmu, event); in perf_init_event()
12714 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
12715 type = event->attr.type; in perf_init_event()
12726 ret = perf_try_init_event(pmu, event); in perf_init_event()
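
For hardware-type events the upper bits of attr.config may carry an explicit PMU type (an "extended type"), which is why the lookup above shifts the config down and then masks it back to the plain hardware event id. A sketch of the decode, assuming the uapi PERF_PMU_TYPE_SHIFT / PERF_HW_EVENT_MASK definitions:

	u32 pmu_type  = event->attr.config >> PERF_PMU_TYPE_SHIFT;	/* upper half */
	u64 hw_config = event->attr.config &  PERF_HW_EVENT_MASK;	/* lower half */
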
12737 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
12739 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
12742 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
12753 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
12755 if (is_sb_event(event)) in account_pmu_sb_event()
12756 attach_sb_event(event); in account_pmu_sb_event()
12780 static void account_event(struct perf_event *event) in account_event() argument
12784 if (event->parent) in account_event()
12787 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
12789 if (event->attr.mmap || event->attr.mmap_data) in account_event()
12791 if (event->attr.build_id) in account_event()
12793 if (event->attr.comm) in account_event()
12795 if (event->attr.namespaces) in account_event()
12797 if (event->attr.cgroup) in account_event()
12799 if (event->attr.task) in account_event()
12801 if (event->attr.freq) in account_event()
12803 if (event->attr.context_switch) { in account_event()
12807 if (has_branch_stack(event)) in account_event()
12809 if (is_cgroup_event(event)) in account_event()
12811 if (event->attr.ksymbol) in account_event()
12813 if (event->attr.bpf_event) in account_event()
12815 if (event->attr.text_poke) in account_event()
12846 account_pmu_sb_event(event); in account_event()
12875 struct perf_event *event __free(__free_event) = in perf_event_alloc()
12877 if (!event) in perf_event_alloc()
12885 group_leader = event; in perf_event_alloc()
12887 mutex_init(&event->child_mutex); in perf_event_alloc()
12888 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
12890 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
12891 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
12892 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
12893 init_event_group(event); in perf_event_alloc()
12894 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
12895 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
12896 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
12897 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
12898 INIT_LIST_HEAD(&event->pmu_list); in perf_event_alloc()
12901 init_waitqueue_head(&event->waitq); in perf_event_alloc()
12902 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
12903 event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable); in perf_event_alloc()
12904 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
12906 mutex_init(&event->mmap_mutex); in perf_event_alloc()
12907 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
12909 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
12910 event->cpu = cpu; in perf_event_alloc()
12911 event->attr = *attr; in perf_event_alloc()
12912 event->group_leader = group_leader; in perf_event_alloc()
12913 event->pmu = NULL; in perf_event_alloc()
12914 event->oncpu = -1; in perf_event_alloc()
12916 event->parent = parent_event; in perf_event_alloc()
12918 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
12919 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
12921 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
12924 event->event_caps = parent_event->event_caps; in perf_event_alloc()
12927 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
12933 event->hw.target = get_task_struct(task); in perf_event_alloc()
12936 event->clock = &local_clock; in perf_event_alloc()
12938 event->clock = parent_event->clock; in perf_event_alloc()
12948 event->prog = prog; in perf_event_alloc()
12954 event->overflow_handler = overflow_handler; in perf_event_alloc()
12955 event->overflow_handler_context = context; in perf_event_alloc()
12956 } else if (is_write_backward(event)) { in perf_event_alloc()
12957 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
12958 event->overflow_handler_context = NULL; in perf_event_alloc()
12960 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
12961 event->overflow_handler_context = NULL; in perf_event_alloc()
12964 perf_event__state_init(event); in perf_event_alloc()
12968 hwc = &event->hw; in perf_event_alloc()
12970 if (is_event_in_freq_mode(event)) in perf_event_alloc()
12985 if (!has_branch_stack(event)) in perf_event_alloc()
12986 event->attr.branch_sample_type = 0; in perf_event_alloc()
12988 pmu = perf_init_event(event); in perf_event_alloc()
12998 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in perf_event_alloc()
12999 err = attach_perf_ctx_data(event); in perf_event_alloc()
13012 if (event->attr.aux_output && in perf_event_alloc()
13014 event->attr.aux_pause || event->attr.aux_resume)) in perf_event_alloc()
13017 if (event->attr.aux_pause && event->attr.aux_resume) in perf_event_alloc()
13020 if (event->attr.aux_start_paused) { in perf_event_alloc()
13023 event->hw.aux_paused = 1; in perf_event_alloc()
13027 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
13032 err = exclusive_event_init(event); in perf_event_alloc()
13036 if (has_addr_filter(event)) { in perf_event_alloc()
13037 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
13040 if (!event->addr_filter_ranges) in perf_event_alloc()
13047 if (event->parent) { in perf_event_alloc()
13048 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
13051 memcpy(event->addr_filter_ranges, in perf_event_alloc()
13052 event->parent->addr_filter_ranges, in perf_event_alloc()
13058 event->addr_filters_gen = 1; in perf_event_alloc()
13061 if (!event->parent) { in perf_event_alloc()
13062 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
13066 event->attach_state |= PERF_ATTACH_CALLCHAIN; in perf_event_alloc()
13070 err = security_perf_event_alloc(event); in perf_event_alloc()
13075 account_event(event); in perf_event_alloc()
13082 list_add(&event->pmu_list, &pmu->events); in perf_event_alloc()
13084 return_ptr(event); in perf_event_alloc()
13223 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
13229 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
13234 if (event == output_event) in perf_event_set_output()
13240 if (output_event->cpu != event->cpu) in perf_event_set_output()
13246 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) in perf_event_set_output()
13252 if (output_event->clock != event->clock) in perf_event_set_output()
13259 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
13265 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
13266 event->pmu != output_event->pmu) in perf_event_set_output()
13276 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
13279 if (refcount_read(&event->mmap_count)) in perf_event_set_output()
13298 ring_buffer_attach(event, rb); in perf_event_set_output()
13302 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
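
From userspace this redirection is requested with the PERF_EVENT_IOC_SET_OUTPUT ioctl; it fails when the checks above are violated (different cpu, clock, or write direction, or mismatched AUX PMUs). A minimal sketch, assuming fd1 and fd2 are already-open perf event descriptors:

	/* Route fd2's samples into fd1's ring buffer. */
	if (ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1) == -1)
		perror("PERF_EVENT_IOC_SET_OUTPUT");
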
13310 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
13316 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
13321 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
13326 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
13330 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
13334 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
13341 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
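
The timestamp clock is chosen at open time: attr.use_clockid selects this path and attr.clockid picks one of the mappings above, with non-NMI-safe clocks rejected for PMUs that can interrupt from NMI context. A userspace sketch (attribute values assumed):

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_CPU_CLOCK,
		.size		= sizeof(attr),
		.use_clockid	= 1,
		.clockid	= CLOCK_MONOTONIC_RAW,	/* -> ktime_get_raw_fast_ns */
	};
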
13393 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
13506 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
13508 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
13509 err = PTR_ERR(event); in SYSCALL_DEFINE5()
13513 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
13514 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
13524 pmu = event->pmu; in SYSCALL_DEFINE5()
13527 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
13533 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
13554 ctx = find_get_context(task, event); in SYSCALL_DEFINE5()
13574 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in SYSCALL_DEFINE5()
13593 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
13601 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
13616 if (is_software_event(event) && in SYSCALL_DEFINE5()
13630 } else if (!is_software_event(event)) { in SYSCALL_DEFINE5()
13651 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
13656 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
13659 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
13664 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
13669 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
13678 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
13685 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); in SYSCALL_DEFINE5()
13740 perf_event__header_size(event); in SYSCALL_DEFINE5()
13741 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
13743 event->owner = current; in SYSCALL_DEFINE5()
13745 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
13756 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
13769 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
13770 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
13779 put_event(event); in SYSCALL_DEFINE5()
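
The syscall path above also enforces that a new group member matches its leader's cpu and clock. A minimal userspace sketch of building such a group (glibc has no wrapper, so syscall(2) is used; attr_leader and attr_sibling are assumed to be filled in):

	/* Leader plus one sibling, both bound to CPU 0, any pid. */
	int leader  = syscall(__NR_perf_event_open, &attr_leader, -1, 0, -1, 0);
	int sibling = syscall(__NR_perf_event_open, &attr_sibling, -1, 0, leader, 0);
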
13805 struct perf_event *event; in perf_event_create_kernel_counter() local
13821 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
13823 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
13824 err = PTR_ERR(event); in perf_event_create_kernel_counter()
13829 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
13830 pmu = event->pmu; in perf_event_create_kernel_counter()
13833 event->event_caps |= PERF_EV_CAP_SOFTWARE; in perf_event_create_kernel_counter()
13838 ctx = find_get_context(task, event); in perf_event_create_kernel_counter()
13851 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
13856 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
13873 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
13878 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
13882 return event; in perf_event_create_kernel_counter()
13886 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
13892 put_event(event); in perf_event_create_kernel_counter()
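
In-kernel users create counters through this path rather than the syscall, and the owner is set to TASK_TOMBSTONE (see above) to mark the event as kernel-owned. A sketch of a caller, with the argument order assumed from in-tree users and a hypothetical overflow handler:

	struct perf_event *ev;

	ev = perf_event_create_kernel_counter(&attr, cpu, /*task=*/NULL,
					      my_overflow_handler, /*context=*/NULL);
	if (IS_ERR(ev))
		return PTR_ERR(ev);
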
13903 struct perf_event *event, *sibling; in __perf_pmu_remove() local
13905 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) { in __perf_pmu_remove()
13906 perf_remove_from_context(event, 0); in __perf_pmu_remove()
13907 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
13908 list_add(&event->migrate_entry, events); in __perf_pmu_remove()
13910 for_each_sibling_event(sibling, event) { in __perf_pmu_remove()
13920 int cpu, struct perf_event *event) in __perf_pmu_install_event() argument
13923 struct perf_event_context *old_ctx = event->ctx; in __perf_pmu_install_event()
13927 event->cpu = cpu; in __perf_pmu_install_event()
13928 epc = find_get_pmu_context(pmu, ctx, event); in __perf_pmu_install_event()
13929 event->pmu_ctx = epc; in __perf_pmu_install_event()
13931 if (event->state >= PERF_EVENT_STATE_OFF) in __perf_pmu_install_event()
13932 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_pmu_install_event()
13933 perf_install_in_context(ctx, event, cpu); in __perf_pmu_install_event()
13944 struct perf_event *event, *tmp; in __perf_pmu_install() local
13954 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13955 if (event->group_leader == event) in __perf_pmu_install()
13958 list_del(&event->migrate_entry); in __perf_pmu_install()
13959 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13966 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13967 list_del(&event->migrate_entry); in __perf_pmu_install()
13968 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
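
The two loops above re-install migrated events in two passes; judging from the leader check in the first loop, siblings go in before their group leaders so a leader only starts once its whole group is in place on the new CPU. An annotated sketch:

	/* Pass 1: siblings only -- group leaders are skipped for now. */
	list_for_each_entry_safe(event, tmp, events, migrate_entry) {
		if (event->group_leader == event)
			continue;
		list_del(&event->migrate_entry);
		__perf_pmu_install_event(pmu, ctx, cpu, event);
	}

	/* Pass 2: the leaders, which then enable their groups. */
	list_for_each_entry_safe(event, tmp, events, migrate_entry) {
		list_del(&event->migrate_entry);
		__perf_pmu_install_event(pmu, ctx, cpu, event);
	}
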
14032 perf_event_exit_event(struct perf_event *event, in perf_event_exit_event() argument
14035 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
14055 attach_state = READ_ONCE(event->attach_state); in perf_event_exit_event()
14061 perf_remove_from_context(event, detach_flags); in perf_event_exit_event()
14081 put_event(event); in perf_event_exit_event()
14090 perf_event_wakeup(event); in perf_event_exit_event()
14179 struct perf_event *event, *tmp; in perf_event_exit_task() local
14184 list_for_each_entry_safe(event, tmp, &task->perf_event_list, in perf_event_exit_task()
14186 list_del_init(&event->owner_entry); in perf_event_exit_task()
14193 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
14253 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
14255 if (!event) in perf_event_attrs()
14258 return &event->attr; in perf_event_attrs()
14443 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
14451 if (!event->attr.inherit || in inherit_task_group()
14452 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
14454 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
14474 ret = inherit_group(event, parent, parent_ctx, child, child_ctx); in inherit_task_group()
14488 struct perf_event *event; in perf_event_init_context() local
14522 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
14523 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
14538 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
14539 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
14654 struct perf_event *event; in __perf_event_exit_context() local
14658 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
14659 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()