Lines matching full:event in kernel/events/core.c
176 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
178 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
198 * - removing the last event from a task ctx; this is relatively straight
201 * - adding the first event to a task ctx; this is tricky because we cannot
212 struct perf_event *event; member
220 struct perf_event *event = efs->event; in event_function() local
221 struct perf_event_context *ctx = event->ctx; in event_function()
256 efs->func(event, cpuctx, ctx, efs->data); in event_function()
263 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
265 struct perf_event_context *ctx = event->ctx; in event_function_call()
268 .event = event, in event_function_call()
273 if (!event->parent) { in event_function_call()
275 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
276 * stabilize the event->ctx relation. See in event_function_call()
283 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
308 func(event, NULL, ctx, data); in event_function_call()
316 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
318 struct perf_event_context *ctx = event->ctx; in event_function_local()
355 func(event, cpuctx, ctx, data); in event_function_local()
413 * perf event paranoia level:
425 * max perf event sample rate
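
Both knobs referenced by these comments are runtime-tunable sysctls; on typical kernels they are exposed as kernel.perf_event_paranoid and kernel.perf_event_max_sample_rate, i.e.:

	/proc/sys/kernel/perf_event_paranoid        (higher values restrict what unprivileged users may measure)
	/proc/sys/kernel/perf_event_max_sample_rate (upper bound on perf_event_attr.sample_freq, in Hz)
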
575 static u64 perf_event_time(struct perf_event *event);
584 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
586 return event->clock(); in perf_event_clock()
590 * State based event timekeeping...
592 * The basic idea is to use event->state to determine which (if any) time
597 * Event groups make things a little more complicated, but not terribly so. The
612 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
614 struct perf_event *leader = event->group_leader; in __perf_effective_state()
619 return event->state; in __perf_effective_state()
623 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
625 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
626 u64 delta = now - event->tstamp; in __perf_update_times()
628 *enabled = event->total_time_enabled; in __perf_update_times()
632 *running = event->total_time_running; in __perf_update_times()
637 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
639 u64 now = perf_event_time(event); in perf_event_update_time()
641 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
642 &event->total_time_running); in perf_event_update_time()
643 event->tstamp = now; in perf_event_update_time()
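
perf_event_update_time() is what keeps total_time_enabled and total_time_running in step with the event's state; when an event is multiplexed, running lags behind enabled and readers conventionally extrapolate the raw count by enabled/running. A minimal sketch of that scaling, assuming the mul_u64_u64_div_u64() helper from <linux/math64.h>:

#include <linux/math64.h>

/* Hypothetical helper: extrapolate a multiplexed count the way userspace
 * tools usually do, i.e. count * total_time_enabled / total_time_running. */
static u64 perf_scaled_count(u64 count, u64 enabled, u64 running)
{
	if (!running)
		return 0;	/* the event never got onto the PMU */

	return mul_u64_u64_div_u64(count, enabled, running);
}
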
655 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
657 if (event->state == state) in perf_event_set_state()
660 perf_event_update_time(event); in perf_event_set_state()
665 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
666 perf_event_update_sibling_time(event); in perf_event_set_state()
668 WRITE_ONCE(event->state, state); in perf_event_set_state()
716 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
720 /* @event doesn't care about cgroup */ in perf_cgroup_match()
721 if (!event->cgrp) in perf_cgroup_match()
729 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
731 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
735 event->cgrp->css.cgroup); in perf_cgroup_match()
738 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
740 css_put(&event->cgrp->css); in perf_detach_cgroup()
741 event->cgrp = NULL; in perf_detach_cgroup()
744 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
746 return event->cgrp != NULL; in is_cgroup_event()
749 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
753 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
757 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
761 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
799 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
807 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
810 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
853 * cpuctx->cgrp is set when the first cgroup event enabled, in perf_cgroup_switch()
854 * and is cleared when the last cgroup event disabled. in perf_cgroup_switch()
886 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
927 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
946 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
951 event->cgrp = cgrp; in perf_cgroup_connect()
959 perf_detach_cgroup(event); in perf_cgroup_connect()
968 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
972 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
975 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
990 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
994 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
997 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1014 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1019 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1022 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1027 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1036 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1048 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1053 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1059 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1064 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1232 * because the sys_perf_event_open() case will install a new event and break
1243 * quiesce the event, after which we can install it in the new location. This
1244 * means that only external vectors (perf_fops, prctl) can perturb the event
1248 * However; because event->ctx can change while we're waiting to acquire
1267 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1273 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1281 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1291 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1293 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1296 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1322 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1329 if (event->parent) in perf_event_pid_type()
1330 event = event->parent; in perf_event_pid_type()
1332 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1339 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1341 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1344 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1346 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1350 * If we inherit events we want to return the parent event id
1353 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1355 u64 id = event->id; in primary_event_id()
1357 if (event->parent) in primary_event_id()
1358 id = event->parent->id; in primary_event_id()
1478 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1480 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1485 if (is_cgroup_event(event)) in perf_event_time()
1486 return perf_cgroup_event_time(event); in perf_event_time()
1491 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1493 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1498 if (is_cgroup_event(event)) in perf_event_time_now()
1499 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1508 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1510 struct perf_event_context *ctx = event->ctx; in get_event_type()
1519 if (event->group_leader != event) in get_event_type()
1520 event = event->group_leader; in get_event_type()
1522 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1530 * Helper function to initialize event group nodes.
1532 static void init_event_group(struct perf_event *event) in init_event_group() argument
1534 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1535 event->group_index = 0; in init_event_group()
1540 * based on event attrs bits.
1543 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1545 if (event->attr.pinned) in get_event_groups()
1560 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1565 if (event->cgrp) in event_cgroup()
1566 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1573 * Compare function for event groups;
1668 * Insert @event into @groups' tree; using
1669 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1674 struct perf_event *event) in perf_event_groups_insert() argument
1676 event->group_index = ++groups->index; in perf_event_groups_insert()
1678 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1682 * Helper function to insert event into the pinned or flexible groups.
1685 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1689 groups = get_event_groups(event, ctx); in add_event_to_groups()
1690 perf_event_groups_insert(groups, event); in add_event_to_groups()
1698 struct perf_event *event) in perf_event_groups_delete() argument
1700 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1703 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1704 init_event_group(event); in perf_event_groups_delete()
1708 * Helper function to delete event from its groups.
1711 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1715 groups = get_event_groups(event, ctx); in del_event_from_groups()
1716 perf_event_groups_delete(groups, event); in del_event_from_groups()
1720 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1741 perf_event_groups_next(struct perf_event *event, struct pmu *pmu) in perf_event_groups_next() argument
1744 .cpu = event->cpu, in perf_event_groups_next()
1746 .cgroup = event_cgroup(event), in perf_event_groups_next()
1750 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1757 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \ argument
1758 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1759 event; event = perf_event_groups_next(event, pmu))
1764 #define perf_event_groups_for_each(event, groups) \ argument
1765 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1766 typeof(*event), group_node); event; \
1767 event = rb_entry_safe(rb_next(&event->group_node), \
1768 typeof(*event), group_node))
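
All of the insertion/iteration helpers above share the composite sort key spelled out at line 1669: {cpu, pmu, cgroup, group_index}. An illustrative comparison in that spirit (assuming CONFIG_CGROUP_PERF; the in-tree __group_cmp()/__group_less() operate on the same fields but cover a few more cases):

/* Illustrative only: order two group leaders the way the groups rbtree does,
 * by CPU, then PMU, then cgroup, then insertion order. */
static bool group_less_sketch(const struct perf_event *a,
			      const struct perf_event *b)
{
	if (a->cpu != b->cpu)
		return a->cpu < b->cpu;

	if (a->pmu_ctx->pmu != b->pmu_ctx->pmu)	/* ordered by raw pointer value */
		return a->pmu_ctx->pmu < b->pmu_ctx->pmu;

	/* events without a cgroup sort before cgroup events, otherwise by id */
	if (event_cgroup(a) != event_cgroup(b)) {
		if (!event_cgroup(a))
			return true;
		if (!event_cgroup(b))
			return false;
		return cgroup_id(event_cgroup(a)) < cgroup_id(event_cgroup(b));
	}

	/* same {cpu, pmu, cgroup}: fall back to insertion order */
	return a->group_index < b->group_index;
}
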
1771 * Add an event to the lists for its context.
1775 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1779 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1780 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1782 event->tstamp = perf_event_time(event); in list_add_event()
1785 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1789 if (event->group_leader == event) { in list_add_event()
1790 event->group_caps = event->event_caps; in list_add_event()
1791 add_event_to_groups(event, ctx); in list_add_event()
1794 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1796 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1798 if (event->attr.inherit_stat) in list_add_event()
1801 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1802 perf_cgroup_event_enable(event, ctx); in list_add_event()
1805 event->pmu_ctx->nr_events++; in list_add_event()
1809 * Initialize event state based on the perf_event_attr::disabled.
1811 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1813 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1847 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1865 size += event->read_size; in __perf_event_header_size()
1885 event->header_size = size; in __perf_event_header_size()
1892 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1894 event->read_size = in perf_event__header_size()
1895 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
1896 event->group_leader->nr_siblings); in perf_event__header_size()
1897 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1900 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1903 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1924 event->id_header_size = size; in perf_event__id_header_size()
1928 * Check that adding an event to the group does not result in anybody
1929 * overflowing the 64k event limit imposed by the output buffer.
1931 * Specifically, check that the read_size for the event does not exceed 16k,
1933 * depends on per-event read_format, also (re)check the existing events.
1938 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1940 struct perf_event *sibling, *group_leader = event->group_leader; in perf_event_validate_size()
1942 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
1957 if (event == group_leader) in perf_event_validate_size()
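
A rough feel for the limit being enforced here: with PERF_FORMAT_GROUP, each group member contributes one u64 value plus one u64 per extra flag such as PERF_FORMAT_ID, so a leader read with IDs enabled costs about 16 bytes per member on top of a small fixed header. Roughly a thousand group members is therefore already enough to approach the 16 KiB read_size cap, well before the 64 KiB record limit imposed by the output buffer.
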
1969 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
1971 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
1973 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
1979 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
1982 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
1984 if (group_leader == event) in perf_group_attach()
1987 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
1989 group_leader->group_caps &= event->event_caps; in perf_group_attach()
1991 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
2002 * Remove an event from the lists for its context.
2006 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2008 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2014 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2017 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2020 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
2022 if (event->attr.inherit_stat) in list_del_event()
2025 list_del_rcu(&event->event_entry); in list_del_event()
2027 if (event->group_leader == event) in list_del_event()
2028 del_event_from_groups(event, ctx); in list_del_event()
2031 * If event was in error state, then keep it in list_del_event()
2035 * of the event in list_del_event()
2037 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2038 perf_cgroup_event_disable(event, ctx); in list_del_event()
2039 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2043 event->pmu_ctx->nr_events--; in list_del_event()
2047 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2052 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2055 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2058 static void put_event(struct perf_event *event);
2059 static void event_sched_out(struct perf_event *event,
2062 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2064 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2068 * If event uses aux_event tear down the link in perf_put_aux_event()
2070 if (event->aux_event) { in perf_put_aux_event()
2071 iter = event->aux_event; in perf_put_aux_event()
2072 event->aux_event = NULL; in perf_put_aux_event()
2078 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2081 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2082 if (iter->aux_event != event) in perf_put_aux_event()
2086 put_event(event); in perf_put_aux_event()
2094 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2098 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2100 return !!event->attr.aux_output || !!event->attr.aux_sample_size; in perf_need_aux_event()
2103 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2107 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2108 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2118 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2121 if (event->attr.aux_output && in perf_get_aux_event()
2122 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2125 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2132 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2137 event->aux_event = group_leader; in perf_get_aux_event()
2142 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2144 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2145 &event->pmu_ctx->flexible_active; in get_event_list()
2154 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2156 event_sched_out(event, event->ctx); in perf_remove_sibling_event()
2157 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2160 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2162 struct perf_event *leader = event->group_leader; in perf_group_detach()
2164 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2171 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2174 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2176 perf_put_aux_event(event); in perf_group_detach()
2181 if (leader != event) { in perf_group_detach()
2182 list_del_init(&event->sibling_list); in perf_group_detach()
2183 event->group_leader->nr_siblings--; in perf_group_detach()
2184 event->group_leader->group_generation++; in perf_group_detach()
2189 * If this was a group event with sibling events then in perf_group_detach()
2193 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2202 sibling->group_caps = event->group_caps; in perf_group_detach()
2205 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2211 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2223 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2225 struct perf_event *parent_event = event->parent; in perf_child_detach()
2227 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2230 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2237 sync_child_event(event); in perf_child_detach()
2238 list_del_init(&event->child_list); in perf_child_detach()
2241 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2243 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2247 event_filter_match(struct perf_event *event) in event_filter_match() argument
2249 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2250 perf_cgroup_match(event); in event_filter_match()
2254 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) in event_sched_out() argument
2256 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2262 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2265 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2273 list_del_init(&event->active_list); in event_sched_out()
2275 perf_pmu_disable(event->pmu); in event_sched_out()
2277 event->pmu->del(event, 0); in event_sched_out()
2278 event->oncpu = -1; in event_sched_out()
2280 if (event->pending_disable) { in event_sched_out()
2281 event->pending_disable = 0; in event_sched_out()
2282 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2286 if (event->pending_sigtrap) { in event_sched_out()
2289 event->pending_sigtrap = 0; in event_sched_out()
2291 !event->pending_work) { in event_sched_out()
2292 event->pending_work = 1; in event_sched_out()
2294 WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount)); in event_sched_out()
2295 task_work_add(current, &event->pending_task, TWA_RESUME); in event_sched_out()
2298 local_dec(&event->ctx->nr_pending); in event_sched_out()
2301 perf_event_set_state(event, state); in event_sched_out()
2303 if (!is_software_event(event)) in event_sched_out()
2305 if (event->attr.freq && event->attr.sample_freq) in event_sched_out()
2307 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2310 perf_pmu_enable(event->pmu); in event_sched_out()
2316 struct perf_event *event; in group_sched_out() local
2328 for_each_sibling_event(event, group_event) in group_sched_out()
2329 event_sched_out(event, ctx); in group_sched_out()
2337 * Cross CPU call to remove a performance event
2339 * We disable the event on the hardware level first. After that we
2343 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2348 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context()
2361 event->pending_disable = 1; in __perf_remove_from_context()
2362 event_sched_out(event, ctx); in __perf_remove_from_context()
2364 perf_group_detach(event); in __perf_remove_from_context()
2366 perf_child_detach(event); in __perf_remove_from_context()
2367 list_del_event(event, ctx); in __perf_remove_from_context()
2369 event->state = PERF_EVENT_STATE_DEAD; in __perf_remove_from_context()
2396 * Remove the event from a task's (or a CPU's) list of events.
2398 * If event->ctx is a cloned context, callers must make sure that
2399 * every task struct that event->ctx->task could possibly point to
2405 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2407 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2418 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context), in perf_remove_from_context()
2425 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2429 * Cross CPU call to disable a performance event
2431 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2436 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2441 update_cgrp_time_from_event(event); in __perf_event_disable()
2444 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2446 if (event == event->group_leader) in __perf_event_disable()
2447 group_sched_out(event, ctx); in __perf_event_disable()
2449 event_sched_out(event, ctx); in __perf_event_disable()
2451 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2452 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2454 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2458 * Disable an event.
2460 * If event->ctx is a cloned context, callers must make sure that
2461 * every task struct that event->ctx->task could possibly point to
2464 * hold the top-level event's child_mutex, so any descendant that
2467 * When called from perf_pending_irq it's OK because event->ctx
2471 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2473 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2476 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2482 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2485 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2487 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2494 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2498 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2499 _perf_event_disable(event); in perf_event_disable()
2500 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2504 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2506 event->pending_disable = 1; in perf_event_disable_inatomic()
2507 irq_work_queue(&event->pending_irq); in perf_event_disable_inatomic()
2512 static void perf_log_throttle(struct perf_event *event, int enable);
2513 static void perf_log_itrace_start(struct perf_event *event);
2516 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) in event_sched_in() argument
2518 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2522 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2526 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2529 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2531 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2536 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2543 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2544 perf_log_throttle(event, 1); in event_sched_in()
2545 event->hw.interrupts = 0; in event_sched_in()
2548 perf_pmu_disable(event->pmu); in event_sched_in()
2550 perf_log_itrace_start(event); in event_sched_in()
2552 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2553 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2554 event->oncpu = -1; in event_sched_in()
2559 if (!is_software_event(event)) in event_sched_in()
2561 if (event->attr.freq && event->attr.sample_freq) in event_sched_in()
2564 if (event->attr.exclusive) in event_sched_in()
2568 perf_pmu_enable(event->pmu); in event_sched_in()
2576 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2590 for_each_sibling_event(event, group_event) { in group_sched_in()
2591 if (event_sched_in(event, ctx)) { in group_sched_in()
2592 partial_group = event; in group_sched_in()
2604 * The events up to the failed event are scheduled out normally. in group_sched_in()
2606 for_each_sibling_event(event, group_event) { in group_sched_in()
2607 if (event == partial_group) in group_sched_in()
2610 event_sched_out(event, ctx); in group_sched_in()
2620 * Work out whether we can put this event group on the CPU now.
2622 static int group_can_go_on(struct perf_event *event, int can_add_hw) in group_can_go_on() argument
2624 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
2630 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2642 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2651 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2654 list_add_event(event, ctx); in add_event_to_ctx()
2655 perf_group_attach(event); in add_event_to_ctx()
2691 * time an event is added, only do it for the groups of equal priority and
2700 * event to the context or enabling existing event in the context. We can
2754 * Cross CPU call to install and enable a performance event
2761 struct perf_event *event = info; in __perf_install_in_context() local
2762 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2793 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2795 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2800 event->cgrp->css.cgroup); in __perf_install_in_context()
2806 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2807 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2809 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2818 static bool exclusive_event_installable(struct perf_event *event,
2822 * Attach a performance event to a context.
2828 struct perf_event *event, in perf_install_in_context() argument
2835 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2837 if (event->cpu != -1) in perf_install_in_context()
2838 WARN_ON_ONCE(event->cpu != cpu); in perf_install_in_context()
2841 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2844 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2848 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2852 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2854 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
2855 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2861 add_event_to_ctx(event, ctx); in perf_install_in_context()
2867 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2909 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2925 * thus we can safely install the event. in perf_install_in_context()
2931 add_event_to_ctx(event, ctx); in perf_install_in_context()
2936 * Cross CPU call to enable a performance event
2938 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
2943 struct perf_event *leader = event->group_leader; in __perf_event_enable()
2946 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
2947 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
2953 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
2954 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
2959 if (!event_filter_match(event)) { in __perf_event_enable()
2965 * If the event is in a group and isn't the group leader, in __perf_event_enable()
2968 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { in __perf_event_enable()
2977 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
2981 * Enable an event.
2983 * If event->ctx is a cloned context, callers must make sure that
2984 * every task struct that event->ctx->task could possibly point to
2989 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
2991 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
2994 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
2995 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3002 * If the event is in error state, clear that first. in _perf_event_enable()
3004 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3008 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3012 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3013 event->group_leader == event) in _perf_event_enable()
3016 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3020 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3026 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3030 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3031 _perf_event_enable(event); in perf_event_enable()
3032 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
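
perf_event_enable() and perf_event_disable() are exported for in-kernel users, so code that owns a struct perf_event (for example one returned by perf_event_create_kernel_counter()) can bracket just the region it cares about. A minimal sketch, with the event assumed to have been created in the disabled state:

/* Sketch: count only while the section of interest runs. */
static void measure_section(struct perf_event *event, void (*section)(void))
{
	perf_event_enable(event);
	section();			/* the code being measured */
	perf_event_disable(event);
}
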
3037 struct perf_event *event; member
3044 struct perf_event *event = sd->event; in __perf_event_stop() local
3047 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3055 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3057 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3060 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3068 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3072 event->pmu->start(event, 0); in __perf_event_stop()
3077 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3080 .event = event, in perf_event_stop()
3086 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3093 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3094 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3097 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3110 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3111 * (p2) when an event is scheduled in (pmu::add), it calls
3115 * If (p1) happens while the event is active, we restart it to force (p2).
3126 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3128 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3130 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3134 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3135 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3136 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3142 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3147 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3150 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3151 _perf_event_enable(event); in _perf_event_refresh()
3159 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3164 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3165 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3166 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
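
perf_event_refresh() backs the PERF_EVENT_IOC_REFRESH ioctl: userspace arms a sampling event for a given number of overflows, after which it disables itself again (note the inherit check above). A minimal userspace sketch, assuming perf_fd came from perf_event_open() with an overflow consumer already wired up:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Re-arm the counter for three more overflows; it self-disables afterwards. */
static int rearm_three_overflows(int perf_fd)
{
	return ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 3);
}
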
3188 * Copy event-type-independent attributes that may be modified.
3196 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3203 if (event->attr.type != attr->type) in perf_event_modify_attr()
3206 switch (event->attr.type) { in perf_event_modify_attr()
3215 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3217 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3219 * Event-type-independent attributes must be copied before event-type in perf_event_modify_attr()
3223 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3224 err = func(event, attr); in perf_event_modify_attr()
3227 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3234 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3242 struct perf_event *event, *tmp; in __pmu_ctx_sched_out() local
3258 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3261 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3265 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3268 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3379 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3384 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3388 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3391 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3394 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3395 event->pmu->read(event); in __perf_event_sync_stat()
3397 perf_event_update_time(event); in __perf_event_sync_stat()
3400 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3404 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3407 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3408 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3413 perf_event_update_userpage(event); in __perf_event_sync_stat()
3420 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3427 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3433 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3436 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3438 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3614 * This callback is relevant even to per-cpu events; for example multi event
3660 * We stop each event and update the event value in event->count.
3663 * sets the disabled bit in the control field of event _before_
3664 * accessing the event control register. If a NMI hits, then it will
3665 * not restart the event.
3681 * cgroup event are system-wide mode only in __perf_event_task_sched_out()
3707 static void __heap_add(struct min_heap *heap, struct perf_event *event) in __heap_add() argument
3711 if (event) { in __heap_add()
3712 itrs[heap->nr] = event; in __heap_add()
3739 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3803 * Because the userpage is strictly per-event (there is no concept of context,
3809 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3811 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3814 perf_event_update_time(event); in event_update_userpage()
3815 perf_event_update_userpage(event); in event_update_userpage()
3822 struct perf_event *event; in group_update_userpage() local
3827 for_each_sibling_event(event, group_event) in group_update_userpage()
3828 event_update_userpage(event); in group_update_userpage()
3831 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3833 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3836 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3839 if (!event_filter_match(event)) in merge_sched_in()
3842 if (group_can_go_on(event, *can_add_hw)) { in merge_sched_in()
3843 if (!group_sched_in(event, ctx)) in merge_sched_in()
3844 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3847 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3849 if (event->attr.pinned) { in merge_sched_in()
3850 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3851 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3855 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
3856 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context); in merge_sched_in()
3858 group_update_userpage(event); in merge_sched_in()
4002 * We restore the event value and then enable it.
4005 * sets the enabled bit in the control field of event _before_
4006 * accessing the event control register. If a NMI hits, then it will
4007 * keep the event running.
4021 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
4023 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4097 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4099 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4103 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4117 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4122 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
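
perf_calculate_period() turns an observed rate into the next sampling period: given count events over nsec nanoseconds and a target of sample_freq samples per second, the period comes out to roughly count * 10^9 / (nsec * sample_freq). For example, 2,000,000 events in 10 ms at sample_freq = 1000 Hz is a rate of 200 M events/s, so the period settles around 200,000 events per sample. perf_adjust_period() then low-pass filters hwc->sample_period toward that value; the stop()/start() pair shown above runs only when the caller requested it and the leftover period has drifted well away from the new value.
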
4134 struct perf_event *event; in perf_adjust_freq_unthr_context() local
4149 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
4150 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_context()
4154 if (!event_filter_match(event)) in perf_adjust_freq_unthr_context()
4157 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
4159 hwc = &event->hw; in perf_adjust_freq_unthr_context()
4163 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_context()
4164 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
4167 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_context()
4171 * stop the event and update event->count in perf_adjust_freq_unthr_context()
4173 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
4175 now = local64_read(&event->count); in perf_adjust_freq_unthr_context()
4180 * restart the event in perf_adjust_freq_unthr_context()
4182 * we have stopped the event so tell that in perf_adjust_freq_unthr_context()
4187 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_context()
4189 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
4191 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
4198 * Move @event to the tail of the @ctx's eligible events.
4200 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4209 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4210 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4213 /* pick an event from the flexible_groups to rotate */
4217 struct perf_event *event; in ctx_event_to_rotate() local
4224 /* pick the first active flexible event */ in ctx_event_to_rotate()
4225 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4227 if (event) in ctx_event_to_rotate()
4230 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4238 event = __node_2_pe(node); in ctx_event_to_rotate()
4245 event = __node_2_pe(node); in ctx_event_to_rotate()
4252 event = __node_2_pe(node); in ctx_event_to_rotate()
4261 return event; in ctx_event_to_rotate()
4274 * events, thus the event count values are stable. in perf_rotate_context()
4344 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4347 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4350 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4351 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4354 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4368 struct perf_event *event; in perf_event_enable_on_exec() local
4383 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4384 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4385 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4389 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
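
event_enable_on_exec()/perf_event_enable_on_exec() are the kernel side of perf_event_attr.enable_on_exec: a tool opens the event disabled on a child task and lets execve() switch it on, so the counts exclude the tool's own setup. A minimal userspace sketch of that attribute combination (error handling omitted):

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Count task-clock on a child that will exec the real workload; counting
 * only starts once the child calls execve(). */
static int open_counter_for_child(pid_t child)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;		/* start off ...		*/
	attr.enable_on_exec = 1;	/* ... until the child execs	*/

	return syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
}
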
4406 static void perf_remove_from_owner(struct perf_event *event);
4407 static void perf_event_exit_event(struct perf_event *event,
4417 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4426 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4427 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4430 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4431 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4435 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4451 struct perf_event *event; member
4456 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4463 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4477 * Cross CPU call to read the hardware event
4482 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4483 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4485 struct pmu *pmu = event->pmu; in __perf_event_read()
4491 * event->count would have been updated to a recent sample in __perf_event_read()
4492 * when the event was scheduled out. in __perf_event_read()
4500 update_cgrp_time_from_event(event); in __perf_event_read()
4503 perf_event_update_time(event); in __perf_event_read()
4505 perf_event_update_sibling_time(event); in __perf_event_read()
4507 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4511 pmu->read(event); in __perf_event_read()
4518 pmu->read(event); in __perf_event_read()
4520 for_each_sibling_event(sub, event) { in __perf_event_read()
4523 * Use sibling's PMU rather than @event's since in __perf_event_read()
4536 static inline u64 perf_event_count(struct perf_event *event) in perf_event_count() argument
4538 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4541 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4549 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4550 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4554 * NMI-safe method to read a local event, that is an event that
4561 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4576 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4579 if (event->attr.inherit) { in perf_event_read_local()
4584 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4585 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4586 event->hw.target != current) { in perf_event_read_local()
4592 * Get the event CPU numbers, and adjust them to local if the event is in perf_event_read_local()
4593 * a per-package event that can be read locally in perf_event_read_local()
4595 event_oncpu = __perf_event_read_cpu(event, event->oncpu); in perf_event_read_local()
4596 event_cpu = __perf_event_read_cpu(event, event->cpu); in perf_event_read_local()
4598 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4599 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4605 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4606 if (event->attr.pinned && event_oncpu != smp_processor_id()) { in perf_event_read_local()
4612 * If the event is currently on this CPU, its either a per-task event, in perf_event_read_local()
4617 event->pmu->read(event); in perf_event_read_local()
4619 *value = local64_read(&event->count); in perf_event_read_local()
4623 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
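
perf_event_read_local() is the NMI-safe, IRQ-off read path (it is what the bpf_perf_event_read*() helpers use); it only succeeds for events local to the calling task or CPU. A sketch of a call site, assuming the current four-argument prototype (value, enabled and running out-pointers) whose declaration at line 4561 is truncated here:

/* Sketch: read a task/CPU-local event without IPIs or sleeping; the
 * function disables interrupts itself, so this is callable from most
 * contexts. It returns non-zero if the event is not local/compatible. */
static void sample_local_event(struct perf_event *event)
{
	u64 value, enabled, running;

	if (perf_event_read_local(event, &value, &enabled, &running))
		return;

	pr_debug("count=%llu enabled=%llu running=%llu\n",
		 value, enabled, running);
}
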
4635 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4637 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4641 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4642 * value in the event structure: in perf_event_read()
4656 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4661 .event = event, in perf_event_read()
4667 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4673 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4674 * scheduled out and that will have updated the event count. in perf_event_read()
4676 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4684 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4688 state = event->state; in perf_event_read()
4700 update_cgrp_time_from_event(event); in perf_event_read()
4703 perf_event_update_time(event); in perf_event_read()
4705 perf_event_update_sibling_time(event); in perf_event_read()
4776 find_get_context(struct task_struct *task, struct perf_event *event) in find_get_context() argument
4784 /* Must be root to operate on a CPU event: */ in find_get_context()
4785 err = perf_allow_cpu(&event->attr); in find_get_context()
4789 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in find_get_context()
4850 struct perf_event *event) in find_get_pmu_context() argument
4863 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
4883 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_pmu_context()
4976 static void perf_event_free_filter(struct perf_event *event);
4980 struct perf_event *event = container_of(head, typeof(*event), rcu_head); in free_event_rcu() local
4982 if (event->ns) in free_event_rcu()
4983 put_pid_ns(event->ns); in free_event_rcu()
4984 perf_event_free_filter(event); in free_event_rcu()
4985 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
4988 static void ring_buffer_attach(struct perf_event *event,
4991 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
4993 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
4996 list_del_rcu(&event->sb_list); in detach_sb_event()
5000 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
5002 struct perf_event_attr *attr = &event->attr; in is_sb_event()
5004 if (event->parent) in is_sb_event()
5007 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
5019 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
5021 if (is_sb_event(event)) in unaccount_pmu_sb_event()
5022 detach_sb_event(event); in unaccount_pmu_sb_event()
5047 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
5051 if (event->parent) in unaccount_event()
5054 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
5056 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
5058 if (event->attr.build_id) in unaccount_event()
5060 if (event->attr.comm) in unaccount_event()
5062 if (event->attr.namespaces) in unaccount_event()
5064 if (event->attr.cgroup) in unaccount_event()
5066 if (event->attr.task) in unaccount_event()
5068 if (event->attr.freq) in unaccount_event()
5070 if (event->attr.context_switch) { in unaccount_event()
5074 if (is_cgroup_event(event)) in unaccount_event()
5076 if (has_branch_stack(event)) in unaccount_event()
5078 if (event->attr.ksymbol) in unaccount_event()
5080 if (event->attr.bpf_event) in unaccount_event()
5082 if (event->attr.text_poke) in unaccount_event()
5090 unaccount_pmu_sb_event(event); in unaccount_event()
5103 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5113 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
5115 struct pmu *pmu = event->pmu; in exclusive_event_init()
5128 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
5130 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
5133 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
5144 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
5146 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
5152 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
5168 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
5172 struct pmu *pmu = event->pmu; in exclusive_event_installable()
5180 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
5187 static void perf_addr_filters_splice(struct perf_event *event,
5190 static void _free_event(struct perf_event *event) in _free_event() argument
5192 irq_work_sync(&event->pending_irq); in _free_event()
5194 unaccount_event(event); in _free_event()
5196 security_perf_event_free(event); in _free_event()
5198 if (event->rb) { in _free_event()
5200 * Can happen when we close an event with re-directed output. in _free_event()
5205 mutex_lock(&event->mmap_mutex); in _free_event()
5206 ring_buffer_attach(event, NULL); in _free_event()
5207 mutex_unlock(&event->mmap_mutex); in _free_event()
5210 if (is_cgroup_event(event)) in _free_event()
5211 perf_detach_cgroup(event); in _free_event()
5213 if (!event->parent) { in _free_event()
5214 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
5218 perf_event_free_bpf_prog(event); in _free_event()
5219 perf_addr_filters_splice(event, NULL); in _free_event()
5220 kfree(event->addr_filter_ranges); in _free_event()
5222 if (event->destroy) in _free_event()
5223 event->destroy(event); in _free_event()
5229 if (event->hw.target) in _free_event()
5230 put_task_struct(event->hw.target); in _free_event()
5232 if (event->pmu_ctx) in _free_event()
5233 put_pmu_ctx(event->pmu_ctx); in _free_event()
5239 if (event->ctx) in _free_event()
5240 put_ctx(event->ctx); in _free_event()
5242 exclusive_event_destroy(event); in _free_event()
5243 module_put(event->pmu->module); in _free_event()
5245 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
5250 * where the event isn't exposed yet and inherited events.
5252 static void free_event(struct perf_event *event) in free_event() argument
5254 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5255 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
5256 atomic_long_read(&event->refcount), event)) { in free_event()
5261 _free_event(event); in free_event()
5265 * Remove user event from the owner task.
5267 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5275 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5278 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5301 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5304 * event. in perf_remove_from_owner()
5306 if (event->owner) { in perf_remove_from_owner()
5307 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5308 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5315 static void put_event(struct perf_event *event) in put_event() argument
5317 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5320 _free_event(event); in put_event()
5324 * Kill an event dead; while event:refcount will preserve the event
5328 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5330 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5335 * If we got here through err_alloc: free_event(event); we will not in perf_event_release_kernel()
5339 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5344 if (!is_kernel_event(event)) in perf_event_release_kernel()
5345 perf_remove_from_owner(event); in perf_event_release_kernel()
5347 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5351 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5354 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5361 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5363 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5366 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5367 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5378 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5389 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5391 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5398 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5407 put_event(event); in perf_event_release_kernel()
5410 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5415 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5424 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5432 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
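perf_event_release_kernel() is the teardown half of the in-kernel counter API. A minimal sketch of how a kernel user might pair it with perf_event_create_kernel_counter() and perf_event_read_value() (helper names are illustrative; error handling trimmed):

/* Hedged sketch: per-CPU instruction counter created and torn down from kernel code. */
#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/printk.h>

static struct perf_event *ev;

static int start_counter(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_INSTRUCTIONS,
		.size	= sizeof(attr),
	};

	/* cpu = 0, task = NULL (per-CPU counter), no overflow handler/context */
	ev = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
	return IS_ERR(ev) ? PTR_ERR(ev) : 0;
}

static void stop_counter(void)
{
	u64 enabled, running;
	u64 count = perf_event_read_value(ev, &enabled, &running);

	pr_info("counted %llu instructions\n", count);
	perf_event_release_kernel(ev);	/* kills the event; drops the last reference */
}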
5446 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5454 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5456 (void)perf_event_read(event, false); in __perf_event_read_value()
5457 total += perf_event_count(event); in __perf_event_read_value()
5459 *enabled += event->total_time_enabled + in __perf_event_read_value()
5460 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5461 *running += event->total_time_running + in __perf_event_read_value()
5462 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5464 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5470 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5475 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5480 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5481 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5482 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
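__perf_event_read_value() folds the children's counts and times into the totals; the same totals reach userspace through read() on the event fd, laid out according to attr.read_format. A hedged userspace sketch for a single (non-group) event opened with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING:

/* Hedged sketch: read and scale one possibly multiplexed counter. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct read_one {
	uint64_t value;
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

static void report(int perf_fd)
{
	struct read_one r;

	if (read(perf_fd, &r, sizeof(r)) != sizeof(r))
		return;
	/* Scale for multiplexing: extrapolate to the full enabled time. */
	if (r.time_running)
		printf("count=%llu (scaled %.0f)\n", (unsigned long long)r.value,
		       r.value * (double)r.time_enabled / r.time_running);
}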
5567 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5570 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5577 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5597 ret = event->read_size; in perf_read_group()
5598 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5609 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5616 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5622 values[n++] = primary_event_id(event); in perf_read_one()
5624 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
5632 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5636 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5639 mutex_lock(&event->child_mutex); in is_event_hup()
5640 no_children = list_empty(&event->child_list); in is_event_hup()
5641 mutex_unlock(&event->child_mutex); in is_event_hup()
5646 * Read the performance event - simple non-blocking version for now in __perf_read()
5649 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5651 u64 read_format = event->attr.read_format; in __perf_read()
5655 * Return end-of-file for a read on an event that is in in __perf_read()
5659 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5662 if (count < event->read_size) in __perf_read()
5665 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5667 ret = perf_read_group(event, read_format, buf); in __perf_read()
5669 ret = perf_read_one(event, read_format, buf); in __perf_read()
5677 struct perf_event *event = file->private_data; in perf_read() local
5681 ret = security_perf_event_read(event); in perf_read()
5685 ctx = perf_event_ctx_lock(event); in perf_read()
5686 ret = __perf_read(event, buf, count); in perf_read()
5687 perf_event_ctx_unlock(event, ctx); in perf_read()
5694 struct perf_event *event = file->private_data; in perf_poll() local
5698 poll_wait(file, &event->waitq, wait); in perf_poll()
5700 if (is_event_hup(event)) in perf_poll()
5704 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5707 mutex_lock(&event->mmap_mutex); in perf_poll()
5708 rb = event->rb; in perf_poll()
5711 mutex_unlock(&event->mmap_mutex); in perf_poll()
5715 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5717 (void)perf_event_read(event, false); in _perf_event_reset()
5718 local64_set(&event->count, 0); in _perf_event_reset()
5719 perf_event_update_userpage(event); in _perf_event_reset()
5722 /* Assume it's not an event with inherit set. */
5723 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5728 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5729 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5730 _perf_event_disable(event); in perf_event_pause()
5731 count = local64_read(&event->count); in perf_event_pause()
5733 local64_set(&event->count, 0); in perf_event_pause()
5734 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5741 * Holding the top-level event's child_mutex means that any
5742 * descendant process that has inherited this event will block
5746 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5751 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5753 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5754 func(event); in perf_event_for_each_child()
5755 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5757 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5760 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5763 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5768 event = event->group_leader; in perf_event_for_each()
5770 perf_event_for_each_child(event, func); in perf_event_for_each()
5771 for_each_sibling_event(sibling, event) in perf_event_for_each()
5775 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5783 if (event->attr.freq) { in __perf_event_period()
5784 event->attr.sample_freq = value; in __perf_event_period()
5786 event->attr.sample_period = value; in __perf_event_period()
5787 event->hw.sample_period = value; in __perf_event_period()
5790 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5792 perf_pmu_disable(event->pmu); in __perf_event_period()
5795 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5797 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5798 event->hw.interrupts = 0; in __perf_event_period()
5799 perf_log_throttle(event, 1); in __perf_event_period()
5801 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5804 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5807 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5808 perf_pmu_enable(event->pmu); in __perf_event_period()
5812 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5814 return event->pmu->check_period(event, value); in perf_event_check_period()
5817 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5819 if (!is_sampling_event(event)) in _perf_event_period()
5825 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in _perf_event_period()
5828 if (perf_event_check_period(event, value)) in _perf_event_period()
5831 if (!event->attr.freq && (value & (1ULL << 63))) in _perf_event_period()
5834 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5839 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5844 ctx = perf_event_ctx_lock(event); in perf_event_period()
5845 ret = _perf_event_period(event, value); in perf_event_period()
5846 perf_event_ctx_unlock(event, ctx); in perf_event_period()
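_perf_event_period() is reached from userspace through the PERF_EVENT_IOC_PERIOD ioctl; the kernel rejects non-sampling events, over-limit frequencies and negative periods before pushing the new value via event_function_call(). A minimal sketch:

/* Hedged sketch: change the sample period (or frequency, if attr.freq) of a live event. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int set_period(int perf_fd, unsigned long long period)
{
	/* The ioctl takes a pointer to the new 64-bit value. */
	return ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
}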
5868 static int perf_event_set_output(struct perf_event *event,
5870 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
5874 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
5891 return _perf_event_refresh(event, arg); in _perf_ioctl()
5900 return _perf_event_period(event, value); in _perf_ioctl()
5904 u64 id = primary_event_id(event); in _perf_ioctl()
5921 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
5924 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
5930 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
5941 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
5954 rb = rcu_dereference(event->rb); in _perf_ioctl()
5965 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
5975 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
5982 perf_event_for_each(event, func); in _perf_ioctl()
5984 perf_event_for_each_child(event, func); in _perf_ioctl()
5991 struct perf_event *event = file->private_data; in perf_ioctl() local
5996 ret = security_perf_event_write(event); in perf_ioctl()
6000 ctx = perf_event_ctx_lock(event); in perf_ioctl()
6001 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
6002 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
6032 struct perf_event *event; in perf_event_task_enable() local
6035 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
6036 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
6037 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
6038 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
6048 struct perf_event *event; in perf_event_task_disable() local
6051 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
6052 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
6053 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
6054 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
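perf_event_task_enable()/perf_event_task_disable() walk current->perf_event_list and toggle every event the task owns; they are what the PR_TASK_PERF_EVENTS_ENABLE/PR_TASK_PERF_EVENTS_DISABLE prctl() options end up calling. A minimal sketch:

/* Hedged sketch: pause/resume all counters owned by the calling task. */
#include <sys/prctl.h>

static int toggle_own_counters(int enable)
{
	return prctl(enable ? PR_TASK_PERF_EVENTS_ENABLE
			    : PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
}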
6061 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
6063 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
6066 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
6069 return event->pmu->event_idx(event); in perf_event_index()
6072 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
6078 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
6095 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
6104 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
6111 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
6117 * based on snapshot values taken when the event in perf_event_update_userpage()
6124 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
6134 userpg->index = perf_event_index(event); in perf_event_update_userpage()
6135 userpg->offset = perf_event_count(event); in perf_event_update_userpage()
6137 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
6140 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
6143 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
6145 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
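perf_event_update_userpage() increments userpg->lock before and after every update, so a self-monitoring reader has to sample the page under that seqcount. A hedged userspace sketch against the perf_event_mmap_page layout (barrier choice is an assumption; the UAPI header documents the canonical loop):

/* Hedged sketch: consistently snapshot index/offset from the mmap'ed user page. */
#include <linux/perf_event.h>
#include <stdint.h>

static void snapshot(volatile struct perf_event_mmap_page *pg,
		     uint32_t *idx, int64_t *off)
{
	uint32_t seq;

	do {
		seq = pg->lock;
		__sync_synchronize();	/* read barrier */
		*idx = pg->index;	/* 0 means: fall back to read() */
		*off = pg->offset;
		__sync_synchronize();	/* read barrier */
	} while (pg->lock != seq || (seq & 1));	/* odd: update in progress */
}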
6157 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
6168 rb = rcu_dereference(event->rb); in perf_mmap_fault()
6190 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
6196 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
6198 if (event->rb) { in ring_buffer_attach()
6201 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
6203 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
6205 old_rb = event->rb; in ring_buffer_attach()
6207 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
6210 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
6211 event->rcu_pending = 1; in ring_buffer_attach()
6215 if (event->rcu_pending) { in ring_buffer_attach()
6216 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
6217 event->rcu_pending = 0; in ring_buffer_attach()
6221 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
6226 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
6227 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
6235 if (has_aux(event)) in ring_buffer_attach()
6236 perf_event_stop(event, 0); in ring_buffer_attach()
6238 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6247 wake_up_all(&event->waitq); in ring_buffer_attach()
6251 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6255 if (event->parent) in ring_buffer_wakeup()
6256 event = event->parent; in ring_buffer_wakeup()
6259 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6261 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6262 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6267 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6271 if (event->parent) in ring_buffer_get()
6272 event = event->parent; in ring_buffer_get()
6275 rb = rcu_dereference(event->rb); in ring_buffer_get()
6297 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6299 atomic_inc(&event->mmap_count); in perf_mmap_open()
6300 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6303 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6305 if (event->pmu->event_mapped) in perf_mmap_open()
6306 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6309 static void perf_pmu_output_stop(struct perf_event *event);
6313 * event, or through other events by use of perf_event_set_output().
6321 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6322 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6328 if (event->pmu->event_unmapped) in perf_mmap_close()
6329 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6333 * event->mmap_count, so it is ok to use event->mmap_mutex to in perf_mmap_close()
6337 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { in perf_mmap_close()
6344 perf_pmu_output_stop(event); in perf_mmap_close()
6354 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6360 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6363 ring_buffer_attach(event, NULL); in perf_mmap_close()
6364 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6377 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6378 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6380 * This event is en-route to free_event() which will in perf_mmap_close()
6387 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6393 * If we find a different rb; ignore this event, a next in perf_mmap_close()
6398 if (event->rb == rb) in perf_mmap_close()
6399 ring_buffer_attach(event, NULL); in perf_mmap_close()
6401 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6402 put_event(event); in perf_mmap_close()
6439 struct perf_event *event = file->private_data; in perf_mmap() local
6454 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6460 ret = security_perf_event_read(event); in perf_mmap()
6476 if (!event->rb) in perf_mmap()
6481 mutex_lock(&event->mmap_mutex); in perf_mmap()
6484 rb = event->rb; in perf_mmap()
6536 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6538 mutex_lock(&event->mmap_mutex); in perf_mmap()
6539 if (event->rb) { in perf_mmap()
6540 if (data_page_nr(event->rb) != nr_pages) { in perf_mmap()
6545 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6548 * event and try again. in perf_mmap()
6550 ring_buffer_attach(event, NULL); in perf_mmap()
6551 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6597 WARN_ON(!rb && event->rb); in perf_mmap()
6604 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6605 event->cpu, flags); in perf_mmap()
6616 ring_buffer_attach(event, rb); in perf_mmap()
6618 perf_event_update_time(event); in perf_mmap()
6619 perf_event_init_userpage(event); in perf_mmap()
6620 perf_event_update_userpage(event); in perf_mmap()
6622 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6623 event->attr.aux_watermark, flags); in perf_mmap()
6633 atomic_inc(&event->mmap_count); in perf_mmap()
6638 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6647 if (event->pmu->event_mapped) in perf_mmap()
6648 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
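perf_mmap() only accepts the metadata page plus a power-of-two number of data pages at file offset 0 (AUX space is mapped separately at the advertised aux_offset). A minimal userspace sketch:

/* Hedged sketch: map the user page plus 2^n data pages of the ring buffer. */
#include <sys/mman.h>
#include <unistd.h>

static void *map_ring(int perf_fd, unsigned int data_pages /* power of two */)
{
	size_t len = (size_t)(1 + data_pages) * sysconf(_SC_PAGESIZE);

	/* PROT_WRITE is needed so the reader can update data_tail. */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, perf_fd, 0);
}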
6656 struct perf_event *event = filp->private_data; in perf_fasync() local
6660 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6681 * Perf event wakeup
6687 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) in perf_event_fasync() argument
6690 if (event->parent) in perf_event_fasync()
6691 event = event->parent; in perf_event_fasync()
6692 return &event->fasync; in perf_event_fasync()
6695 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6697 ring_buffer_wakeup(event); in perf_event_wakeup()
6699 if (event->pending_kill) { in perf_event_wakeup()
6700 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6701 event->pending_kill = 0; in perf_event_wakeup()
6705 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
6712 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6722 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
6723 event->orig_type, event->attr.sig_data); in perf_sigtrap()
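perf_sigtrap() delivers the synchronous signal requested with attr.sigtrap, passing attr.sig_data back in the siginfo. A hedged userspace sketch; the si_perf_* field names come from the kernel's asm-generic/siginfo.h and may not be exposed by older libcs, and the sigtrap/remove_on_exec pairing is stated as an assumption:

/* Hedged sketch: request SIGTRAP on samples and pick up sig_data in the handler. */
#include <linux/perf_event.h>
#include <signal.h>
#include <string.h>

static volatile sig_atomic_t got_trap;
static volatile unsigned long long trap_data;

static void trap_handler(int sig, siginfo_t *info, void *uc)
{
	(void)sig; (void)uc;
	trap_data = info->si_perf_data;	/* carries attr.sig_data */
	got_trap = 1;
}

static void setup_sigtrap_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size	     = sizeof(*attr);
	attr->sigtrap	     = 1;
	attr->remove_on_exec = 1;		/* assumed requirement alongside sigtrap */
	attr->sig_data	     = 0xfeedULL;	/* arbitrary cookie echoed in si_perf_data */
	attr->sample_period  = 100000;
}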
6727 * Deliver the pending work in-event-context or follow the context.
6729 static void __perf_pending_irq(struct perf_event *event) in __perf_pending_irq() argument
6731 int cpu = READ_ONCE(event->oncpu); in __perf_pending_irq()
6734 * If the event isn't running, we're done. event_sched_out() will have in __perf_pending_irq()
6741 * Yay, we hit home and are in the context of the event. in __perf_pending_irq()
6744 if (event->pending_sigtrap) { in __perf_pending_irq()
6745 event->pending_sigtrap = 0; in __perf_pending_irq()
6746 perf_sigtrap(event); in __perf_pending_irq()
6747 local_dec(&event->ctx->nr_pending); in __perf_pending_irq()
6749 if (event->pending_disable) { in __perf_pending_irq()
6750 event->pending_disable = 0; in __perf_pending_irq()
6751 perf_event_disable_local(event); in __perf_pending_irq()
6774 * But the event runs on CPU-B and wants disabling there. in __perf_pending_irq()
6776 irq_work_queue_on(&event->pending_irq, cpu); in __perf_pending_irq()
6781 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
6791 * The wakeup isn't bound to the context of the event -- it can happen in perf_pending_irq()
6792 * irrespective of where the event is. in perf_pending_irq()
6794 if (event->pending_wakeup) { in perf_pending_irq()
6795 event->pending_wakeup = 0; in perf_pending_irq()
6796 perf_event_wakeup(event); in perf_pending_irq()
6799 __perf_pending_irq(event); in perf_pending_irq()
6807 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
6817 if (event->pending_work) { in perf_pending_task()
6818 event->pending_work = 0; in perf_pending_task()
6819 perf_sigtrap(event); in perf_pending_task()
6820 local_dec(&event->ctx->nr_pending); in perf_pending_task()
6827 put_event(event); in perf_pending_task()
7001 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
7005 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
7040 struct perf_event *event, in perf_pmu_snapshot_aux() argument
7050 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
7052 * doesn't change the event state. in perf_pmu_snapshot_aux()
7064 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7073 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
7077 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
7119 * when event->attr.sample_id_all is set.
7126 struct perf_event *event, in __perf_event_header__init_id() argument
7129 data->type = event->attr.sample_type; in __perf_event_header__init_id()
7134 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
7135 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
7139 data->time = perf_event_clock(event); in __perf_event_header__init_id()
7142 data->id = primary_event_id(event); in __perf_event_header__init_id()
7145 data->stream_id = event->id; in __perf_event_header__init_id()
7155 struct perf_event *event) in perf_event_header__init_id() argument
7157 if (event->attr.sample_id_all) { in perf_event_header__init_id()
7158 header->size += event->id_header_size; in perf_event_header__init_id()
7159 __perf_event_header__init_id(data, event, event->attr.sample_type); in perf_event_header__init_id()
7187 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
7191 if (event->attr.sample_id_all) in perf_event__output_id_sample()
7196 struct perf_event *event, in perf_output_read_one() argument
7199 u64 read_format = event->attr.read_format; in perf_output_read_one()
7203 values[n++] = perf_event_count(event); in perf_output_read_one()
7206 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
7210 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
7213 values[n++] = primary_event_id(event); in perf_output_read_one()
7215 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
7221 struct perf_event *event, in perf_output_read_group() argument
7224 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
7225 u64 read_format = event->attr.read_format; in perf_output_read_group()
7244 if ((leader != event) && in perf_output_read_group()
7259 if ((sub != event) && in perf_output_read_group()
7286 struct perf_event *event) in perf_output_read() argument
7289 u64 read_format = event->attr.read_format; in perf_output_read()
7293 * based on snapshot values taken when the event in perf_output_read()
7301 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7303 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7304 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7306 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
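perf_output_read_group() emits the PERF_FORMAT_GROUP layout: the number of members, the optional times, then one tuple per member with the leader first. A hedged userspace mirror of that layout:

/* Hedged sketch: buffer layout for PERF_FORMAT_GROUP | PERF_FORMAT_ID. */
#include <stdint.h>

struct group_read {
	uint64_t nr;		/* number of events in the group */
	/* uint64_t time_enabled;  present with PERF_FORMAT_TOTAL_TIME_ENABLED */
	/* uint64_t time_running;  present with PERF_FORMAT_TOTAL_TIME_RUNNING */
	struct {
		uint64_t value;
		uint64_t id;	/* present with PERF_FORMAT_ID */
		/* uint64_t lost;  present with PERF_FORMAT_LOST */
	} cnt[];		/* nr entries, leader first */
};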
7312 struct perf_event *event) in perf_output_sample() argument
7346 perf_output_read(handle, event); in perf_output_sample()
7397 if (branch_sample_hw_index(event)) in perf_output_sample()
7427 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7458 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7482 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7485 if (!event->attr.watermark) { in perf_output_sample()
7486 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7629 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7631 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7632 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7634 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7635 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7652 struct perf_event *event, in perf_prepare_sample() argument
7655 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7673 data->type = event->attr.sample_type; in perf_prepare_sample()
7677 __perf_event_header__init_id(data, event, filtered_sample_type); in perf_prepare_sample()
7685 perf_sample_save_callchain(data, event, regs); in perf_prepare_sample()
7712 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7727 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7728 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7774 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7816 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7827 event->attr.aux_sample_size); in perf_prepare_sample()
7829 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
7839 struct perf_event *event, in perf_prepare_header() argument
7843 header->size = perf_sample_data_size(data, event); in perf_prepare_header()
7858 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
7873 perf_prepare_sample(data, event, regs); in __perf_event_output()
7874 perf_prepare_header(&header, data, event, regs); in __perf_event_output()
7876 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
7880 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
7890 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
7894 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
7898 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
7902 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
7906 perf_event_output(struct perf_event *event, in perf_event_output() argument
7910 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
7925 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
7934 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
7936 .pid = perf_event_pid(event, task), in perf_event_read_event()
7937 .tid = perf_event_tid(event, task), in perf_event_read_event()
7941 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
7942 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
7947 perf_output_read(&handle, event); in perf_event_read_event()
7948 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
7953 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
7960 struct perf_event *event; in perf_iterate_ctx() local
7962 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
7964 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
7966 if (!event_filter_match(event)) in perf_iterate_ctx()
7970 output(event, data); in perf_iterate_ctx()
7977 struct perf_event *event; in perf_iterate_sb_cpu() local
7979 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
7982 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
7985 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
7988 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
7990 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
7992 output(event, data); in perf_iterate_sb_cpu()
8000 * your event, otherwise it might not get delivered.
8035 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
8037 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
8042 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
8048 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
8049 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
8057 event->addr_filters_gen++; in perf_event_addr_filters_exec()
8061 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
8085 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
8087 struct perf_event *parent = event->parent; in __perf_event_output_stop()
8091 .event = event, in __perf_event_output_stop()
8094 if (!has_aux(event)) in __perf_event_output_stop()
8098 parent = event; in __perf_event_output_stop()
8104 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
8106 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
8107 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
8116 struct perf_event *event = info; in __perf_pmu_output_stop() local
8119 .rb = event->rb, in __perf_pmu_output_stop()
8132 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
8139 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
8143 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
8153 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
8183 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
8185 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
8186 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
8187 event->attr.task; in perf_event_task_match()
8190 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
8199 if (!perf_event_task_match(event)) in perf_event_task_output()
8202 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
8204 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
8209 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
8210 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
8213 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
8215 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
8218 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
8219 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
8222 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
8226 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8289 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8291 return event->attr.comm; in perf_event_comm_match()
8294 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8303 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8306 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8307 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8313 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8314 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8320 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8388 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8390 return event->attr.namespaces; in perf_event_namespaces_match()
8393 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8402 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8406 &sample, event); in perf_event_namespaces_output()
8407 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8412 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8414 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8419 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
8516 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
8518 return event->attr.cgroup; in perf_event_cgroup_match()
8521 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
8529 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
8533 &sample, event); in perf_event_cgroup_output()
8534 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
8542 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8627 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8634 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8635 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8638 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8649 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8652 if (event->attr.mmap2) { in perf_event_mmap_output()
8662 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8663 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8668 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8669 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8671 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
8673 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
8678 if (event->attr.mmap2) { in perf_event_mmap_output()
8697 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
8858 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
8860 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
8866 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
8875 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
8882 event->addr_filters_gen++; in __perf_addr_filters_adjust()
8886 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
8945 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
8967 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
8968 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
8974 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
8982 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
9000 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
9002 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
9008 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
9027 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
9029 return event->attr.context_switch; in perf_event_switch_match()
9032 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
9039 if (!perf_event_switch_match(event)) in perf_event_switch_output()
9043 if (event->ctx->task) { in perf_event_switch_output()
9050 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
9052 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
9055 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
9057 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
9061 if (event->ctx->task) in perf_event_switch_output()
9066 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
9104 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
9121 .time = perf_event_clock(event), in perf_log_throttle()
9122 .id = primary_event_id(event), in perf_log_throttle()
9123 .stream_id = event->id, in perf_log_throttle()
9129 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
9131 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
9137 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
9157 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
9159 return event->attr.ksymbol; in perf_event_ksymbol_match()
9162 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
9169 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
9173 &sample, event); in perf_event_ksymbol_output()
9174 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
9181 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
9247 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
9249 return event->attr.bpf_event; in perf_event_bpf_match()
9252 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9259 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9263 &sample, event); in perf_event_bpf_output()
9264 ret = perf_output_begin(&handle, &sample, event, in perf_event_bpf_output()
9270 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
9355 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
9357 return event->attr.text_poke; in perf_event_text_poke_match()
9360 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9368 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9371 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9373 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9388 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9425 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9427 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9430 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9441 if (event->parent) in perf_log_itrace_start()
9442 event = event->parent; in perf_log_itrace_start()
9444 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9445 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9451 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9452 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9454 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9455 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9461 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9466 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
9476 if (event->parent) in perf_report_aux_output_id()
9477 event = event->parent; in perf_report_aux_output_id()
9484 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
9485 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
9491 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
9498 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
9500 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
9515 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
9520 if (event->attr.freq) { in __perf_event_account_interrupt()
9527 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
9533 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
9535 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
9538 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
9545 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
9552 * Generic event overflow handling, sampling.
9555 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
9559 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
9566 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
9569 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
9576 event->pending_kill = POLL_IN; in __perf_event_overflow()
9577 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
9579 event->pending_kill = POLL_HUP; in __perf_event_overflow()
9580 perf_event_disable_inatomic(event); in __perf_event_overflow()
9583 if (event->attr.sigtrap) { in __perf_event_overflow()
9587 * it is the first event, on the other hand, we should also not in __perf_event_overflow()
9590 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
9595 if (!event->pending_sigtrap) { in __perf_event_overflow()
9596 event->pending_sigtrap = pending_id; in __perf_event_overflow()
9597 local_inc(&event->ctx->nr_pending); in __perf_event_overflow()
9598 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
9611 WARN_ON_ONCE(event->pending_sigtrap != pending_id); in __perf_event_overflow()
9614 event->pending_addr = 0; in __perf_event_overflow()
9616 event->pending_addr = data->addr; in __perf_event_overflow()
9617 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9620 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
9622 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
9623 event->pending_wakeup = 1; in __perf_event_overflow()
9624 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9630 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
9634 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
9638 * Generic software event infrastructure
9653 * We directly increment event->count and keep a second value in
9654 * event->hw.period_left to count intervals. This period event
9659 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
9661 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
9682 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
9686 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
9690 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
9696 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
9708 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
9712 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
9714 local64_add(nr, &event->count); in perf_swevent_event()
9719 if (!is_sampling_event(event)) in perf_swevent_event()
9722 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
9724 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9726 data->period = event->hw.last_period; in perf_swevent_event()
9728 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
9729 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9734 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
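perf_swevent_event() is the common counting path for software events. A minimal userspace sketch that opens one for the calling thread:

/* Hedged sketch: software page-fault counter on the current thread. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_pgfault_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.size   = sizeof(attr);
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;

	/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}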
9737 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
9740 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
9744 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
9747 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
9754 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
9760 if (event->attr.type != type) in perf_swevent_match()
9763 if (event->attr.config != event_id) in perf_swevent_match()
9766 if (perf_exclude_event(event, regs)) in perf_swevent_match()
9800 /* For the event head insertion and removal in the hlist */
9802 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
9805 u32 event_id = event->attr.config; in find_swevent_head()
9806 u64 type = event->attr.type; in find_swevent_head()
9809 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
9814 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
9827 struct perf_event *event; in do_perf_sw_event() local
9835 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
9836 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
9837 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
9887 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
9891 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
9894 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
9897 if (is_sampling_event(event)) { in perf_swevent_add()
9899 perf_swevent_set_period(event); in perf_swevent_add()
9904 head = find_swevent_head(swhash, event); in perf_swevent_add()
9908 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
9909 perf_event_update_userpage(event); in perf_swevent_add()
9914 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
9916 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
9919 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
9921 event->hw.state = 0; in perf_swevent_start()
9924 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
9926 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
10018 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
10020 u64 event_id = event->attr.config; in sw_perf_event_destroy()
10022 WARN_ON(event->parent); in sw_perf_event_destroy()
10031 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
10033 u64 event_id = event->attr.config; in perf_swevent_init()
10035 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
10041 if (has_branch_stack(event)) in perf_swevent_init()
10046 event->attr.type = perf_cpu_clock.type; in perf_swevent_init()
10049 event->attr.type = perf_task_clock.type; in perf_swevent_init()
10059 if (!event->parent) { in perf_swevent_init()
10067 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
10088 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
10090 perf_trace_destroy(event); in tp_perf_event_destroy()
10093 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
10097 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
10103 if (has_branch_stack(event)) in perf_tp_event_init()
10106 err = perf_trace_init(event); in perf_tp_event_init()
10110 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
10126 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
10132 if (event->parent) in perf_tp_filter_match()
10133 event = event->parent; in perf_tp_filter_match()
10135 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
10140 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
10144 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
10149 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
10152 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
10170 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
10178 struct perf_event *event) in __perf_tp_event_target_task() argument
10182 if (event->attr.config != entry->type) in __perf_tp_event_target_task()
10185 if (event->attr.sigtrap) in __perf_tp_event_target_task()
10187 if (perf_tp_event_match(event, data, regs)) in __perf_tp_event_target_task()
10188 perf_swevent_event(event, count, data, regs); in __perf_tp_event_target_task()
10198 struct perf_event *event, *sibling; in perf_tp_event_target_task() local
10200 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) { in perf_tp_event_target_task()
10201 __perf_tp_event_target_task(count, record, regs, data, event); in perf_tp_event_target_task()
10202 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10206 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) { in perf_tp_event_target_task()
10207 __perf_tp_event_target_task(count, record, regs, data, event); in perf_tp_event_target_task()
10208 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10218 struct perf_event *event; in perf_tp_event() local
10232 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
10233 if (perf_tp_event_match(event, &data, regs)) { in perf_tp_event()
10234 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
10238 * some members in data are event-specific and in perf_tp_event()
10241 * the problem that next event skips preparing data in perf_tp_event()
10251 * deliver this event there too. in perf_tp_event()
10312 static int perf_kprobe_event_init(struct perf_event *event);
10324 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
10329 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
10338 if (has_branch_stack(event)) in perf_kprobe_event_init()
10341 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
10342 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
10346 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
10371 static int perf_uprobe_event_init(struct perf_event *event);
10383 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
10389 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
10398 if (has_branch_stack(event)) in perf_uprobe_event_init()
10401 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
10402 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
10403 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
10407 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
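perf_kprobe_event_init()/perf_uprobe_event_init() back the dynamic 'kprobe' and 'uprobe' PMUs; the probe target rides in the config1/config2 union members of the attr. A hedged sketch (the PMU type must be read from /sys/bus/event_source/devices/kprobe/type; the target function is illustrative):

/* Hedged sketch: open a kprobe event on a named kernel function. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_kprobe(int kprobe_pmu_type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type	  = kprobe_pmu_type;	/* from sysfs, not a fixed constant */
	attr.size	  = sizeof(attr);
	attr.kprobe_func  = (unsigned long)"do_sys_openat2";	/* config1 union member */
	attr.probe_offset = 0;					/* config2 union member */

	/* pid = -1, cpu = 0: all tasks on CPU 0; needs sufficient privilege */
	return syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
}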
10424 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10426 ftrace_profile_free_filter(event); in perf_event_free_filter()
10430 static void bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
10436 .event = event, in bpf_overflow_handler()
10445 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
10447 perf_prepare_sample(data, event, regs); in bpf_overflow_handler()
10456 event->orig_overflow_handler(event, data, regs); in bpf_overflow_handler()
10459 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10463 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
10467 if (event->prog) in perf_event_set_bpf_handler()
10473 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
10475 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
10476 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
10477 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
10490 event->prog = prog; in perf_event_set_bpf_handler()
10491 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
10492 event->orig_overflow_handler = READ_ONCE(event->overflow_handler); in perf_event_set_bpf_handler()
10493 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); in perf_event_set_bpf_handler()
10497 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10499 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
10504 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); in perf_event_free_bpf_handler()
10505 event->prog = NULL; in perf_event_free_bpf_handler()
10509 static int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10515 static void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10521 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10524 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
10526 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10529 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10533 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10539 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10544 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
10545 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10547 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in perf_event_set_bpf_prog()
10548 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in perf_event_set_bpf_prog()
10549 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
10550 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
10569 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
10575 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
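perf_event_set_bpf_prog() is where the PERF_EVENT_IOC_SET_BPF ioctl lands once the fd has been resolved to a program. A minimal sketch of the userspace side:

/* Hedged sketch: attach an already-loaded BPF program to a perf event fd. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int attach_bpf(int perf_fd, int bpf_prog_fd)
{
	int err = ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd);

	if (!err)
		err = ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
	return err;
}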
10578 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10580 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
10581 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
10584 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
10593 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10597 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10603 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10625 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
10627 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
10654 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
10660 if (!has_addr_filter(event)) in perf_addr_filters_splice()
10664 if (event->parent) in perf_addr_filters_splice()
10667 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10669 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
10671 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
10673 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10700 * Update event's address range filters based on the
10703 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10705 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10706 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10713 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
10734 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10735 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
10737 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
10739 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
10740 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
10746 event->addr_filters_gen++; in perf_event_addr_filters_apply()
10756 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
10810 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
10837 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
10896 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
10923 if (!event->ctx->task) in perf_event_parse_addr_filter()
10938 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
10967 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
10976 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
10978 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
10981 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
10985 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
10990 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
10993 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
11001 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
11006 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
11016 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
11017 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
11027 * This can result in event getting moved to a different ctx, in perf_event_set_filter()
11031 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
11035 if (has_addr_filter(event)) in perf_event_set_filter()
11036 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
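perf_event_set_filter() is the backend of the PERF_EVENT_IOC_SET_FILTER ioctl: tracing events take an ftrace-style filter expression, while events with address filters (instruction-trace PMUs such as Intel PT) take the offset/size@object syntax parsed by perf_event_parse_addr_filter(). A hedged sketch follows; set_filters() and both filter strings are illustrative, not taken from this file.

/* Sketch: set filters on two existing perf event fds. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int set_filters(int tracepoint_fd, int itrace_fd)
{
	/* ftrace-style filter on a tracepoint event */
	if (ioctl(tracepoint_fd, PERF_EVENT_IOC_SET_FILTER,
		  "common_pid != 0") < 0)
		return -1;

	/* address range filter on an instruction-trace event;
	 * the offset, size and path here are made-up examples */
	if (ioctl(itrace_fd, PERF_EVENT_IOC_SET_FILTER,
		  "filter 0x1000/0x2000@/usr/bin/myprog") < 0)
		return -1;

	return 0;
}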
11051 struct perf_event *event; in perf_swevent_hrtimer() local
11054 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
11056 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
11059 event->pmu->read(event); in perf_swevent_hrtimer()
11061 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
11064 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
11065 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
11066 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
11070 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
11076 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
11078 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
11081 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
11097 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
11099 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
11101 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
11109 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
11111 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
11113 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
11123 if (event->attr.freq) { in perf_swevent_init_hrtimer()
11124 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
11126 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
11127 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
11130 event->attr.freq = 0; in perf_swevent_init_hrtimer()
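A worked example of the freq path above: with attr.freq = 1 and attr.sample_freq = 1000, perf_swevent_init_hrtimer() computes sample_period = NSEC_PER_SEC / 1000 = 1,000,000 ns, copies it into hwc->sample_period, and clears attr.freq, so the hrtimer fires at a fixed 1 ms period instead of having its period re-tuned dynamically.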
11135 * Software event: cpu wall time clock
11138 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
11144 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
11145 local64_add(now - prev, &event->count); in cpu_clock_event_update()
11148 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
11150 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
11151 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
11154 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
11156 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
11157 cpu_clock_event_update(event); in cpu_clock_event_stop()
11160 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
11163 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
11164 perf_event_update_userpage(event); in cpu_clock_event_add()
11169 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
11171 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
11174 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
11176 cpu_clock_event_update(event); in cpu_clock_event_read()
11179 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
11181 if (event->attr.type != perf_cpu_clock.type) in cpu_clock_event_init()
11184 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
11190 if (has_branch_stack(event)) in cpu_clock_event_init()
11193 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
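The cpu-clock software PMU above simply reports local_clock() deltas in nanoseconds. A minimal userspace sketch (error handling trimmed; the workload loop is only there to give the counter something to measure) that opens such an event on the current task and reads the elapsed time:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t ns;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.disabled = 1;

	/* pid == 0, cpu == -1: measure the calling task on whatever CPU it runs */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile long i = 0; i < 50000000; i++)
		;	/* some work to measure */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &ns, sizeof(ns)) == sizeof(ns))
		printf("cpu-clock: %llu ns\n", (unsigned long long)ns);
	close(fd);
	return 0;
}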
11213 * Software event: task time clock
11216 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
11221 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
11223 local64_add(delta, &event->count); in task_clock_event_update()
11226 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
11228 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
11229 perf_swevent_start_hrtimer(event); in task_clock_event_start()
11232 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
11234 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
11235 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
11238 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
11241 task_clock_event_start(event, flags); in task_clock_event_add()
11242 perf_event_update_userpage(event); in task_clock_event_add()
11247 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
11249 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
11252 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
11255 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
11256 u64 time = event->ctx->time + delta; in task_clock_event_read()
11258 task_clock_event_update(event, time); in task_clock_event_read()
11261 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
11263 if (event->attr.type != perf_task_clock.type) in task_clock_event_init()
11266 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
11272 if (has_branch_stack(event)) in task_clock_event_init()
11275 perf_swevent_init_hrtimer(event); in task_clock_event_init()
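task-clock differs from cpu-clock only in its time base: it advances with ctx->time, which moves only while the owning context is scheduled in, so the counter reads as nanoseconds of task runtime rather than wall time on a CPU. The cpu-clock sketch above works unchanged apart from the config (hedged fragment; use_task_clock() is an illustrative helper name):

#include <linux/perf_event.h>

static void use_task_clock(struct perf_event_attr *attr)
{
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_TASK_CLOCK;	/* ns the task actually ran */
}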
11307 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
11349 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
11641 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11643 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11644 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11647 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11658 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
11661 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11666 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11671 event->pmu = pmu; in perf_try_init_event()
11672 ret = pmu->event_init(event); in perf_try_init_event()
11675 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
11679 has_extended_regs(event)) in perf_try_init_event()
11683 event_has_any_exclude_flag(event)) in perf_try_init_event()
11686 if (ret && event->destroy) in perf_try_init_event()
11687 event->destroy(event); in perf_try_init_event()
11696 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
11706  * pmus overwrite event->attr.type to forward the event to another pmu. in perf_init_event()
11708 event->orig_type = event->attr.type; in perf_init_event()
11711 if (event->parent && event->parent->pmu) { in perf_init_event()
11712 pmu = event->parent->pmu; in perf_init_event()
11713 ret = perf_try_init_event(pmu, event); in perf_init_event()
11722 type = event->attr.type; in perf_init_event()
11724 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
11729 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
11738 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
11742 ret = perf_try_init_event(pmu, event); in perf_init_event()
11743 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
11744 type = event->attr.type; in perf_init_event()
11755 ret = perf_try_init_event(pmu, event); in perf_init_event()
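perf_init_event() first retries the parent's pmu, then decodes "extended" hardware types: for PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE the upper bits of attr.config (above PERF_PMU_TYPE_SHIFT) may name a specific PMU and are masked off with PERF_HW_EVENT_MASK before that PMU's event_init() sees the config. A hedged userspace sketch of building such a config; the sysfs path and the cpu_core_type value are assumptions for illustration.

/* Sketch: aim a generic hardware event at one particular PMU, e.g. the
 * "core" PMU on a hybrid CPU.  cpu_core_type would normally be read from
 * /sys/bus/event_source/devices/<pmu>/type (the value passed in is a
 * placeholder). */
#include <linux/perf_event.h>
#include <string.h>

static void build_extended_attr(struct perf_event_attr *attr,
				unsigned long long cpu_core_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	/* PMU type id in the high bits, generic event id in the low bits */
	attr->config = (cpu_core_type << 32) | PERF_COUNT_HW_INSTRUCTIONS;
}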
11772 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
11774 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
11777 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
11788 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
11790 if (is_sb_event(event)) in account_pmu_sb_event()
11791 attach_sb_event(event); in account_pmu_sb_event()
11815 static void account_event(struct perf_event *event) in account_event() argument
11819 if (event->parent) in account_event()
11822 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
11824 if (event->attr.mmap || event->attr.mmap_data) in account_event()
11826 if (event->attr.build_id) in account_event()
11828 if (event->attr.comm) in account_event()
11830 if (event->attr.namespaces) in account_event()
11832 if (event->attr.cgroup) in account_event()
11834 if (event->attr.task) in account_event()
11836 if (event->attr.freq) in account_event()
11838 if (event->attr.context_switch) { in account_event()
11842 if (has_branch_stack(event)) in account_event()
11844 if (is_cgroup_event(event)) in account_event()
11846 if (event->attr.ksymbol) in account_event()
11848 if (event->attr.bpf_event) in account_event()
11850 if (event->attr.text_poke) in account_event()
11881 account_pmu_sb_event(event); in account_event()
11885 * Allocate and initialize an event structure
11896 struct perf_event *event; in perf_event_alloc() local
11911 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, in perf_event_alloc()
11913 if (!event) in perf_event_alloc()
11921 group_leader = event; in perf_event_alloc()
11923 mutex_init(&event->child_mutex); in perf_event_alloc()
11924 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
11926 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
11927 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
11928 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
11929 init_event_group(event); in perf_event_alloc()
11930 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
11931 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
11932 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
11933 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
11936 init_waitqueue_head(&event->waitq); in perf_event_alloc()
11937 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
11938 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
11940 mutex_init(&event->mmap_mutex); in perf_event_alloc()
11941 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
11943 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
11944 event->cpu = cpu; in perf_event_alloc()
11945 event->attr = *attr; in perf_event_alloc()
11946 event->group_leader = group_leader; in perf_event_alloc()
11947 event->pmu = NULL; in perf_event_alloc()
11948 event->oncpu = -1; in perf_event_alloc()
11950 event->parent = parent_event; in perf_event_alloc()
11952 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
11953 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
11955 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
11958 event->event_caps = parent_event->event_caps; in perf_event_alloc()
11961 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
11967 event->hw.target = get_task_struct(task); in perf_event_alloc()
11970 event->clock = &local_clock; in perf_event_alloc()
11972 event->clock = parent_event->clock; in perf_event_alloc()
11982 event->prog = prog; in perf_event_alloc()
11983 event->orig_overflow_handler = in perf_event_alloc()
11990 event->overflow_handler = overflow_handler; in perf_event_alloc()
11991 event->overflow_handler_context = context; in perf_event_alloc()
11992 	} else if (is_write_backward(event)) { in perf_event_alloc()
11993 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
11994 event->overflow_handler_context = NULL; in perf_event_alloc()
11996 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
11997 event->overflow_handler_context = NULL; in perf_event_alloc()
12000 perf_event__state_init(event); in perf_event_alloc()
12004 hwc = &event->hw; in perf_event_alloc()
12019 if (!has_branch_stack(event)) in perf_event_alloc()
12020 event->attr.branch_sample_type = 0; in perf_event_alloc()
12022 pmu = perf_init_event(event); in perf_event_alloc()
12038 if (event->attr.aux_output && in perf_event_alloc()
12045 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
12050 err = exclusive_event_init(event); in perf_event_alloc()
12054 if (has_addr_filter(event)) { in perf_event_alloc()
12055 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
12058 if (!event->addr_filter_ranges) { in perf_event_alloc()
12067 if (event->parent) { in perf_event_alloc()
12068 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
12071 memcpy(event->addr_filter_ranges, in perf_event_alloc()
12072 event->parent->addr_filter_ranges, in perf_event_alloc()
12078 event->addr_filters_gen = 1; in perf_event_alloc()
12081 if (!event->parent) { in perf_event_alloc()
12082 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
12089 err = security_perf_event_alloc(event); in perf_event_alloc()
12094 account_event(event); in perf_event_alloc()
12096 return event; in perf_event_alloc()
12099 if (!event->parent) { in perf_event_alloc()
12100 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
12104 kfree(event->addr_filter_ranges); in perf_event_alloc()
12107 exclusive_event_destroy(event); in perf_event_alloc()
12110 if (is_cgroup_event(event)) in perf_event_alloc()
12111 perf_detach_cgroup(event); in perf_event_alloc()
12112 if (event->destroy) in perf_event_alloc()
12113 event->destroy(event); in perf_event_alloc()
12116 if (event->hw.target) in perf_event_alloc()
12117 put_task_struct(event->hw.target); in perf_event_alloc()
12118 call_rcu(&event->rcu_head, free_event_rcu); in perf_event_alloc()
12259 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
12265 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
12270 if (event == output_event) in perf_event_set_output()
12276 if (output_event->cpu != event->cpu) in perf_event_set_output()
12282 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) in perf_event_set_output()
12288 if (output_event->clock != event->clock) in perf_event_set_output()
12295 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
12301 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
12302 event->pmu != output_event->pmu) in perf_event_set_output()
12308 * restarts after every removal, it is guaranteed this new event is in perf_event_set_output()
12312 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
12315 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
12331 ring_buffer_attach(event, rb); in perf_event_set_output()
12335 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
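perf_event_set_output() serves the PERF_EVENT_IOC_SET_OUTPUT ioctl (and the PERF_FLAG_FD_OUTPUT open flag): the checks above insist that the two events share a CPU or target task, use the same clock and ring-buffer direction, and have compatible AUX PMUs before one may write into the other's buffer. A sketch of the ioctl path, assuming fd_leader already has an mmap()ed ring buffer; share_buffer() is an illustrative name.

/* Sketch: make fd_other emit its records into fd_leader's ring buffer. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int share_buffer(int fd_other, int fd_leader)
{
	return ioctl(fd_other, PERF_EVENT_IOC_SET_OUTPUT, fd_leader);
}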
12343 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
12349 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
12354 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
12359 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
12363 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
12367 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
12374 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
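perf_event_set_clock() translates the clockid requested at open time into one of the in-kernel clock read functions above; unless the PMU advertises PERF_PMU_CAP_NO_NMI, only the NMI-safe variants are accepted. From userspace the selection is two attr fields (hedged sketch; use_raw_clock() is an illustrative helper name):

/* Sketch: timestamp this event with CLOCK_MONOTONIC_RAW, which the
 * kernel maps to ktime_get_raw_fast_ns above. */
#include <linux/perf_event.h>
#include <time.h>		/* CLOCK_MONOTONIC_RAW */

static void use_raw_clock(struct perf_event_attr *attr)
{
	attr->use_clockid = 1;
	attr->clockid = CLOCK_MONOTONIC_RAW;
}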
12412 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12417 * @group_fd: group leader event fd
12418 * @flags: perf event open flags
12426 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
12529 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12531 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
12532 err = PTR_ERR(event); in SYSCALL_DEFINE5()
12536 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
12537 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12547 pmu = event->pmu; in SYSCALL_DEFINE5()
12550 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
12556 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
12565 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
12577 ctx = find_get_context(task, event); in SYSCALL_DEFINE5()
12592 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
12597 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in SYSCALL_DEFINE5()
12616 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
12624 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
12639 if (is_software_event(event) && in SYSCALL_DEFINE5()
12642 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
12653 } else if (!is_software_event(event)) { in SYSCALL_DEFINE5()
12658 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
12674 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
12679 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
12682 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
12687 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
12692 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
12699 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
12701 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
12708 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); in SYSCALL_DEFINE5()
12748 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
12760 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
12763 perf_event__header_size(event); in SYSCALL_DEFINE5()
12764 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
12766 event->owner = current; in SYSCALL_DEFINE5()
12768 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
12779 	list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
12784 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
12793 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
12794 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
12803 free_event(event); in SYSCALL_DEFINE5()
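sys_perf_event_open() ties the new event to an optional group leader; the checks above ensure leader and sibling share the clock and CPU and that software and hardware events end up in a sensible group. A hedged sketch of creating a two-event group and reading both counters atomically (open_event() is an illustrative wrapper, error handling is minimal):

/* Sketch: instructions + cycles in one group, read in a single read(). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long open_event(struct perf_event_attr *attr, long group_fd)
{
	/* pid == 0, cpu == -1: this task, any CPU */
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct { uint64_t nr, val[2]; } buf;
	struct perf_event_attr attr;
	long leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.read_format = PERF_FORMAT_GROUP;
	attr.exclude_kernel = 1;	/* user space only; friendlier to paranoid settings */
	attr.disabled = 1;

	leader = open_event(&attr, -1);

	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 0;		/* siblings follow the leader */
	sibling = open_event(&attr, leader);
	if (leader < 0 || sibling < 0)
		return 1;

	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	for (volatile long i = 0; i < 50000000; i++)
		;			/* workload */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	if (read(leader, &buf, sizeof(buf)) > 0 && buf.nr == 2)
		printf("instructions=%llu cycles=%llu\n",
		       (unsigned long long)buf.val[0],
		       (unsigned long long)buf.val[1]);
	return 0;
}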
12820 * @overflow_handler: callback to trigger when we hit the event
12831 struct perf_event *event; in perf_event_create_kernel_counter() local
12842 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12844 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
12845 err = PTR_ERR(event); in perf_event_create_kernel_counter()
12850 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
12851 pmu = event->pmu; in perf_event_create_kernel_counter()
12854 event->event_caps |= PERF_EV_CAP_SOFTWARE; in perf_event_create_kernel_counter()
12859 ctx = find_get_context(task, event); in perf_event_create_kernel_counter()
12872 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
12877 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
12881 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
12894 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
12899 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
12903 return event; in perf_event_create_kernel_counter()
12907 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
12913 free_event(event); in perf_event_create_kernel_counter()
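perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall: the owner is set to TASK_TOMBSTONE so the event needs no user task, and overflows are delivered to the supplied handler, much as the hardlockup detector consumes cycle counts. A hedged kernel-side sketch under those assumptions, not a drop-in user of this API; all names here (my_counter, my_overflow, the period) are illustrative.

/* Sketch, kernel context only: one pinned cycle counter on a chosen CPU. */
#include <linux/perf_event.h>
#include <linux/err.h>

static struct perf_event *my_counter;

static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	/* runs from the PMU interrupt; keep it short and NMI-safe */
}

static int my_counter_create(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.pinned		= 1,
		.sample_period	= 10000000,	/* placeholder period */
	};

	my_counter = perf_event_create_kernel_counter(&attr, cpu,
						      NULL /* per-cpu, no task */,
						      my_overflow, NULL);
	if (IS_ERR(my_counter))
		return PTR_ERR(my_counter);
	return 0;
}

static void my_counter_destroy(void)
{
	perf_event_release_kernel(my_counter);
}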
12924 struct perf_event *event, *sibling; in __perf_pmu_remove() local
12926 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) { in __perf_pmu_remove()
12927 perf_remove_from_context(event, 0); in __perf_pmu_remove()
12928 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
12929 list_add(&event->migrate_entry, events); in __perf_pmu_remove()
12931 for_each_sibling_event(sibling, event) { in __perf_pmu_remove()
12941 int cpu, struct perf_event *event) in __perf_pmu_install_event() argument
12944 struct perf_event_context *old_ctx = event->ctx; in __perf_pmu_install_event()
12948 event->cpu = cpu; in __perf_pmu_install_event()
12949 epc = find_get_pmu_context(pmu, ctx, event); in __perf_pmu_install_event()
12950 event->pmu_ctx = epc; in __perf_pmu_install_event()
12952 if (event->state >= PERF_EVENT_STATE_OFF) in __perf_pmu_install_event()
12953 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_pmu_install_event()
12954 perf_install_in_context(ctx, event, cpu); in __perf_pmu_install_event()
12957 * Now that event->ctx is updated and visible, put the old ctx. in __perf_pmu_install_event()
12965 struct perf_event *event, *tmp; in __perf_pmu_install() local
12975 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
12976 if (event->group_leader == event) in __perf_pmu_install()
12979 list_del(&event->migrate_entry); in __perf_pmu_install()
12980 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
12987 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
12988 list_del(&event->migrate_entry); in __perf_pmu_install()
12989 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13053 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
13055 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
13075 perf_remove_from_context(event, detach_flags); in perf_event_exit_event()
13078 if (event->state > PERF_EVENT_STATE_EXIT) in perf_event_exit_event()
13079 perf_event_set_state(event, PERF_EVENT_STATE_EXIT); in perf_event_exit_event()
13091 free_event(event); in perf_event_exit_event()
13099 perf_event_wakeup(event); in perf_event_exit_event()
13164 * When a child task exits, feed back event values to parent events.
13171 struct perf_event *event, *tmp; in perf_event_exit_task() local
13174 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
13176 list_del_init(&event->owner_entry); in perf_event_exit_task()
13183 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
13198 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
13201 struct perf_event *parent = event->parent; in perf_free_event()
13207 list_del_init(&event->child_list); in perf_free_event()
13213 perf_group_detach(event); in perf_free_event()
13214 list_del_event(event, ctx); in perf_free_event()
13216 free_event(event); in perf_free_event()
13229 struct perf_event *event, *tmp; in perf_event_free_task() local
13249 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13250 perf_free_event(event, ctx); in perf_event_free_task()
13263 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
13299 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
13301 if (!event) in perf_event_attrs()
13304 return &event->attr; in perf_event_attrs()
13308 * Inherit an event from parent task to child task.
13370 * Make the child state follow the state of the parent event, in inherit_event()
13409 * Link this into the parent event's child list in inherit_event()
13418 * Inherits an event group.
13462 * Creates the child task context and tries to inherit the event-group.
13465 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13473 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
13481 if (!event->attr.inherit || in inherit_task_group()
13482 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
13484 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
13504 ret = inherit_group(event, parent, parent_ctx, child, child_ctx); in inherit_task_group()
13518 struct perf_event *event; in perf_event_init_context() local
13552 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
13553 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13568 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
13569 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
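inherit_task_group() decides per event whether a clone() gets its own inherited copy: attr.inherit must be set, attr.inherit_thread limits inheritance to CLONE_THREAD children, and sigtrap events are skipped when the child clears its signal handlers (CLONE_CLEAR_SIGHAND); on child exit the inherited values are fed back to the parent event, as noted above. From the opening side this is plain attribute configuration (hedged sketch; make_inheritable() is an illustrative helper name):

/* Sketch: let clones of the measured task inherit their own counter copy. */
#include <linux/perf_event.h>

static void make_inheritable(struct perf_event_attr *attr)
{
	attr->inherit = 1;		/* children inherit the event */
	attr->inherit_thread = 1;	/* ...but only CLONE_THREAD children */
}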
13676 struct perf_event *event; in __perf_event_exit_context() local
13680 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13681 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()