Lines Matching full:event
212 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
214 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
234 * - removing the last event from a task ctx; this is relatively straight
237 * - adding the first event to a task ctx; this is tricky because we cannot
248 struct perf_event *event; member
256 struct perf_event *event = efs->event; in event_function() local
257 struct perf_event_context *ctx = event->ctx; in event_function()
292 efs->func(event, cpuctx, ctx, efs->data); in event_function()
299 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
301 struct perf_event_context *ctx = event->ctx; in event_function_call()
305 .event = event, in event_function_call()
310 if (!event->parent) { in event_function_call()
312 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
313 * stabilize the event->ctx relation. See in event_function_call()
320 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
346 func(event, NULL, ctx, data); in event_function_call()
356 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
358 struct perf_event_context *ctx = event->ctx; in event_function_local()
395 func(event, cpuctx, ctx, data); in event_function_local()
448 * perf event paranoia level:
460 * max perf event sample rate
655 static u64 perf_event_time(struct perf_event *event);
664 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
666 return event->clock(); in perf_event_clock()
670 * State based event timekeeping...
672 * The basic idea is to use event->state to determine which (if any) time
677 * Event groups make things a little more complicated, but not terribly so. The
692 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
694 struct perf_event *leader = event->group_leader; in __perf_effective_state()
699 return event->state; in __perf_effective_state()
703 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
705 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
706 u64 delta = now - event->tstamp; in __perf_update_times()
708 *enabled = event->total_time_enabled; in __perf_update_times()
712 *running = event->total_time_running; in __perf_update_times()
717 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
719 u64 now = perf_event_time(event); in perf_event_update_time()
721 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
722 &event->total_time_running); in perf_event_update_time()
723 event->tstamp = now; in perf_event_update_time()
735 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
737 if (event->state == state) in perf_event_set_state()
740 perf_event_update_time(event); in perf_event_set_state()
745 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
746 perf_event_update_sibling_time(event); in perf_event_set_state()
748 WRITE_ONCE(event->state, state); in perf_event_set_state()
798 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
802 /* @event doesn't care about cgroup */ in perf_cgroup_match()
803 if (!event->cgrp) in perf_cgroup_match()
811 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
813 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
817 event->cgrp->css.cgroup); in perf_cgroup_match()
820 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
822 css_put(&event->cgrp->css); in perf_detach_cgroup()
823 event->cgrp = NULL; in perf_detach_cgroup()
826 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
828 return event->cgrp != NULL; in is_cgroup_event()
831 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
835 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
839 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
843 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
881 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
889 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
892 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
935 * cpuctx->cgrp is set when the first cgroup event is enabled, in perf_cgroup_switch()
936 * and is cleared when the last cgroup event is disabled. in perf_cgroup_switch()
968 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
1009 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
1026 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
1031 event->cgrp = cgrp; in perf_cgroup_connect()
1039 perf_detach_cgroup(event); in perf_cgroup_connect()
1046 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1050 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1053 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
1068 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1072 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1075 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1092 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1097 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1100 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1105 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1114 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1126 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1131 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1137 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1142 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1246 static inline void perf_pmu_read(struct perf_event *event) in perf_pmu_read() argument
1248 if (event->state == PERF_EVENT_STATE_ACTIVE) in perf_pmu_read()
1249 event->pmu->read(event); in perf_pmu_read()
1308 * because the sys_perf_event_open() case will install a new event and break
1319 * quiesce the event, after which we can install it in the new location. This
1320 * means that only external vectors (perf_fops, prctl) can perturb the event
1324 * However; because event->ctx can change while we're waiting to acquire
1344 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1350 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1358 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1368 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1370 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1373 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1399 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1406 if (event->parent) in perf_event_pid_type()
1407 event = event->parent; in perf_event_pid_type()
1409 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1416 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1418 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1421 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1423 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1427 * If we inherit events, we want to return the parent event id
1430 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1432 u64 id = event->id; in primary_event_id()
1434 if (event->parent) in primary_event_id()
1435 id = event->parent->id; in primary_event_id()
1555 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1557 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1562 if (is_cgroup_event(event)) in perf_event_time()
1563 return perf_cgroup_event_time(event); in perf_event_time()
1568 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1570 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1575 if (is_cgroup_event(event)) in perf_event_time_now()
1576 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1585 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1587 struct perf_event_context *ctx = event->ctx; in get_event_type()
1596 if (event->group_leader != event) in get_event_type()
1597 event = event->group_leader; in get_event_type()
1599 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1607 * Helper function to initialize event group nodes.
1609 static void init_event_group(struct perf_event *event) in init_event_group() argument
1611 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1612 event->group_index = 0; in init_event_group()
1617 * based on event attrs bits.
1620 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1622 if (event->attr.pinned) in get_event_groups()
1637 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1642 if (event->cgrp) in event_cgroup()
1643 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1650 * Compare function for event groups;
1745 * Insert @event into @groups' tree; using
1746 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1751 struct perf_event *event) in perf_event_groups_insert() argument
1753 event->group_index = ++groups->index; in perf_event_groups_insert()
1755 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1759 * Helper function to insert event into the pinned or flexible groups.
1762 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1766 groups = get_event_groups(event, ctx); in add_event_to_groups()
1767 perf_event_groups_insert(groups, event); in add_event_to_groups()
1775 struct perf_event *event) in perf_event_groups_delete() argument
1777 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1780 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1781 init_event_group(event); in perf_event_groups_delete()
1785 * Helper function to delete event from its groups.
1788 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1792 groups = get_event_groups(event, ctx); in del_event_from_groups()
1793 perf_event_groups_delete(groups, event); in del_event_from_groups()
1797 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1818 perf_event_groups_next(struct perf_event *event, struct pmu *pmu) in perf_event_groups_next() argument
1821 .cpu = event->cpu, in perf_event_groups_next()
1823 .cgroup = event_cgroup(event), in perf_event_groups_next()
1827 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1834 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \ argument
1835 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1836 event; event = perf_event_groups_next(event, pmu))
1841 #define perf_event_groups_for_each(event, groups) \ argument
1842 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1843 typeof(*event), group_node); event; \
1844 event = rb_entry_safe(rb_next(&event->group_node), \
1845 typeof(*event), group_node))
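
The two group-iteration macros listed above are easier to follow with a short usage sketch. The snippet below is an editor's illustration, not part of the matched source; the helper name count_flexible_events and the choice of ctx->flexible_groups as the tree to walk are assumptions made for the example.

	/* Illustrative sketch: count group leaders filed under a given CPU and PMU. */
	static int count_flexible_events(struct perf_event_context *ctx,
					 int cpu, struct pmu *pmu)
	{
		struct perf_event *event;
		int n = 0;

		/* Visit only the {cpu, pmu} subtree of the flexible groups. */
		perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu)
			n++;

		/*
		 * perf_event_groups_for_each(event, &ctx->flexible_groups)
		 * would instead visit every group leader in the tree.
		 */
		return n;
	}
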
1848 * Does the event attribute request inherit with PERF_SAMPLE_READ
1856 * Add an event to the lists for its context.
1860 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1864 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1865 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1867 event->tstamp = perf_event_time(event); in list_add_event()
1870 * If we're a standalone event or group leader, we go to the context in list_add_event()
1874 if (event->group_leader == event) { in list_add_event()
1875 event->group_caps = event->event_caps; in list_add_event()
1876 add_event_to_groups(event, ctx); in list_add_event()
1879 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1881 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1883 if (event->attr.inherit_stat) in list_add_event()
1885 if (has_inherit_and_sample_read(&event->attr)) in list_add_event()
1888 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1889 perf_cgroup_event_enable(event, ctx); in list_add_event()
1892 event->pmu_ctx->nr_events++; in list_add_event()
1896 * Initialize event state based on the perf_event_attr::disabled.
1898 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1900 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1934 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1952 size += event->read_size; in __perf_event_header_size()
1972 event->header_size = size; in __perf_event_header_size()
1979 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1981 event->read_size = in perf_event__header_size()
1982 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
1983 event->group_leader->nr_siblings); in perf_event__header_size()
1984 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1987 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1990 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
2011 event->id_header_size = size; in perf_event__id_header_size()
2015 * Check that adding an event to the group does not result in anybody
2016 * overflowing the 64k event limit imposed by the output buffer.
2018 * Specifically, check that the read_size for the event does not exceed 16k,
2020 * depends on per-event read_format, also (re)check the existing events.
2025 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
2027 struct perf_event *sibling, *group_leader = event->group_leader; in perf_event_validate_size()
2029 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
2044 if (event == group_leader) in perf_event_validate_size()
2056 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
2058 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
2060 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
2066 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
2069 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
2071 if (group_leader == event) in perf_group_attach()
2074 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
2076 group_leader->group_caps &= event->event_caps; in perf_group_attach()
2078 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
2089 * Remove an event from the lists for its context.
2093 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2095 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2101 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2104 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2107 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
2109 if (event->attr.inherit_stat) in list_del_event()
2111 if (has_inherit_and_sample_read(&event->attr)) in list_del_event()
2114 list_del_rcu(&event->event_entry); in list_del_event()
2116 if (event->group_leader == event) in list_del_event()
2117 del_event_from_groups(event, ctx); in list_del_event()
2120 * If event was in error state, then keep it in list_del_event()
2124 * of the event in list_del_event()
2126 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2127 perf_cgroup_event_disable(event, ctx); in list_del_event()
2128 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2132 event->pmu_ctx->nr_events--; in list_del_event()
2136 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2141 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2144 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2147 static void put_event(struct perf_event *event);
2148 static void event_sched_out(struct perf_event *event,
2151 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2153 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2157 * If event uses aux_event tear down the link in perf_put_aux_event()
2159 if (event->aux_event) { in perf_put_aux_event()
2160 iter = event->aux_event; in perf_put_aux_event()
2161 event->aux_event = NULL; in perf_put_aux_event()
2167 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2170 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2171 if (iter->aux_event != event) in perf_put_aux_event()
2175 put_event(event); in perf_put_aux_event()
2183 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2187 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2189 return event->attr.aux_output || has_aux_action(event); in perf_need_aux_event()
2192 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2196 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2197 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2207 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2210 if (event->attr.aux_output && in perf_get_aux_event()
2211 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2214 if ((event->attr.aux_pause || event->attr.aux_resume) && in perf_get_aux_event()
2218 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2225 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2230 event->aux_event = group_leader; in perf_get_aux_event()
2235 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2237 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2238 &event->pmu_ctx->flexible_active; in get_event_list()
2247 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2249 event_sched_out(event, event->ctx); in perf_remove_sibling_event()
2250 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2253 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2255 struct perf_event *leader = event->group_leader; in perf_group_detach()
2257 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2264 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2267 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2269 perf_put_aux_event(event); in perf_group_detach()
2274 if (leader != event) { in perf_group_detach()
2275 list_del_init(&event->sibling_list); in perf_group_detach()
2276 event->group_leader->nr_siblings--; in perf_group_detach()
2277 event->group_leader->group_generation++; in perf_group_detach()
2282 * If this was a group event with sibling events then in perf_group_detach()
2286 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2295 sibling->group_caps = event->group_caps; in perf_group_detach()
2298 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2304 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2316 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2318 struct perf_event *parent_event = event->parent; in perf_child_detach()
2320 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2323 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2330 sync_child_event(event); in perf_child_detach()
2331 list_del_init(&event->child_list); in perf_child_detach()
2334 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2336 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2340 event_filter_match(struct perf_event *event) in event_filter_match() argument
2342 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2343 perf_cgroup_match(event); in event_filter_match()
2347 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) in event_sched_out() argument
2349 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2355 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2358 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2366 list_del_init(&event->active_list); in event_sched_out()
2368 perf_pmu_disable(event->pmu); in event_sched_out()
2370 event->pmu->del(event, 0); in event_sched_out()
2371 event->oncpu = -1; in event_sched_out()
2373 if (event->pending_disable) { in event_sched_out()
2374 event->pending_disable = 0; in event_sched_out()
2375 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2379 perf_event_set_state(event, state); in event_sched_out()
2381 if (!is_software_event(event)) in event_sched_out()
2383 if (event->attr.freq && event->attr.sample_freq) { in event_sched_out()
2387 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2390 perf_pmu_enable(event->pmu); in event_sched_out()
2396 struct perf_event *event; in group_sched_out() local
2408 for_each_sibling_event(event, group_event) in group_sched_out()
2409 event_sched_out(event, ctx); in group_sched_out()
2441 ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) in ctx_time_update_event() argument
2447 update_cgrp_time_from_event(event); in ctx_time_update_event()
2457 * Cross CPU call to remove a performance event
2459 * We disable the event on the hardware level first. After that we
2463 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2468 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context()
2481 event->pending_disable = 1; in __perf_remove_from_context()
2484 event_sched_out(event, ctx); in __perf_remove_from_context()
2485 perf_event_set_state(event, min(event->state, state)); in __perf_remove_from_context()
2487 perf_group_detach(event); in __perf_remove_from_context()
2489 perf_child_detach(event); in __perf_remove_from_context()
2490 list_del_event(event, ctx); in __perf_remove_from_context()
2516 * Remove the event from a task's (or a CPU's) list of events.
2518 * If event->ctx is a cloned context, callers must make sure that
2519 * every task struct that event->ctx->task could possibly point to
2525 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2527 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2538 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context), in perf_remove_from_context()
2545 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2549 * Cross CPU call to disable a performance event
2551 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2556 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2559 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2560 ctx_time_update_event(ctx, event); in __perf_event_disable()
2562 if (event == event->group_leader) in __perf_event_disable()
2563 group_sched_out(event, ctx); in __perf_event_disable()
2565 event_sched_out(event, ctx); in __perf_event_disable()
2567 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2568 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2570 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2574 * Disable an event.
2576 * If event->ctx is a cloned context, callers must make sure that
2577 * every task struct that event->ctx->task could possibly point to
2580 * hold the top-level event's child_mutex, so any descendant that
2583 * When called from perf_pending_disable it's OK because event->ctx
2587 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2589 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2592 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2598 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2601 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2603 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2610 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2614 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2615 _perf_event_disable(event); in perf_event_disable()
2616 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2620 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2622 event->pending_disable = 1; in perf_event_disable_inatomic()
2623 irq_work_queue(&event->pending_disable_irq); in perf_event_disable_inatomic()
2628 static void perf_log_throttle(struct perf_event *event, int enable);
2629 static void perf_log_itrace_start(struct perf_event *event);
2632 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) in event_sched_in() argument
2634 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2638 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2642 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2645 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2647 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2652 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2659 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2660 perf_log_throttle(event, 1); in event_sched_in()
2661 event->hw.interrupts = 0; in event_sched_in()
2664 perf_pmu_disable(event->pmu); in event_sched_in()
2666 perf_log_itrace_start(event); in event_sched_in()
2668 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2669 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2670 event->oncpu = -1; in event_sched_in()
2675 if (!is_software_event(event)) in event_sched_in()
2677 if (event->attr.freq && event->attr.sample_freq) { in event_sched_in()
2681 if (event->attr.exclusive) in event_sched_in()
2685 perf_pmu_enable(event->pmu); in event_sched_in()
2693 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2707 for_each_sibling_event(event, group_event) { in group_sched_in()
2708 if (event_sched_in(event, ctx)) { in group_sched_in()
2709 partial_group = event; in group_sched_in()
2721 * The events up to the failed event are scheduled out normally. in group_sched_in()
2723 for_each_sibling_event(event, group_event) { in group_sched_in()
2724 if (event == partial_group) in group_sched_in()
2727 event_sched_out(event, ctx); in group_sched_in()
2737 * Work out whether we can put this event group on the CPU now.
2739 static int group_can_go_on(struct perf_event *event, int can_add_hw) in group_can_go_on() argument
2741 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
2747 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2759 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2768 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2771 list_add_event(event, ctx); in add_event_to_ctx()
2772 perf_group_attach(event); in add_event_to_ctx()
2810 * time an event is added, only do it for the groups of equal priority and
2877 * Cross CPU call to install and enable a performance event
2884 struct perf_event *event = info; in __perf_install_in_context() local
2885 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2916 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2918 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2923 event->cgrp->css.cgroup); in __perf_install_in_context()
2929 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2930 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, in __perf_install_in_context()
2931 get_event_type(event)); in __perf_install_in_context()
2933 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2942 static bool exclusive_event_installable(struct perf_event *event,
2946 * Attach a performance event to a context.
2952 struct perf_event *event, in perf_install_in_context() argument
2959 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2961 if (event->cpu != -1) in perf_install_in_context()
2962 WARN_ON_ONCE(event->cpu != cpu); in perf_install_in_context()
2965 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2968 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2972 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2976 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2978 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
2979 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2985 add_event_to_ctx(event, ctx); in perf_install_in_context()
2991 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
3033 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
3049 * thus we can safely install the event. in perf_install_in_context()
3055 add_event_to_ctx(event, ctx); in perf_install_in_context()
3060 * Cross CPU call to enable a performance event
3062 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
3067 struct perf_event *leader = event->group_leader; in __perf_event_enable()
3070 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
3071 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
3076 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
3077 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
3082 if (!event_filter_match(event)) in __perf_event_enable()
3086 * If the event is in a group and isn't the group leader, in __perf_event_enable()
3089 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_enable()
3096 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event)); in __perf_event_enable()
3100 * Enable an event.
3102 * If event->ctx is a cloned context, callers must make sure that
3103 * every task struct that event->ctx->task could possibly point to
3108 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
3110 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
3113 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
3114 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3121 * If the event is in error state, clear that first. in _perf_event_enable()
3123 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3127 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3131 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3132 event->group_leader == event) in _perf_event_enable()
3135 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3139 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3145 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3149 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3150 _perf_event_enable(event); in perf_event_enable()
3151 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
3156 struct perf_event *event; member
3163 struct perf_event *event = sd->event; in __perf_event_stop() local
3166 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3174 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3176 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3179 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3187 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3191 event->pmu->start(event, 0); in __perf_event_stop()
3196 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3199 .event = event, in perf_event_stop()
3205 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3212 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3213 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3216 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3229 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3230 * (p2) when an event is scheduled in (pmu::add), it calls
3234 * If (p1) happens while the event is active, we restart it to force (p2).
3245 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3247 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3249 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3253 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3254 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3255 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3261 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3266 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3269 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3270 _perf_event_enable(event); in _perf_event_refresh()
3278 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3283 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3284 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3285 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
3307 * Copy event-type-independent attributes that may be modified.
3315 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3322 if (event->attr.type != attr->type) in perf_event_modify_attr()
3325 switch (event->attr.type) { in perf_event_modify_attr()
3334 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3336 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3338 * Event-type-independent attributes must be copied before event-type in perf_event_modify_attr()
3342 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3343 err = func(event, attr); in perf_event_modify_attr()
3346 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3353 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3361 struct perf_event *event, *tmp; in __pmu_ctx_sched_out() local
3376 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3379 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3383 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3386 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3508 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3513 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3517 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3520 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3523 perf_pmu_read(event); in __perf_event_sync_stat()
3525 perf_event_update_time(event); in __perf_event_sync_stat()
3528 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3532 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3535 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3536 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3541 perf_event_update_userpage(event); in __perf_event_sync_stat()
3548 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3555 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3561 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3564 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3566 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3711 * This callback is relevant even to per-cpu events; for example multi event
3758 * We stop each event and update the event value in event->count.
3761 * sets the disabled bit in the control field of event _before_
3762 * accessing the event control register. If an NMI hits, then it will
3763 * not restart the event.
3779 * cgroup event are system-wide mode only in __perf_event_task_sched_out()
3799 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event) in __heap_add() argument
3803 if (event) { in __heap_add()
3804 itrs[heap->nr] = event; in __heap_add()
3831 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3895 * Because the userpage is strictly per-event (there is no concept of context,
3901 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3903 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3906 perf_event_update_time(event); in event_update_userpage()
3907 perf_event_update_userpage(event); in event_update_userpage()
3914 struct perf_event *event; in group_update_userpage() local
3919 for_each_sibling_event(event, group_event) in group_update_userpage()
3920 event_update_userpage(event); in group_update_userpage()
3923 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3925 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3928 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3931 if (!event_filter_match(event)) in merge_sched_in()
3934 if (group_can_go_on(event, *can_add_hw)) { in merge_sched_in()
3935 if (!group_sched_in(event, ctx)) in merge_sched_in()
3936 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3939 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3941 if (event->attr.pinned) { in merge_sched_in()
3942 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3943 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3945 if (*perf_event_fasync(event)) in merge_sched_in()
3946 event->pending_kill = POLL_ERR; in merge_sched_in()
3948 perf_event_wakeup(event); in merge_sched_in()
3950 struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu); in merge_sched_in()
3952 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
3954 group_update_userpage(event); in merge_sched_in()
4095 * We restore the event value and then enable it.
4098 * sets the enabled bit in the control field of event _before_
4099 * accessing the event control register. If an NMI hits, then it will
4100 * keep the event running.
4114 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
4116 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4190 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4192 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4196 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4214 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4219 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
4225 struct perf_event *event; in perf_adjust_freq_unthr_events() local
4230 list_for_each_entry(event, event_list, active_list) { in perf_adjust_freq_unthr_events()
4231 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_events()
4235 if (!event_filter_match(event)) in perf_adjust_freq_unthr_events()
4238 hwc = &event->hw; in perf_adjust_freq_unthr_events()
4242 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_events()
4243 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4244 event->pmu->start(event, 0); in perf_adjust_freq_unthr_events()
4247 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4251 * stop the event and update event->count in perf_adjust_freq_unthr_events()
4253 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_events()
4255 now = local64_read(&event->count); in perf_adjust_freq_unthr_events()
4260 * restart the event in perf_adjust_freq_unthr_events()
4262 * we have stopped the event so tell that in perf_adjust_freq_unthr_events()
4267 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_events()
4269 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_events()
4311 * Move @event to the tail of the @ctx's eligible events.
4313 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4322 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4323 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4326 /* pick an event from the flexible_groups to rotate */
4330 struct perf_event *event; in ctx_event_to_rotate() local
4337 /* pick the first active flexible event */ in ctx_event_to_rotate()
4338 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4340 if (event) in ctx_event_to_rotate()
4343 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4351 event = __node_2_pe(node); in ctx_event_to_rotate()
4358 event = __node_2_pe(node); in ctx_event_to_rotate()
4365 event = __node_2_pe(node); in ctx_event_to_rotate()
4374 return event; in ctx_event_to_rotate()
4387 * events, thus the event count values are stable. in perf_rotate_context()
4457 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4460 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4463 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4464 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4467 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4481 struct perf_event *event; in perf_event_enable_on_exec() local
4496 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4497 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4498 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4502 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4517 static void perf_remove_from_owner(struct perf_event *event);
4518 static void perf_event_exit_event(struct perf_event *event,
4528 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4537 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4538 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4541 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4542 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4546 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4562 struct perf_event *event; member
4569 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4577 if (event->group_caps & PERF_EV_CAP_READ_SCOPE) { in __perf_event_read_cpu()
4578 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu); in __perf_event_read_cpu()
4584 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4596 * Cross CPU call to read the hardware event
4601 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4602 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4604 struct pmu *pmu = event->pmu; in __perf_event_read()
4610 * event->count would have been updated to a recent sample in __perf_event_read()
4611 * when the event was scheduled out. in __perf_event_read()
4617 ctx_time_update_event(ctx, event); in __perf_event_read()
4619 perf_event_update_time(event); in __perf_event_read()
4621 perf_event_update_sibling_time(event); in __perf_event_read()
4623 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4627 pmu->read(event); in __perf_event_read()
4634 pmu->read(event); in __perf_event_read()
4636 for_each_sibling_event(sub, event) in __perf_event_read()
4645 static inline u64 perf_event_count(struct perf_event *event, bool self) in perf_event_count() argument
4648 return local64_read(&event->count); in perf_event_count()
4650 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4653 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4661 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4662 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4666 * NMI-safe method to read a local event, that is an event that
4673 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4688 * It must not be an event with inherit set; we cannot read in perf_event_read_local()
4691 if (event->attr.inherit) { in perf_event_read_local()
4696 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4697 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4698 event->hw.target != current) { in perf_event_read_local()
4704 * Get the event CPU numbers, and adjust them to local if the event is in perf_event_read_local()
4705 * a per-package event that can be read locally in perf_event_read_local()
4707 event_oncpu = __perf_event_read_cpu(event, event->oncpu); in perf_event_read_local()
4708 event_cpu = __perf_event_read_cpu(event, event->cpu); in perf_event_read_local()
4710 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4711 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4717 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4718 if (event->attr.pinned && event_oncpu != smp_processor_id()) { in perf_event_read_local()
4724 * If the event is currently on this CPU, its either a per-task event, in perf_event_read_local()
4729 event->pmu->read(event); in perf_event_read_local()
4731 *value = local64_read(&event->count); in perf_event_read_local()
4735 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
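
perf_event_read_local() is the NMI-safe read path for an event bound to the current task or CPU, as the checks above show. The sketch below is illustrative only: the trailing u64 *enabled and u64 *running output parameters are assumed from the kernel's full signature, which this listing truncates, and the wrapper read_local_counter() is invented for the example.

	/* Illustrative sketch of a caller, e.g. from a tracing helper. */
	static int read_local_counter(struct perf_event *event, u64 *out)
	{
		u64 value, enabled, running;
		int err;

		/*
		 * Fails if the event is not local to this task/CPU or has
		 * inherit set; see the checks listed above.
		 */
		err = perf_event_read_local(event, &value, &enabled, &running);
		if (err)
			return err;

		*out = value;
		return 0;
	}
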
4747 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4749 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4753 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4754 * value in the event structure: in perf_event_read()
4768 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4773 .event = event, in perf_event_read()
4779 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4785 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4786 * scheduled out and that will have updated the event count. in perf_event_read()
4788 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4796 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4800 state = event->state; in perf_event_read()
4810 ctx_time_update_event(ctx, event); in perf_event_read()
4812 perf_event_update_time(event); in perf_event_read()
4814 perf_event_update_sibling_time(event); in perf_event_read()
4885 find_get_context(struct task_struct *task, struct perf_event *event) in find_get_context() argument
4893 /* Must be root to operate on a CPU event: */ in find_get_context()
4898 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in find_get_context()
4959 struct perf_event *event) in find_get_pmu_context() argument
4971 cpc = *per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
5088 static void perf_event_free_filter(struct perf_event *event);
5092 struct perf_event *event = container_of(head, typeof(*event), rcu_head); in free_event_rcu() local
5094 if (event->ns) in free_event_rcu()
5095 put_pid_ns(event->ns); in free_event_rcu()
5096 perf_event_free_filter(event); in free_event_rcu()
5097 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
5100 static void ring_buffer_attach(struct perf_event *event,
5103 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
5105 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
5108 list_del_rcu(&event->sb_list); in detach_sb_event()
5112 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
5114 struct perf_event_attr *attr = &event->attr; in is_sb_event()
5116 if (event->parent) in is_sb_event()
5119 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
5131 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
5133 if (is_sb_event(event)) in unaccount_pmu_sb_event()
5134 detach_sb_event(event); in unaccount_pmu_sb_event()
5287 attach_perf_ctx_data(struct perf_event *event) in attach_perf_ctx_data() argument
5289 struct task_struct *task = event->hw.target; in attach_perf_ctx_data()
5290 struct kmem_cache *ctx_cache = event->pmu->task_ctx_cache; in attach_perf_ctx_data()
5303 event->attach_state |= PERF_ATTACH_GLOBAL_DATA; in attach_perf_ctx_data()
5363 static void detach_perf_ctx_data(struct perf_event *event) in detach_perf_ctx_data() argument
5365 struct task_struct *task = event->hw.target; in detach_perf_ctx_data()
5367 event->attach_state &= ~PERF_ATTACH_TASK_DATA; in detach_perf_ctx_data()
5372 if (event->attach_state & PERF_ATTACH_GLOBAL_DATA) { in detach_perf_ctx_data()
5374 event->attach_state &= ~PERF_ATTACH_GLOBAL_DATA; in detach_perf_ctx_data()
5378 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
5382 if (event->parent) in unaccount_event()
5385 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
5387 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
5389 if (event->attr.build_id) in unaccount_event()
5391 if (event->attr.comm) in unaccount_event()
5393 if (event->attr.namespaces) in unaccount_event()
5395 if (event->attr.cgroup) in unaccount_event()
5397 if (event->attr.task) in unaccount_event()
5399 if (event->attr.freq) in unaccount_event()
5401 if (event->attr.context_switch) { in unaccount_event()
5405 if (is_cgroup_event(event)) in unaccount_event()
5407 if (has_branch_stack(event)) in unaccount_event()
5409 if (event->attr.ksymbol) in unaccount_event()
5411 if (event->attr.bpf_event) in unaccount_event()
5413 if (event->attr.text_poke) in unaccount_event()
5421 unaccount_pmu_sb_event(event); in unaccount_event()
5434 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5444 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
5446 struct pmu *pmu = event->pmu; in exclusive_event_init()
5459 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
5461 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
5464 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
5472 event->attach_state |= PERF_ATTACH_EXCLUSIVE; in exclusive_event_init()
5477 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
5479 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
5482 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
5487 event->attach_state &= ~PERF_ATTACH_EXCLUSIVE; in exclusive_event_destroy()
5500 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
5504 struct pmu *pmu = event->pmu; in exclusive_event_installable()
5512 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
5519 static void perf_free_addr_filters(struct perf_event *event);
5522 static void __free_event(struct perf_event *event) in __free_event() argument
5524 if (event->attach_state & PERF_ATTACH_CALLCHAIN) in __free_event()
5527 kfree(event->addr_filter_ranges); in __free_event()
5529 if (event->attach_state & PERF_ATTACH_EXCLUSIVE) in __free_event()
5530 exclusive_event_destroy(event); in __free_event()
5532 if (is_cgroup_event(event)) in __free_event()
5533 perf_detach_cgroup(event); in __free_event()
5535 if (event->attach_state & PERF_ATTACH_TASK_DATA) in __free_event()
5536 detach_perf_ctx_data(event); in __free_event()
5538 if (event->destroy) in __free_event()
5539 event->destroy(event); in __free_event()
5545 if (event->hw.target) in __free_event()
5546 put_task_struct(event->hw.target); in __free_event()
5548 if (event->pmu_ctx) { in __free_event()
5550 * put_pmu_ctx() needs an event->ctx reference, because of in __free_event()
5553 WARN_ON_ONCE(!event->ctx); in __free_event()
5554 WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx); in __free_event()
5555 put_pmu_ctx(event->pmu_ctx); in __free_event()
5562 if (event->ctx) in __free_event()
5563 put_ctx(event->ctx); in __free_event()
5565 if (event->pmu) in __free_event()
5566 module_put(event->pmu->module); in __free_event()
5568 call_rcu(&event->rcu_head, free_event_rcu); in __free_event()
5574 static void _free_event(struct perf_event *event) in DEFINE_FREE()
5576 irq_work_sync(&event->pending_irq); in DEFINE_FREE()
5577 irq_work_sync(&event->pending_disable_irq); in DEFINE_FREE()
5579 unaccount_event(event); in DEFINE_FREE()
5581 security_perf_event_free(event); in DEFINE_FREE()
5583 if (event->rb) { in DEFINE_FREE()
5585 * Can happen when we close an event with re-directed output. in DEFINE_FREE()
5590 mutex_lock(&event->mmap_mutex); in DEFINE_FREE()
5591 ring_buffer_attach(event, NULL); in DEFINE_FREE()
5592 mutex_unlock(&event->mmap_mutex); in DEFINE_FREE()
5595 perf_event_free_bpf_prog(event); in DEFINE_FREE()
5596 perf_free_addr_filters(event); in DEFINE_FREE()
5598 __free_event(event); in DEFINE_FREE()
5603 * where the event isn't exposed yet and inherited events.
5605 static void free_event(struct perf_event *event) in free_event() argument
5607 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5608 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
5609 atomic_long_read(&event->refcount), event)) { in free_event()
5614 _free_event(event); in free_event()
5618 * Remove user event from the owner task.
5620 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5628 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5631 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5654 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5657 * event. in perf_remove_from_owner()
5659 if (event->owner) { in perf_remove_from_owner()
5660 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5661 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5668 static void put_event(struct perf_event *event) in put_event() argument
5672 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5675 parent = event->parent; in put_event()
5676 _free_event(event); in put_event()
5684 * Kill an event dead; while event:refcount will preserve the event
5688 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5690 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5695 * If we got here through err_alloc: free_event(event); we will not in perf_event_release_kernel()
5699 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5704 if (!is_kernel_event(event)) in perf_event_release_kernel()
5705 perf_remove_from_owner(event); in perf_event_release_kernel()
5707 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5711 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5714 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5721 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5723 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5726 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5727 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5739 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5750 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5752 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5759 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5768 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5783 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5793 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5802 * Last reference unless ->pending_task work is pending on this event in perf_event_release_kernel()
5805 put_event(event); in perf_event_release_kernel()
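The release path shown here (perf_event_release_kernel(), the child-list walk, the final put_event()) is also the teardown path for counters created inside the kernel. A hedged kernel-side sketch, assuming a subsystem that wants one cycles counter on a given CPU; my_overflow(), my_counter_start() and my_counter_stop() are illustrative names, not kernel API:

#include <linux/err.h>
#include <linux/perf_event.h>

/* Illustrative overflow callback; runs from IRQ/NMI context for hw events. */
static void my_overflow(struct perf_event *event,
                        struct perf_sample_data *data, struct pt_regs *regs)
{
        /* keep this short and non-blocking */
}

static struct perf_event *my_counter;

static int my_counter_start(int cpu)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_HARDWARE,
                .size           = sizeof(attr),
                .config         = PERF_COUNT_HW_CPU_CYCLES,
                .sample_period  = 1000000,
        };

        my_counter = perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                      my_overflow, NULL);
        if (IS_ERR(my_counter))
                return PTR_ERR(my_counter);
        return 0;
}

static void my_counter_stop(void)
{
        /* runs the perf_event_release_kernel() path shown above */
        perf_event_release_kernel(my_counter);
}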
5819 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5827 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5829 (void)perf_event_read(event, false); in __perf_event_read_value()
5830 total += perf_event_count(event, false); in __perf_event_read_value()
5832 *enabled += event->total_time_enabled + in __perf_event_read_value()
5833 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5834 *running += event->total_time_running + in __perf_event_read_value()
5835 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5837 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5843 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5848 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5853 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5854 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5855 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
5940 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5943 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5950 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5970 ret = event->read_size; in perf_read_group()
5971 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5982 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5989 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5995 values[n++] = primary_event_id(event); in perf_read_one()
5997 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
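The values written by perf_read_one() arrive in a fixed order: the count, then TOTAL_TIME_ENABLED, TOTAL_TIME_RUNNING, ID and LOST, each present only if requested in read_format. A hedged userspace sketch of consuming that layout for a single (non-group) software event; PERF_FORMAT_LOST needs reasonably recent kernel headers, drop it and shrink the buffer on older systems:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t buf[5];
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING |
                           PERF_FORMAT_ID | PERF_FORMAT_LOST;

        fd = syscall(SYS_perf_event_open, &attr, 0 /* this task */,
                     -1 /* any cpu */, -1 /* no group */, PERF_FLAG_FD_CLOEXEC);
        if (fd < 0)
                return 1;

        /* ... do some work ... */

        if (read(fd, buf, sizeof(buf)) != sizeof(buf))
                return 1;

        printf("count=%llu enabled=%llu running=%llu id=%llu lost=%llu\n",
               (unsigned long long)buf[0], (unsigned long long)buf[1],
               (unsigned long long)buf[2], (unsigned long long)buf[3],
               (unsigned long long)buf[4]);
        close(fd);
        return 0;
}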
6005 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
6009 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
6012 mutex_lock(&event->child_mutex); in is_event_hup()
6013 no_children = list_empty(&event->child_list); in is_event_hup()
6014 mutex_unlock(&event->child_mutex); in is_event_hup()
6019 * Read the performance event - simple non blocking version for now
6022 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
6024 u64 read_format = event->attr.read_format; in __perf_read()
6028 * Return end-of-file for a read on an event that is in in __perf_read()
6032 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
6035 if (count < event->read_size) in __perf_read()
6038 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
6040 ret = perf_read_group(event, read_format, buf); in __perf_read()
6042 ret = perf_read_one(event, read_format, buf); in __perf_read()
6050 struct perf_event *event = file->private_data; in perf_read() local
6054 ret = security_perf_event_read(event); in perf_read()
6058 ctx = perf_event_ctx_lock(event); in perf_read()
6059 ret = __perf_read(event, buf, count); in perf_read()
6060 perf_event_ctx_unlock(event, ctx); in perf_read()
6067 struct perf_event *event = file->private_data; in perf_poll() local
6071 poll_wait(file, &event->waitq, wait); in perf_poll()
6073 if (is_event_hup(event)) in perf_poll()
6076 if (unlikely(READ_ONCE(event->state) == PERF_EVENT_STATE_ERROR && in perf_poll()
6077 event->attr.pinned)) in perf_poll()
6081 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
6084 mutex_lock(&event->mmap_mutex); in perf_poll()
6085 rb = event->rb; in perf_poll()
6088 mutex_unlock(&event->mmap_mutex); in perf_poll()
6092 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
6094 (void)perf_event_read(event, false); in _perf_event_reset()
6095 local64_set(&event->count, 0); in _perf_event_reset()
6096 perf_event_update_userpage(event); in _perf_event_reset()
6099 /* Assume it's not an event with inherit set. */
6100 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
6105 ctx = perf_event_ctx_lock(event); in perf_event_pause()
6106 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
6107 _perf_event_disable(event); in perf_event_pause()
6108 count = local64_read(&event->count); in perf_event_pause()
6110 local64_set(&event->count, 0); in perf_event_pause()
6111 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
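perf_event_pause() is in-kernel only, but the same building blocks are reachable from userspace through the ioctls that end in _perf_event_disable(), _perf_event_reset() and _perf_event_enable(). A hedged sketch, assuming fd came from perf_event_open(2); like _perf_event_reset(), PERF_EVENT_IOC_RESET zeroes only the count, not the accumulated times:

#include <linux/perf_event.h>
#include <sys/ioctl.h>

static int pause_and_reset(int fd)
{
        if (ioctl(fd, PERF_EVENT_IOC_DISABLE, 0))   /* stop counting */
                return -1;
        if (ioctl(fd, PERF_EVENT_IOC_RESET, 0))     /* zero event->count */
                return -1;
        return ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); /* resume */
}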
6118 * Holding the top-level event's child_mutex means that any
6119 * descendant process that has inherited this event will block
6123 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
6128 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
6130 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
6131 func(event); in perf_event_for_each_child()
6132 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
6134 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
6137 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
6140 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
6145 event = event->group_leader; in perf_event_for_each()
6147 perf_event_for_each_child(event, func); in perf_event_for_each()
6148 for_each_sibling_event(sibling, event) in perf_event_for_each()
6152 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
6160 if (event->attr.freq) { in __perf_event_period()
6161 event->attr.sample_freq = value; in __perf_event_period()
6163 event->attr.sample_period = value; in __perf_event_period()
6164 event->hw.sample_period = value; in __perf_event_period()
6167 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
6169 perf_pmu_disable(event->pmu); in __perf_event_period()
6172 * trying to unthrottle while we already re-started the event. in __perf_event_period()
6174 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
6175 event->hw.interrupts = 0; in __perf_event_period()
6176 perf_log_throttle(event, 1); in __perf_event_period()
6178 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
6181 local64_set(&event->hw.period_left, 0); in __perf_event_period()
6184 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
6185 perf_pmu_enable(event->pmu); in __perf_event_period()
6189 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
6191 return event->pmu->check_period(event, value); in perf_event_check_period()
6194 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
6196 if (!is_sampling_event(event)) in _perf_event_period()
6202 if (event->attr.freq) { in _perf_event_period()
6206 if (perf_event_check_period(event, value)) in _perf_event_period()
6212 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
6217 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
6222 ctx = perf_event_ctx_lock(event); in perf_event_period()
6223 ret = _perf_event_period(event, value); in perf_event_period()
6224 perf_event_ctx_unlock(event, ctx); in perf_event_period()
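From userspace, _perf_event_period() is driven by PERF_EVENT_IOC_PERIOD, which takes a pointer to the new u64 value; for attr.freq events the same call updates the sampling frequency, as __perf_event_period() shows. A hedged sketch:

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int set_period(int fd, uint64_t period)
{
        /* value is passed by pointer; interpreted as a frequency for
         * attr.freq events */
        return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}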
6237 static int perf_event_set_output(struct perf_event *event,
6239 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
6243 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
6260 return _perf_event_refresh(event, arg); in _perf_ioctl()
6269 return _perf_event_period(event, value); in _perf_ioctl()
6273 u64 id = primary_event_id(event); in _perf_ioctl()
6289 return perf_event_set_output(event, output_event); in _perf_ioctl()
6293 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
6304 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
6317 rb = rcu_dereference(event->rb); in _perf_ioctl()
6328 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
6338 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
6345 perf_event_for_each(event, func); in _perf_ioctl()
6347 perf_event_for_each_child(event, func); in _perf_ioctl()
6354 struct perf_event *event = file->private_data; in perf_ioctl() local
6359 ret = security_perf_event_write(event); in perf_ioctl()
6363 ctx = perf_event_ctx_lock(event); in perf_ioctl()
6364 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
6365 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
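Two of the ioctl cases dispatched above are easy to miss: PERF_EVENT_IOC_ID returns the value later seen as PERF_FORMAT_ID / PERF_SAMPLE_ID (primary_event_id()), and PERF_EVENT_IOC_SET_OUTPUT redirects an event's samples into another event's ring buffer (the perf_event_set_output() path). A hedged sketch for demultiplexing two events in one buffer:

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int share_buffer(int leader_fd, int other_fd, uint64_t *id)
{
        if (ioctl(other_fd, PERF_EVENT_IOC_ID, id))      /* sample id of other_fd */
                return -1;
        /* other_fd's samples now land in leader_fd's mmap'ed ring buffer */
        return ioctl(other_fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
}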
6395 struct perf_event *event; in perf_event_task_enable() local
6398 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
6399 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
6400 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
6401 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
6411 struct perf_event *event; in perf_event_task_disable() local
6414 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
6415 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
6416 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
6417 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
6424 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
6426 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
6429 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
6432 return event->pmu->event_idx(event); in perf_event_index()
6435 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
6441 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
6458 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
6467 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
6474 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
6480 * based on snapshot values taken when the event in perf_event_update_userpage()
6487 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
6497 userpg->index = perf_event_index(event); in perf_event_update_userpage()
6498 userpg->offset = perf_event_count(event, false); in perf_event_update_userpage()
6500 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
6503 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
6506 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
6508 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
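The index, offset, time_enabled and time_running values published by perf_event_update_userpage() are read through the mmap'ed struct perf_event_mmap_page under its ->lock seqcount, and reflect the state as of the last update. A hedged userspace sketch of the retry loop documented in the uapi header; pc is assumed to be the first page of the event's mmap region:

#include <linux/perf_event.h>
#include <stdint.h>

static void read_userpage(volatile struct perf_event_mmap_page *pc,
                          uint64_t *enabled, uint64_t *running, int64_t *offset)
{
        uint32_t seq;

        do {
                seq = pc->lock;
                __sync_synchronize();        /* pairs with the kernel's barriers */
                *enabled = pc->time_enabled; /* includes child_total_time_enabled */
                *running = pc->time_running;
                *offset  = pc->offset;       /* counter value at the last update */
                __sync_synchronize();
        } while (pc->lock != seq || (seq & 1));  /* retry if torn or mid-update */
}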
6518 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
6524 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
6526 if (event->rb) { in ring_buffer_attach()
6529 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
6531 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
6533 old_rb = event->rb; in ring_buffer_attach()
6535 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
6538 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
6539 event->rcu_pending = 1; in ring_buffer_attach()
6543 if (event->rcu_pending) { in ring_buffer_attach()
6544 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
6545 event->rcu_pending = 0; in ring_buffer_attach()
6549 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
6554 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
6555 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
6563 if (has_aux(event)) in ring_buffer_attach()
6564 perf_event_stop(event, 0); in ring_buffer_attach()
6566 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6575 wake_up_all(&event->waitq); in ring_buffer_attach()
6579 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6583 if (event->parent) in ring_buffer_wakeup()
6584 event = event->parent; in ring_buffer_wakeup()
6587 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6589 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6590 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6595 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6599 if (event->parent) in ring_buffer_get()
6600 event = event->parent; in ring_buffer_get()
6603 rb = rcu_dereference(event->rb); in ring_buffer_get()
6625 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6627 atomic_inc(&event->mmap_count); in perf_mmap_open()
6628 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6631 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6633 if (event->pmu->event_mapped) in perf_mmap_open()
6634 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6637 static void perf_pmu_output_stop(struct perf_event *event);
6641 * event, or through other events by use of perf_event_set_output().
6649 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6650 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6656 if (event->pmu->event_unmapped) in perf_mmap_close()
6657 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6671 perf_pmu_output_stop(event); in perf_mmap_close()
6687 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6690 ring_buffer_attach(event, NULL); in perf_mmap_close()
6691 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6704 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6705 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6707 * This event is en-route to free_event() which will in perf_mmap_close()
6714 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6720 * If we find a different rb; ignore this event, a next in perf_mmap_close()
6725 if (event->rb == rb) in perf_mmap_close()
6726 ring_buffer_attach(event, NULL); in perf_mmap_close()
6728 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6729 put_event(event); in perf_mmap_close()
6840 struct perf_event *event = file->private_data; in perf_mmap() local
6856 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6862 ret = security_perf_event_read(event); in perf_mmap()
6877 mutex_lock(&event->mmap_mutex); in perf_mmap()
6890 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6892 if (event->rb) { in perf_mmap()
6893 if (data_page_nr(event->rb) != nr_pages) in perf_mmap()
6896 if (atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6903 rb = event->rb; in perf_mmap()
6910 * event and continue as if !event->rb in perf_mmap()
6912 ring_buffer_attach(event, NULL); in perf_mmap()
6923 rb = event->rb; in perf_mmap()
7001 WARN_ON(!rb && event->rb); in perf_mmap()
7008 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
7009 event->cpu, flags); in perf_mmap()
7020 ring_buffer_attach(event, rb); in perf_mmap()
7022 perf_event_update_time(event); in perf_mmap()
7023 perf_event_init_userpage(event); in perf_mmap()
7024 perf_event_update_userpage(event); in perf_mmap()
7026 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
7027 event->attr.aux_watermark, flags); in perf_mmap()
7039 atomic_inc(&event->mmap_count); in perf_mmap()
7046 mutex_unlock(&event->mmap_mutex); in perf_mmap()
7058 if (!ret && event->pmu->event_mapped) in perf_mmap()
7059 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
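perf_mmap() expects one metadata page followed by a power-of-two number of data pages; records accumulate between data_tail and data_head, and userspace advances data_tail once it has consumed them (honoured only for writable mappings). A hedged sketch assuming the classic layout where data starts one page in; pairing this with attr.wakeup_events or a watermark makes poll(2) on the fd (the perf_poll() path above) report readable data:

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#define DATA_PAGES 8    /* must be a power of two, cf. the perf_mmap() checks */

static void drain(int fd)
{
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t len = (1 + DATA_PAGES) * page;
        struct perf_event_mmap_page *pc;
        uint64_t head, tail;
        char *data;

        pc = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED)
                return;
        data = (char *)pc + page;       /* classic layout; cf. pc->data_offset */

        tail = pc->data_tail;
        head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);

        while (tail < head) {
                struct perf_event_header *hdr = (struct perf_event_header *)
                        (data + (tail & (DATA_PAGES * page - 1)));
                /* hdr->type is PERF_RECORD_SAMPLE, _MMAP, _LOST, ...; records
                 * can wrap at the end of the buffer, so a real consumer copies
                 * them out before decoding */
                tail += hdr->size;
        }
        /* tell the kernel the data has been consumed */
        __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);

        munmap(pc, len);
}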
7067 struct perf_event *event = filp->private_data; in perf_fasync() local
7071 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
7091 * Perf event wakeup
7097 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
7099 ring_buffer_wakeup(event); in perf_event_wakeup()
7101 if (event->pending_kill) { in perf_event_wakeup()
7102 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
7103 event->pending_kill = 0; in perf_event_wakeup()
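The kill_fasync() call above is what turns ring-buffer wakeups into signals once the fd is switched to O_ASYNC; together with PERF_EVENT_IOC_REFRESH this gives the classic signal-per-overflow setup from the perf_event_open(2) man page. A hedged sketch; arm_signal() is an illustrative helper:

#define _GNU_SOURCE                     /* for F_SETSIG */
#include <linux/perf_event.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int arm_signal(int fd, void (*handler)(int, siginfo_t *, void *))
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        if (sigaction(SIGIO, &sa, NULL))
                return -1;

        if (fcntl(fd, F_SETFL, O_RDWR | O_NONBLOCK | O_ASYNC))
                return -1;
        if (fcntl(fd, F_SETSIG, SIGIO))
                return -1;
        if (fcntl(fd, F_SETOWN, getpid()))
                return -1;

        /* enable and allow one more overflow notification before auto-disable */
        return ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);
}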
7107 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
7114 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
7124 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
7125 event->orig_type, event->attr.sig_data); in perf_sigtrap()
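perf_sigtrap() delivers a synchronous SIGTRAP (si_code TRAP_PERF) to the measured task when attr.sigtrap is set; the attribute must be paired with attr.remove_on_exec and carries a user cookie in attr.sig_data. A hedged userspace sketch; the si_perf_* field names are those exposed by recent glibc and may be missing on older systems:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile sig_atomic_t traps;

static void trap_handler(int sig, siginfo_t *info, void *ucontext)
{
        if (info->si_code == TRAP_PERF)   /* info->si_perf_data == attr.sig_data,
                                           * info->si_perf_type == event type    */
                traps++;
}

static int open_sigtrap_counter(void)
{
        struct perf_event_attr attr;
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = trap_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGTRAP, &sa, NULL);

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_PAGE_FAULTS;
        attr.sample_period = 100;
        attr.sigtrap = 1;
        attr.remove_on_exec = 1;          /* required together with sigtrap */
        attr.sig_data = 0x1234;           /* delivered as si_perf_data */

        return (int)syscall(SYS_perf_event_open, &attr, 0, -1, -1,
                            PERF_FLAG_FD_CLOEXEC);
}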
7129 * Deliver the pending work in-event-context or follow the context.
7131 static void __perf_pending_disable(struct perf_event *event) in __perf_pending_disable() argument
7133 int cpu = READ_ONCE(event->oncpu); in __perf_pending_disable()
7136 * If the event isn't running; we done. event_sched_out() will have in __perf_pending_disable()
7143 * Yay, we hit home and are in the context of the event. in __perf_pending_disable()
7146 if (event->pending_disable) { in __perf_pending_disable()
7147 event->pending_disable = 0; in __perf_pending_disable()
7148 perf_event_disable_local(event); in __perf_pending_disable()
7171 * But the event runs on CPU-B and wants disabling there. in __perf_pending_disable()
7173 irq_work_queue_on(&event->pending_disable_irq, cpu); in __perf_pending_disable()
7178 struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq); in perf_pending_disable() local
7186 __perf_pending_disable(event); in perf_pending_disable()
7193 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
7203 * The wakeup isn't bound to the context of the event -- it can happen in perf_pending_irq()
7204 * irrespective of where the event is. in perf_pending_irq()
7206 if (event->pending_wakeup) { in perf_pending_irq()
7207 event->pending_wakeup = 0; in perf_pending_irq()
7208 perf_event_wakeup(event); in perf_pending_irq()
7217 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
7226 if (event->pending_work) { in perf_pending_task()
7227 event->pending_work = 0; in perf_pending_task()
7228 perf_sigtrap(event); in perf_pending_task()
7229 local_dec(&event->ctx->nr_no_switch_fast); in perf_pending_task()
7231 put_event(event); in perf_pending_task()
7275 static bool should_sample_guest(struct perf_event *event) in should_sample_guest() argument
7277 return !event->attr.exclude_guest && perf_guest_state(); in should_sample_guest()
7280 unsigned long perf_misc_flags(struct perf_event *event, in perf_misc_flags() argument
7283 if (should_sample_guest(event)) in perf_misc_flags()
7289 unsigned long perf_instruction_pointer(struct perf_event *event, in perf_instruction_pointer() argument
7292 if (should_sample_guest(event)) in perf_instruction_pointer()
7431 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
7435 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
7470 struct perf_event *event, in perf_pmu_snapshot_aux() argument
7480 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
7482 * doesn't change the event state. in perf_pmu_snapshot_aux()
7494 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7503 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
7507 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
7549 * when event->attr.sample_id_all is set.
7556 struct perf_event *event, in __perf_event_header__init_id() argument
7559 data->type = event->attr.sample_type; in __perf_event_header__init_id()
7564 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
7565 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
7569 data->time = perf_event_clock(event); in __perf_event_header__init_id()
7572 data->id = primary_event_id(event); in __perf_event_header__init_id()
7575 data->stream_id = event->id; in __perf_event_header__init_id()
7585 struct perf_event *event) in perf_event_header__init_id() argument
7587 if (event->attr.sample_id_all) { in perf_event_header__init_id()
7588 header->size += event->id_header_size; in perf_event_header__init_id()
7589 __perf_event_header__init_id(data, event, event->attr.sample_type); in perf_event_header__init_id()
7617 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
7621 if (event->attr.sample_id_all) in perf_event__output_id_sample()
7626 struct perf_event *event, in perf_output_read_one() argument
7629 u64 read_format = event->attr.read_format; in perf_output_read_one()
7633 values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr)); in perf_output_read_one()
7636 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
7640 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
7643 values[n++] = primary_event_id(event); in perf_output_read_one()
7645 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
7651 struct perf_event *event, in perf_output_read_group() argument
7654 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
7655 u64 read_format = event->attr.read_format; in perf_output_read_group()
7659 bool self = has_inherit_and_sample_read(&event->attr); in perf_output_read_group()
7675 if ((leader != event) && !handle->skip_read) in perf_output_read_group()
7689 if ((sub != event) && !handle->skip_read) in perf_output_read_group()
7719 struct perf_event *event) in perf_output_read() argument
7722 u64 read_format = event->attr.read_format; in perf_output_read()
7726 * based on snapshot values taken when the event in perf_output_read()
7734 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7736 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7737 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7739 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
7745 struct perf_event *event) in perf_output_sample() argument
7782 perf_output_read(handle, event); in perf_output_sample()
7833 if (branch_sample_hw_index(event)) in perf_output_sample()
7863 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7894 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7918 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7921 if (!event->attr.watermark) { in perf_output_sample()
7922 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
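perf_output_sample() writes sample fields in the fixed PERF_SAMPLE_* order, so a record body is decodable once the requested sample_type is known. A hedged sketch for sample_type = IP | TID | TIME | PERIOD; other record types and ring-buffer wrap-around are ignored, and struct my_sample is an illustrative local definition, not a uapi type:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

/* Layout for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD, in the order perf_output_sample()
 * emits the fields. */
struct my_sample {
        struct perf_event_header header;
        uint64_t ip;
        uint32_t pid, tid;
        uint64_t time;
        uint64_t period;
};

static void decode(const struct perf_event_header *hdr)
{
        const struct my_sample *s;

        if (hdr->type != PERF_RECORD_SAMPLE)
                return;                 /* MMAP, COMM, LOST, THROTTLE, ... */

        s = (const struct my_sample *)hdr;
        printf("ip=%#llx pid=%u tid=%u time=%llu period=%llu\n",
               (unsigned long long)s->ip, s->pid, s->tid,
               (unsigned long long)s->time, (unsigned long long)s->period);
}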
8065 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
8067 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
8068 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
8070 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
8071 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
8088 struct perf_event *event, in perf_prepare_sample() argument
8091 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
8109 data->type = event->attr.sample_type; in perf_prepare_sample()
8113 __perf_event_header__init_id(data, event, filtered_sample_type); in perf_prepare_sample()
8116 data->ip = perf_instruction_pointer(event, regs); in perf_prepare_sample()
8121 perf_sample_save_callchain(data, event, regs); in perf_prepare_sample()
8148 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
8163 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
8164 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
8210 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
8252 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
8263 event->attr.aux_sample_size); in perf_prepare_sample()
8265 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
8275 struct perf_event *event, in perf_prepare_header() argument
8279 header->size = perf_sample_data_size(data, event); in perf_prepare_header()
8280 header->misc = perf_misc_flags(event, regs); in perf_prepare_header()
8293 static void __perf_event_aux_pause(struct perf_event *event, bool pause) in __perf_event_aux_pause() argument
8296 if (!event->hw.aux_paused) { in __perf_event_aux_pause()
8297 event->hw.aux_paused = 1; in __perf_event_aux_pause()
8298 event->pmu->stop(event, PERF_EF_PAUSE); in __perf_event_aux_pause()
8301 if (event->hw.aux_paused) { in __perf_event_aux_pause()
8302 event->hw.aux_paused = 0; in __perf_event_aux_pause()
8303 event->pmu->start(event, PERF_EF_RESUME); in __perf_event_aux_pause()
8308 static void perf_event_aux_pause(struct perf_event *event, bool pause) in perf_event_aux_pause() argument
8312 if (WARN_ON_ONCE(!event)) in perf_event_aux_pause()
8315 rb = ring_buffer_get(event); in perf_event_aux_pause()
8321 * Guard against self-recursion here. Another event could trip in perf_event_aux_pause()
8329 __perf_event_aux_pause(event, pause); in perf_event_aux_pause()
8337 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
8352 perf_prepare_sample(data, event, regs); in __perf_event_output()
8353 perf_prepare_header(&header, data, event, regs); in __perf_event_output()
8355 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
8359 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
8369 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
8373 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
8377 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
8381 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
8385 perf_event_output(struct perf_event *event, in perf_event_output() argument
8389 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
8404 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
8413 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
8415 .pid = perf_event_pid(event, task), in perf_event_read_event()
8416 .tid = perf_event_tid(event, task), in perf_event_read_event()
8420 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
8421 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
8426 perf_output_read(&handle, event); in perf_event_read_event()
8427 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
8432 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8439 struct perf_event *event; in perf_iterate_ctx() local
8441 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
8443 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
8445 if (!event_filter_match(event)) in perf_iterate_ctx()
8449 output(event, data); in perf_iterate_ctx()
8456 struct perf_event *event; in perf_iterate_sb_cpu() local
8458 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
8461 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
8464 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
8467 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
8469 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
8471 output(event, data); in perf_iterate_sb_cpu()
8479 * your event, otherwise it might not get delivered.
8514 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
8516 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
8521 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
8527 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
8528 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
8536 event->addr_filters_gen++; in perf_event_addr_filters_exec()
8540 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
8565 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
8567 struct perf_event *parent = event->parent; in __perf_event_output_stop()
8571 .event = event, in __perf_event_output_stop()
8574 if (!has_aux(event)) in __perf_event_output_stop()
8578 parent = event; in __perf_event_output_stop()
8584 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
8586 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
8587 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
8596 struct perf_event *event = info; in __perf_pmu_output_stop() local
8599 .rb = event->rb, in __perf_pmu_output_stop()
8612 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
8619 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
8623 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
8633 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
8663 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
8665 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
8666 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
8667 event->attr.task; in perf_event_task_match()
8670 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
8679 if (!perf_event_task_match(event)) in perf_event_task_output()
8682 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
8684 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
8689 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
8690 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
8693 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
8695 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
8698 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
8699 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
8702 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
8706 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8774 * A system-wide event may be unaccounted, in perf_event_alloc_task_data()
8817 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8819 return event->attr.comm; in perf_event_comm_match()
8822 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8831 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8834 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8835 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8841 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8842 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8848 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8916 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8918 return event->attr.namespaces; in perf_event_namespaces_match()
8921 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8930 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8934 &sample, event); in perf_event_namespaces_output()
8935 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8940 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8942 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8947 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
9044 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
9046 return event->attr.cgroup; in perf_event_cgroup_match()
9049 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
9057 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
9061 &sample, event); in perf_event_cgroup_output()
9062 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
9070 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
9155 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
9162 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
9163 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
9166 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
9177 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
9180 if (event->attr.mmap2) { in perf_event_mmap_output()
9190 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
9191 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
9196 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
9197 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
9199 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
9201 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
9206 if (event->attr.mmap2) { in perf_event_mmap_output()
9225 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
9386 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
9388 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
9394 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
9403 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
9410 event->addr_filters_gen++; in __perf_addr_filters_adjust()
9414 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
9473 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
9495 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
9496 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
9502 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
9510 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
9528 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
9530 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
9536 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
9555 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
9557 return event->attr.context_switch; in perf_event_switch_match()
9560 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
9567 if (!perf_event_switch_match(event)) in perf_event_switch_output()
9571 if (event->ctx->task) { in perf_event_switch_output()
9578 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
9580 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
9583 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
9585 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
9589 if (event->ctx->task) in perf_event_switch_output()
9594 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
9632 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
9649 .time = perf_event_clock(event), in perf_log_throttle()
9650 .id = primary_event_id(event), in perf_log_throttle()
9651 .stream_id = event->id, in perf_log_throttle()
9657 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
9659 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
9665 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
9685 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
9687 return event->attr.ksymbol; in perf_event_ksymbol_match()
9690 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
9697 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
9701 &sample, event); in perf_event_ksymbol_output()
9702 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
9709 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
9775 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
9777 return event->attr.bpf_event; in perf_event_bpf_match()
9780 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9787 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9791 &sample, event); in perf_event_bpf_output()
9792 ret = perf_output_begin(&handle, &sample, event, in perf_event_bpf_output()
9798 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
9877 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
9879 return event->attr.text_poke; in perf_event_text_poke_match()
9882 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9890 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9893 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9895 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9910 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9947 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9949 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9952 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9963 if (event->parent) in perf_log_itrace_start()
9964 event = event->parent; in perf_log_itrace_start()
9966 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9967 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9973 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9974 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9976 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9977 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9983 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9988 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
9998 if (event->parent) in perf_report_aux_output_id()
9999 event = event->parent; in perf_report_aux_output_id()
10006 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
10007 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
10013 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
10020 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
10022 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
10037 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
10042 if (event->attr.freq) { in __perf_event_account_interrupt()
10049 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
10055 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
10057 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
10060 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
10067 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
10074 static int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
10080 .event = event, in bpf_overflow_handler()
10089 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
10091 perf_prepare_sample(data, event, regs); in bpf_overflow_handler()
10101 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10105 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
10109 if (event->prog) in perf_event_set_bpf_handler()
10115 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
10117 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
10118 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
10119 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
10132 event->prog = prog; in perf_event_set_bpf_handler()
10133 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
10137 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10139 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
10144 event->prog = NULL; in perf_event_free_bpf_handler()
10148 static inline int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
10155 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
10162 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
10168 * Generic event overflow handling, sampling.
10171 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
10175 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
10182 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
10185 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
10187 if (event->attr.aux_pause) in __perf_event_overflow()
10188 perf_event_aux_pause(event->aux_event, true); in __perf_event_overflow()
10190 if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && in __perf_event_overflow()
10191 !bpf_overflow_handler(event, data, regs)) in __perf_event_overflow()
10199 event->pending_kill = POLL_IN; in __perf_event_overflow()
10200 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
10202 event->pending_kill = POLL_HUP; in __perf_event_overflow()
10203 perf_event_disable_inatomic(event); in __perf_event_overflow()
10206 if (event->attr.sigtrap) { in __perf_event_overflow()
10210 * it is the first event, on the other hand, we should also not in __perf_event_overflow()
10213 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
10222 if (!event->pending_work && in __perf_event_overflow()
10223 !task_work_add(current, &event->pending_task, notify_mode)) { in __perf_event_overflow()
10224 event->pending_work = pending_id; in __perf_event_overflow()
10225 local_inc(&event->ctx->nr_no_switch_fast); in __perf_event_overflow()
10226 WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount)); in __perf_event_overflow()
10228 event->pending_addr = 0; in __perf_event_overflow()
10230 event->pending_addr = data->addr; in __perf_event_overflow()
10232 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
10245 WARN_ON_ONCE(event->pending_work != pending_id); in __perf_event_overflow()
10249 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
10251 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
10252 event->pending_wakeup = 1; in __perf_event_overflow()
10253 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
10256 if (event->attr.aux_resume) in __perf_event_overflow()
10257 perf_event_aux_pause(event->aux_event, false); in __perf_event_overflow()
10262 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
10266 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
10270 * Generic software event infrastructure
10281 * We directly increment event->count and keep a second value in
10282 * event->hw.period_left to count intervals. This period event
10287 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
10289 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
10310 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
10314 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
10318 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
10324 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
10336 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
10340 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
10342 local64_add(nr, &event->count); in perf_swevent_event()
10347 if (!is_sampling_event(event)) in perf_swevent_event()
10350 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
10352 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10354 data->period = event->hw.last_period; in perf_swevent_event()
10356 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
10357 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10362 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
10365 int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) in perf_exclude_event() argument
10367 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
10371 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
10374 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
10381 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
10387 if (event->attr.type != type) in perf_swevent_match()
10390 if (event->attr.config != event_id) in perf_swevent_match()
10393 if (perf_exclude_event(event, regs)) in perf_swevent_match()
10427 /* For the event head insertion and removal in the hlist */
10429 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
10432 u32 event_id = event->attr.config; in find_swevent_head()
10433 u64 type = event->attr.type; in find_swevent_head()
10436 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
10441 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
10454 struct perf_event *event; in do_perf_sw_event() local
10462 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
10463 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
10464 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
10510 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
10514 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
10517 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
10520 if (is_sampling_event(event)) { in perf_swevent_add()
10522 perf_swevent_set_period(event); in perf_swevent_add()
10527 head = find_swevent_head(swhash, event); in perf_swevent_add()
10531 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
10532 perf_event_update_userpage(event); in perf_swevent_add()
10537 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
10539 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
10542 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
10544 event->hw.state = 0; in perf_swevent_start()
10547 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
10549 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
10641 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
10643 u64 event_id = event->attr.config; in sw_perf_event_destroy()
10645 WARN_ON(event->parent); in sw_perf_event_destroy()
10654 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
10656 u64 event_id = event->attr.config; in perf_swevent_init()
10658 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
10664 if (has_branch_stack(event)) in perf_swevent_init()
10669 event->attr.type = perf_cpu_clock.type; in perf_swevent_init()
10672 event->attr.type = perf_task_clock.type; in perf_swevent_init()
10682 if (!event->parent) { in perf_swevent_init()
10690 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
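The software PMU initialized here also backs plain counting events that never reach the overflow path. A hedged sketch counting this task's page faults; count_page_faults() is an illustrative helper:

#include <linux/perf_event.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int count_page_faults(uint64_t *count)
{
        struct perf_event_attr attr;
        int fd, ret = -1;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;        /* handled by perf_swevent_init() */
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_PAGE_FAULTS;
        attr.disabled = 1;

        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
        if (fd < 0)
                return -1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the workload of interest ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, count, sizeof(*count)) == sizeof(*count))
                ret = 0;
        close(fd);
        return ret;
}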
10711 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
10713 perf_trace_destroy(event); in tp_perf_event_destroy()
10716 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
10720 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
10726 if (has_branch_stack(event)) in perf_tp_event_init()
10729 err = perf_trace_init(event); in perf_tp_event_init()
10733 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
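Tracepoint events (PERF_TYPE_TRACEPOINT) take the tracepoint's numeric id as attr.config; the id is exported under the tracefs events directory, and PERF_EVENT_IOC_SET_FILTER (the perf_event_set_filter() path above) accepts the usual ftrace filter syntax. A hedged sketch; the tracefs mount point is an assumption and may be /sys/kernel/debug/tracing on older systems, and opening tracepoints generally needs CAP_PERFMON or root:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_tracepoint(const char *subsys, const char *name)
{
        char path[256];
        unsigned long long id;
        struct perf_event_attr attr;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/tracing/events/%s/%s/id", subsys, name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%llu", &id) != 1) {
                fclose(f);
                return -1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;     /* checked by perf_tp_event_init() */
        attr.size = sizeof(attr);
        attr.config = id;                     /* tracepoint id */
        attr.sample_period = 1;
        attr.sample_type = PERF_SAMPLE_RAW;

        return (int)syscall(SYS_perf_event_open, &attr, 0, -1, -1,
                            PERF_FLAG_FD_CLOEXEC);
}

/* e.g.: fd = open_tracepoint("sched", "sched_switch");
 *       ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "prev_state == 0");   */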
10749 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
10755 if (event->parent) in perf_tp_filter_match()
10756 event = event->parent; in perf_tp_filter_match()
10758 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
10763 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
10767 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
10772 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
10775 if (!perf_tp_filter_match(event, raw)) in perf_tp_event_match()
10793 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
10802 struct perf_event *event) in __perf_tp_event_target_task() argument
10806 if (event->attr.config != entry->type) in __perf_tp_event_target_task()
10809 if (event->attr.sigtrap) in __perf_tp_event_target_task()
10811 if (perf_tp_event_match(event, raw, regs)) { in __perf_tp_event_target_task()
10813 perf_sample_save_raw_data(data, event, raw); in __perf_tp_event_target_task()
10814 perf_swevent_event(event, count, data, regs); in __perf_tp_event_target_task()
10826 struct perf_event *event, *sibling; in perf_tp_event_target_task() local
10828 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) { in perf_tp_event_target_task()
10829 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10830 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10834 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) { in perf_tp_event_target_task()
10835 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10836 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10846 struct perf_event *event; in perf_tp_event() local
10857 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
10858 if (perf_tp_event_match(event, &raw, regs)) { in perf_tp_event()
10861 * some members in data are event-specific and in perf_tp_event()
10864 * the problem that next event skips preparing data in perf_tp_event()
10868 perf_sample_save_raw_data(&data, event, &raw); in perf_tp_event()
10869 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
10875 * deliver this event there too. in perf_tp_event()
10936 static int perf_kprobe_event_init(struct perf_event *event);
10948 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
10953 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
10962 if (has_branch_stack(event)) in perf_kprobe_event_init()
10965 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
10966 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
10970 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
10995 static int perf_uprobe_event_init(struct perf_event *event);
11007 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
11013 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
11022 if (has_branch_stack(event)) in perf_uprobe_event_init()
11025 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
11026 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
11027 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
11031 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
11048 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
11050 ftrace_profile_free_filter(event); in perf_event_free_filter()
11054 * returns true if the event is a tracepoint, or a kprobe/uprobe created
11057 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
11059 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
11062 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
11066 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
11072 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
11077 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
11078 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
11080 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in perf_event_set_bpf_prog()
11081 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in perf_event_set_bpf_prog()
11082 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
11083 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
11102 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
11108 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
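perf_event_set_bpf_prog() is reached from the PERF_EVENT_IOC_SET_BPF ioctl (or the BPF perf link API). A hedged userspace fragment, assuming fd is the tracing event from the sketch above and prog_fd an already-loaded tracing BPF program:

	if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0)
		perror("PERF_EVENT_IOC_SET_BPF");
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);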
11111 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
11113 if (!event->prog) in perf_event_free_bpf_prog()
11116 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
11117 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
11120 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
11129 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
11133 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
11139 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
11161 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
11163 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
11190 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
11196 if (!has_addr_filter(event)) in perf_addr_filters_splice()
11200 if (event->parent) in perf_addr_filters_splice()
11203 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
11205 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
11207 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
11209 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
11214 static void perf_free_addr_filters(struct perf_event *event) in perf_free_addr_filters() argument
11219 if (list_empty(&event->addr_filters.list)) in perf_free_addr_filters()
11222 perf_addr_filters_splice(event, NULL); in perf_free_addr_filters()
11247 * Update event's address range filters based on the
11250 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
11252 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
11253 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
11260 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
11281 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
11282 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
11284 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
11286 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
11287 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
11293 event->addr_filters_gen++; in perf_event_addr_filters_apply()
11303 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
11357 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
11384 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
11443 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
11470 if (!event->ctx->task) in perf_event_parse_addr_filter()
11485 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
11514 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
11523 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
11525 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
11528 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
11532 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
11537 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
11540 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
11548 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
11553 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
11563 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
11564 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
11574 * This can result in event getting moved to a different ctx, in perf_event_set_filter()
11578 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
11582 if (has_addr_filter(event)) in perf_event_set_filter()
11583 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
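Address filters arrive through the same PERF_EVENT_IOC_SET_FILTER ioctl but use the grammar parsed by perf_event_parse_addr_filter(): a comma-separated list of "filter|start|stop <addr>[/<size>][@<object file>]" ranges. An illustrative call (address, size and path are made up), typically issued on an AUX-area event such as an intel_pt one:

	/* trace only a 4 KiB slice of one object's text */
	ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
	      "filter 0x1000/0x1000@/usr/bin/example");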
11598 struct perf_event *event; in perf_swevent_hrtimer() local
11601 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
11603 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
11606 event->pmu->read(event); in perf_swevent_hrtimer()
11608 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
11611 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
11612 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
11613 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
11617 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
11623 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
11625 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
11628 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
11644 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
11646 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
11648 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
11656 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
11658 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
11660 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
11669 if (event->attr.freq) { in perf_swevent_init_hrtimer()
11670 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
11672 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
11673 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
11676 event->attr.freq = 0; in perf_swevent_init_hrtimer()
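A worked instance of the freq-to-period conversion above, with an illustrative 4 kHz request; attr.freq is then cleared so the hrtimer event keeps this fixed period:

	/* attr.freq = 1, attr.sample_freq = 4000  =>  fixed hrtimer period */
	u64 period = NSEC_PER_SEC / 4000;	/* 1000000000 / 4000 = 250000 ns */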
11681 * Software event: cpu wall time clock
11684 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
11690 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
11691 local64_add(now - prev, &event->count); in cpu_clock_event_update()
11694 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
11696 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
11697 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
11700 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
11702 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
11703 cpu_clock_event_update(event); in cpu_clock_event_stop()
11706 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
11709 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
11710 perf_event_update_userpage(event); in cpu_clock_event_add()
11715 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
11717 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
11720 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
11722 cpu_clock_event_update(event); in cpu_clock_event_read()
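cpu_clock_event_update() uses the usual delta-accounting idiom: swap the new timestamp into hw.prev_count and add the difference to the count. A standalone sketch of that pattern with plain integers, deliberately ignoring the lockless local64_t details of the real code:

	#include <stdint.h>

	static uint64_t prev_count, count;

	static void clock_update(uint64_t now)
	{
		uint64_t prev = prev_count;	/* local64_xchg() in the real code */

		prev_count = now;
		count += now - prev;		/* accumulate elapsed nanoseconds */
	}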
11725 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
11727 if (event->attr.type != perf_cpu_clock.type) in cpu_clock_event_init()
11730 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
11736 if (has_branch_stack(event)) in cpu_clock_event_init()
11739 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
11759 * Software event: task time clock
11762 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
11767 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
11769 local64_add(delta, &event->count); in task_clock_event_update()
11772 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
11774 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
11775 perf_swevent_start_hrtimer(event); in task_clock_event_start()
11778 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
11780 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
11781 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
11784 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
11787 task_clock_event_start(event, flags); in task_clock_event_add()
11788 perf_event_update_userpage(event); in task_clock_event_add()
11793 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
11795 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
11798 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
11801 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
11802 u64 time = event->ctx->time + delta; in task_clock_event_read()
11804 task_clock_event_update(event, time); in task_clock_event_read()
11807 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
11809 if (event->attr.type != perf_task_clock.type) in task_clock_event_init()
11812 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
11818 if (has_branch_stack(event)) in task_clock_event_init()
11821 perf_swevent_init_hrtimer(event); in task_clock_event_init()
11853 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
11895 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
12269 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
12271 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
12272 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
12275 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
12286 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
12289 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
12294 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
12299 event->pmu = pmu; in perf_try_init_event()
12300 ret = pmu->event_init(event); in perf_try_init_event()
12303 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
12309 has_extended_regs(event)) { in perf_try_init_event()
12315 event_has_any_exclude_flag(event)) { in perf_try_init_event()
12320 if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) { in perf_try_init_event()
12325 cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu); in perf_try_init_event()
12336 event->event_caps |= PERF_EV_CAP_READ_SCOPE; in perf_try_init_event()
12342 if (event->destroy) { in perf_try_init_event()
12343 event->destroy(event); in perf_try_init_event()
12344 event->destroy = NULL; in perf_try_init_event()
12348 event->pmu = NULL; in perf_try_init_event()
12353 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
12363 * pmus overwrites event->attr.type to forward event to another pmu. in perf_init_event()
12365 event->orig_type = event->attr.type; in perf_init_event()
12368 if (event->parent && event->parent->pmu) { in perf_init_event()
12369 pmu = event->parent->pmu; in perf_init_event()
12370 ret = perf_try_init_event(pmu, event); in perf_init_event()
12379 type = event->attr.type; in perf_init_event()
12381 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
12386 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
12394 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
12398 ret = perf_try_init_event(pmu, event); in perf_init_event()
12399 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
12400 type = event->attr.type; in perf_init_event()
12411 ret = perf_try_init_event(pmu, event); in perf_init_event()
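The extended-type handling above (attr.config >> PERF_PMU_TYPE_SHIFT, masking with PERF_HW_EVENT_MASK) is the kernel side of the extended PERF_TYPE_HARDWARE encoding, where the upper 32 bits of attr.config select a PMU. A userspace fragment, assuming pmu_type was read from /sys/bus/event_source/devices/<pmu>/type and the shift macro comes from the perf uapi header:

	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
		      PERF_COUNT_HW_INSTRUCTIONS;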
12422 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
12424 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
12427 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
12438 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
12440 if (is_sb_event(event)) in account_pmu_sb_event()
12441 attach_sb_event(event); in account_pmu_sb_event()
12465 static void account_event(struct perf_event *event) in account_event() argument
12469 if (event->parent) in account_event()
12472 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
12474 if (event->attr.mmap || event->attr.mmap_data) in account_event()
12476 if (event->attr.build_id) in account_event()
12478 if (event->attr.comm) in account_event()
12480 if (event->attr.namespaces) in account_event()
12482 if (event->attr.cgroup) in account_event()
12484 if (event->attr.task) in account_event()
12486 if (event->attr.freq) in account_event()
12488 if (event->attr.context_switch) { in account_event()
12492 if (has_branch_stack(event)) in account_event()
12494 if (is_cgroup_event(event)) in account_event()
12496 if (event->attr.ksymbol) in account_event()
12498 if (event->attr.bpf_event) in account_event()
12500 if (event->attr.text_poke) in account_event()
12531 account_pmu_sb_event(event); in account_event()
12535 * Allocate and initialize an event structure
12560 struct perf_event *event __free(__free_event) = in perf_event_alloc()
12562 if (!event) in perf_event_alloc()
12570 group_leader = event; in perf_event_alloc()
12572 mutex_init(&event->child_mutex); in perf_event_alloc()
12573 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
12575 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
12576 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
12577 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
12578 init_event_group(event); in perf_event_alloc()
12579 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
12580 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
12581 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
12582 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
12585 init_waitqueue_head(&event->waitq); in perf_event_alloc()
12586 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
12587 event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable); in perf_event_alloc()
12588 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
12590 mutex_init(&event->mmap_mutex); in perf_event_alloc()
12591 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
12593 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
12594 event->cpu = cpu; in perf_event_alloc()
12595 event->attr = *attr; in perf_event_alloc()
12596 event->group_leader = group_leader; in perf_event_alloc()
12597 event->pmu = NULL; in perf_event_alloc()
12598 event->oncpu = -1; in perf_event_alloc()
12600 event->parent = parent_event; in perf_event_alloc()
12602 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
12603 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
12605 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
12608 event->event_caps = parent_event->event_caps; in perf_event_alloc()
12611 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
12617 event->hw.target = get_task_struct(task); in perf_event_alloc()
12620 event->clock = &local_clock; in perf_event_alloc()
12622 event->clock = parent_event->clock; in perf_event_alloc()
12632 event->prog = prog; in perf_event_alloc()
12638 event->overflow_handler = overflow_handler; in perf_event_alloc()
12639 event->overflow_handler_context = context; in perf_event_alloc()
12640 } else if (is_write_backward(event)) { in perf_event_alloc()
12641 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
12642 event->overflow_handler_context = NULL; in perf_event_alloc()
12644 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
12645 event->overflow_handler_context = NULL; in perf_event_alloc()
12648 perf_event__state_init(event); in perf_event_alloc()
12652 hwc = &event->hw; in perf_event_alloc()
12669 if (!has_branch_stack(event)) in perf_event_alloc()
12670 event->attr.branch_sample_type = 0; in perf_event_alloc()
12672 pmu = perf_init_event(event); in perf_event_alloc()
12682 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in perf_event_alloc()
12683 err = attach_perf_ctx_data(event); in perf_event_alloc()
12696 if (event->attr.aux_output && in perf_event_alloc()
12698 event->attr.aux_pause || event->attr.aux_resume)) in perf_event_alloc()
12701 if (event->attr.aux_pause && event->attr.aux_resume) in perf_event_alloc()
12704 if (event->attr.aux_start_paused) { in perf_event_alloc()
12707 event->hw.aux_paused = 1; in perf_event_alloc()
12711 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
12716 err = exclusive_event_init(event); in perf_event_alloc()
12720 if (has_addr_filter(event)) { in perf_event_alloc()
12721 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
12724 if (!event->addr_filter_ranges) in perf_event_alloc()
12731 if (event->parent) { in perf_event_alloc()
12732 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
12735 memcpy(event->addr_filter_ranges, in perf_event_alloc()
12736 event->parent->addr_filter_ranges, in perf_event_alloc()
12742 event->addr_filters_gen = 1; in perf_event_alloc()
12745 if (!event->parent) { in perf_event_alloc()
12746 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
12750 event->attach_state |= PERF_ATTACH_CALLCHAIN; in perf_event_alloc()
12754 err = security_perf_event_alloc(event); in perf_event_alloc()
12759 account_event(event); in perf_event_alloc()
12761 return_ptr(event); in perf_event_alloc()
12900 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
12906 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
12911 if (event == output_event) in perf_event_set_output()
12917 if (output_event->cpu != event->cpu) in perf_event_set_output()
12923 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) in perf_event_set_output()
12929 if (output_event->clock != event->clock) in perf_event_set_output()
12936 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
12942 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
12943 event->pmu != output_event->pmu) in perf_event_set_output()
12949 * restarts after every removal, it is guaranteed this new event is in perf_event_set_output()
12953 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
12956 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
12972 ring_buffer_attach(event, rb); in perf_event_set_output()
12976 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
12984 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
12990 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
12995 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
13000 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
13004 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
13008 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
13015 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
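The clock selection above is driven by two attr fields set before perf_event_open(); an illustrative fragment requesting the raw monotonic clock, which this function maps to ktime_get_raw_fast_ns():

	/* clockid constants come from <time.h> in userspace */
	attr.use_clockid = 1;
	attr.clockid     = CLOCK_MONOTONIC_RAW;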
13053 * sys_perf_event_open - open a performance event, associate it to a task/cpu
13058 * @group_fd: group leader event fd
13059 * @flags: perf event open flags
13067 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
13171 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
13173 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
13174 err = PTR_ERR(event); in SYSCALL_DEFINE5()
13178 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
13179 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
13189 pmu = event->pmu; in SYSCALL_DEFINE5()
13192 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
13198 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
13207 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
13219 ctx = find_get_context(task, event); in SYSCALL_DEFINE5()
13234 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
13239 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in SYSCALL_DEFINE5()
13258 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
13266 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
13281 if (is_software_event(event) && in SYSCALL_DEFINE5()
13284 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
13295 } else if (!is_software_event(event)) { in SYSCALL_DEFINE5()
13300 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
13316 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
13321 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
13324 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
13329 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
13334 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
13341 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
13343 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
13350 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); in SYSCALL_DEFINE5()
13390 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
13402 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
13405 perf_event__header_size(event); in SYSCALL_DEFINE5()
13406 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
13408 event->owner = current; in SYSCALL_DEFINE5()
13410 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
13421 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
13426 * kept alive until we place the new event on the sibling_list. in SYSCALL_DEFINE5()
13434 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
13435 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
13444 free_event(event); in SYSCALL_DEFINE5()
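For completeness, the canonical userspace counterpart of the syscall listed above, close to the perf_event_open(2) man page example: open one hardware counter on the current thread, enable it around a workload, and read the count. Only the choice of event and workload is illustrative.

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 1;
		attr.exclude_kernel = 1;
		attr.exclude_hv = 1;

		/* pid = 0 (this thread), cpu = -1 (any), no group leader */
		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		ioctl(fd, PERF_EVENT_IOC_RESET, 0);
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under measurement ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("instructions: %lld\n", count);
		close(fd);
		return 0;
	}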
13459 * @overflow_handler: callback to trigger when we hit the event
13470 struct perf_event *event; in perf_event_create_kernel_counter() local
13481 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
13483 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
13484 err = PTR_ERR(event); in perf_event_create_kernel_counter()
13489 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
13490 pmu = event->pmu; in perf_event_create_kernel_counter()
13493 event->event_caps |= PERF_EV_CAP_SOFTWARE; in perf_event_create_kernel_counter()
13498 ctx = find_get_context(task, event); in perf_event_create_kernel_counter()
13511 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
13516 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
13520 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
13533 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
13538 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
13542 return event; in perf_event_create_kernel_counter()
13546 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
13552 free_event(event); in perf_event_create_kernel_counter()
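In-kernel users (the hardlockup detector, hw-breakpoint consumers, etc.) reach this path directly rather than through the syscall. A hedged sketch of such a caller; my_overflow_handler and the period are placeholders, and it is meant as kernel-module code, not a buildable standalone file:

	#include <linux/perf_event.h>
	#include <linux/err.h>

	static struct perf_event *ev;

	static void my_overflow_handler(struct perf_event *event,
					struct perf_sample_data *data,
					struct pt_regs *regs)
	{
		/* placeholder: react to the counter overflow */
	}

	static int start_cycles_counter(int cpu)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_HARDWARE,
			.config		= PERF_COUNT_HW_CPU_CYCLES,
			.size		= sizeof(attr),
			.sample_period	= 1000000,	/* illustrative */
			.pinned		= 1,
		};

		ev = perf_event_create_kernel_counter(&attr, cpu, NULL,
						      my_overflow_handler, NULL);
		return IS_ERR(ev) ? PTR_ERR(ev) : 0;
	}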
13563 struct perf_event *event, *sibling; in __perf_pmu_remove() local
13565 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) { in __perf_pmu_remove()
13566 perf_remove_from_context(event, 0); in __perf_pmu_remove()
13567 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
13568 list_add(&event->migrate_entry, events); in __perf_pmu_remove()
13570 for_each_sibling_event(sibling, event) { in __perf_pmu_remove()
13580 int cpu, struct perf_event *event) in __perf_pmu_install_event() argument
13583 struct perf_event_context *old_ctx = event->ctx; in __perf_pmu_install_event()
13587 event->cpu = cpu; in __perf_pmu_install_event()
13588 epc = find_get_pmu_context(pmu, ctx, event); in __perf_pmu_install_event()
13589 event->pmu_ctx = epc; in __perf_pmu_install_event()
13591 if (event->state >= PERF_EVENT_STATE_OFF) in __perf_pmu_install_event()
13592 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_pmu_install_event()
13593 perf_install_in_context(ctx, event, cpu); in __perf_pmu_install_event()
13596 * Now that event->ctx is updated and visible, put the old ctx. in __perf_pmu_install_event()
13604 struct perf_event *event, *tmp; in __perf_pmu_install() local
13614 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13615 if (event->group_leader == event) in __perf_pmu_install()
13618 list_del(&event->migrate_entry); in __perf_pmu_install()
13619 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13626 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13627 list_del(&event->migrate_entry); in __perf_pmu_install()
13628 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13692 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
13694 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
13714 perf_remove_from_context(event, detach_flags | DETACH_EXIT); in perf_event_exit_event()
13725 put_event(event); in perf_event_exit_event()
13732 perf_event_wakeup(event); in perf_event_exit_event()
13797 * When a child task exits, feed back event values to parent events.
13804 struct perf_event *event, *tmp; in perf_event_exit_task() local
13807 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
13809 list_del_init(&event->owner_entry); in perf_event_exit_task()
13816 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
13831 * Detach the perf_ctx_data for the system-wide event. in perf_event_exit_task()
13837 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
13840 struct perf_event *parent = event->parent; in perf_free_event()
13846 list_del_init(&event->child_list); in perf_free_event()
13850 perf_group_detach(event); in perf_free_event()
13851 list_del_event(event, ctx); in perf_free_event()
13853 put_event(event); in perf_free_event()
13866 struct perf_event *event, *tmp; in perf_event_free_task() local
13886 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13887 perf_free_event(event, ctx); in perf_event_free_task()
13900 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
13936 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
13938 if (!event) in perf_event_attrs()
13941 return &event->attr; in perf_event_attrs()
13954 * Inherit an event from parent task to child task.
14016 * Make the child state follow the state of the parent event, in inherit_event()
14054 * Link this into the parent event's child list in inherit_event()
14063 * Inherits an event group.
14107 * Creates the child task context and tries to inherit the event-group.
14110 * inherited_all set when we 'fail' to inherit an orphaned event; this is
14118 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
14126 if (!event->attr.inherit || in inherit_task_group()
14127 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
14129 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
14149 ret = inherit_group(event, parent, parent_ctx, child, child_ctx); in inherit_task_group()
14163 struct perf_event *event; in perf_event_init_context() local
14197 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
14198 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
14213 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
14214 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
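The inheritance checks above are controlled by attr bits set at event creation time; an illustrative fragment for a counter that should follow newly created threads but not forked processes:

	attr.inherit        = 1;	/* count in children too */
	attr.inherit_thread = 1;	/* ...but only CLONE_THREAD children */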
14329 struct perf_event *event; in __perf_event_exit_context() local
14333 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
14334 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()