Lines matching: "dont", "-", "validate"

5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
12 * For licencing details see kernel-base/COPYING
120 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_update()
121 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
125 if (unlikely(!hwc->event_base)) in x86_perf_event_update()
132 * exchange a new raw count - then add that new-prev delta in x86_perf_event_update()
135 prev_raw_count = local64_read(&hwc->prev_count); in x86_perf_event_update()
137 rdpmcl(hwc->event_base_rdpmc, new_raw_count); in x86_perf_event_update()
138 } while (!local64_try_cmpxchg(&hwc->prev_count, in x86_perf_event_update()
144 * (event-)time and add that to the generic event. in x86_perf_event_update()
146 * Careful, not all hw sign-extends above the physical width in x86_perf_event_update()
149 delta = (new_raw_count << shift) - (prev_raw_count << shift); in x86_perf_event_update()
152 local64_add(delta, &event->count); in x86_perf_event_update()
153 local64_sub(delta, &hwc->period_left); in x86_perf_event_update()
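
The update path above widens the raw hardware value (cntval_bits wide, typically 48 bits) by shifting it into the top of a signed 64-bit word; subtracting the shifted previous value and shifting back down sign-extends the result, so a counter that wrapped still yields the correct positive delta, while the local64_try_cmpxchg() loop guards against an NMI updating prev_count concurrently. A minimal user-space sketch of just that arithmetic (not kernel code; counter_delta is a made-up helper name, and a 48-bit counter is assumed):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shift trick in x86_perf_event_update(): widen both raw
 * values to the top of a signed 64-bit word, subtract, shift back. */
static uint64_t counter_delta(uint64_t prev_raw, uint64_t cur_raw, int cntval_bits)
{
	int shift = 64 - cntval_bits;
	int64_t delta;

	delta = (int64_t)(cur_raw << shift) - (int64_t)(prev_raw << shift);
	delta >>= shift;	/* arithmetic shift: sign-extends past the counter width */

	return (uint64_t)delta;
}

int main(void)
{
	/* a 48-bit counter that wrapped from near its top back to a small value */
	uint64_t prev = 0xFFFFFFFFFFF0ULL;	/* 2^48 - 16 */
	uint64_t cur  = 0x000000000010ULL;	/* 16 after wrapping */

	/* prints 32, the number of events that actually occurred */
	printf("delta = %llu\n", (unsigned long long)counter_delta(prev, cur, 48));
	return 0;
}
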
159 * Find and validate any extra registers to set up.
163 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); in x86_pmu_extra_regs()
167 reg = &event->hw.extra_reg; in x86_pmu_extra_regs()
172 for (er = extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
173 if (er->event != (config & er->config_mask)) in x86_pmu_extra_regs()
175 if (event->attr.config1 & ~er->valid_mask) in x86_pmu_extra_regs()
176 return -EINVAL; in x86_pmu_extra_regs()
178 if (!er->extra_msr_access) in x86_pmu_extra_regs()
179 return -ENXIO; in x86_pmu_extra_regs()
181 reg->idx = er->idx; in x86_pmu_extra_regs()
182 reg->config = event->attr.config1; in x86_pmu_extra_regs()
183 reg->reg = er->msr; in x86_pmu_extra_regs()
261 u64 val, val_fail = -1, val_new= ~0; in check_hw_exists()
262 int i, reg, reg_fail = -1, ret = 0; in check_hw_exists()
264 int reg_safe = -1; in check_hw_exists()
306 if (reg_safe == -1) { in check_hw_exists()
330 pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", in check_hw_exists()
370 struct perf_event_attr *attr = &event->attr; in set_ext_hw_attr()
374 config = attr->config; in set_ext_hw_attr()
378 return -EINVAL; in set_ext_hw_attr()
383 return -EINVAL; in set_ext_hw_attr()
388 return -EINVAL; in set_ext_hw_attr()
391 val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result]; in set_ext_hw_attr()
393 return -ENOENT; in set_ext_hw_attr()
395 if (val == -1) in set_ext_hw_attr()
396 return -EINVAL; in set_ext_hw_attr()
398 hwc->config |= val; in set_ext_hw_attr()
399 attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result]; in set_ext_hw_attr()
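
set_ext_hw_attr() indexes the hw_cache_event_ids[type][op][result] table with three fields packed into attr->config; judging from the -EINVAL checks at lines 378/383/388, the lines elided from the listing bounds-check each field. The byte layout follows the perf UAPI: cache id in byte 0, operation in byte 1, result in byte 2; a table entry of 0 means "no such event" and -1 means "unsupported combination". A small sketch of the decode step (decode_hw_cache_config is an illustrative helper, not a kernel function; the *_MAX constants come from the UAPI header):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a PERF_TYPE_HW_CACHE config into the three table indices.
 * Returns 0 on success, -1 if any field is out of range. */
static int decode_hw_cache_config(uint64_t config, unsigned int *type,
				  unsigned int *op, unsigned int *result)
{
	*type   = (config >>  0) & 0xff;
	*op     = (config >>  8) & 0xff;
	*result = (config >> 16) & 0xff;

	if (*type   >= PERF_COUNT_HW_CACHE_MAX ||
	    *op     >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    *result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -1;

	return 0;
}

int main(void)
{
	/* L1D read misses: type 0, op 0, result 1 */
	uint64_t config = PERF_COUNT_HW_CACHE_L1D |
			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	unsigned int type, op, result;

	if (!decode_hw_cache_config(config, &type, &op, &result))
		printf("type=%u op=%u result=%u\n", type, op, result);	/* 0 0 1 */
	return 0;
}
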
411 err = -EBUSY; in x86_reserve_hardware()
466 return -EBUSY; in x86_add_exclusive()
484 struct perf_event_attr *attr = &event->attr; in x86_setup_perfctr()
485 struct hw_perf_event *hwc = &event->hw; in x86_setup_perfctr()
489 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
490 hwc->last_period = hwc->sample_period; in x86_setup_perfctr()
491 local64_set(&hwc->period_left, hwc->sample_period); in x86_setup_perfctr()
494 if (attr->type == event->pmu->type) in x86_setup_perfctr()
495 return x86_pmu_extra_regs(event->attr.config, event); in x86_setup_perfctr()
497 if (attr->type == PERF_TYPE_HW_CACHE) in x86_setup_perfctr()
500 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
501 return -EINVAL; in x86_setup_perfctr()
503 attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); in x86_setup_perfctr()
508 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
511 return -ENOENT; in x86_setup_perfctr()
513 if (config == -1LL) in x86_setup_perfctr()
514 return -EINVAL; in x86_setup_perfctr()
516 hwc->config |= config; in x86_setup_perfctr()
529 u64 m = event->attr.branch_sample_type; in precise_br_compat()
538 if (!event->attr.exclude_user) in precise_br_compat()
541 if (!event->attr.exclude_kernel) in precise_br_compat()
571 if (event->attr.precise_ip) { in x86_pmu_hw_config()
574 if (event->attr.precise_ip > precise) in x86_pmu_hw_config()
575 return -EOPNOTSUPP; in x86_pmu_hw_config()
579 return -EINVAL; in x86_pmu_hw_config()
583 * whatever the user is asking with attr->branch_sample_type in x86_pmu_hw_config()
585 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
586 u64 *br_type = &event->attr.branch_sample_type; in x86_pmu_hw_config()
590 return -EOPNOTSUPP; in x86_pmu_hw_config()
604 if (!event->attr.exclude_user) in x86_pmu_hw_config()
607 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
613 event->attach_state |= PERF_ATTACH_TASK_DATA; in x86_pmu_hw_config()
619 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; in x86_pmu_hw_config()
624 if (!event->attr.exclude_user) in x86_pmu_hw_config()
625 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; in x86_pmu_hw_config()
626 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
627 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; in x86_pmu_hw_config()
629 if (event->attr.type == event->pmu->type) in x86_pmu_hw_config()
630 event->hw.config |= x86_pmu_get_event_config(event); in x86_pmu_hw_config()
632 if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) { in x86_pmu_hw_config()
633 s64 left = event->attr.sample_period; in x86_pmu_hw_config()
635 if (left > event->attr.sample_period) in x86_pmu_hw_config()
636 return -EINVAL; in x86_pmu_hw_config()
640 if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK)) in x86_pmu_hw_config()
641 return -EINVAL; in x86_pmu_hw_config()
646 if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) { in x86_pmu_hw_config()
647 if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) in x86_pmu_hw_config()
648 return -EINVAL; in x86_pmu_hw_config()
650 if (!event->attr.precise_ip) in x86_pmu_hw_config()
651 return -EINVAL; in x86_pmu_hw_config()
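
x86_pmu_hw_config() translates the generic exclude_user/exclude_kernel attributes into the ARCH_PERFMON_EVENTSEL_USR/OS bits of the event selector, and the whole init path (x86_setup_perfctr(), x86_pmu_hw_config()) is driven from user space by the perf_event_open() syscall. A minimal consumer sketch, assuming nothing beyond the standard syscall interface (error handling trimmed, workload omitted):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;		/* mapped through x86_pmu.event_map() */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;		/* x86_pmu_hw_config(): only EVENTSEL_USR gets set */

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) { perror("perf_event_open"); return 1; }

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("user-mode instructions: %lld\n", count);
	close(fd);
	return 0;
}
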
665 return -ENODEV; in __x86_pmu_event_init()
672 event->destroy = hw_perf_event_destroy; in __x86_pmu_event_init()
674 event->hw.idx = -1; in __x86_pmu_event_init()
675 event->hw.last_cpu = -1; in __x86_pmu_event_init()
676 event->hw.last_tag = ~0ULL; in __x86_pmu_event_init()
679 event->hw.extra_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
680 event->hw.branch_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
691 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_disable_all()
694 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_disable_all()
717 * It will not be re-enabled in the NMI handler again, because enabled=0. After
732 if (!cpuc->enabled) in x86_pmu_disable()
735 cpuc->n_added = 0; in x86_pmu_disable()
736 cpuc->enabled = 0; in x86_pmu_disable()
748 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_enable_all()
750 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_enable_all()
762 return event->pmu == &pmu; in is_x86_event()
765 if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu) in is_x86_event()
780 if (WARN_ON_ONCE(!cpuc->pmu)) in x86_get_pmu()
783 return cpuc->pmu; in x86_get_pmu()
823 sched->max_events = num; in perf_sched_init()
824 sched->max_weight = wmax; in perf_sched_init()
825 sched->max_gp = gpmax; in perf_sched_init()
826 sched->constraints = constraints; in perf_sched_init()
829 if (constraints[idx]->weight == wmin) in perf_sched_init()
833 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
834 sched->state.weight = wmin; in perf_sched_init()
835 sched->state.unassigned = num; in perf_sched_init()
840 if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX)) in perf_sched_save_state()
843 sched->saved[sched->saved_states] = sched->state; in perf_sched_save_state()
844 sched->saved_states++; in perf_sched_save_state()
849 if (!sched->saved_states) in perf_sched_restore_state()
852 sched->saved_states--; in perf_sched_restore_state()
853 sched->state = sched->saved[sched->saved_states]; in perf_sched_restore_state()
857 sched->state.used &= ~BIT_ULL(sched->state.counter); in perf_sched_restore_state()
860 sched->state.counter++; in perf_sched_restore_state()
874 if (!sched->state.unassigned) in __perf_sched_find_counter()
877 if (sched->state.event >= sched->max_events) in __perf_sched_find_counter()
880 c = sched->constraints[sched->state.event]; in __perf_sched_find_counter()
882 if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { in __perf_sched_find_counter()
884 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { in __perf_sched_find_counter()
887 if (sched->state.used & mask) in __perf_sched_find_counter()
890 sched->state.used |= mask; in __perf_sched_find_counter()
896 idx = sched->state.counter; in __perf_sched_find_counter()
897 for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { in __perf_sched_find_counter()
900 if (c->flags & PERF_X86_EVENT_PAIR) in __perf_sched_find_counter()
903 if (sched->state.used & mask) in __perf_sched_find_counter()
906 if (sched->state.nr_gp++ >= sched->max_gp) in __perf_sched_find_counter()
909 sched->state.used |= mask; in __perf_sched_find_counter()
916 sched->state.counter = idx; in __perf_sched_find_counter()
918 if (c->overlap) in __perf_sched_find_counter()
942 if (!sched->state.unassigned || !--sched->state.unassigned) in perf_sched_next_event()
947 sched->state.event++; in perf_sched_next_event()
948 if (sched->state.event >= sched->max_events) { in perf_sched_next_event()
950 sched->state.event = 0; in perf_sched_next_event()
951 sched->state.weight++; in perf_sched_next_event()
952 if (sched->state.weight > sched->max_weight) in perf_sched_next_event()
955 c = sched->constraints[sched->state.event]; in perf_sched_next_event()
956 } while (c->weight != sched->state.weight); in perf_sched_next_event()
958 sched->state.counter = 0; /* start with first counter */ in perf_sched_next_event()
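
perf_sched_init() starts at the event with the minimum constraint weight, and __perf_sched_find_counter()/perf_sched_next_event() then walk events in order of increasing weight, grabbing the first free counter allowed by each constraint's idxmsk and backtracking through perf_sched_save_state()/perf_sched_restore_state() when a choice dead-ends. A toy user-space sketch of the core greedy idea only (no backtracking, no fixed or paired counters; where the real code would backtrack, this just fails; names and masks are made up; __builtin_popcountll/__builtin_ctzll are GCC/Clang builtins):

#include <stdint.h>
#include <stdio.h>

#define NCTRS 4

/* Place the most constrained events first; pick the lowest free counter
 * permitted by each event's counter mask. Returns -1 if unschedulable. */
static int assign(const uint64_t *idxmsk, int nevents, int *assignment)
{
	uint64_t used = 0;

	for (int weight = 1; weight <= NCTRS; weight++) {
		for (int e = 0; e < nevents; e++) {
			if (__builtin_popcountll(idxmsk[e]) != weight)
				continue;
			uint64_t free = idxmsk[e] & ~used;
			if (!free)
				return -1;
			int ctr = __builtin_ctzll(free);
			assignment[e] = ctr;
			used |= 1ULL << ctr;
		}
	}
	return 0;
}

int main(void)
{
	/* event 0 may only use counter 2; event 1 may use any of the four */
	uint64_t masks[2] = { 0x4, 0xF };
	int assignment[2];

	if (!assign(masks, 2, assignment))
		printf("event0 -> %d, event1 -> %d\n", assignment[0], assignment[1]);
	return 0;
}
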
995 * cpuc->n_events hasn't been updated yet, while for the latter in x86_schedule_events()
996 * cpuc->n_txn contains the number of events added in the current in x86_schedule_events()
999 n0 = cpuc->n_events; in x86_schedule_events()
1000 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_schedule_events()
1001 n0 -= cpuc->n_txn; in x86_schedule_events()
1006 c = cpuc->event_constraint[i]; in x86_schedule_events()
1016 * have a dynamic constraint -- for those the constraint can in x86_schedule_events()
1019 if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) { in x86_schedule_events()
1020 c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
1021 cpuc->event_constraint[i] = c; in x86_schedule_events()
1024 wmin = min(wmin, c->weight); in x86_schedule_events()
1025 wmax = max(wmax, c->weight); in x86_schedule_events()
1034 hwc = &cpuc->event_list[i]->hw; in x86_schedule_events()
1035 c = cpuc->event_constraint[i]; in x86_schedule_events()
1038 if (hwc->idx == -1) in x86_schedule_events()
1042 if (!test_bit(hwc->idx, c->idxmsk)) in x86_schedule_events()
1045 mask = BIT_ULL(hwc->idx); in x86_schedule_events()
1056 assign[i] = hwc->idx; in x86_schedule_events()
1061 int gpmax = x86_pmu_max_num_counters(cpuc->pmu); in x86_schedule_events()
1073 if (is_ht_workaround_enabled() && !cpuc->is_fake && in x86_schedule_events()
1074 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
1082 gpmax -= cpuc->n_pair; in x86_schedule_events()
1086 unsched = perf_assign_events(cpuc->event_constraint, n, wmin, in x86_schedule_events()
1098 * validate an event group (assign == NULL) in x86_schedule_events()
1105 e = cpuc->event_list[i]; in x86_schedule_events()
1112 cpuc->event_constraint[i] = NULL; in x86_schedule_events()
1118 return unsched ? -EINVAL : 0; in x86_schedule_events()
1125 if (cpuc->n_metric == INTEL_TD_METRIC_NUM) in add_nr_metric_event()
1126 return -EINVAL; in add_nr_metric_event()
1127 cpuc->n_metric++; in add_nr_metric_event()
1128 cpuc->n_txn_metric++; in add_nr_metric_event()
1138 cpuc->n_metric--; in del_nr_metric_event()
1144 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in collect_event()
1147 return -EINVAL; in collect_event()
1149 if (n >= max_count + cpuc->n_metric) in collect_event()
1150 return -EINVAL; in collect_event()
1152 cpuc->event_list[n] = event; in collect_event()
1153 if (is_counter_pair(&event->hw)) { in collect_event()
1154 cpuc->n_pair++; in collect_event()
1155 cpuc->n_txn_pair++; in collect_event()
1170 max_count = x86_pmu_num_counters(cpuc->pmu) + x86_pmu_num_counters_fixed(cpuc->pmu); in collect_events()
1173 n = cpuc->n_events; in collect_events()
1174 if (!cpuc->n_events) in collect_events()
1175 cpuc->pebs_output = 0; in collect_events()
1177 if (!cpuc->is_fake && leader->attr.precise_ip) { in collect_events()
1179 * For PEBS->PT, if !aux_event, the group leader (PT) went in collect_events()
1183 if (is_pebs_pt(leader) && !leader->aux_event) in collect_events()
1184 return -EINVAL; in collect_events()
1189 if (cpuc->pebs_output && in collect_events()
1190 cpuc->pebs_output != is_pebs_pt(leader) + 1) in collect_events()
1191 return -EINVAL; in collect_events()
1193 cpuc->pebs_output = is_pebs_pt(leader) + 1; in collect_events()
1198 return -EINVAL; in collect_events()
1206 if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) in collect_events()
1210 return -EINVAL; in collect_events()
1220 struct hw_perf_event *hwc = &event->hw; in x86_assign_hw_event()
1223 idx = hwc->idx = cpuc->assign[i]; in x86_assign_hw_event()
1224 hwc->last_cpu = smp_processor_id(); in x86_assign_hw_event()
1225 hwc->last_tag = ++cpuc->tags[i]; in x86_assign_hw_event()
1229 switch (hwc->idx) { in x86_assign_hw_event()
1232 hwc->config_base = 0; in x86_assign_hw_event()
1233 hwc->event_base = 0; in x86_assign_hw_event()
1240 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1: in x86_assign_hw_event()
1241 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; in x86_assign_hw_event()
1242 hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED); in x86_assign_hw_event()
1243 hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | in x86_assign_hw_event()
1248 hwc->config_base = x86_pmu_config_addr(hwc->idx); in x86_assign_hw_event()
1249 hwc->event_base = x86_pmu_event_addr(hwc->idx); in x86_assign_hw_event()
1250 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); in x86_assign_hw_event()
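
x86_assign_hw_event() also caches the RDPMC selector for the counter: for general-purpose counters the rdpmc index normally equals the counter index (x86_pmu_rdpmc_index(), which some PMUs override), while the continuation of line 1243 (not included in the listing) ORs the fixed-counter RDPMC base into the fixed-counter case, which in current kernels is bit 30. A short user-space sketch of the corresponding read; rdpmc_read is an illustrative helper, RDPMC only works in ring 3 when CR4.PCE is set (x86_pmu_event_mapped() further down arranges that per-mm), and the fuller example after arch_perf_update_userpage() below shows how to obtain the right index safely:

#include <stdint.h>

/* Read a PMC directly from user space. ecx selects the counter:
 *   rdpmc_read(n)              -> general-purpose counter n
 *   rdpmc_read(n | (1u << 30)) -> fixed counter n
 * Faults (#GP, delivered as SIGSEGV) unless CR4.PCE allows user RDPMC. */
static inline uint64_t rdpmc_read(uint32_t ecx)
{
	uint32_t lo, hi;

	__asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(ecx));
	return ((uint64_t)hi << 32) | lo;
}
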
1256 * x86_perf_rdpmc_index - Return PMC counter used for event
1273 return event->hw.event_base_rdpmc; in x86_perf_rdpmc_index()
1280 return hwc->idx == cpuc->assign[i] && in match_prev_assignment()
1281 hwc->last_cpu == smp_processor_id() && in match_prev_assignment()
1282 hwc->last_tag == cpuc->tags[i]; in match_prev_assignment()
1292 int i, added = cpuc->n_added; in x86_pmu_enable()
1297 if (cpuc->enabled) in x86_pmu_enable()
1300 if (cpuc->n_added) { in x86_pmu_enable()
1301 int n_running = cpuc->n_events - cpuc->n_added; in x86_pmu_enable()
1318 event = cpuc->event_list[i]; in x86_pmu_enable()
1319 hwc = &event->hw; in x86_pmu_enable()
1323 * - assigned same counter as last time in x86_pmu_enable()
1324 * - running on same CPU as last time in x86_pmu_enable()
1325 * - no other event has used the counter since in x86_pmu_enable()
1327 if (hwc->idx == -1 || in x86_pmu_enable()
1335 if (hwc->state & PERF_HES_STOPPED) in x86_pmu_enable()
1336 hwc->state |= PERF_HES_ARCH; in x86_pmu_enable()
1344 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_enable()
1345 event = cpuc->event_list[i]; in x86_pmu_enable()
1346 hwc = &event->hw; in x86_pmu_enable()
1353 if (hwc->state & PERF_HES_ARCH) in x86_pmu_enable()
1357 * if cpuc->enabled = 0, then no wrmsr as in x86_pmu_enable()
1362 cpuc->n_added = 0; in x86_pmu_enable()
1366 cpuc->enabled = 1; in x86_pmu_enable()
1375 * Set the next IRQ period, based on the hwc->period_left value.
1380 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_set_period()
1381 s64 left = local64_read(&hwc->period_left); in x86_perf_event_set_period()
1382 s64 period = hwc->sample_period; in x86_perf_event_set_period()
1383 int ret = 0, idx = hwc->idx; in x86_perf_event_set_period()
1385 if (unlikely(!hwc->event_base)) in x86_perf_event_set_period()
1391 if (unlikely(left <= -period)) { in x86_perf_event_set_period()
1393 local64_set(&hwc->period_left, left); in x86_perf_event_set_period()
1394 hwc->last_period = period; in x86_perf_event_set_period()
1400 local64_set(&hwc->period_left, left); in x86_perf_event_set_period()
1401 hwc->last_period = period; in x86_perf_event_set_period()
1405 * Quirk: certain CPUs dont like it if just 1 hw_event is left: in x86_perf_event_set_period()
1421 local64_set(&hwc->prev_count, (u64)-left); in x86_perf_event_set_period()
1423 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1427 * we currently declare a 48-bit counter width in x86_perf_event_set_period()
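
x86_perf_event_set_period() programs the counter with the two's complement of the remaining period, masked to the counter width, so the hardware counts up and overflows (raising a PMI) after exactly 'left' more events; prev_count is set to the same value so the next x86_perf_event_update() computes a consistent delta. A user-space sketch of just that arithmetic, assuming a 48-bit counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cntval_mask = (1ULL << 48) - 1;	/* 48-bit counter assumed */
	int64_t left = 100000;				/* events until the next PMI */

	/* same masking as the wrmsrl() at line 1423 above */
	uint64_t programmed = (uint64_t)(-left) & cntval_mask;

	/* counting up from 'programmed', the counter wraps after 'left' events */
	printf("programmed = %#llx, overflows after %lld events\n",
	       (unsigned long long)programmed,
	       (long long)(cntval_mask + 1 - programmed));
	return 0;
}
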
1440 __x86_pmu_enable_event(&event->hw, in x86_pmu_enable_event()
1457 hwc = &event->hw; in x86_pmu_add()
1459 n0 = cpuc->n_events; in x86_pmu_add()
1464 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in x86_pmu_add()
1466 hwc->state |= PERF_HES_ARCH; in x86_pmu_add()
1471 * at commit time (->commit_txn) as a whole. in x86_pmu_add()
1473 * If commit fails, we'll call ->del() on all events in x86_pmu_add()
1474 * for which ->add() was called. in x86_pmu_add()
1476 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_add()
1486 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_add()
1493 cpuc->n_events = n; in x86_pmu_add()
1494 cpuc->n_added += n - n0; in x86_pmu_add()
1495 cpuc->n_txn += n - n0; in x86_pmu_add()
1511 int idx = event->hw.idx; in x86_pmu_start()
1513 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in x86_pmu_start()
1516 if (WARN_ON_ONCE(idx == -1)) in x86_pmu_start()
1520 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in x86_pmu_start()
1524 event->hw.state = 0; in x86_pmu_start()
1526 cpuc->events[idx] = event; in x86_pmu_start()
1527 __set_bit(idx, cpuc->active_mask); in x86_pmu_start()
1545 cntr_mask = hybrid(cpuc->pmu, cntr_mask); in perf_event_print_debug()
1546 fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); in perf_event_print_debug()
1547 pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); in perf_event_print_debug()
1572 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); in perf_event_print_debug()
1580 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", in perf_event_print_debug()
1582 pr_info("CPU#%d: gen-PMC%d count: %016llx\n", in perf_event_print_debug()
1584 pr_info("CPU#%d: gen-PMC%d left: %016llx\n", in perf_event_print_debug()
1588 if (fixed_counter_disabled(idx, cpuc->pmu)) in perf_event_print_debug()
1592 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", in perf_event_print_debug()
1600 struct hw_perf_event *hwc = &event->hw; in x86_pmu_stop()
1602 if (test_bit(hwc->idx, cpuc->active_mask)) { in x86_pmu_stop()
1604 __clear_bit(hwc->idx, cpuc->active_mask); in x86_pmu_stop()
1605 cpuc->events[hwc->idx] = NULL; in x86_pmu_stop()
1606 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); in x86_pmu_stop()
1607 hwc->state |= PERF_HES_STOPPED; in x86_pmu_stop()
1610 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { in x86_pmu_stop()
1616 hwc->state |= PERF_HES_UPTODATE; in x86_pmu_stop()
1623 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in x86_pmu_del()
1628 * The events never got scheduled and ->cancel_txn will truncate in x86_pmu_del()
1631 * XXX assumes any ->del() called during a TXN will only be on in x86_pmu_del()
1634 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_del()
1637 __set_bit(event->hw.idx, cpuc->dirty); in x86_pmu_del()
1644 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_del()
1645 if (event == cpuc->event_list[i]) in x86_pmu_del()
1649 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */ in x86_pmu_del()
1653 if (i >= cpuc->n_events - cpuc->n_added) in x86_pmu_del()
1654 --cpuc->n_added; in x86_pmu_del()
1659 while (++i < cpuc->n_events) { in x86_pmu_del()
1660 cpuc->event_list[i-1] = cpuc->event_list[i]; in x86_pmu_del()
1661 cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; in x86_pmu_del()
1662 cpuc->assign[i-1] = cpuc->assign[i]; in x86_pmu_del()
1664 cpuc->event_constraint[i-1] = NULL; in x86_pmu_del()
1665 --cpuc->n_events; in x86_pmu_del()
1701 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_handle_irq()
1704 event = cpuc->events[idx]; in x86_pmu_handle_irq()
1707 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
1718 perf_sample_data_init(&data, 0, event->hw.last_period); in x86_pmu_handle_irq()
1720 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); in x86_pmu_handle_irq()
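
The handler only treats a counter as having overflowed when the freshly read value has lost its top implemented bit: counters are programmed with a large "negative" value (see x86_perf_event_set_period() above), so bit cntval_bits-1 stays set right up until the wrap. A one-function sketch of that predicate (counter_overflowed is an illustrative name, not a kernel function):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the check at line 1707: if the top implemented bit is still
 * set, the counter has not wrapped and this NMI was not for it. */
bool counter_overflowed(uint64_t raw_val, int cntval_bits)
{
	return !(raw_val & (1ULL << (cntval_bits - 1)));
}
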
1761 perf_sample_event_took(finish_clock - start_clock); in perf_event_nmi_handler()
1776 cpuc->kfree_on_online[i] = NULL; in x86_pmu_prepare_cpu()
1795 kfree(cpuc->kfree_on_online[i]); in x86_pmu_online_cpu()
1796 cpuc->kfree_on_online[i] = NULL; in x86_pmu_online_cpu()
1821 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); in pmu_check_apic()
1827 * events (user-space has to fall back and in pmu_check_apic()
1845 if (pmu_attr->id < x86_pmu.max_events) in events_sysfs_show()
1846 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1849 if (pmu_attr->event_str) in events_sysfs_show()
1850 return sprintf(page, "%s\n", pmu_attr->event_str); in events_sysfs_show()
1863 * Report conditional events depending on Hyper-Threading. in events_ht_sysfs_show()
1871 * has to re-read when a thread sibling gets onlined later. in events_ht_sysfs_show()
1875 pmu_attr->event_str_ht : in events_ht_sysfs_show()
1876 pmu_attr->event_str_noht); in events_ht_sysfs_show()
1889 if (hweight64(pmu_attr->pmu_type) == 1) in events_hybrid_sysfs_show()
1890 return sprintf(page, "%s", pmu_attr->event_str); in events_hybrid_sysfs_show()
1894 * event encoding, e.g., the mem-loads event on an Atom PMU has in events_hybrid_sysfs_show()
1903 str = pmu_attr->event_str; in events_hybrid_sysfs_show()
1905 if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type)) in events_hybrid_sysfs_show()
1907 if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) { in events_hybrid_sysfs_show()
1910 return snprintf(page, next_str - str + 1, "%s", str); in events_hybrid_sysfs_show()
1922 EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1924 EVENT_ATTR(cache-references, CACHE_REFERENCES );
1925 EVENT_ATTR(cache-misses, CACHE_MISSES );
1926 EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1927 EVENT_ATTR(branch-misses, BRANCH_MISSES );
1928 EVENT_ATTR(bus-cycles, BUS_CYCLES );
1929 EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1930 EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1931 EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1963 return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; in is_visible()
2063 pr_info("... fixed-purpose events: %d\n", x86_pmu_num_counters_fixed(pmu)); in x86_pmu_show_pmu_cap()
2090 err = -ENOTSUPP; in init_hw_perf_events()
2108 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
2109 quirk->func(); in init_hw_perf_events()
2179 hybrid_pmu->pmu = pmu; in init_hw_perf_events()
2180 hybrid_pmu->pmu.type = -1; in init_hw_perf_events()
2181 hybrid_pmu->pmu.attr_update = x86_pmu.attr_update; in init_hw_perf_events()
2182 hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE; in init_hw_perf_events()
2184 err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, in init_hw_perf_events()
2185 (hybrid_pmu->pmu_type == hybrid_big) ? PERF_TYPE_RAW : -1); in init_hw_perf_events()
2226 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
2233 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */ in x86_pmu_start_txn()
2235 cpuc->txn_flags = txn_flags; in x86_pmu_start_txn()
2255 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_cancel_txn()
2257 txn_flags = cpuc->txn_flags; in x86_pmu_cancel_txn()
2258 cpuc->txn_flags = 0; in x86_pmu_cancel_txn()
2286 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_commit_txn()
2288 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) { in x86_pmu_commit_txn()
2289 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
2293 n = cpuc->n_events; in x86_pmu_commit_txn()
2296 return -EAGAIN; in x86_pmu_commit_txn()
2306 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_commit_txn()
2308 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
2313 * a fake_cpuc is used to validate event groups. Due to
2333 return ERR_PTR(-ENOMEM); in allocate_fake_cpuc()
2334 cpuc->is_fake = 1; in allocate_fake_cpuc()
2340 if (cpumask_empty(&h_pmu->supported_cpus)) in allocate_fake_cpuc()
2342 cpu = cpumask_first(&h_pmu->supported_cpus); in allocate_fake_cpuc()
2345 cpuc->pmu = event_pmu; in allocate_fake_cpuc()
2353 return ERR_PTR(-ENOMEM); in allocate_fake_cpuc()
2357 * validate that we can schedule this event
2365 fake_cpuc = allocate_fake_cpuc(event->pmu); in validate_event()
2371 if (!c || !c->weight) in validate_event()
2372 ret = -EINVAL; in validate_event()
2383 * validate a single event group
2386 * - check events are compatible which each other
2387 * - events do not compete for the same counter
2388 * - number of events <= number of counters
2395 struct perf_event *leader = event->group_leader; in validate_group()
2397 int ret = -EINVAL, n; in validate_group()
2407 pmu = leader->pmu; in validate_group()
2413 pmu = sibling->pmu; in validate_group()
2414 else if (pmu != sibling->pmu) in validate_group()
2419 fake_cpuc = allocate_fake_cpuc(event->pmu); in validate_group()
2432 fake_cpuc->n_events = n; in validate_group()
2437 fake_cpuc->n_events = 0; in validate_group()
2450 if ((event->attr.type != event->pmu->type) && in x86_pmu_event_init()
2451 (event->attr.type != PERF_TYPE_HARDWARE) && in x86_pmu_event_init()
2452 (event->attr.type != PERF_TYPE_HW_CACHE)) in x86_pmu_event_init()
2453 return -ENOENT; in x86_pmu_event_init()
2455 if (is_hybrid() && (event->cpu != -1)) { in x86_pmu_event_init()
2456 pmu = hybrid_pmu(event->pmu); in x86_pmu_event_init()
2457 if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus)) in x86_pmu_event_init()
2458 return -ENOENT; in x86_pmu_event_init()
2463 if (event->group_leader != event) in x86_pmu_event_init()
2469 if (event->destroy) in x86_pmu_event_init()
2470 event->destroy(event); in x86_pmu_event_init()
2471 event->destroy = NULL; in x86_pmu_event_init()
2475 !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) in x86_pmu_event_init()
2476 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; in x86_pmu_event_init()
2487 for (i = 0; i < cpuc->n_events; i++) in perf_clear_dirty_counters()
2488 __clear_bit(cpuc->assign[i], cpuc->dirty); in perf_clear_dirty_counters()
2490 if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX)) in perf_clear_dirty_counters()
2493 for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) { in perf_clear_dirty_counters()
2496 if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask))) in perf_clear_dirty_counters()
2499 wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0); in perf_clear_dirty_counters()
2505 bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX); in perf_clear_dirty_counters()
2510 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_mapped()
2525 if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1) in x86_pmu_event_mapped()
2531 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_unmapped()
2534 if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed)) in x86_pmu_event_unmapped()
2540 struct hw_perf_event *hwc = &event->hw; in x86_pmu_event_idx()
2542 if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_idx()
2545 if (is_metric_idx(hwc->idx)) in x86_pmu_event_idx()
2548 return hwc->event_base_rdpmc + 1; in x86_pmu_event_idx()
2571 return -EINVAL; in set_attr_rdpmc()
2574 return -ENOTSUPP; in set_attr_rdpmc()
2581 * aka perf-event-bypassing mode. This path is extremely slow, in set_attr_rdpmc()
2654 return -EINVAL; in x86_pmu_check_period()
2660 return -EINVAL; in x86_pmu_check_period()
2722 userpg->cap_user_time = 0; in arch_perf_update_userpage()
2723 userpg->cap_user_time_zero = 0; in arch_perf_update_userpage()
2724 userpg->cap_user_rdpmc = in arch_perf_update_userpage()
2725 !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT); in arch_perf_update_userpage()
2726 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
2739 userpg->cap_user_time = 1; in arch_perf_update_userpage()
2740 userpg->time_mult = data.cyc2ns_mul; in arch_perf_update_userpage()
2741 userpg->time_shift = data.cyc2ns_shift; in arch_perf_update_userpage()
2742 userpg->time_offset = offset - now; in arch_perf_update_userpage()
2748 if (!event->attr.use_clockid) { in arch_perf_update_userpage()
2749 userpg->cap_user_time_zero = 1; in arch_perf_update_userpage()
2750 userpg->time_zero = offset; in arch_perf_update_userpage()
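
arch_perf_update_userpage() is what makes syscall-free self-monitoring possible: it publishes the RDPMC capability and counter width, with x86_pmu_event_idx() above supplying index = event_base_rdpmc + 1, plus the TSC scaling fields, in the mmap'ed perf_event_mmap_page. Below is a hedged user-space sketch of the read protocol documented in the perf UAPI header and perf_event_open(2): seqlock on pc->lock, RDPMC on index - 1, sign-extend to pmc_width, add pc->offset. It assumes the default rdpmc sysfs setting, minimal error handling, and an arbitrary event choice; rdpmc_read is a made-up helper:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdpmc_read(uint32_t ecx)
{
	uint32_t lo, hi;

	__asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(ecx));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	uint64_t count, pmc;
	uint32_t seq, idx, width;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) { perror("perf_event_open"); return 1; }

	/* first (metadata) page only; filled in by arch_perf_update_userpage() */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED) { perror("mmap"); return 1; }

	do {
		seq = pc->lock;
		__sync_synchronize();			/* pairs with the kernel-side update */
		idx = pc->index;			/* x86_pmu_event_idx(): rdpmc index + 1 */
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx) {
			width = pc->pmc_width;		/* x86_pmu.cntval_bits */
			pmc = rdpmc_read(idx - 1);
			pmc <<= 64 - width;		/* sign-extend to the counter width */
			pmc = (uint64_t)((int64_t)pmc >> (64 - width));
			count += pmc;
		}
		__sync_synchronize();
	} while (pc->lock != seq);

	printf("cycles so far: %llu\n", (unsigned long long)count);
	return 0;
}
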
2762 return regs->flags & X86_EFLAGS_FIXED; in perf_hw_regs()
2776 if (perf_callchain_store(entry, regs->ip)) in perf_callchain_kernel()
2782 unwind_start(&state, current, NULL, (void *)regs->sp); in perf_callchain_kernel()
2807 ldt = READ_ONCE(current->active_mm->context.ldt); in get_segment_base()
2808 if (!ldt || idx >= ldt->nr_entries) in get_segment_base()
2811 desc = &ldt->entries[idx]; in get_segment_base()
2827 * Heuristic-based check if uprobe is installed at the function entry.
2832 * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
2840 if (!current->utask) in is_uprobe_at_func_entry()
2843 auprobe = current->utask->auprobe; in is_uprobe_at_func_entry()
2848 if (auprobe->insn[0] == 0x55) in is_uprobe_at_func_entry()
2851 /* endbr64 (64-bit only) */ in is_uprobe_at_func_entry()
2852 if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn)) in is_uprobe_at_func_entry()
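
The heuristic above only needs the leading bytes of the probed instruction: push %rbp is the single byte 0x55 and endbr64 is the four-byte sequence f3 0f 1e fa. A stand-alone sketch of that byte-pattern check (looks_like_func_entry is an illustrative name; the kernel's is_endbr() works on the instruction read into auprobe->insn):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

bool looks_like_func_entry(const uint8_t *insn)
{
	static const uint8_t endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };

	return insn[0] == 0x55 ||		/* push %rbp */
	       !memcmp(insn, endbr64, sizeof(endbr64));
}
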
2872 /* 32-bit process in 64-bit kernel. */ in perf_callchain_user32()
2881 cs_base = get_segment_base(regs->cs); in perf_callchain_user32()
2882 ss_base = get_segment_base(regs->ss); in perf_callchain_user32()
2884 fp = compat_ptr(ss_base + regs->bp); in perf_callchain_user32()
2889 !get_user(ret_addr, (const u32 __user *)regs->sp)) in perf_callchain_user32()
2892 while (entry->nr < entry->max_stack) { in perf_callchain_user32()
2896 if (__get_user(frame.next_frame, &fp->next_frame)) in perf_callchain_user32()
2898 if (__get_user(frame.return_address, &fp->return_address)) in perf_callchain_user32()
2930 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM)) in perf_callchain_user()
2933 fp = (void __user *)regs->bp; in perf_callchain_user()
2935 perf_callchain_store(entry, regs->ip); in perf_callchain_user()
2949 * we should read return address from *regs->sp before proceeding in perf_callchain_user()
2954 !get_user(ret_addr, (const unsigned long __user *)regs->sp)) in perf_callchain_user()
2957 while (entry->nr < entry->max_stack) { in perf_callchain_user()
2961 if (__get_user(frame.next_frame, &fp->next_frame)) in perf_callchain_user()
2963 if (__get_user(frame.return_address, &fp->return_address)) in perf_callchain_user()
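
perf_callchain_user() assumes the classic frame-pointer layout: RBP points at a two-word record holding the caller's saved frame pointer and the return address (the kernel's struct stack_frame), and the walk simply chases next_frame with __get_user() guarding every access from NMI context. A user-space sketch of the same walk over the program's own stack; it assumes everything on the path was built with frame pointers (compile with -fno-omit-frame-pointer or -O0) and stops at the first frame that does not look plausible:

#include <stdint.h>
#include <stdio.h>

struct stack_frame {
	struct stack_frame	*next_frame;
	unsigned long		return_address;
};

static void __attribute__((noinline)) show_callchain(void)
{
	struct stack_frame *fp = __builtin_frame_address(0);
	int depth;

	for (depth = 0; fp && depth < 16; depth++) {
		/* the kernel uses __get_user() because it walks another task's
		 * user stack; here we can dereference our own stack directly */
		printf("  frame %d: return address %#lx\n", depth, fp->return_address);

		/* frames must move towards higher addresses; anything else is junk */
		if (fp->next_frame <= fp)
			break;
		fp = fp->next_frame;
	}
}

static void __attribute__((noinline)) leaf(void)   { show_callchain(); }
static void __attribute__((noinline)) middle(void) { leaf(); }

int main(void)
{
	middle();
	return 0;
}
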
2975 * VM86 - the good olde 16 bit days, where the linear address is
2976 * 20 bits and we use regs->ip + 0x10 * regs->cs.
2978 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
2981 * X32 - has TIF_X32 set, but is running in x86_64
2983 * X86_64 - CS,DS,SS,ES are all zero based.
2997 if (regs->flags & X86_VM_MASK) in code_segment_base()
2998 return 0x10 * regs->cs; in code_segment_base()
3000 if (user_mode(regs) && regs->cs != __USER_CS) in code_segment_base()
3001 return get_segment_base(regs->cs); in code_segment_base()
3004 regs->cs != __USER32_CS) in code_segment_base()
3005 return get_segment_base(regs->cs); in code_segment_base()
3012 return regs->ip + code_segment_base(regs); in perf_arch_instruction_pointer()
3017 if (regs->flags & PERF_EFLAGS_EXACT) in common_misc_flags()
3074 * all E-cores are disabled via BIOS. When E-cores are disabled, the in perf_get_x86_pmu_capability()
3075 * base PMU holds the correct number of counters for P-cores. in perf_get_x86_pmu_capability()
3077 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
3078 cap->num_counters_gp = x86_pmu_num_counters(NULL); in perf_get_x86_pmu_capability()
3079 cap->num_counters_fixed = x86_pmu_num_counters_fixed(NULL); in perf_get_x86_pmu_capability()
3080 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
3081 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
3082 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
3083 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
3084 cap->pebs_ept = x86_pmu.pebs_ept; in perf_get_x86_pmu_capability()