Lines Matching +full:dont +full:- +full:validate
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
12 * For licencing details see kernel-base/COPYING
117 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_update()
118 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
122 if (unlikely(!hwc->event_base)) in x86_perf_event_update()
129 * exchange a new raw count - then add that new-prev delta in x86_perf_event_update()
132 prev_raw_count = local64_read(&hwc->prev_count); in x86_perf_event_update()
134 rdpmcl(hwc->event_base_rdpmc, new_raw_count); in x86_perf_event_update()
135 } while (!local64_try_cmpxchg(&hwc->prev_count, in x86_perf_event_update()
141 * (event-)time and add that to the generic event. in x86_perf_event_update()
143 * Careful, not all hw sign-extends above the physical width in x86_perf_event_update()
146 delta = (new_raw_count << shift) - (prev_raw_count << shift); in x86_perf_event_update()
149 local64_add(delta, &event->count); in x86_perf_event_update()
150 local64_sub(delta, &hwc->period_left); in x86_perf_event_update()
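
The update path above shifts both raw counts up by (64 - cntval_bits) before subtracting, then shifts the difference back down, because not every CPU sign-extends counter values above the physical counter width. A minimal stand-alone sketch of that trick, assuming a hypothetical 48-bit counter (the real width comes from x86_pmu.cntval_bits); everything below is illustrative, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical counter width, mirroring x86_pmu.cntval_bits on most CPUs. */
#define CNTVAL_BITS 48

/* Number of events that elapsed between two raw counter reads, coping with
 * a counter that wrapped around its 48-bit width. */
static int64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
        int shift = 64 - CNTVAL_BITS;

        /* Shift both values into the top of a 64-bit word so the subtraction
         * wraps correctly, then shift back down arithmetically (sign-extending),
         * just as the fragment does with prev_raw_count/new_raw_count. */
        int64_t delta = (int64_t)((new_raw << shift) - (prev_raw << shift));
        return delta >> shift;
}

int main(void)
{
        /* A read just before and just after a 48-bit wrap: the naive
         * subtraction would look hugely negative, the shifted one is +2. */
        uint64_t prev = 0xFFFFFFFFFFFFULL;      /* 2^48 - 1 */
        uint64_t next = 0x000000000001ULL;      /* wrapped past zero */

        printf("delta = %lld\n", (long long)counter_delta(prev, next));
        return 0;
}

Shifting into the top of the word is what makes the rollover case come out as a small positive delta instead of a huge negative step.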
156 * Find and validate any extra registers to set up.
160 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); in x86_pmu_extra_regs()
164 reg = &event->hw.extra_reg; in x86_pmu_extra_regs()
169 for (er = extra_regs; er->msr; er++) { in x86_pmu_extra_regs()
170 if (er->event != (config & er->config_mask)) in x86_pmu_extra_regs()
172 if (event->attr.config1 & ~er->valid_mask) in x86_pmu_extra_regs()
173 return -EINVAL; in x86_pmu_extra_regs()
175 if (!er->extra_msr_access) in x86_pmu_extra_regs()
176 return -ENXIO; in x86_pmu_extra_regs()
178 reg->idx = er->idx; in x86_pmu_extra_regs()
179 reg->config = event->attr.config1; in x86_pmu_extra_regs()
180 reg->reg = er->msr; in x86_pmu_extra_regs()
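
x86_pmu_extra_regs() above walks a table of extra registers: it matches the event code against config_mask, rejects config1 bits outside valid_mask, and refuses MSRs that failed the access probe. A rough user-space sketch of that table walk; the struct layout, table contents and return conventions below are made up for illustration and only mirror the checks visible in the fragment:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* Illustrative stand-in for the kernel's extra_reg table entries; the field
 * names mirror the fragment but the values further down are fabricated. */
struct extra_reg {
        uint64_t event;             /* event code this MSR pairs with        */
        uint64_t config_mask;       /* bits of attr.config that select it    */
        uint64_t valid_mask;        /* bits of attr.config1 the MSR accepts  */
        unsigned int msr;           /* the extra MSR itself                  */
        int extra_msr_access;       /* false if the MSR faulted at probe time */
};

static const struct extra_reg extra_regs[] = {
        { .event = 0x01b7, .config_mask = 0xffff, .valid_mask = 0xffffffULL,
          .msr = 0x1a6, .extra_msr_access = 1 },
        { .msr = 0 },   /* terminator: the walk stops at msr == 0 */
};

/* Same three outcomes the fragment shows: no match needed, invalid config1
 * bits (-EINVAL), or an MSR known not to be accessible (-ENXIO). */
static int validate_extra_reg(uint64_t config, uint64_t config1,
                              unsigned int *msr, uint64_t *extra)
{
        const struct extra_reg *er;

        for (er = extra_regs; er->msr; er++) {
                if (er->event != (config & er->config_mask))
                        continue;
                if (config1 & ~er->valid_mask)
                        return -EINVAL;
                if (!er->extra_msr_access)
                        return -ENXIO;
                *msr = er->msr;
                *extra = config1;
                return 0;
        }
        return 0;       /* no extra register needed for this event */
}

int main(void)
{
        unsigned int msr = 0;
        uint64_t extra = 0;
        int ret = validate_extra_reg(0x01b7, 0x1, &msr, &extra);

        printf("ret=%d msr=%#x extra=%#llx\n", ret, msr, (unsigned long long)extra);
        return 0;
}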
222 for (i--; i >= 0; i--) in reserve_pmc_hardware()
228 for (i--; i >= 0; i--) in reserve_pmc_hardware()
253 u64 val, val_fail = -1, val_new= ~0; in check_hw_exists()
254 int i, reg, reg_fail = -1, ret = 0; in check_hw_exists()
256 int reg_safe = -1; in check_hw_exists()
298 if (reg_safe == -1) { in check_hw_exists()
322 pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", in check_hw_exists()
362 struct perf_event_attr *attr = &event->attr; in set_ext_hw_attr()
366 config = attr->config; in set_ext_hw_attr()
370 return -EINVAL; in set_ext_hw_attr()
375 return -EINVAL; in set_ext_hw_attr()
380 return -EINVAL; in set_ext_hw_attr()
383 val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result]; in set_ext_hw_attr()
385 return -ENOENT; in set_ext_hw_attr()
387 if (val == -1) in set_ext_hw_attr()
388 return -EINVAL; in set_ext_hw_attr()
390 hwc->config |= val; in set_ext_hw_attr()
391 attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result]; in set_ext_hw_attr()
403 err = -EBUSY; in x86_reserve_hardware()
458 return -EBUSY; in x86_add_exclusive()
476 struct perf_event_attr *attr = &event->attr; in x86_setup_perfctr()
477 struct hw_perf_event *hwc = &event->hw; in x86_setup_perfctr()
481 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
482 hwc->last_period = hwc->sample_period; in x86_setup_perfctr()
483 local64_set(&hwc->period_left, hwc->sample_period); in x86_setup_perfctr()
486 if (attr->type == event->pmu->type) in x86_setup_perfctr()
487 return x86_pmu_extra_regs(event->attr.config, event); in x86_setup_perfctr()
489 if (attr->type == PERF_TYPE_HW_CACHE) in x86_setup_perfctr()
492 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
493 return -EINVAL; in x86_setup_perfctr()
495 attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); in x86_setup_perfctr()
500 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
503 return -ENOENT; in x86_setup_perfctr()
505 if (config == -1LL) in x86_setup_perfctr()
506 return -EINVAL; in x86_setup_perfctr()
508 hwc->config |= config; in x86_setup_perfctr()
521 u64 m = event->attr.branch_sample_type; in precise_br_compat()
530 if (!event->attr.exclude_user) in precise_br_compat()
533 if (!event->attr.exclude_kernel) in precise_br_compat()
563 if (event->attr.precise_ip) { in x86_pmu_hw_config()
566 if (event->attr.precise_ip > precise) in x86_pmu_hw_config()
567 return -EOPNOTSUPP; in x86_pmu_hw_config()
571 return -EINVAL; in x86_pmu_hw_config()
575 * whatever the user is asking with attr->branch_sample_type in x86_pmu_hw_config()
577 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
578 u64 *br_type = &event->attr.branch_sample_type; in x86_pmu_hw_config()
582 return -EOPNOTSUPP; in x86_pmu_hw_config()
596 if (!event->attr.exclude_user) in x86_pmu_hw_config()
599 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
605 event->attach_state |= PERF_ATTACH_TASK_DATA; in x86_pmu_hw_config()
611 event->hw.config = ARCH_PERFMON_EVENTSEL_INT; in x86_pmu_hw_config()
616 if (!event->attr.exclude_user) in x86_pmu_hw_config()
617 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR; in x86_pmu_hw_config()
618 if (!event->attr.exclude_kernel) in x86_pmu_hw_config()
619 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS; in x86_pmu_hw_config()
621 if (event->attr.type == event->pmu->type) in x86_pmu_hw_config()
622 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; in x86_pmu_hw_config()
624 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
625 s64 left = event->attr.sample_period; in x86_pmu_hw_config()
627 if (left > event->attr.sample_period) in x86_pmu_hw_config()
628 return -EINVAL; in x86_pmu_hw_config()
632 if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK)) in x86_pmu_hw_config()
633 return -EINVAL; in x86_pmu_hw_config()
638 if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) { in x86_pmu_hw_config()
639 if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) in x86_pmu_hw_config()
640 return -EINVAL; in x86_pmu_hw_config()
642 if (!event->attr.precise_ip) in x86_pmu_hw_config()
643 return -EINVAL; in x86_pmu_hw_config()
657 return -ENODEV; in __x86_pmu_event_init()
664 event->destroy = hw_perf_event_destroy; in __x86_pmu_event_init()
666 event->hw.idx = -1; in __x86_pmu_event_init()
667 event->hw.last_cpu = -1; in __x86_pmu_event_init()
668 event->hw.last_tag = ~0ULL; in __x86_pmu_event_init()
671 event->hw.extra_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
672 event->hw.branch_reg.idx = EXTRA_REG_NONE; in __x86_pmu_event_init()
683 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_disable_all()
686 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_disable_all()
709 * It will not be re-enabled in the NMI handler again, because enabled=0. After
724 if (!cpuc->enabled) in x86_pmu_disable()
727 cpuc->n_added = 0; in x86_pmu_disable()
728 cpuc->enabled = 0; in x86_pmu_disable()
740 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_enable_all()
742 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_enable_all()
754 return event->pmu == &pmu; in is_x86_event()
757 if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu) in is_x86_event()
772 if (WARN_ON_ONCE(!cpuc->pmu)) in x86_get_pmu()
775 return cpuc->pmu; in x86_get_pmu()
815 sched->max_events = num; in perf_sched_init()
816 sched->max_weight = wmax; in perf_sched_init()
817 sched->max_gp = gpmax; in perf_sched_init()
818 sched->constraints = constraints; in perf_sched_init()
821 if (constraints[idx]->weight == wmin) in perf_sched_init()
825 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
826 sched->state.weight = wmin; in perf_sched_init()
827 sched->state.unassigned = num; in perf_sched_init()
832 if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX)) in perf_sched_save_state()
835 sched->saved[sched->saved_states] = sched->state; in perf_sched_save_state()
836 sched->saved_states++; in perf_sched_save_state()
841 if (!sched->saved_states) in perf_sched_restore_state()
844 sched->saved_states--; in perf_sched_restore_state()
845 sched->state = sched->saved[sched->saved_states]; in perf_sched_restore_state()
849 sched->state.used &= ~BIT_ULL(sched->state.counter); in perf_sched_restore_state()
852 sched->state.counter++; in perf_sched_restore_state()
866 if (!sched->state.unassigned) in __perf_sched_find_counter()
869 if (sched->state.event >= sched->max_events) in __perf_sched_find_counter()
872 c = sched->constraints[sched->state.event]; in __perf_sched_find_counter()
874 if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { in __perf_sched_find_counter()
876 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { in __perf_sched_find_counter()
879 if (sched->state.used & mask) in __perf_sched_find_counter()
882 sched->state.used |= mask; in __perf_sched_find_counter()
888 idx = sched->state.counter; in __perf_sched_find_counter()
889 for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { in __perf_sched_find_counter()
892 if (c->flags & PERF_X86_EVENT_PAIR) in __perf_sched_find_counter()
895 if (sched->state.used & mask) in __perf_sched_find_counter()
898 if (sched->state.nr_gp++ >= sched->max_gp) in __perf_sched_find_counter()
901 sched->state.used |= mask; in __perf_sched_find_counter()
908 sched->state.counter = idx; in __perf_sched_find_counter()
910 if (c->overlap) in __perf_sched_find_counter()
934 if (!sched->state.unassigned || !--sched->state.unassigned) in perf_sched_next_event()
939 sched->state.event++; in perf_sched_next_event()
940 if (sched->state.event >= sched->max_events) { in perf_sched_next_event()
942 sched->state.event = 0; in perf_sched_next_event()
943 sched->state.weight++; in perf_sched_next_event()
944 if (sched->state.weight > sched->max_weight) in perf_sched_next_event()
947 c = sched->constraints[sched->state.event]; in perf_sched_next_event()
948 } while (c->weight != sched->state.weight); in perf_sched_next_event()
950 sched->state.counter = 0; /* start with first counter */ in perf_sched_next_event()
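
The perf_sched_* fragments above implement counter assignment: events are visited in order of increasing constraint weight (starting from wmin), each one grabs a free counter from its constraint's idxmsk, and the saved/restored scheduler state allows backtracking over overlapping constraints. A deliberately simplified sketch of the weight-ordered part only, with made-up constraint values and none of the backtracking, fixed-counter or pairing handling:

#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 4

/* Hypothetical, flattened view of an event constraint: a bitmask of counters
 * the event may use, plus its "weight" (popcount of that mask). */
struct constraint {
        uint64_t idxmsk;
        int      weight;
};

/* Assign counters to events, most-constrained (lowest weight) first.
 * Returns how many events could not be scheduled. */
static int schedule_events(const struct constraint *c, int n, int *assign)
{
        uint64_t used = 0;
        int unsched = 0;

        for (int w = 1; w <= NUM_COUNTERS; w++) {
                for (int i = 0; i < n; i++) {
                        if (c[i].weight != w)
                                continue;
                        uint64_t avail = c[i].idxmsk & ~used;
                        if (!avail) {
                                unsched++;
                                continue;
                        }
                        int idx = __builtin_ctzll(avail); /* lowest allowed free counter */
                        used |= 1ULL << idx;
                        assign[i] = idx;
                }
        }
        return unsched;
}

int main(void)
{
        /* Event 0 may only use counter 2, event 1 may use counters 0-3,
         * event 2 may use counters 2 or 3. */
        struct constraint c[] = {
                { .idxmsk = 0x4, .weight = 1 },
                { .idxmsk = 0xf, .weight = 4 },
                { .idxmsk = 0xc, .weight = 2 },
        };
        int assign[3] = { -1, -1, -1 };
        int unsched = schedule_events(c, 3, assign);

        printf("unsched=%d assign=[%d %d %d]\n",
               unsched, assign[0], assign[1], assign[2]);
        return 0;
}

Scheduling the most-constrained events first is what keeps counter 2 free for event 0 in this example; the kernel gets the same effect by looping the weight from wmin to wmax, and falls back to save/restore-based backtracking when a constraint is marked overlapping.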
978 int num_counters = hybrid(cpuc->pmu, num_counters); in x86_schedule_events()
988 * cpuc->n_events hasn't been updated yet, while for the latter in x86_schedule_events()
989 * cpuc->n_txn contains the number of events added in the current in x86_schedule_events()
992 n0 = cpuc->n_events; in x86_schedule_events()
993 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_schedule_events()
994 n0 -= cpuc->n_txn; in x86_schedule_events()
999 c = cpuc->event_constraint[i]; in x86_schedule_events()
1009 * have a dynamic constraint -- for those the constraint can in x86_schedule_events()
1012 if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) { in x86_schedule_events()
1013 c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
1014 cpuc->event_constraint[i] = c; in x86_schedule_events()
1017 wmin = min(wmin, c->weight); in x86_schedule_events()
1018 wmax = max(wmax, c->weight); in x86_schedule_events()
1027 hwc = &cpuc->event_list[i]->hw; in x86_schedule_events()
1028 c = cpuc->event_constraint[i]; in x86_schedule_events()
1031 if (hwc->idx == -1) in x86_schedule_events()
1035 if (!test_bit(hwc->idx, c->idxmsk)) in x86_schedule_events()
1038 mask = BIT_ULL(hwc->idx); in x86_schedule_events()
1049 assign[i] = hwc->idx; in x86_schedule_events()
1066 if (is_ht_workaround_enabled() && !cpuc->is_fake && in x86_schedule_events()
1067 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
1075 gpmax = num_counters - cpuc->n_pair; in x86_schedule_events()
1079 unsched = perf_assign_events(cpuc->event_constraint, n, wmin, in x86_schedule_events()
1091 * validate an event group (assign == NULL) in x86_schedule_events()
1098 e = cpuc->event_list[i]; in x86_schedule_events()
1105 cpuc->event_constraint[i] = NULL; in x86_schedule_events()
1111 return unsched ? -EINVAL : 0; in x86_schedule_events()
1118 if (cpuc->n_metric == INTEL_TD_METRIC_NUM) in add_nr_metric_event()
1119 return -EINVAL; in add_nr_metric_event()
1120 cpuc->n_metric++; in add_nr_metric_event()
1121 cpuc->n_txn_metric++; in add_nr_metric_event()
1131 cpuc->n_metric--; in del_nr_metric_event()
1137 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in collect_event()
1140 return -EINVAL; in collect_event()
1142 if (n >= max_count + cpuc->n_metric) in collect_event()
1143 return -EINVAL; in collect_event()
1145 cpuc->event_list[n] = event; in collect_event()
1146 if (is_counter_pair(&event->hw)) { in collect_event()
1147 cpuc->n_pair++; in collect_event()
1148 cpuc->n_txn_pair++; in collect_event()
1160 int num_counters = hybrid(cpuc->pmu, num_counters); in collect_events()
1161 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); in collect_events()
1168 n = cpuc->n_events; in collect_events()
1169 if (!cpuc->n_events) in collect_events()
1170 cpuc->pebs_output = 0; in collect_events()
1172 if (!cpuc->is_fake && leader->attr.precise_ip) { in collect_events()
1174 * For PEBS->PT, if !aux_event, the group leader (PT) went in collect_events()
1178 if (is_pebs_pt(leader) && !leader->aux_event) in collect_events()
1179 return -EINVAL; in collect_events()
1184 if (cpuc->pebs_output && in collect_events()
1185 cpuc->pebs_output != is_pebs_pt(leader) + 1) in collect_events()
1186 return -EINVAL; in collect_events()
1188 cpuc->pebs_output = is_pebs_pt(leader) + 1; in collect_events()
1193 return -EINVAL; in collect_events()
1201 if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) in collect_events()
1205 return -EINVAL; in collect_events()
1215 struct hw_perf_event *hwc = &event->hw; in x86_assign_hw_event()
1218 idx = hwc->idx = cpuc->assign[i]; in x86_assign_hw_event()
1219 hwc->last_cpu = smp_processor_id(); in x86_assign_hw_event()
1220 hwc->last_tag = ++cpuc->tags[i]; in x86_assign_hw_event()
1224 switch (hwc->idx) { in x86_assign_hw_event()
1227 hwc->config_base = 0; in x86_assign_hw_event()
1228 hwc->event_base = 0; in x86_assign_hw_event()
1235 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1: in x86_assign_hw_event()
1236 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; in x86_assign_hw_event()
1237 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + in x86_assign_hw_event()
1238 (idx - INTEL_PMC_IDX_FIXED); in x86_assign_hw_event()
1239 hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | in x86_assign_hw_event()
1244 hwc->config_base = x86_pmu_config_addr(hwc->idx); in x86_assign_hw_event()
1245 hwc->event_base = x86_pmu_event_addr(hwc->idx); in x86_assign_hw_event()
1246 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); in x86_assign_hw_event()
1252 * x86_perf_rdpmc_index - Return PMC counter used for event
1269 return event->hw.event_base_rdpmc; in x86_perf_rdpmc_index()
1276 return hwc->idx == cpuc->assign[i] && in match_prev_assignment()
1277 hwc->last_cpu == smp_processor_id() && in match_prev_assignment()
1278 hwc->last_tag == cpuc->tags[i]; in match_prev_assignment()
1288 int i, added = cpuc->n_added; in x86_pmu_enable()
1293 if (cpuc->enabled) in x86_pmu_enable()
1296 if (cpuc->n_added) { in x86_pmu_enable()
1297 int n_running = cpuc->n_events - cpuc->n_added; in x86_pmu_enable()
1305 event = cpuc->event_list[i]; in x86_pmu_enable()
1306 hwc = &event->hw; in x86_pmu_enable()
1310 * - assigned same counter as last time in x86_pmu_enable()
1311 * - running on same CPU as last time in x86_pmu_enable()
1312 * - no other event has used the counter since in x86_pmu_enable()
1314 if (hwc->idx == -1 || in x86_pmu_enable()
1322 if (hwc->state & PERF_HES_STOPPED) in x86_pmu_enable()
1323 hwc->state |= PERF_HES_ARCH; in x86_pmu_enable()
1331 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_enable()
1332 event = cpuc->event_list[i]; in x86_pmu_enable()
1333 hwc = &event->hw; in x86_pmu_enable()
1340 if (hwc->state & PERF_HES_ARCH) in x86_pmu_enable()
1344 * if cpuc->enabled = 0, then no wrmsr as in x86_pmu_enable()
1349 cpuc->n_added = 0; in x86_pmu_enable()
1353 cpuc->enabled = 1; in x86_pmu_enable()
1362 * Set the next IRQ period, based on the hwc->period_left value.
1367 struct hw_perf_event *hwc = &event->hw; in x86_perf_event_set_period()
1368 s64 left = local64_read(&hwc->period_left); in x86_perf_event_set_period()
1369 s64 period = hwc->sample_period; in x86_perf_event_set_period()
1370 int ret = 0, idx = hwc->idx; in x86_perf_event_set_period()
1372 if (unlikely(!hwc->event_base)) in x86_perf_event_set_period()
1378 if (unlikely(left <= -period)) { in x86_perf_event_set_period()
1380 local64_set(&hwc->period_left, left); in x86_perf_event_set_period()
1381 hwc->last_period = period; in x86_perf_event_set_period()
1387 local64_set(&hwc->period_left, left); in x86_perf_event_set_period()
1388 hwc->last_period = period; in x86_perf_event_set_period()
1392 * Quirk: certain CPUs dont like it if just 1 hw_event is left: in x86_perf_event_set_period()
1408 local64_set(&hwc->prev_count, (u64)-left); in x86_perf_event_set_period()
1410 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
1414 * we currently declare a 48-bit counter width in x86_perf_event_set_period()
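
The wrmsrl() near the end of x86_perf_event_set_period() programs the counter with the two's complement of the remaining period, masked to the counter width, so the counter overflows (and the PMI fires) after exactly "left" more events. A small worked example, assuming the 48-bit width mentioned in the fragment:

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_MASK ((1ULL << 48) - 1)  /* assume a 48-bit counter */

int main(void)
{
        int64_t  left  = 100000;                        /* events until the next PMI */
        uint64_t start = (uint64_t)(-left) & CNTVAL_MASK;

        /* The counter starts at 2^48 - left, so it reaches 2^48 (overflow)
         * after exactly `left` increments. */
        printf("programmed value: %#llx\n", (unsigned long long)start);
        printf("increments until overflow: %llu\n",
               (unsigned long long)((1ULL << 48) - start));
        return 0;
}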
1427 __x86_pmu_enable_event(&event->hw, in x86_pmu_enable_event()
1444 hwc = &event->hw; in x86_pmu_add()
1446 n0 = cpuc->n_events; in x86_pmu_add()
1451 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in x86_pmu_add()
1453 hwc->state |= PERF_HES_ARCH; in x86_pmu_add()
1458 * at commit time (->commit_txn) as a whole. in x86_pmu_add()
1460 * If commit fails, we'll call ->del() on all events in x86_pmu_add()
1461 * for which ->add() was called. in x86_pmu_add()
1463 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_add()
1473 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_add()
1480 cpuc->n_events = n; in x86_pmu_add()
1481 cpuc->n_added += n - n0; in x86_pmu_add()
1482 cpuc->n_txn += n - n0; in x86_pmu_add()
1498 int idx = event->hw.idx; in x86_pmu_start()
1500 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in x86_pmu_start()
1503 if (WARN_ON_ONCE(idx == -1)) in x86_pmu_start()
1507 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in x86_pmu_start()
1511 event->hw.state = 0; in x86_pmu_start()
1513 cpuc->events[idx] = event; in x86_pmu_start()
1514 __set_bit(idx, cpuc->active_mask); in x86_pmu_start()
1525 int num_counters = hybrid(cpuc->pmu, num_counters); in perf_event_print_debug()
1526 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); in perf_event_print_debug()
1527 struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); in perf_event_print_debug()
1556 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); in perf_event_print_debug()
1564 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", in perf_event_print_debug()
1566 pr_info("CPU#%d: gen-PMC%d count: %016llx\n", in perf_event_print_debug()
1568 pr_info("CPU#%d: gen-PMC%d left: %016llx\n", in perf_event_print_debug()
1572 if (fixed_counter_disabled(idx, cpuc->pmu)) in perf_event_print_debug()
1576 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", in perf_event_print_debug()
1585 struct hw_perf_event *hwc = &event->hw; in x86_pmu_stop()
1587 if (test_bit(hwc->idx, cpuc->active_mask)) { in x86_pmu_stop()
1589 __clear_bit(hwc->idx, cpuc->active_mask); in x86_pmu_stop()
1590 cpuc->events[hwc->idx] = NULL; in x86_pmu_stop()
1591 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); in x86_pmu_stop()
1592 hwc->state |= PERF_HES_STOPPED; in x86_pmu_stop()
1595 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { in x86_pmu_stop()
1601 hwc->state |= PERF_HES_UPTODATE; in x86_pmu_stop()
1608 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in x86_pmu_del()
1613 * The events never got scheduled and ->cancel_txn will truncate in x86_pmu_del()
1616 * XXX assumes any ->del() called during a TXN will only be on in x86_pmu_del()
1619 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_del()
1622 __set_bit(event->hw.idx, cpuc->dirty); in x86_pmu_del()
1629 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_del()
1630 if (event == cpuc->event_list[i]) in x86_pmu_del()
1634 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */ in x86_pmu_del()
1638 if (i >= cpuc->n_events - cpuc->n_added) in x86_pmu_del()
1639 --cpuc->n_added; in x86_pmu_del()
1644 while (++i < cpuc->n_events) { in x86_pmu_del()
1645 cpuc->event_list[i-1] = cpuc->event_list[i]; in x86_pmu_del()
1646 cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; in x86_pmu_del()
1648 cpuc->event_constraint[i-1] = NULL; in x86_pmu_del()
1649 --cpuc->n_events; in x86_pmu_del()
1685 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_handle_irq()
1688 event = cpuc->events[idx]; in x86_pmu_handle_irq()
1691 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
1702 perf_sample_data_init(&data, 0, event->hw.last_period); in x86_pmu_handle_irq()
1705 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); in x86_pmu_handle_irq()
1746 perf_sample_event_took(finish_clock - start_clock); in perf_event_nmi_handler()
1761 cpuc->kfree_on_online[i] = NULL; in x86_pmu_prepare_cpu()
1780 kfree(cpuc->kfree_on_online[i]); in x86_pmu_online_cpu()
1781 cpuc->kfree_on_online[i] = NULL; in x86_pmu_online_cpu()
1806 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); in pmu_check_apic()
1812 * events (user-space has to fall back and in pmu_check_apic()
1830 if (pmu_attr->id < x86_pmu.max_events) in events_sysfs_show()
1831 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1834 if (pmu_attr->event_str) in events_sysfs_show()
1835 return sprintf(page, "%s\n", pmu_attr->event_str); in events_sysfs_show()
1848 * Report conditional events depending on Hyper-Threading. in events_ht_sysfs_show()
1856 * has to re-read when a thread sibling gets onlined later. in events_ht_sysfs_show()
1860 pmu_attr->event_str_ht : in events_ht_sysfs_show()
1861 pmu_attr->event_str_noht); in events_ht_sysfs_show()
1874 if (hweight64(pmu_attr->pmu_type) == 1) in events_hybrid_sysfs_show()
1875 return sprintf(page, "%s", pmu_attr->event_str); in events_hybrid_sysfs_show()
1879 * event encoding, e.g., the mem-loads event on an Atom PMU has in events_hybrid_sysfs_show()
1888 str = pmu_attr->event_str; in events_hybrid_sysfs_show()
1890 if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type)) in events_hybrid_sysfs_show()
1892 if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) { in events_hybrid_sysfs_show()
1895 return snprintf(page, next_str - str + 1, "%s", str); in events_hybrid_sysfs_show()
1907 EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1909 EVENT_ATTR(cache-references, CACHE_REFERENCES );
1910 EVENT_ATTR(cache-misses, CACHE_MISSES );
1911 EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1912 EVENT_ATTR(branch-misses, BRANCH_MISSES );
1913 EVENT_ATTR(bus-cycles, BUS_CYCLES );
1914 EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1915 EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1916 EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1948 return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; in is_visible()
2048 pr_info("... fixed-purpose events: %lu\n", in x86_pmu_show_pmu_cap()
2049 hweight64((((1ULL << num_counters_fixed) - 1) in x86_pmu_show_pmu_cap()
2077 err = -ENOTSUPP; in init_hw_perf_events()
2095 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
2096 quirk->func(); in init_hw_perf_events()
2099 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; in init_hw_perf_events()
2105 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, in init_hw_perf_events()
2166 hybrid_pmu->pmu = pmu; in init_hw_perf_events()
2167 hybrid_pmu->pmu.type = -1; in init_hw_perf_events()
2168 hybrid_pmu->pmu.attr_update = x86_pmu.attr_update; in init_hw_perf_events()
2169 hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE; in init_hw_perf_events()
2171 err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, in init_hw_perf_events()
2172 (hybrid_pmu->pmu_type == hybrid_big) ? PERF_TYPE_RAW : -1); in init_hw_perf_events()
2213 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
2220 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */ in x86_pmu_start_txn()
2222 cpuc->txn_flags = txn_flags; in x86_pmu_start_txn()
2242 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_cancel_txn()
2244 txn_flags = cpuc->txn_flags; in x86_pmu_cancel_txn()
2245 cpuc->txn_flags = 0; in x86_pmu_cancel_txn()
2273 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_commit_txn()
2275 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) { in x86_pmu_commit_txn()
2276 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
2280 n = cpuc->n_events; in x86_pmu_commit_txn()
2283 return -EAGAIN; in x86_pmu_commit_txn()
2293 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_commit_txn()
2295 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
2300 * a fake_cpuc is used to validate event groups. Due to
2320 return ERR_PTR(-ENOMEM); in allocate_fake_cpuc()
2321 cpuc->is_fake = 1; in allocate_fake_cpuc()
2327 if (cpumask_empty(&h_pmu->supported_cpus)) in allocate_fake_cpuc()
2329 cpu = cpumask_first(&h_pmu->supported_cpus); in allocate_fake_cpuc()
2332 cpuc->pmu = event_pmu; in allocate_fake_cpuc()
2340 return ERR_PTR(-ENOMEM); in allocate_fake_cpuc()
2344 * validate that we can schedule this event
2352 fake_cpuc = allocate_fake_cpuc(event->pmu); in validate_event()
2358 if (!c || !c->weight) in validate_event()
2359 ret = -EINVAL; in validate_event()
2370 * validate a single event group
2373	 * - check events are compatible with each other in validate_group()
2374 * - events do not compete for the same counter
2375 * - number of events <= number of counters
2382 struct perf_event *leader = event->group_leader; in validate_group()
2384 int ret = -EINVAL, n; in validate_group()
2394 pmu = leader->pmu; in validate_group()
2400 pmu = sibling->pmu; in validate_group()
2401 else if (pmu != sibling->pmu) in validate_group()
2406 fake_cpuc = allocate_fake_cpuc(event->pmu); in validate_group()
2419 fake_cpuc->n_events = n; in validate_group()
2424 fake_cpuc->n_events = 0; in validate_group()
2437 if ((event->attr.type != event->pmu->type) && in x86_pmu_event_init()
2438 (event->attr.type != PERF_TYPE_HARDWARE) && in x86_pmu_event_init()
2439 (event->attr.type != PERF_TYPE_HW_CACHE)) in x86_pmu_event_init()
2440 return -ENOENT; in x86_pmu_event_init()
2442 if (is_hybrid() && (event->cpu != -1)) { in x86_pmu_event_init()
2443 pmu = hybrid_pmu(event->pmu); in x86_pmu_event_init()
2444 if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus)) in x86_pmu_event_init()
2445 return -ENOENT; in x86_pmu_event_init()
2450 if (event->group_leader != event) in x86_pmu_event_init()
2456 if (event->destroy) in x86_pmu_event_init()
2457 event->destroy(event); in x86_pmu_event_init()
2458 event->destroy = NULL; in x86_pmu_event_init()
2462 !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) in x86_pmu_event_init()
2463 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; in x86_pmu_event_init()
2474 for (i = 0; i < cpuc->n_events; i++) in perf_clear_dirty_counters()
2475 __clear_bit(cpuc->assign[i], cpuc->dirty); in perf_clear_dirty_counters()
2477 if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX)) in perf_clear_dirty_counters()
2480 for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) { in perf_clear_dirty_counters()
2483 if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed)) in perf_clear_dirty_counters()
2486 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0); in perf_clear_dirty_counters()
2492 bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX); in perf_clear_dirty_counters()
2497 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_mapped()
2512 if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1) in x86_pmu_event_mapped()
2518 if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_unmapped()
2521 if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed)) in x86_pmu_event_unmapped()
2527 struct hw_perf_event *hwc = &event->hw; in x86_pmu_event_idx()
2529 if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in x86_pmu_event_idx()
2532 if (is_metric_idx(hwc->idx)) in x86_pmu_event_idx()
2535 return hwc->event_base_rdpmc + 1; in x86_pmu_event_idx()
2557 return -EINVAL; in set_attr_rdpmc()
2560 return -ENOTSUPP; in set_attr_rdpmc()
2565 * aka perf-event-bypassing mode. This path is extremely slow, in set_attr_rdpmc()
2643 return -EINVAL; in x86_pmu_check_period()
2649 return -EINVAL; in x86_pmu_check_period()
2712 userpg->cap_user_time = 0; in arch_perf_update_userpage()
2713 userpg->cap_user_time_zero = 0; in arch_perf_update_userpage()
2714 userpg->cap_user_rdpmc = in arch_perf_update_userpage()
2715 !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT); in arch_perf_update_userpage()
2716 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
2729 userpg->cap_user_time = 1; in arch_perf_update_userpage()
2730 userpg->time_mult = data.cyc2ns_mul; in arch_perf_update_userpage()
2731 userpg->time_shift = data.cyc2ns_shift; in arch_perf_update_userpage()
2732 userpg->time_offset = offset - now; in arch_perf_update_userpage()
2738 if (!event->attr.use_clockid) { in arch_perf_update_userpage()
2739 userpg->cap_user_time_zero = 1; in arch_perf_update_userpage()
2740 userpg->time_zero = offset; in arch_perf_update_userpage()
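
arch_perf_update_userpage() above publishes cap_user_rdpmc, pmc_width and the time-conversion fields in the mmap'd perf page, and x86_pmu_event_idx() earlier in this listing returns event_base_rdpmc + 1, so user space subtracts one before issuing RDPMC and treats 0 as "not available". A sketch of the user-space side under those assumptions, using only documented perf_event_mmap_page fields; error handling is minimal, and a real consumer would fall back to read(2) whenever cap_user_rdpmc is clear or index is 0:

/* Build: gcc -O2 self_read.c -o self_read   (x86-64 Linux only) */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static uint64_t rdpmc(uint32_t counter)
{
        uint32_t lo, hi;
        __asm__ __volatile__("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
        return (uint64_t)hi << 32 | lo;
}

/* Read an event's count from user space via the mmap'd perf page.  The
 * kernel publishes the RDPMC index as hardware-index + 1, so 0 means
 * "not available" and index - 1 is what goes into the instruction. */
static uint64_t self_read(volatile struct perf_event_mmap_page *pg)
{
        uint64_t count;
        uint32_t seq, idx;

        do {
                seq = pg->lock;
                __asm__ __volatile__("" ::: "memory");  /* compiler barrier */

                idx   = pg->index;
                count = pg->offset;
                if (pg->cap_user_rdpmc && idx) {
                        uint64_t pmc   = rdpmc(idx - 1);
                        int      shift = 64 - pg->pmc_width;

                        /* sign-extend the raw PMC value to 64 bits */
                        count += (int64_t)(pmc << shift) >> shift;
                }
                /* (a real consumer would fall back to read(2) here otherwise) */

                __asm__ __volatile__("" ::: "memory");
        } while (pg->lock != seq);

        return count;
}

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.exclude_kernel = 1;        /* user-mode counting works unprivileged */

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        struct perf_event_mmap_page *pg =
                mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
        if (pg == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        printf("instructions so far: %llu\n",
               (unsigned long long)self_read(pg));
        return 0;
}

Mapping the page is also what flips the rdpmc permission on for this mm, which is what the x86_pmu_event_mapped()/x86_pmu_event_unmapped() lines earlier in the listing track via mm->context.perf_rdpmc_allowed.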
2752 return regs->flags & X86_EFLAGS_FIXED; in perf_hw_regs()
2766 if (perf_callchain_store(entry, regs->ip)) in perf_callchain_kernel()
2772 unwind_start(&state, current, NULL, (void *)regs->sp); in perf_callchain_kernel()
2797 ldt = READ_ONCE(current->active_mm->context.ldt); in get_segment_base()
2798 if (!ldt || idx >= ldt->nr_entries) in get_segment_base()
2801 desc = &ldt->entries[idx]; in get_segment_base()
2822 /* 32-bit process in 64-bit kernel. */ in perf_callchain_user32()
2830 cs_base = get_segment_base(regs->cs); in perf_callchain_user32()
2831 ss_base = get_segment_base(regs->ss); in perf_callchain_user32()
2833 fp = compat_ptr(ss_base + regs->bp); in perf_callchain_user32()
2835 while (entry->nr < entry->max_stack) { in perf_callchain_user32()
2839 if (__get_user(frame.next_frame, &fp->next_frame)) in perf_callchain_user32()
2841 if (__get_user(frame.return_address, &fp->return_address)) in perf_callchain_user32()
2872 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM)) in perf_callchain_user()
2875 fp = (void __user *)regs->bp; in perf_callchain_user()
2877 perf_callchain_store(entry, regs->ip); in perf_callchain_user()
2886 while (entry->nr < entry->max_stack) { in perf_callchain_user()
2890 if (__get_user(frame.next_frame, &fp->next_frame)) in perf_callchain_user()
2892 if (__get_user(frame.return_address, &fp->return_address)) in perf_callchain_user()
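
perf_callchain_user() above walks the user stack by reading pairs of (next_frame, return_address) through __get_user(). The same frame-pointer layout can be walked from an ordinary user program; the sketch below is an illustration for x86-64 built with frame pointers (gcc -O0), it stops at a fixed depth instead of tolerating faults the way __get_user() does, and its struct merely mirrors the two fields the fragment reads:

/* Build: gcc -O0 -fno-omit-frame-pointer fpwalk.c -o fpwalk   (x86-64 only) */
#include <stdint.h>
#include <stdio.h>

/* Same shape the fragment reads: the saved frame pointer followed by the
 * return address. */
struct stack_frame {
        struct stack_frame *next_frame;
        uintptr_t           return_address;
};

static void __attribute__((noinline)) show_callchain(void)
{
        struct stack_frame *fp = __builtin_frame_address(0);
        int depth = 0;

        /* Walk the saved-RBP chain; each hop yields one return address.
         * Stop well before leaving our own frames, since libc may not
         * have been built with frame pointers. */
        while (fp && depth < 4) {
                printf("frame %d: return address %p\n",
                       depth++, (void *)fp->return_address);
                fp = fp->next_frame;
        }
}

static void __attribute__((noinline)) a(void) { show_callchain(); }
static void __attribute__((noinline)) b(void) { a(); }

int main(void)
{
        b();
        return 0;
}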
2904 * VM86 - the good olde 16 bit days, where the linear address is
2905 * 20 bits and we use regs->ip + 0x10 * regs->cs.
2907 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
2910 * X32 - has TIF_X32 set, but is running in x86_64
2912 * X86_64 - CS,DS,SS,ES are all zero based.
2926 if (regs->flags & X86_VM_MASK) in code_segment_base()
2927 return 0x10 * regs->cs; in code_segment_base()
2929 if (user_mode(regs) && regs->cs != __USER_CS) in code_segment_base()
2930 return get_segment_base(regs->cs); in code_segment_base()
2933 regs->cs != __USER32_CS) in code_segment_base()
2934 return get_segment_base(regs->cs); in code_segment_base()
2944 return regs->ip + code_segment_base(regs); in perf_instruction_pointer()
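
code_segment_base() above distinguishes the segmentation flavours: in virtual-8086 mode the linear address is 0x10 * cs + ip, while 64-bit (and flat 32-bit) code segments have base 0, so perf_instruction_pointer() can usually return regs->ip unchanged. A tiny worked example of the VM86 arithmetic, with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Virtual-8086 mode: real-mode style segmentation, where the linear
         * address is segment * 16 + offset (20 bits total). */
        uint32_t cs = 0x1234, ip = 0x0102;
        uint32_t linear = 0x10 * cs + ip;

        printf("cs=%#x ip=%#x -> linear %#x\n", cs, ip, linear);  /* 0x12442 */

        /* With zero-based (flat) code segments the base term vanishes and
         * the reported instruction pointer is simply regs->ip. */
        return 0;
}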
2964 if (regs->flags & PERF_EFLAGS_EXACT) in perf_misc_flags()
2981 * all E-cores are disabled via BIOS. When E-cores are disabled, the in perf_get_x86_pmu_capability()
2982 * base PMU holds the correct number of counters for P-cores. in perf_get_x86_pmu_capability()
2984 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
2985 cap->num_counters_gp = x86_pmu.num_counters; in perf_get_x86_pmu_capability()
2986 cap->num_counters_fixed = x86_pmu.num_counters_fixed; in perf_get_x86_pmu_capability()
2987 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2988 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2989 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
2990 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
2991 cap->pebs_ept = x86_pmu.pebs_ept; in perf_get_x86_pmu_capability()