/linux/arch/x86/events/intel/

lbr.c
  131  if (pmi && x86_pmu.version >= 4)  in __intel_pmu_lbr_enable()
  139  lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;  in __intel_pmu_lbr_enable()
  169  for (i = 0; i < x86_pmu.lbr_nr; i++)  in intel_pmu_lbr_reset_32()
  170  wrmsrq(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_32()
  177  for (i = 0; i < x86_pmu.lbr_nr; i++) {  in intel_pmu_lbr_reset_64()
  178  wrmsrq(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_64()
  179  wrmsrq(x86_pmu.lbr_to + i, 0);  in intel_pmu_lbr_reset_64()
  180  if (x86_pmu.lbr_has_info)  in intel_pmu_lbr_reset_64()
  181  wrmsrq(x86_pmu.lbr_info + i, 0);  in intel_pmu_lbr_reset_64()
  188  wrmsrq(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);  in intel_pmu_arch_lbr_reset()
  [all …]
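The intel_pmu_lbr_reset_64() hits trace out the common reset idiom: walk the x86_pmu.lbr_nr LBR entries and clear each FROM/TO MSR, plus the INFO MSR where the hardware has one; the x86_pmu fields hold the base MSR numbers, with consecutive entries at consecutive addresses. A minimal sketch reconstructed from the hits above (kernel context assumed):

```c
/* Reconstructed from the lbr.c hits above; a sketch, not the full function. */
static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrq(x86_pmu.lbr_from + i, 0);	/* clear branch source */
		wrmsrq(x86_pmu.lbr_to + i, 0);		/* clear branch target */
		if (x86_pmu.lbr_has_info)		/* LBR_INFO on newer CPUs */
			wrmsrq(x86_pmu.lbr_info + i, 0);
	}
}
```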
core.c
  2564  cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);  in __intel_pmu_snapshot_branch_stack()
  2784  x86_pmu.addr_offset(idx, false);  in __intel_pmu_update_event_ext()
  2787  x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);  in __intel_pmu_update_event_ext()
  2889  if (left == x86_pmu.max_period) {  in icl_set_topdown_event_period()
  3056  x86_pmu.num_topdown_events - 1,  in icl_update_topdown_event()
  3137  if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)  in intel_pmu_enable_fixed()
  3142  if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip)  in intel_pmu_enable_fixed()
  3161  msr_offset = x86_pmu.addr_offset(idx, false);  in intel_pmu_config_acr()
  3165  msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);  in intel_pmu_config_acr()
  3281  if (x86_pmu.attr_rdpmc == X86_USER_RDPMC_ALWAYS_ENABLE ||  in intel_pmu_update_rdpmc_user_disable()
  [all …]
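The paired hits at 2784/2787 and 3161/3165 show one index convention: general-purpose counters pass their index straight to the x86_pmu.addr_offset() callback, while fixed counters are first rebased by INTEL_PMC_IDX_FIXED since they live in a separate MSR range indexed from zero. A hedged sketch of that dispatch (the helper name and return type are ours, not the kernel's):

```c
/* Hypothetical helper illustrating the idx handling seen in intel_pmu_config_acr(). */
static unsigned int pmc_addr_offset(int idx)
{
	if (idx < INTEL_PMC_IDX_FIXED)
		return x86_pmu.addr_offset(idx, false);

	/* Fixed counters: rebase into their own zero-based MSR range. */
	return x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false);
}
```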
ds.c
  184  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_adl()
  188  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_adl()
  207  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_mtl()
  211  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_mtl()
  222  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_arl_h()
  258  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_lnl()
  261  data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;  in intel_pmu_pebs_data_source_lnl()
  655  if (x86_pmu.pebs_no_tlb) {  in load_latency_data()
  665  if (!x86_pmu.pebs_block) {  in load_latency_data()
  851  size_t bsiz = x86_pmu.pebs_buffer_size;  in alloc_pebs_buffer()
  [all …]
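The hits at 184-261 all follow one pattern: on hybrid parts, each PMU type (P-core, E-core, low-power core) keeps its own PEBS data-source decode table reachable through x86_pmu.hybrid_pmu[], and the per-platform setup functions patch the entries that differ from the common table. A sketch of the shape, with the table edit stubbed out as a hypothetical helper:

```c
/* fixup_entries() is a hypothetical stand-in for the per-platform table edits. */
static void fixup_entries(u64 *data_source);

/* Sketch of the intel_pmu_pebs_data_source_adl()-style setup. */
static void pebs_data_source_hybrid_setup(void)
{
	u64 *data_source;

	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
	fixup_entries(data_source);	/* P-core specific encodings */

	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
	fixup_entries(data_source);	/* E-core specific encodings */
}
```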
p4.c
  924  for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {  in p4_pmu_disable_all()
  1003  for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {  in p4_pmu_enable_all()
  1028  wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);  in p4_pmu_set_period()
  1045  for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {  in p4_pmu_handle_irq()
  1064  if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))  in p4_pmu_handle_irq()
  1336  static __initconst const struct x86_pmu p4_pmu = {
  1388  x86_pmu = p4_pmu;  in p4_pmu_init()
  1399  for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {  in p4_pmu_init()
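The set_period and handle_irq hits show the classic up-counting trick: preload the counter with -left so it overflows after exactly `left` events, truncating the negative value to the hardware width with x86_pmu.cntval_mask; afterwards the sign bit (bit cntval_bits - 1) distinguishes "still counting" from "wrapped past zero". A sketch under those assumptions (function names are ours):

```c
/* Preload so the PMC overflows after `left` increments (cf. p4_pmu_set_period()). */
static void p4_style_set_period(struct hw_perf_event *hwc, s64 left)
{
	wrmsrq(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
}

/* Sign bit clear => the negative preload crossed zero, i.e. an overflow. */
static bool p4_style_overflowed(u64 val)
{
	return !(val & (1ULL << (x86_pmu.cntval_bits - 1)));
}
```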
knc.c
  292  static const struct x86_pmu knc_pmu __initconst = {
  318  x86_pmu = knc_pmu;  in knc_pmu_init()
bts.c
  606  x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);  in bts_init()
  607  if (!x86_pmu.bts)  in bts_init()
/linux/arch/x86/events/

core.c
  52  struct x86_pmu x86_pmu __read_mostly;
  70  DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
  71  DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
  72  DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
  73  DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
  74  DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);
  76  DEFINE_STATIC_CALL_NULL(x86_pmu_assign, *x86_pmu.assign);
  78  DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
  79  DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
  80  DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
  [all …]
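The DEFINE_STATIC_CALL_NULL() block starting at line 70 is how events/core.c avoids indirect calls through x86_pmu's function pointers: each callback gets a static call typed after the pointer it wraps, patched to the vendor implementation once init has filled the struct, and invoked as a direct call thereafter. A minimal sketch of the three steps (the two function names below are illustrative):

```c
/* 1. Declare a static call typed after the x86_pmu callback it wraps. */
DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);

/* 2. After vendor init has filled x86_pmu, patch in the real target. */
static void __init x86_pmu_static_call_update_sketch(void)
{
	static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);
}

/* 3. Call sites then use the patched direct call, not a pointer load. */
static void enable_all_sketch(int added)
{
	static_call(x86_pmu_enable_all)(added);
}
```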
perf_event.h
  790  typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
  810  bool __Fp = x86_pmu._field; \
  821  struct x86_pmu {
  1094  __quirk.next = x86_pmu.quirks; \
  1095  x86_pmu.quirks = &__quirk; \
  1157  extern struct x86_pmu x86_pmu __read_mostly;
  1159  DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
  1160  DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
  1161  DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
  1162  DECLARE_STATIC_CALL(x86_pmu_late_setup, *x86_pmu.late_setup);
  [all …]
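The hits at 1094-1095 come from the quirk registration macro in this header: each quirk is a statically allocated node pushed, LIFO-style, onto a singly linked list headed at x86_pmu.quirks, which init code later walks. A hedged reconstruction of the macro's shape; only the two listed lines are verbatim, the node type and field names around them are assumed:

```c
/* Hedged reconstruction built around the two listed lines. */
#define x86_add_quirk(func_)					\
do {								\
	static struct x86_pmu_quirk __quirk __initdata = {	\
		.func = func_,					\
	};							\
	__quirk.next = x86_pmu.quirks;				\
	x86_pmu.quirks = &__quirk;				\
} while (0)
```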
/linux/arch/x86/events/amd/

core.c
  355  if (!(x86_pmu.flags & PMU_FL_PAIR))  in amd_is_pair_event_code()
  364  DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);
  381  if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))  in amd_core_hw_config()
  410  if (has_branch_stack(event) && !x86_pmu.lbr_nr)  in amd_pmu_hw_config()
  437  for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {  in __amd_put_nb_event_constraints()
  549  for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {  in amd_alloc_nb()
  561  if (x86_pmu.lbr_nr)  in amd_pmu_cpu_reset()
  564  if (x86_pmu.version < 2)  in amd_pmu_cpu_reset()
  589  if (!x86_pmu.amd_nb_constraints)  in amd_pmu_cpu_prepare()
  612  if (!x86_pmu.amd_nb_constraints)  in amd_pmu_cpu_starting()
  [all …]
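The amd_pmu_hw_config() hit at line 410 is a capability gate: branch-stack sampling is refused outright when no LBR/BRS entries were detected at init. A sketch of that check; only the condition comes from the listing, the function shape and error code are assumptions:

```c
static int amd_hw_config_sketch(struct perf_event *event)
{
	/* No branch-sampling hardware detected => reject the event. */
	if (has_branch_stack(event) && !x86_pmu.lbr_nr)
		return -EINVAL;	/* assumed error code */

	return 0;
}
```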
brs.c
  62  x86_pmu.lbr_nr = 16;  in amd_brs_detect()
  65  x86_pmu.lbr_sel_map = NULL;  in amd_brs_detect()
  66  x86_pmu.lbr_sel_mask = 0;  in amd_brs_detect()
  86  if (!x86_pmu.lbr_nr)  in amd_brs_setup_filter()
  148  if (event->attr.sample_period <= x86_pmu.lbr_nr)  in amd_brs_hw_config()
  170  return (cfg->msroff ? cfg->msroff : x86_pmu.lbr_nr) - 1;  in amd_brs_get_tos()
  198  pr_cont("%d-deep BRS, ", x86_pmu.lbr_nr);  in amd_brs_init()
  302  if (WARN_ON_ONCE(cfg.msroff >= x86_pmu.lbr_nr))  in amd_brs_drain()
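The amd_brs_get_tos() hit at line 170 treats the BRS buffer as a ring of x86_pmu.lbr_nr entries: a hardware offset of 0 means the write pointer wrapped, so the newest record sits in the last slot. The same arithmetic as a standalone sketch, with cfg->msroff passed in directly:

```c
/* Top-of-stack index for a circular buffer of x86_pmu.lbr_nr entries. */
static int brs_tos_sketch(unsigned int msroff)
{
	return (msroff ? msroff : x86_pmu.lbr_nr) - 1;
}
```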
lbr.c
  171  for (i = 0; i < x86_pmu.lbr_nr; i++) {  in amd_pmu_lbr_read()
  254  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_setup_filter()
  326  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_reset()
  330  for (i = 0; i < x86_pmu.lbr_nr; i++) {  in amd_pmu_lbr_reset()
  345  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_add()
  364  if (!x86_pmu.lbr_nr)  in amd_pmu_lbr_del()
  394  if (!cpuc->lbr_users || !x86_pmu.lbr_nr)  in amd_pmu_lbr_enable_all()
  416  if (!cpuc->lbr_users || !x86_pmu.lbr_nr)  in amd_pmu_lbr_disable_all()
  426  if (x86_pmu.version < 2 || !boot_cpu_has(X86_FEATURE_AMD_LBR_V2))  in amd_pmu_lbr_init()
  431  x86_pmu.lbr_nr = ebx.split.lbr_v2_stack_sz;  in amd_pmu_lbr_init()
  [all …]
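amd_pmu_lbr_init() (lines 426 and 431) gates LBR v2 on PMU version 2 plus the AMD_LBR_V2 CPUID feature, then takes the stack depth from a decoded CPUID leaf. A sketch of those two steps; the union layout, function shape, and return value below are assumptions, only the two listed statements are verbatim:

```c
/* Assumed shape of the decoded CPUID leaf; the kernel uses its own type. */
union cpuid_ebx_sketch {
	struct {
		unsigned int lbr_v2_stack_sz : 6;
	} split;
	unsigned int full;
};

static int amd_lbr_v2_detect_sketch(union cpuid_ebx_sketch ebx)
{
	if (x86_pmu.version < 2 || !boot_cpu_has(X86_FEATURE_AMD_LBR_V2))
		return -EOPNOTSUPP;	/* assumed error code */

	x86_pmu.lbr_nr = ebx.split.lbr_v2_stack_sz;	/* LBR stack depth */
	return 0;
}
```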
/linux/arch/x86/events/zhaoxin/

core.c
  263  wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);  in zhaoxin_pmu_enable_all()
  371  if (x86_pmu.enabled_ack)  in zhaoxin_pmu_handle_irq()
  427  if (x86_pmu.event_constraints) {  in zhaoxin_get_event_constraints()
  428  for_each_event_constraint(c, x86_pmu.event_constraints) {  in zhaoxin_get_event_constraints()
  459  static const struct x86_pmu zhaoxin_pmu __initconst = {
  498  for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {  in zhaoxin_arch_events_quirk()
  529  x86_pmu = zhaoxin_pmu;  in zhaoxin_pmu_init()
  532  x86_pmu.version = version;  in zhaoxin_pmu_init()
  533  x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);  in zhaoxin_pmu_init()
  534  x86_pmu.cntval_bits = eax.split.bit_width;  in zhaoxin_pmu_init()
  [all …]
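zhaoxin_pmu_init() (lines 529-534) shows the vendor bring-up pattern also visible in p4.c and knc.c above: seed the global x86_pmu by struct-copying an __initconst template, then override the fields CPUID reports. A sketch; the eax union is assumed to be the kernel's cpuid10_eax decoding of CPUID leaf 0xA, and the function shape is ours:

```c
static void __init zhaoxin_pmu_init_sketch(int version, union cpuid10_eax eax)
{
	x86_pmu = zhaoxin_pmu;		/* whole-struct copy of the template */
	x86_pmu.version = version;
	/* One mask bit per general-purpose counter CPUID advertises. */
	x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);
	x86_pmu.cntval_bits = eax.split.bit_width;	/* counter width */
}
```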
/linux/arch/x86/xen/

pmu.c
  474  if (x86_pmu.handle_irq(&regs))  in xen_pmu_irq_handler()