#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/pmu.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
#include "x86/isr.h"
#include "vmalloc.h"
#include "alloc.h"

#include "libcflat.h"
#include <stdint.h>

#define N 1000000

// These values match the number of instructions and branches in the
// assembly block in check_emulated_instr().
#define EXPECTED_INSTR 17
#define EXPECTED_BRNCH 5

typedef struct {
	uint32_t ctr;
	uint32_t idx;
	uint64_t config;
	uint64_t count;
} pmu_counter_t;

struct pmu_event {
	const char *name;
	uint32_t unit_sel;
	int min;
	int max;
} intel_gp_events[] = {
	{"core cycles", 0x003c, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"ref cycles", 0x013c, 1*N, 30*N},
	{"llc references", 0x4f2e, 1, 2*N},
	{"llc misses", 0x412e, 1, 1*N},
	{"branches", 0x00c4, 1*N, 1.1*N},
	{"branch misses", 0x00c5, 0, 0.1*N},
}, amd_gp_events[] = {
	{"core cycles", 0x0076, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"branches", 0x00c2, 1*N, 1.1*N},
	{"branch misses", 0x00c3, 0, 0.1*N},
}, fixed_events[] = {
	{"fixed 0", MSR_CORE_PERF_FIXED_CTR0, 10*N, 10.2*N},
	{"fixed 1", MSR_CORE_PERF_FIXED_CTR0 + 1, 1*N, 30*N},
	{"fixed 2", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
};

char *buf;

static struct pmu_event *gp_events;
static unsigned int gp_events_size;

static inline void loop(void)
{
	unsigned long tmp, tmp2, tmp3;

	asm volatile("1: mov (%1), %2; add $64, %1; nop; nop; nop; nop; nop; nop; nop; loop 1b"
			: "=c"(tmp), "=r"(tmp2), "=r"(tmp3): "0"(N), "1"(buf));

}

volatile uint64_t irq_received;

static void cnt_overflow(isr_regs_t *regs)
{
	irq_received++;
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
	apic_write(APIC_EOI, 0);
}

static bool check_irq(void)
{
	int i;
	irq_received = 0;
	sti();
	for (i = 0; i < 100000 && !irq_received; i++)
		asm volatile("pause");
	cli();
	return irq_received;
}

static bool is_gp(pmu_counter_t *evt)
{
	if (!pmu.is_intel)
		return true;

	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0 ||
		evt->ctr >= MSR_IA32_PMC0;
}

static int event_to_global_idx(pmu_counter_t *cnt)
{
	if (pmu.is_intel)
		return cnt->ctr - (is_gp(cnt) ? pmu.msr_gp_counter_base :
			(MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX));

	if (pmu.msr_gp_counter_base == MSR_F15H_PERF_CTR0)
		return (cnt->ctr - pmu.msr_gp_counter_base) / 2;
	else
		return cnt->ctr - pmu.msr_gp_counter_base;
}

static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
{
	if (is_gp(cnt)) {
		int i;

		for (i = 0; i < gp_events_size; i++)
			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
				return &gp_events[i];
	} else
		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];

	return (void*)0;
}

static void global_enable(pmu_counter_t *cnt)
{
	if (!this_cpu_has_perf_global_ctrl())
		return;

	cnt->idx = event_to_global_idx(cnt);
	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) | BIT_ULL(cnt->idx));
}

static void global_disable(pmu_counter_t *cnt)
{
	if (!this_cpu_has_perf_global_ctrl())
		return;

	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) & ~BIT_ULL(cnt->idx));
}

static void __start_event(pmu_counter_t *evt, uint64_t count)
{
	evt->count = count;
	wrmsr(evt->ctr, evt->count);
	if (is_gp(evt)) {
		wrmsr(MSR_GP_EVENT_SELECTx(event_to_global_idx(evt)),
		      evt->config | EVNTSEL_EN);
	} else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		uint32_t usrospmi = 0;

		if (evt->config & EVNTSEL_OS)
			usrospmi |= (1 << 0);
		if (evt->config & EVNTSEL_USR)
			usrospmi |= (1 << 1);
		if (evt->config & EVNTSEL_INT)
			usrospmi |= (1 << 3); // PMI on overflow
		ctrl = (ctrl & ~(0xf << shift)) | (usrospmi << shift);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl);
	}
	global_enable(evt);
	apic_write(APIC_LVTPC, PMI_VECTOR);
}

static void start_event(pmu_counter_t *evt)
{
	__start_event(evt, 0);
}

static void stop_event(pmu_counter_t *evt)
{
	global_disable(evt);
	if (is_gp(evt)) {
		wrmsr(MSR_GP_EVENT_SELECTx(event_to_global_idx(evt)),
		      evt->config & ~EVNTSEL_EN);
	} else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl & ~(0xf << shift));
	}
	evt->count = rdmsr(evt->ctr);
}

static noinline void measure_many(pmu_counter_t *evt, int count)
{
	int i;
	for (i = 0; i < count; i++)
		start_event(&evt[i]);
	loop();
	for (i = 0; i < count; i++)
		stop_event(&evt[i]);
}

static void measure_one(pmu_counter_t *evt)
{
	measure_many(evt, 1);
}

static noinline void __measure(pmu_counter_t *evt, uint64_t count)
{
	__start_event(evt, count);
	loop();
	stop_event(evt);
}

static bool verify_event(uint64_t count, struct pmu_event *e)
{
	bool pass = count >= e->min && count <= e->max;

	if (!pass)
		printf("FAIL: %d <= %"PRId64" <= %d\n", e->min, count, e->max);

	return pass;
}

static bool verify_counter(pmu_counter_t *cnt)
{
	return verify_event(cnt->count, get_counter_event(cnt));
}

static void check_gp_counter(struct pmu_event *evt)
{
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel,
	};
	int i;

	for (i = 0; i < pmu.nr_gp_counters; i++) {
		cnt.ctr = MSR_GP_COUNTERx(i);
		measure_one(&cnt);
		report(verify_event(cnt.count, evt), "%s-%d", evt->name, i);
	}
}

static void check_gp_counters(void)
{
	int i;

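	/*
	 * For every architectural GP event the CPU enumerates as available,
	 * run check_gp_counter() so the event is measured on each GP counter;
	 * unavailable events are only reported as disabled.
	 */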
	for (i = 0; i < gp_events_size; i++)
		if (pmu_gp_counter_is_available(i))
			check_gp_counter(&gp_events[i]);
		else
			printf("GP event '%s' is disabled\n",
					gp_events[i].name);
}

static void check_fixed_counters(void)
{
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR,
	};
	int i;

	for (i = 0; i < pmu.nr_fixed_counters; i++) {
		cnt.ctr = fixed_events[i].unit_sel;
		measure_one(&cnt);
		report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d", i);
	}
}

static void check_counters_many(void)
{
	pmu_counter_t cnt[48];
	int i, n;

	for (i = 0, n = 0; n < pmu.nr_gp_counters; i++) {
		if (!pmu_gp_counter_is_available(i))
			continue;

		cnt[n].ctr = MSR_GP_COUNTERx(n);
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
			gp_events[i % gp_events_size].unit_sel;
		n++;
	}
	for (i = 0; i < pmu.nr_fixed_counters; i++) {
		cnt[n].ctr = fixed_events[i].unit_sel;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
		n++;
	}

	assert(n <= ARRAY_SIZE(cnt));
	measure_many(cnt, n);

	for (i = 0; i < n; i++)
		if (!verify_counter(&cnt[i]))
			break;

	report(i == n, "all counters");
}

static uint64_t measure_for_overflow(pmu_counter_t *cnt)
{
	__measure(cnt, 0);
	/*
	 * To generate overflow, i.e. roll over to '0', the initial count just
	 * needs to be preset to the negative expected count.  However, as per
	 * Intel's SDM, the preset count needs to be incremented by 1 to ensure
	 * the overflow interrupt is generated immediately instead of possibly
	 * waiting for the overflow to propagate through the counter.
	 */
	assert(cnt->count > 1);
	return 1 - cnt->count;
}

static void check_counter_overflow(void)
{
	uint64_t overflow_preset;
	int i;
	pmu_counter_t cnt = {
		.ctr = MSR_GP_COUNTERx(0),
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	overflow_preset = measure_for_overflow(&cnt);

	/* clear status before test */
	if (this_cpu_has_perf_global_status())
		pmu_clear_global_status();

	report_prefix_push("overflow");

	for (i = 0; i < pmu.nr_gp_counters + 1; i++) {
		uint64_t status;
		int idx;

		cnt.count = overflow_preset;
		if (pmu_use_full_writes())
			cnt.count &= (1ull << pmu.gp_counter_width) - 1;

		if (i == pmu.nr_gp_counters) {
			if (!pmu.is_intel)
				break;

			cnt.ctr = fixed_events[0].unit_sel;
			cnt.count = measure_for_overflow(&cnt);
			cnt.count &= (1ull << pmu.gp_counter_width) - 1;
		} else {
			cnt.ctr = MSR_GP_COUNTERx(i);
		}

		if (i % 2)
			cnt.config |= EVNTSEL_INT;
		else
			cnt.config &= ~EVNTSEL_INT;
		idx = event_to_global_idx(&cnt);
		__measure(&cnt, cnt.count);
		if (pmu.is_intel)
			report(cnt.count == 1, "cntr-%d", i);
		else
			report(cnt.count == 0xffffffffffff || cnt.count < 7, "cntr-%d", i);

		if (!this_cpu_has_perf_global_status())
			continue;

		status = rdmsr(pmu.msr_global_status);
		report(status & (1ull << idx), "status-%d", i);
		wrmsr(pmu.msr_global_status_clr, status);
		status = rdmsr(pmu.msr_global_status);
		report(!(status & (1ull << idx)), "status clear-%d", i);
		report(check_irq() == (i % 2), "irq-%d", i);
	}

	report_prefix_pop();
}

static void check_gp_counter_cmask(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_GP_COUNTERx(0),
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	cnt.config |= (0x2 << EVNTSEL_CMASK_SHIFT);
	measure_one(&cnt);
	report(cnt.count < gp_events[1].min, "cmask");
}

static void do_rdpmc_fast(void *ptr)
{
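	/*
	 * Issue RDPMC with bit 31 ("fast" read) set in the index, plus bit 30
	 * for fixed counters; the test expects the fast form to return only
	 * the low 32 bits.  Callers invoke this via test_for_exception()
	 * because the fast form may raise #GP when it is not supported.
	 */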
	pmu_counter_t *cnt = ptr;
	uint32_t idx = (uint32_t)cnt->idx | (1u << 31);

	if (!is_gp(cnt))
		idx |= 1 << 30;

	cnt->count = rdpmc(idx);
}


static void check_rdpmc(void)
{
	uint64_t val = 0xff0123456789ull;
	bool exc;
	int i;

	report_prefix_push("rdpmc");

	for (i = 0; i < pmu.nr_gp_counters; i++) {
		uint64_t x;
		pmu_counter_t cnt = {
			.ctr = MSR_GP_COUNTERx(i),
			.idx = i
		};

		/*
		 * Without full-width writes, only the low 32 bits are writable,
		 * and the value is sign-extended.
		 */
		if (pmu.msr_gp_counter_base == MSR_IA32_PERFCTR0)
			x = (uint64_t)(int64_t)(int32_t)val;
		else
			x = (uint64_t)(int64_t)val;

		/* Mask according to the number of supported bits */
		x &= (1ull << pmu.gp_counter_width) - 1;

		wrmsr(MSR_GP_COUNTERx(i), val);
		report(rdpmc(i) == x, "cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fast-%d", i);
		else
			report(cnt.count == (u32)val, "fast-%d", i);
	}
	for (i = 0; i < pmu.nr_fixed_counters; i++) {
		uint64_t x = val & ((1ull << pmu.fixed_counter_width) - 1);
		pmu_counter_t cnt = {
			.ctr = MSR_CORE_PERF_FIXED_CTR0 + i,
			.idx = i
		};

		wrmsr(MSR_PERF_FIXED_CTRx(i), x);
		report(rdpmc(i | (1 << 30)) == x, "fixed cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fixed fast-%d", i);
		else
			report(cnt.count == (u32)x, "fixed fast-%d", i);
	}

	report_prefix_pop();
}

static void check_running_counter_wrmsr(void)
{
	uint64_t status;
	uint64_t count;
	pmu_counter_t evt = {
		.ctr = MSR_GP_COUNTERx(0),
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};

	report_prefix_push("running counter wrmsr");

	start_event(&evt);
	loop();
	wrmsr(MSR_GP_COUNTERx(0), 0);
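	/*
	 * The counter was zeroed while the event was still running, so the
	 * count read back below should stay well under the event's expected
	 * minimum.
	 */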
	stop_event(&evt);
	report(evt.count < gp_events[1].min, "cntr");

	/* clear status before overflow test */
	if (this_cpu_has_perf_global_status())
		pmu_clear_global_status();

	start_event(&evt);

	count = -1;
	if (pmu_use_full_writes())
		count &= (1ull << pmu.gp_counter_width) - 1;

	wrmsr(MSR_GP_COUNTERx(0), count);

	loop();
	stop_event(&evt);

	if (this_cpu_has_perf_global_status()) {
		status = rdmsr(pmu.msr_global_status);
		report(status & 1, "status msr bit");
	}

	report_prefix_pop();
}

static void check_emulated_instr(void)
{
	uint64_t status, instr_start, brnch_start;
	uint64_t gp_counter_width = (1ull << pmu.gp_counter_width) - 1;
	unsigned int branch_idx = pmu.is_intel ? 5 : 2;
	pmu_counter_t brnch_cnt = {
		.ctr = MSR_GP_COUNTERx(0),
		/* branch instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[branch_idx].unit_sel,
	};
	pmu_counter_t instr_cnt = {
		.ctr = MSR_GP_COUNTERx(1),
		/* instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};
	report_prefix_push("emulated instruction");

	if (this_cpu_has_perf_global_status())
		pmu_clear_global_status();

	start_event(&brnch_cnt);
	start_event(&instr_cnt);

	brnch_start = -EXPECTED_BRNCH;
	instr_start = -EXPECTED_INSTR;
	wrmsr(MSR_GP_COUNTERx(0), brnch_start & gp_counter_width);
	wrmsr(MSR_GP_COUNTERx(1), instr_start & gp_counter_width);
	// KVM_FEP is a magic prefix that forces emulation so
	// 'KVM_FEP "jne label\n"' just counts as a single instruction.
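	// The block below is what EXPECTED_INSTR and EXPECTED_BRNCH are sized
	// for: mov+cmp (2) + five emulated jne (5) + five mov/cpuid pairs (10)
	// make 17 instructions, and the five jne instructions are the branches.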
	asm volatile(
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"label:\n"
		:
		:
		: "eax", "ebx", "ecx", "edx");

	if (this_cpu_has_perf_global_ctrl())
		wrmsr(pmu.msr_global_ctl, 0);

	stop_event(&brnch_cnt);
	stop_event(&instr_cnt);

	// Check that the end count - start count is at least the expected
	// number of instructions and branches.
	report(instr_cnt.count - instr_start >= EXPECTED_INSTR,
	       "instruction count");
	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
	       "branch count");
	if (this_cpu_has_perf_global_status()) {
		// Additionally check that those counters overflowed properly.
		status = rdmsr(pmu.msr_global_status);
		report(status & 1, "branch counter overflow");
		report(status & 2, "instruction counter overflow");
	}

	report_prefix_pop();
}

#define XBEGIN_STARTED (~0u)
static void check_tsx_cycles(void)
{
	pmu_counter_t cnt;
	unsigned int i, ret = 0;

	if (!this_cpu_has(X86_FEATURE_RTM))
		return;

	report_prefix_push("TSX cycles");

	for (i = 0; i < pmu.nr_gp_counters; i++) {
		cnt.ctr = MSR_GP_COUNTERx(i);

		if (i == 2) {
			/* Transactional cycles committed only on gp counter 2 */
			cnt.config = EVNTSEL_OS | EVNTSEL_USR | 0x30000003c;
		} else {
			/* Transactional cycles */
			cnt.config = EVNTSEL_OS | EVNTSEL_USR | 0x10000003c;
		}

		start_event(&cnt);

		asm volatile("xbegin 1f\n\t"
				"1:\n\t"
				: "+a" (ret) :: "memory");

		/* Generate a non-canonical #GP to trigger ABORT. */
		if (ret == XBEGIN_STARTED)
			*(int *)NONCANONICAL = 0;

		stop_event(&cnt);

		report(cnt.count > 0, "gp cntr-%d with a value of %" PRId64 "", i, cnt.count);
	}

	report_prefix_pop();
}

static void check_counters(void)
{
	if (is_fep_available())
		check_emulated_instr();

	check_gp_counters();
	check_fixed_counters();
	check_rdpmc();
	check_counters_many();
	check_counter_overflow();
	check_gp_counter_cmask();
	check_running_counter_wrmsr();
	check_tsx_cycles();
}

static void do_unsupported_width_counter_write(void *index)
{
	wrmsr(MSR_IA32_PMC0 + *((int *) index), 0xffffff0123456789ull);
}

static void check_gp_counters_write_width(void)
{
	u64 val_64 = 0xffffff0123456789ull;
	u64 val_32 = val_64 & ((1ull << 32) - 1);
	u64 val_max_width = val_64 & ((1ull << pmu.gp_counter_width) - 1);
	int i;

	/*
	 * MSR_IA32_PERFCTRn supports 64-bit writes,
	 * but only the lowest 32 bits are valid.
	 */
	for (i = 0; i < pmu.nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PERFCTR0 + i, val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_64);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
	}

	/*
	 * MSR_IA32_PMCn supports writing values up to GP counter width,
	 * and only the lowest bits of GP counter width are valid.
	 */
	for (i = 0; i < pmu.nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PMC0 + i, val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);

		wrmsr(MSR_IA32_PMC0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_max_width);

		report(test_for_exception(GP_VECTOR,
			do_unsupported_width_counter_write, &i),
		"writing unsupported width to MSR_IA32_PMC%d raises #GP", i);
	}
}

/*
 * Per the SDM, reference cycles are currently implemented using the
 * core crystal clock, TSC, or bus clock. Calibrate to the TSC
 * frequency to set reasonable expectations.
 */
static void set_ref_cycle_expectations(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | intel_gp_events[2].unit_sel,
	};
	uint64_t tsc_delta;
	uint64_t t0, t1, t2, t3;

	/* Bit 2 enumerates the availability of reference cycles events. */
	if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
		return;

	if (this_cpu_has_perf_global_ctrl())
		wrmsr(pmu.msr_global_ctl, 0);

	t0 = fenced_rdtsc();
	start_event(&cnt);
	t1 = fenced_rdtsc();

	/*
	 * This loop has to run long enough to dominate the VM-exit
	 * costs for playing with the PMU MSRs on start and stop.
	 *
	 * On a 2.6GHz Ice Lake, with the TSC frequency at 104 times
	 * the core crystal clock, this function calculated a guest
	 * TSC : ref cycles ratio of around 105 with ECX initialized
	 * to one billion.
	 */
	asm volatile("loop ." : "+c"((int){1000000000ull}));

	t2 = fenced_rdtsc();
	stop_event(&cnt);
	t3 = fenced_rdtsc();

	tsc_delta = ((t2 - t1) + (t3 - t0)) / 2;

	if (!tsc_delta)
		return;

	intel_gp_events[2].min = (intel_gp_events[2].min * cnt.count) / tsc_delta;
	intel_gp_events[2].max = (intel_gp_events[2].max * cnt.count) / tsc_delta;
}

static void check_invalid_rdpmc_gp(void)
{
	uint64_t val;

	report(rdpmc_safe(64, &val) == GP_VECTOR,
	       "Expected #GP on RDPMC(64)");
}

int main(int ac, char **av)
{
	setup_vm();
	handle_irq(PMI_VECTOR, cnt_overflow);
	buf = malloc(N*64);

	check_invalid_rdpmc_gp();

	if (pmu.is_intel) {
		if (!pmu.version) {
			report_skip("No Intel Arch PMU is detected!");
			return report_summary();
		}
		gp_events = (struct pmu_event *)intel_gp_events;
		gp_events_size = sizeof(intel_gp_events)/sizeof(intel_gp_events[0]);
		report_prefix_push("Intel");
		set_ref_cycle_expectations();
	} else {
		gp_events_size = sizeof(amd_gp_events)/sizeof(amd_gp_events[0]);
		gp_events = (struct pmu_event *)amd_gp_events;
		report_prefix_push("AMD");
	}

	printf("PMU version: %d\n", pmu.version);
	printf("GP counters: %d\n", pmu.nr_gp_counters);
	printf("GP counter width: %d\n", pmu.gp_counter_width);
	printf("Mask length: %d\n", pmu.gp_counter_mask_length);
	printf("Fixed counters: %d\n", pmu.nr_fixed_counters);
	printf("Fixed counter width: %d\n", pmu.fixed_counter_width);

	apic_write(APIC_LVTPC, PMI_VECTOR);

	check_counters();

	if (pmu_has_full_writes()) {
		pmu.msr_gp_counter_base = MSR_IA32_PMC0;

		report_prefix_push("full-width writes");
		check_counters();
		check_gp_counters_write_width();
		report_prefix_pop();
	}

	if (!pmu.is_intel) {
		report_prefix_push("K7");
		pmu.nr_gp_counters = AMD64_NUM_COUNTERS;
		pmu.msr_gp_counter_base = MSR_K7_PERFCTR0;
		pmu.msr_gp_event_select_base = MSR_K7_EVNTSEL0;
		check_counters();
		report_prefix_pop();
	}

	return report_summary();
}