#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
#include "x86/isr.h"
#include "alloc.h"

#include "libcflat.h"
#include <stdint.h>

#define FIXED_CNT_INDEX 32
#define PC_VECTOR	32

#define EVNTSEL_EVENT_SHIFT	0
#define EVNTSEL_UMASK_SHIFT	8
#define EVNTSEL_USR_SHIFT	16
#define EVNTSEL_OS_SHIFT	17
#define EVNTSEL_EDGE_SHIFT	18
#define EVNTSEL_PC_SHIFT	19
#define EVNTSEL_INT_SHIFT	20
#define EVNTSEL_EN_SHIFT	22
#define EVNTSEL_INV_SHIFT	23
#define EVNTSEL_CMASK_SHIFT	24

#define EVNTSEL_EN	(1 << EVNTSEL_EN_SHIFT)
#define EVNTSEL_USR	(1 << EVNTSEL_USR_SHIFT)
#define EVNTSEL_OS	(1 << EVNTSEL_OS_SHIFT)
#define EVNTSEL_PC	(1 << EVNTSEL_PC_SHIFT)
#define EVNTSEL_INT	(1 << EVNTSEL_INT_SHIFT)
#define EVNTSEL_INV	(1 << EVNTSEL_INV_SHIFT)

#define N 1000000

#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
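// KVM_FEP is KVM's force-emulation prefix: when the host kvm module is
// loaded with the force_emulation_prefix parameter enabled, an
// instruction preceded by this byte sequence is emulated by KVM instead
// of being executed natively. Without that parameter the leading ud2
// simply raises #UD, so the "emulation" test only works on suitably
// configured hosts.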
// These values match the number of instructions and branches in the
// assembly block in check_emulated_instr().
#define EXPECTED_INSTR 17
#define EXPECTED_BRNCH 5

typedef struct {
	uint32_t ctr;
	uint32_t config;
	uint64_t count;
	int idx;
} pmu_counter_t;

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
} eax;

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
} ebx;

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
} edx;

struct pmu_event {
	const char *name;
	uint32_t unit_sel;
	int min;
	int max;
} gp_events[] = {
	{"core cycles", 0x003c, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"ref cycles", 0x013c, 1*N, 30*N},
	{"llc references", 0x4f2e, 1, 2*N},
	{"llc misses", 0x412e, 1, 1*N},
	{"branches", 0x00c4, 1*N, 1.1*N},
	{"branch misses", 0x00c5, 0, 0.1*N},
}, fixed_events[] = {
	{"fixed 1", MSR_CORE_PERF_FIXED_CTR0, 10*N, 10.2*N},
	{"fixed 2", MSR_CORE_PERF_FIXED_CTR0 + 1, 1*N, 30*N},
	{"fixed 3", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
};

#define PMU_CAP_FW_WRITES	(1ULL << 13)
static u64 gp_counter_base = MSR_IA32_PERFCTR0;

static int num_counters;

char *buf;
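/*
 * Measured workload: each iteration executes ten instructions (mov,
 * add, seven nops, loop) and one taken branch, so N iterations should
 * retire roughly 10*N instructions and 1*N branches -- the basis for
 * the "instructions" and "branches" expectation ranges in gp_events.
 */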
static inline void loop(void)
{
	unsigned long tmp, tmp2, tmp3;

	asm volatile("1: mov (%1), %2; add $64, %1; nop; nop; nop; nop; nop; nop; nop; loop 1b"
			: "=c"(tmp), "=r"(tmp2), "=r"(tmp3): "0"(N), "1"(buf));

}

volatile uint64_t irq_received;

static void cnt_overflow(isr_regs_t *regs)
{
	irq_received++;
	apic_write(APIC_EOI, 0);
}

static bool check_irq(void)
{
	int i;
	irq_received = 0;
	irq_enable();
	for (i = 0; i < 100000 && !irq_received; i++)
		asm volatile("pause");
	irq_disable();
	return irq_received;
}

static bool is_gp(pmu_counter_t *evt)
{
	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0 ||
		evt->ctr >= MSR_IA32_PMC0;
}

static int event_to_global_idx(pmu_counter_t *cnt)
{
	return cnt->ctr - (is_gp(cnt) ? gp_counter_base :
		(MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX));
}

static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
{
	if (is_gp(cnt)) {
		int i;

		for (i = 0; i < ARRAY_SIZE(gp_events); i++)
			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
				return &gp_events[i];
	} else
		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];

	return NULL;
}

static void global_enable(pmu_counter_t *cnt)
{
	cnt->idx = event_to_global_idx(cnt);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
			(1ull << cnt->idx));
}

static void global_disable(pmu_counter_t *cnt)
{
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
			~(1ull << cnt->idx));
}

static void start_event(pmu_counter_t *evt)
{
	wrmsr(evt->ctr, evt->count);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
				evt->config | EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		uint32_t usrospmi = 0;

		if (evt->config & EVNTSEL_OS)
			usrospmi |= (1 << 0);
		if (evt->config & EVNTSEL_USR)
			usrospmi |= (1 << 1);
		if (evt->config & EVNTSEL_INT)
			usrospmi |= (1 << 3); // PMI on overflow
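		/*
		 * Per the SDM, each fixed counter owns a 4-bit field in
		 * IA32_FIXED_CTR_CTRL: bit 0 enables ring-0 counting,
		 * bit 1 ring-3 counting, and bit 3 the PMI on overflow.
		 * Replace only this counter's nibble, leaving the other
		 * counters' fields untouched.
		 */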
		ctrl = (ctrl & ~(0xf << shift)) | (usrospmi << shift);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl);
	}
	global_enable(evt);
	apic_write(APIC_LVTPC, PC_VECTOR);
}

static void stop_event(pmu_counter_t *evt)
{
	global_disable(evt);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
				evt->config & ~EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl & ~(0xf << shift));
	}
	evt->count = rdmsr(evt->ctr);
}

static void measure(pmu_counter_t *evt, int count)
{
	int i;
	for (i = 0; i < count; i++)
		start_event(&evt[i]);
	loop();
	for (i = 0; i < count; i++)
		stop_event(&evt[i]);
}

static bool verify_event(uint64_t count, struct pmu_event *e)
{
	// printf("%d <= %ld <= %d\n", e->min, count, e->max);
	return count >= e->min && count <= e->max;
}

static bool verify_counter(pmu_counter_t *cnt)
{
	return verify_event(cnt->count, get_counter_event(cnt));
}

static void check_gp_counter(struct pmu_event *evt)
{
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel,
	};
	int i;

	for (i = 0; i < num_counters; i++, cnt.ctr++) {
		cnt.count = 0;
		measure(&cnt, 1);
		report(verify_event(cnt.count, evt), "%s-%d", evt->name, i);
	}
}

static void check_gp_counters(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gp_events); i++)
		if (!(ebx.full & (1 << i)))
			check_gp_counter(&gp_events[i]);
		else
			printf("GP event '%s' is disabled\n",
					gp_events[i].name);
}

static void check_fixed_counters(void)
{
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR,
	};
	int i;

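	/*
	 * Fixed counters are selected by MSR address rather than by an
	 * event select: architecturally, fixed counter 0 counts retired
	 * instructions, 1 counts unhalted core cycles, and 2 counts
	 * reference cycles, which is what the fixed_events bounds assume.
	 */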
	for (i = 0; i < edx.split.num_counters_fixed; i++) {
		cnt.count = 0;
		cnt.ctr = fixed_events[i].unit_sel;
		measure(&cnt, 1);
		report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d", i);
	}
}

static void check_counters_many(void)
{
	/* Room for the maximum number of GP plus fixed counters. */
	pmu_counter_t cnt[48];
	int i, n;

	for (i = 0, n = 0; n < num_counters; i++) {
		if (ebx.full & (1 << i))
			continue;

		cnt[n].count = 0;
		cnt[n].ctr = gp_counter_base + n;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
			gp_events[i % ARRAY_SIZE(gp_events)].unit_sel;
		n++;
	}
	for (i = 0; i < edx.split.num_counters_fixed; i++) {
		cnt[n].count = 0;
		cnt[n].ctr = fixed_events[i].unit_sel;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
		n++;
	}

	measure(cnt, n);

	for (i = 0; i < n; i++)
		if (!verify_counter(&cnt[i]))
			break;

	report(i == n, "all counters");
}

static void check_counter_overflow(void)
{
	uint64_t count;
	int i;
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
		.count = 0,
	};
	measure(&cnt, 1);
	count = cnt.count;

	/* clear status before test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	report_prefix_push("overflow");

	for (i = 0; i < num_counters + 1; i++, cnt.ctr++) {
		uint64_t status;
		int idx;

		cnt.count = 1 - count;
		if (gp_counter_base == MSR_IA32_PMC0)
			cnt.count &= (1ull << eax.split.bit_width) - 1;

		if (i == num_counters) {
			cnt.ctr = fixed_events[0].unit_sel;
			cnt.count &= (1ull << edx.split.bit_width_fixed) - 1;
		}

		if (i % 2)
			cnt.config |= EVNTSEL_INT;
		else
			cnt.config &= ~EVNTSEL_INT;
		idx = event_to_global_idx(&cnt);
		measure(&cnt, 1);
		report(cnt.count == 1, "cntr-%d", i);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
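		/*
		 * The counter was primed to overflow after a single event:
		 * the overflow must latch into IA32_PERF_GLOBAL_STATUS, be
		 * clearable by writing the bit back through
		 * IA32_PERF_GLOBAL_OVF_CTRL, and raise a PMI only when
		 * EVNTSEL_INT was set (every other iteration).
		 */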
		report(status & (1ull << idx), "status-%d", i);
		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
		report(!(status & (1ull << idx)), "status clear-%d", i);
		report(check_irq() == (i % 2), "irq-%d", i);
	}

	report_prefix_pop();
}

static void check_gp_counter_cmask(void)
{
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
		.count = 0,
	};
	cnt.config |= (0x2 << EVNTSEL_CMASK_SHIFT);
	measure(&cnt, 1);
	report(cnt.count < gp_events[1].min, "cmask");
}
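/*
 * RDPMC "fast" mode (ECX bit 31 set) returns only the low 32 bits of
 * the selected counter; bit 30 selects the fixed-counter range. Fast
 * reads #GP on CPUs that do not support them, which the callers below
 * tolerate via test_for_exception().
 */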
static void do_rdpmc_fast(void *ptr)
{
	pmu_counter_t *cnt = ptr;
	uint32_t idx = (uint32_t)cnt->idx | (1u << 31);

	if (!is_gp(cnt))
		idx |= 1 << 30;

	cnt->count = rdpmc(idx);
}

static void check_rdpmc(void)
{
	uint64_t val = 0xff0123456789ull;
	bool exc;
	int i;

	report_prefix_push("rdpmc");

	for (i = 0; i < num_counters; i++) {
		uint64_t x;
		pmu_counter_t cnt = {
			.ctr = gp_counter_base + i,
			.idx = i
		};

		/*
		 * Without full-width writes, only the low 32 bits are
		 * writable, and the value is sign-extended.
		 */
		if (gp_counter_base == MSR_IA32_PERFCTR0)
			x = (uint64_t)(int64_t)(int32_t)val;
		else
			x = (uint64_t)(int64_t)val;

		/* Mask according to the number of supported bits */
		x &= (1ull << eax.split.bit_width) - 1;

		wrmsr(gp_counter_base + i, val);
		report(rdpmc(i) == x, "cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fast-%d", i);
		else
			report(cnt.count == (u32)val, "fast-%d", i);
	}
	for (i = 0; i < edx.split.num_counters_fixed; i++) {
		uint64_t x = val & ((1ull << edx.split.bit_width_fixed) - 1);
		pmu_counter_t cnt = {
			.ctr = MSR_CORE_PERF_FIXED_CTR0 + i,
			.idx = i
		};

		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, x);
		report(rdpmc(i | (1 << 30)) == x, "fixed cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fixed fast-%d", i);
		else
			report(cnt.count == (u32)x, "fixed fast-%d", i);
	}

	report_prefix_pop();
}

static void check_running_counter_wrmsr(void)
{
	uint64_t status;
	uint64_t count;
	pmu_counter_t evt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
		.count = 0,
	};

	report_prefix_push("running counter wrmsr");

	start_event(&evt);
	loop();
	wrmsr(gp_counter_base, 0);
	stop_event(&evt);
	report(evt.count < gp_events[1].min, "cntr");

	/* clear status before overflow test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	evt.count = 0;
	start_event(&evt);

	count = -1;
	if (gp_counter_base == MSR_IA32_PMC0)
		count &= (1ull << eax.split.bit_width) - 1;

	wrmsr(gp_counter_base, count);

	loop();
	stop_event(&evt);
	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
	report(status & 1, "status");

	report_prefix_pop();
}
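/*
 * Verify that instructions emulated by KVM (via the forced-emulation
 * prefix) are still reflected in the PMU: the branch and instruction
 * counters must advance and their overflow bits must be set.
 */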
static void check_emulated_instr(void)
{
	uint64_t status, instr_start, brnch_start;
	pmu_counter_t brnch_cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		/* branch instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[5].unit_sel,
		.count = 0,
	};
	pmu_counter_t instr_cnt = {
		.ctr = MSR_IA32_PERFCTR0 + 1,
		/* instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
		.count = 0,
	};
	report_prefix_push("emulated instruction");

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	start_event(&brnch_cnt);
	start_event(&instr_cnt);

	brnch_start = -EXPECTED_BRNCH;
	instr_start = -EXPECTED_INSTR;
	wrmsr(MSR_IA32_PERFCTR0, brnch_start);
	wrmsr(MSR_IA32_PERFCTR0 + 1, instr_start);
	// KVM_FEP is a magic prefix that forces emulation so
	// 'KVM_FEP "jne label\n"' just counts as a single instruction.
	asm volatile(
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"label:\n"
		:
		:
		: "eax", "ebx", "ecx", "edx");

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	stop_event(&brnch_cnt);
	stop_event(&instr_cnt);

	// Check that the end count - start count is at least the expected
	// number of instructions and branches.
	report(instr_cnt.count - instr_start >= EXPECTED_INSTR,
	       "instruction count");
	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
	       "branch count");
	// Additionally check that those counters overflowed properly.
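	// GLOBAL_STATUS bit 0 corresponds to the branch counter
	// (PERFCTR0) and bit 1 to the instruction counter (PERFCTR0 + 1).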
	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
	report(status & 1, "branch counter overflow");
	report(status & 2, "instruction counter overflow");

	report_prefix_pop();
}

static void check_counters(void)
{
	check_gp_counters();
	check_fixed_counters();
	check_rdpmc();
	check_counters_many();
	check_counter_overflow();
	check_gp_counter_cmask();
	check_running_counter_wrmsr();
}

static void do_unsupported_width_counter_write(void *index)
{
	wrmsr(MSR_IA32_PMC0 + *((int *) index), 0xffffff0123456789ull);
}

static void check_gp_counters_write_width(void)
{
	u64 val_64 = 0xffffff0123456789ull;
	u64 val_32 = val_64 & ((1ull << 32) - 1);
	u64 val_max_width = val_64 & ((1ull << eax.split.bit_width) - 1);
	int i;

	/*
	 * MSR_IA32_PERFCTRn supports 64-bit writes,
	 * but only the lowest 32 bits are valid.
	 */
	for (i = 0; i < num_counters; i++) {
		wrmsr(MSR_IA32_PERFCTR0 + i, val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_64);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
	}

	/*
	 * MSR_IA32_PMCn supports writing values up to GP counter width,
	 * and only the lowest bits of GP counter width are valid.
	 */
	for (i = 0; i < num_counters; i++) {
		wrmsr(MSR_IA32_PMC0 + i, val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);

		wrmsr(MSR_IA32_PMC0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_max_width);

		report(test_for_exception(GP_VECTOR,
			do_unsupported_width_counter_write, &i),
		       "writing unsupported width to MSR_IA32_PMC%d raises #GP", i);
	}
}

/*
 * Per the SDM, reference cycles are currently implemented using the
 * core crystal clock, TSC, or bus clock. Calibrate to the TSC
 * frequency to set reasonable expectations.
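 *
 * The guest's TSC : reference-cycles ratio is host-dependent, so the
 * static bounds on gp_events[2] ("ref cycles") are rescaled by the
 * measured ratio before any event checks run.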
 */
static void set_ref_cycle_expectations(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[2].unit_sel,
		.count = 0,
	};
	uint64_t tsc_delta;
	uint64_t t0, t1, t2, t3;

	if (!eax.split.num_counters || (ebx.full & (1 << 2)))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	t0 = fenced_rdtsc();
	start_event(&cnt);
	t1 = fenced_rdtsc();

	/*
	 * This loop has to run long enough to dominate the VM-exit
	 * costs for playing with the PMU MSRs on start and stop.
	 *
	 * On a 2.6GHz Ice Lake, with the TSC frequency at 104 times
	 * the core crystal clock, this function calculated a guest
	 * TSC : ref cycles ratio of around 105 with ECX initialized
	 * to one billion.
	 */
	asm volatile("loop ." : "+c"((int){1000000000ull}));

	t2 = fenced_rdtsc();
	stop_event(&cnt);
	t3 = fenced_rdtsc();

	tsc_delta = ((t2 - t1) + (t3 - t0)) / 2;

	if (!tsc_delta)
		return;

	gp_events[2].min = (gp_events[2].min * cnt.count) / tsc_delta;
	gp_events[2].max = (gp_events[2].max * cnt.count) / tsc_delta;
}

int main(int ac, char **av)
{
	struct cpuid id = cpuid(10);

	setup_vm();
	handle_irq(PC_VECTOR, cnt_overflow);
	buf = malloc(N*64);

	eax.full = id.a;
	ebx.full = id.b;
	edx.full = id.d;

	if (!eax.split.version_id) {
		printf("No pmu is detected!\n");
		return report_summary();
	}

	if (eax.split.version_id == 1) {
		printf("PMU version 1 is not supported\n");
		return report_summary();
	}

	set_ref_cycle_expectations();

	printf("PMU version: %d\n", eax.split.version_id);
	printf("GP counters: %d\n", eax.split.num_counters);
	printf("GP counter width: %d\n", eax.split.bit_width);
	printf("Mask length: %d\n", eax.split.mask_length);
	printf("Fixed counters: %d\n", edx.split.num_counters_fixed);
	printf("Fixed counter width: %d\n",
	       edx.split.bit_width_fixed);

	num_counters = eax.split.num_counters;

	apic_write(APIC_LVTPC, PC_VECTOR);

	if (ac > 1 && !strcmp(av[1], "emulation")) {
		check_emulated_instr();
	} else {
		check_counters();

		if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES) {
			gp_counter_base = MSR_IA32_PMC0;
			report_prefix_push("full-width writes");
			check_counters();
			check_gp_counters_write_width();
		}
	}

	return report_summary();
}