#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
#include "x86/isr.h"
#include "alloc.h"

#include "libcflat.h"
#include <stdint.h>

#define FIXED_CNT_INDEX 32
#define PC_VECTOR	32

#define EVNTSEL_EVENT_SHIFT	0
#define EVNTSEL_UMASK_SHIFT	8
#define EVNTSEL_USR_SHIFT	16
#define EVNTSEL_OS_SHIFT	17
#define EVNTSEL_EDGE_SHIFT	18
#define EVNTSEL_PC_SHIFT	19
#define EVNTSEL_INT_SHIFT	20
#define EVNTSEL_EN_SHIFT	22
#define EVNTSEL_INV_SHIFT	23
#define EVNTSEL_CMASK_SHIFT	24

#define EVNTSEL_EN	(1 << EVNTSEL_EN_SHIFT)
#define EVNTSEL_USR	(1 << EVNTSEL_USR_SHIFT)
#define EVNTSEL_OS	(1 << EVNTSEL_OS_SHIFT)
#define EVNTSEL_PC	(1 << EVNTSEL_PC_SHIFT)
#define EVNTSEL_INT	(1 << EVNTSEL_INT_SHIFT)
#define EVNTSEL_INV	(1 << EVNTSEL_INV_SHIFT)

#define N 1000000
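
/*
 * Illustrative sketch, not part of the original tests: an event select
 * value is just the event code in bits 7:0 and the unit mask in bits
 * 15:8, plus the flag bits defined above.  For example, 0x412e in
 * gp_events[] below decodes to event 0x2e with umask 0x41 (LLC misses).
 * evntsel_encode() is a hypothetical helper added only to document the
 * encoding; the tests build their configs directly from unit_sel.
 */
static inline uint32_t evntsel_encode(uint8_t event, uint8_t umask)
{
	return ((uint32_t)event << EVNTSEL_EVENT_SHIFT) |
	       ((uint32_t)umask << EVNTSEL_UMASK_SHIFT);
}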

typedef struct {
	uint32_t ctr;
	uint32_t config;
	uint64_t count;
	int idx;
} pmu_counter_t;

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
} eax;

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
} ebx;

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
} edx;

struct pmu_event {
	const char *name;
	uint32_t unit_sel;
	int min;
	int max;
} gp_events[] = {
	{"core cycles", 0x003c, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"ref cycles", 0x013c, 0.1*N, 30*N},
	{"llc reference", 0x4f2e, 1, 2*N},
	{"llc misses", 0x412e, 1, 1*N},
	{"branches", 0x00c4, 1*N, 1.1*N},
	{"branch misses", 0x00c5, 0, 0.1*N},
}, fixed_events[] = {
	{"fixed 1", MSR_CORE_PERF_FIXED_CTR0, 10*N, 10.2*N},
	{"fixed 2", MSR_CORE_PERF_FIXED_CTR0 + 1, 1*N, 30*N},
	{"fixed 3", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
};

static int num_counters;

char *buf;

/*
 * Touch one cache line of buf per iteration; each iteration retires
 * exactly 10 instructions (mov, add, 7 nops, loop), which the event
 * bounds in gp_events[] above rely on.
 */
static inline void loop(void)
{
	unsigned long tmp, tmp2, tmp3;

	asm volatile("1: mov (%1), %2; add $64, %1; nop; nop; nop; nop; nop; nop; nop; loop 1b"
			: "=c"(tmp), "=r"(tmp2), "=r"(tmp3): "0"(N), "1"(buf));

}

volatile uint64_t irq_received;

static void cnt_overflow(isr_regs_t *regs)
{
	irq_received++;
	apic_write(APIC_EOI, 0);
}

static bool check_irq(void)
{
	int i;
	irq_received = 0;
	irq_enable();
	for (i = 0; i < 100000 && !irq_received; i++)
		asm volatile("pause");
	irq_disable();
	return irq_received;
}

static bool is_gp(pmu_counter_t *evt)
{
	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0;
}

/*
 * Bit position of this counter in GLOBAL_CTRL/GLOBAL_STATUS: GP counter
 * n is bit n, fixed counter n is bit 32 + n (FIXED_CNT_INDEX).
 */
static int event_to_global_idx(pmu_counter_t *cnt)
{
	return cnt->ctr - (is_gp(cnt) ? MSR_IA32_PERFCTR0 :
		(MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX));
}

static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
{
	if (is_gp(cnt)) {
		int i;

		for (i = 0; i < sizeof(gp_events)/sizeof(gp_events[0]); i++)
			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
				return &gp_events[i];
	} else
		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];

	return (void*)0;
}

static void global_enable(pmu_counter_t *cnt)
{
	cnt->idx = event_to_global_idx(cnt);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
			(1ull << cnt->idx));
}

static void global_disable(pmu_counter_t *cnt)
{
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
			~(1ull << cnt->idx));
}

static void start_event(pmu_counter_t *evt)
{
	wrmsr(evt->ctr, evt->count);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
				evt->config | EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		uint32_t usrospmi = 0;

		if (evt->config & EVNTSEL_OS)
			usrospmi |= (1 << 0);
		if (evt->config & EVNTSEL_USR)
			usrospmi |= (1 << 1);
		if (evt->config & EVNTSEL_INT)
			usrospmi |= (1 << 3); // PMI on overflow
		ctrl = (ctrl & ~(0xf << shift)) | (usrospmi << shift);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl);
	}
	global_enable(evt);
	apic_write(APIC_LVTPC, PC_VECTOR);
}

static void stop_event(pmu_counter_t *evt)
{
	global_disable(evt);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
				evt->config & ~EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl & ~(0xf << shift));
	}
	evt->count = rdmsr(evt->ctr);
}
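
/*
 * Usage sketch (assumes the helpers above): measuring one event by hand
 * follows the same start/loop/stop pattern that measure() below applies
 * to an array of counters:
 *
 *	pmu_counter_t cnt = {
 *		.ctr = MSR_IA32_PERFCTR0,
 *		.config = EVNTSEL_OS | EVNTSEL_USR | 0x00c0, // instructions
 *	};
 *	start_event(&cnt);
 *	loop();
 *	stop_event(&cnt);
 *	// cnt.count now holds the value read back by stop_event()
 */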

static void measure(pmu_counter_t *evt, int count)
{
	int i;
	for (i = 0; i < count; i++)
		start_event(&evt[i]);
	loop();
	for (i = 0; i < count; i++)
		stop_event(&evt[i]);
}

static bool verify_event(uint64_t count, struct pmu_event *e)
{
	// printf("%lld >= %lld <= %lld\n", e->min, count, e->max);
	return count >= e->min && count <= e->max;

}

static bool verify_counter(pmu_counter_t *cnt)
{
	return verify_event(cnt->count, get_counter_event(cnt));
}

static void check_gp_counter(struct pmu_event *evt)
{
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel,
	};
	int i;

	for (i = 0; i < num_counters; i++, cnt.ctr++) {
		cnt.count = 0;
		measure(&cnt, 1);
		report(verify_event(cnt.count, evt), "%s-%d", evt->name, i);
	}
}

static void check_gp_counters(void)
{
	int i;

	for (i = 0; i < sizeof(gp_events)/sizeof(gp_events[0]); i++)
		if (!(ebx.full & (1 << i)))
			check_gp_counter(&gp_events[i]);
		else
			printf("GP event '%s' is disabled\n",
					gp_events[i].name);
}

static void check_fixed_counters(void)
{
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR,
	};
	int i;

	for (i = 0; i < edx.split.num_counters_fixed; i++) {
		cnt.count = 0;
		cnt.ctr = fixed_events[i].unit_sel;
		measure(&cnt, 1);
		report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d",
		       i);
	}
}

static void check_counters_many(void)
{
	pmu_counter_t cnt[10];
	int i, n;

	for (i = 0, n = 0; n < num_counters; i++) {
		if (ebx.full & (1 << i))
			continue;

		cnt[n].count = 0;
		cnt[n].ctr = MSR_IA32_PERFCTR0 + n;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
			gp_events[i % ARRAY_SIZE(gp_events)].unit_sel;
		n++;
	}
	for (i = 0; i < edx.split.num_counters_fixed; i++) {
		cnt[n].count = 0;
		cnt[n].ctr = fixed_events[i].unit_sel;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
		n++;
	}

	measure(cnt, n);

	for (i = 0; i < n; i++)
		if (!verify_counter(&cnt[i]))
			break;

	report(i == n, "all counters");
}

static void check_counter_overflow(void)
{
	uint64_t count;
	int i;
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
		.count = 0,
	};
	measure(&cnt, 1);
	count = cnt.count;

	/* clear status before test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	report_prefix_push("overflow");

	for (i = 0; i < num_counters + 1; i++, cnt.ctr++) {
		uint64_t status;
		int idx;

		/* Preset so the counter overflows and reads 1 after the workload. */
		cnt.count = 1 - count;

		/* The extra iteration exercises overflow of fixed counter 0. */
		if (i == num_counters) {
			cnt.ctr = fixed_events[0].unit_sel;
			cnt.count &= (1ul << edx.split.bit_width_fixed) - 1;
		}

		if (i % 2)
			cnt.config |= EVNTSEL_INT;
		else
			cnt.config &= ~EVNTSEL_INT;
		idx = event_to_global_idx(&cnt);
		measure(&cnt, 1);
		report(cnt.count == 1, "cntr-%d", i);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
		report(status & (1ull << idx), "status-%d", i);
		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
		report(!(status & (1ull << idx)), "status clear-%d", i);
		report(check_irq() == (i % 2), "irq-%d", i);
	}

	report_prefix_pop();
}

static void check_gp_counter_cmask(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
		.count = 0,
	};
	cnt.config |= (0x2 << EVNTSEL_CMASK_SHIFT);
	measure(&cnt, 1);
	report(cnt.count < gp_events[1].min, "cmask");
}
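
/*
 * Note on the rdpmc index encoding used below: bit 30 of ECX selects the
 * fixed-counter space and bit 31 requests a "fast" read of the low 32
 * bits only.  do_rdpmc_fast() exercises exactly that combination and is
 * run under test_for_exception() because fast reads may #GP on hardware
 * that does not support them.
 */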

static void do_rdpmc_fast(void *ptr)
{
	pmu_counter_t *cnt = ptr;
	uint32_t idx = (uint32_t)cnt->idx | (1u << 31);

	if (!is_gp(cnt))
		idx |= 1 << 30;

	cnt->count = rdpmc(idx);
}

static void check_rdpmc(void)
{
	uint64_t val = 0x1f3456789ull;
	bool exc;
	int i;

	report_prefix_push("rdpmc");

	for (i = 0; i < num_counters; i++) {
		uint64_t x;
		pmu_counter_t cnt = {
			.ctr = MSR_IA32_PERFCTR0 + i,
			.idx = i
		};

		/*
		 * Only the low 32 bits are writable, and the value is
		 * sign-extended.
		 */
		x = (uint64_t)(int64_t)(int32_t)val;

		/* Mask according to the number of supported bits */
		x &= (1ull << eax.split.bit_width) - 1;

		wrmsr(MSR_IA32_PERFCTR0 + i, val);
		report(rdpmc(i) == x, "cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fast-%d", i);
		else
			report(cnt.count == (u32)val, "fast-%d", i);
	}
	for (i = 0; i < edx.split.num_counters_fixed; i++) {
		uint64_t x = val & ((1ull << edx.split.bit_width_fixed) - 1);
		pmu_counter_t cnt = {
			.ctr = MSR_CORE_PERF_FIXED_CTR0 + i,
			.idx = i
		};

		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, x);
		report(rdpmc(i | (1 << 30)) == x, "fixed cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fixed fast-%d", i);
		else
			report(cnt.count == (u32)x, "fixed fast-%d", i);
	}

	report_prefix_pop();
}

static void check_running_counter_wrmsr(void)
{
	uint64_t status;
	pmu_counter_t evt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
		.count = 0,
	};

	report_prefix_push("running counter wrmsr");

	start_event(&evt);
	loop();
	wrmsr(MSR_IA32_PERFCTR0, 0);
	stop_event(&evt);
	report(evt.count < gp_events[1].min, "cntr");

	/* clear status before overflow test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	evt.count = 0;
	start_event(&evt);
	wrmsr(MSR_IA32_PERFCTR0, -1);
	loop();
	stop_event(&evt);
	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
	report(status & 1, "status");

	report_prefix_pop();
}

int main(int ac, char **av)
{
	struct cpuid id = cpuid(10);

	setup_vm();
	setup_idt();
	handle_irq(PC_VECTOR, cnt_overflow);
	buf = malloc(N*64);

	eax.full = id.a;
	ebx.full = id.b;
	edx.full = id.d;

	if (!eax.split.version_id) {
		printf("No pmu is detected!\n");
		return report_summary();
	}
	printf("PMU version: %d\n", eax.split.version_id);
	printf("GP counters: %d\n", eax.split.num_counters);
	printf("GP counter width: %d\n", eax.split.bit_width);
	printf("Mask length: %d\n", eax.split.mask_length);
	printf("Fixed counters: %d\n", edx.split.num_counters_fixed);
	printf("Fixed counter width: %d\n", edx.split.bit_width_fixed);

	num_counters = eax.split.num_counters;

	apic_write(APIC_LVTPC, PC_VECTOR);

	check_gp_counters();
	check_fixed_counters();
	check_rdpmc();
	check_counters_many();
	check_counter_overflow();
	check_gp_counter_cmask();
	check_running_counter_wrmsr();

	return report_summary();
}
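
/*
 * Build/run sketch (assumes a standard kvm-unit-tests tree; the runner
 * path and flags may differ between versions):
 *
 *	./configure && make
 *	./x86/run x86/pmu.flat -cpu host
 */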