#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
#include "x86/isr.h"
#include "alloc.h"

#include "libcflat.h"
#include <stdint.h>

#define FIXED_CNT_INDEX 32
#define PC_VECTOR 32

#define EVNTSEL_EVENT_SHIFT 0
#define EVNTSEL_UMASK_SHIFT 8
#define EVNTSEL_USR_SHIFT 16
#define EVNTSEL_OS_SHIFT 17
#define EVNTSEL_EDGE_SHIFT 18
#define EVNTSEL_PC_SHIFT 19
#define EVNTSEL_INT_SHIFT 20
#define EVNTSEL_EN_SHIFT 22
#define EVNTSEL_INV_SHIFT 23
#define EVNTSEL_CMASK_SHIFT 24

#define EVNTSEL_EN	(1 << EVNTSEL_EN_SHIFT)
#define EVNTSEL_USR	(1 << EVNTSEL_USR_SHIFT)
#define EVNTSEL_OS	(1 << EVNTSEL_OS_SHIFT)
#define EVNTSEL_PC	(1 << EVNTSEL_PC_SHIFT)
#define EVNTSEL_INT	(1 << EVNTSEL_INT_SHIFT)
#define EVNTSEL_INV	(1 << EVNTSEL_INV_SHIFT)

#define N 1000000

// These values match the number of instructions and branches in the
// assembly block in check_emulated_instr().
#define EXPECTED_INSTR 17
#define EXPECTED_BRNCH 5

typedef struct {
	uint32_t ctr;
	uint32_t config;
	uint64_t count;
	int idx;
} pmu_counter_t;

struct pmu_event {
	const char *name;
	uint32_t unit_sel;
	int min;
	int max;
} gp_events[] = {
	{"core cycles", 0x003c, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"ref cycles", 0x013c, 1*N, 30*N},
	{"llc references", 0x4f2e, 1, 2*N},
	{"llc misses", 0x412e, 1, 1*N},
	{"branches", 0x00c4, 1*N, 1.1*N},
	{"branch misses", 0x00c5, 0, 0.1*N},
}, fixed_events[] = {
	{"fixed 1", MSR_CORE_PERF_FIXED_CTR0, 10*N, 10.2*N},
	{"fixed 2", MSR_CORE_PERF_FIXED_CTR0 + 1, 1*N, 30*N},
	{"fixed 3", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
};

#define PMU_CAP_FW_WRITES	(1ULL << 13)
static u64 gp_counter_base = MSR_IA32_PERFCTR0;

char *buf;

static inline void loop(void)
{
	unsigned long tmp, tmp2, tmp3;

	asm volatile("1: mov (%1), %2; add $64, %1; nop; nop; nop; nop; nop; nop; nop; loop 1b"
			: "=c"(tmp), "=r"(tmp2), "=r"(tmp3) : "0"(N), "1"(buf));
}

volatile uint64_t irq_received;

static void cnt_overflow(isr_regs_t *regs)
{
	irq_received++;
	apic_write(APIC_EOI, 0);
}

static bool check_irq(void)
{
	int i;
	irq_received = 0;
	irq_enable();
	for (i = 0; i < 100000 && !irq_received; i++)
		asm volatile("pause");
	irq_disable();
	return irq_received;
}

static bool is_gp(pmu_counter_t *evt)
{
	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0 ||
		evt->ctr >= MSR_IA32_PMC0;
}
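
/*
 * In IA32_PERF_GLOBAL_CTRL/STATUS, GP counter n is tracked by bit n and
 * fixed counter n by bit FIXED_CNT_INDEX + n.  event_to_global_idx()
 * recovers that bit position from the counter's MSR address.
 */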
static int event_to_global_idx(pmu_counter_t *cnt)
{
	return cnt->ctr - (is_gp(cnt) ? gp_counter_base :
		(MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX));
}

static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
{
	if (is_gp(cnt)) {
		int i;

		for (i = 0; i < sizeof(gp_events)/sizeof(gp_events[0]); i++)
			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
				return &gp_events[i];
	} else
		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];

	return (void*)0;
}

static void global_enable(pmu_counter_t *cnt)
{
	cnt->idx = event_to_global_idx(cnt);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
			(1ull << cnt->idx));
}

static void global_disable(pmu_counter_t *cnt)
{
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
			~(1ull << cnt->idx));
}

static void __start_event(pmu_counter_t *evt, uint64_t count)
{
	evt->count = count;
	wrmsr(evt->ctr, evt->count);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
				evt->config | EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		uint32_t usrospmi = 0;

		if (evt->config & EVNTSEL_OS)
			usrospmi |= (1 << 0);
		if (evt->config & EVNTSEL_USR)
			usrospmi |= (1 << 1);
		if (evt->config & EVNTSEL_INT)
			usrospmi |= (1 << 3); // PMI on overflow
		ctrl = (ctrl & ~(0xf << shift)) | (usrospmi << shift);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl);
	}
	global_enable(evt);
	apic_write(APIC_LVTPC, PC_VECTOR);
}

static void start_event(pmu_counter_t *evt)
{
	__start_event(evt, 0);
}

static void stop_event(pmu_counter_t *evt)
{
	global_disable(evt);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
			evt->config & ~EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl & ~(0xf << shift));
	}
	evt->count = rdmsr(evt->ctr);
}

static noinline void measure_many(pmu_counter_t *evt, int count)
{
	int i;
	for (i = 0; i < count; i++)
		start_event(&evt[i]);
	loop();
	for (i = 0; i < count; i++)
		stop_event(&evt[i]);
}

static void measure_one(pmu_counter_t *evt)
{
	measure_many(evt, 1);
}

static noinline void __measure(pmu_counter_t *evt, uint64_t count)
{
	__start_event(evt, count);
	loop();
	stop_event(evt);
}

static bool verify_event(uint64_t count, struct pmu_event *e)
{
	// printf("%d <= %ld <= %d\n", e->min, count, e->max);
	return count >= e->min && count <= e->max;
}

static bool verify_counter(pmu_counter_t *cnt)
{
	return verify_event(cnt->count, get_counter_event(cnt));
}

static void check_gp_counter(struct pmu_event *evt)
{
	int nr_gp_counters = pmu_nr_gp_counters();
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel,
	};
	int i;

	for (i = 0; i < nr_gp_counters; i++, cnt.ctr++) {
		measure_one(&cnt);
		report(verify_event(cnt.count, evt), "%s-%d", evt->name, i);
	}
}
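
/*
 * Measure each architectural event on every GP counter; events that
 * pmu_gp_counter_is_available() reports as unavailable are only logged
 * as disabled.
 */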
static void check_gp_counters(void)
{
	int i;

	for (i = 0; i < sizeof(gp_events)/sizeof(gp_events[0]); i++)
		if (pmu_gp_counter_is_available(i))
			check_gp_counter(&gp_events[i]);
		else
			printf("GP event '%s' is disabled\n",
					gp_events[i].name);
}

static void check_fixed_counters(void)
{
	int nr_fixed_counters = pmu_nr_fixed_counters();
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR,
	};
	int i;

	for (i = 0; i < nr_fixed_counters; i++) {
		cnt.ctr = fixed_events[i].unit_sel;
		measure_one(&cnt);
		report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d", i);
	}
}

static void check_counters_many(void)
{
	int nr_fixed_counters = pmu_nr_fixed_counters();
	int nr_gp_counters = pmu_nr_gp_counters();
	pmu_counter_t cnt[10];
	int i, n;

	for (i = 0, n = 0; n < nr_gp_counters; i++) {
		if (!pmu_gp_counter_is_available(i))
			continue;

		cnt[n].ctr = gp_counter_base + n;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
			gp_events[i % ARRAY_SIZE(gp_events)].unit_sel;
		n++;
	}
	for (i = 0; i < nr_fixed_counters; i++) {
		cnt[n].ctr = fixed_events[i].unit_sel;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
		n++;
	}

	measure_many(cnt, n);

	for (i = 0; i < n; i++)
		if (!verify_counter(&cnt[i]))
			break;

	report(i == n, "all counters");
}

static void check_counter_overflow(void)
{
	int nr_gp_counters = pmu_nr_gp_counters();
	uint64_t count;
	int i;
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	__measure(&cnt, 0);
	count = cnt.count;

	/* clear status before test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	report_prefix_push("overflow");

	for (i = 0; i < nr_gp_counters + 1; i++, cnt.ctr++) {
		uint64_t status;
		int idx;

		cnt.count = 1 - count;
		if (gp_counter_base == MSR_IA32_PMC0)
			cnt.count &= (1ull << pmu_gp_counter_width()) - 1;

		if (i == nr_gp_counters) {
			cnt.ctr = fixed_events[0].unit_sel;
			cnt.count &= (1ull << pmu_fixed_counter_width()) - 1;
		}

		if (i % 2)
			cnt.config |= EVNTSEL_INT;
		else
			cnt.config &= ~EVNTSEL_INT;
		idx = event_to_global_idx(&cnt);
		__measure(&cnt, cnt.count);
		report(cnt.count == 1, "cntr-%d", i);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
		report(status & (1ull << idx), "status-%d", i);
		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
		report(!(status & (1ull << idx)), "status clear-%d", i);
		report(check_irq() == (i % 2), "irq-%d", i);
	}

	report_prefix_pop();
}

static void check_gp_counter_cmask(void)
{
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	cnt.config |= (0x2 << EVNTSEL_CMASK_SHIFT);
	measure_one(&cnt);
	report(cnt.count < gp_events[1].min, "cmask");
}
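
/*
 * RDPMC index encoding used below: bit 30 selects the fixed-function
 * counters and bit 31 requests a "fast" read of only the low 32 bits.
 * Fast reads are not supported everywhere, so a #GP is treated as a
 * skip rather than a failure.
 */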
static void do_rdpmc_fast(void *ptr)
{
	pmu_counter_t *cnt = ptr;
	uint32_t idx = (uint32_t)cnt->idx | (1u << 31);

	if (!is_gp(cnt))
		idx |= 1 << 30;

	cnt->count = rdpmc(idx);
}

static void check_rdpmc(void)
{
	int fixed_counter_width = pmu_fixed_counter_width();
	int nr_fixed_counters = pmu_nr_fixed_counters();
	u8 gp_counter_width = pmu_gp_counter_width();
	int nr_gp_counters = pmu_nr_gp_counters();
	uint64_t val = 0xff0123456789ull;
	bool exc;
	int i;

	report_prefix_push("rdpmc");

	for (i = 0; i < nr_gp_counters; i++) {
		uint64_t x;
		pmu_counter_t cnt = {
			.ctr = gp_counter_base + i,
			.idx = i
		};

		/*
		 * Without full-width writes, only the low 32 bits are writable,
		 * and the value is sign-extended.
		 */
		if (gp_counter_base == MSR_IA32_PERFCTR0)
			x = (uint64_t)(int64_t)(int32_t)val;
		else
			x = (uint64_t)(int64_t)val;

		/* Mask according to the number of supported bits */
		x &= (1ull << gp_counter_width) - 1;

		wrmsr(gp_counter_base + i, val);
		report(rdpmc(i) == x, "cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fast-%d", i);
		else
			report(cnt.count == (u32)val, "fast-%d", i);
	}
	for (i = 0; i < nr_fixed_counters; i++) {
		uint64_t x = val & ((1ull << fixed_counter_width) - 1);
		pmu_counter_t cnt = {
			.ctr = MSR_CORE_PERF_FIXED_CTR0 + i,
			.idx = i
		};

		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, x);
		report(rdpmc(i | (1 << 30)) == x, "fixed cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fixed fast-%d", i);
		else
			report(cnt.count == (u32)x, "fixed fast-%d", i);
	}

	report_prefix_pop();
}

static void check_running_counter_wrmsr(void)
{
	uint64_t status;
	uint64_t count;
	pmu_counter_t evt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};

	report_prefix_push("running counter wrmsr");

	start_event(&evt);
	loop();
	wrmsr(gp_counter_base, 0);
	stop_event(&evt);
	report(evt.count < gp_events[1].min, "cntr");

	/* clear status before overflow test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	start_event(&evt);

	count = -1;
	if (gp_counter_base == MSR_IA32_PMC0)
		count &= (1ull << pmu_gp_counter_width()) - 1;

	wrmsr(gp_counter_base, count);

	loop();
	stop_event(&evt);
	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
	report(status & 1, "status");

	report_prefix_pop();
}

static void check_emulated_instr(void)
{
	uint64_t status, instr_start, brnch_start;
	pmu_counter_t brnch_cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		/* branch instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[5].unit_sel,
	};
	pmu_counter_t instr_cnt = {
		.ctr = MSR_IA32_PERFCTR0 + 1,
		/* instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};
	report_prefix_push("emulated instruction");

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	start_event(&brnch_cnt);
	start_event(&instr_cnt);

	brnch_start = -EXPECTED_BRNCH;
	instr_start = -EXPECTED_INSTR;
	wrmsr(MSR_IA32_PERFCTR0, brnch_start);
	wrmsr(MSR_IA32_PERFCTR0 + 1, instr_start);
	// KVM_FEP is a magic prefix that forces emulation so
	// 'KVM_FEP "jne label\n"' just counts as a single instruction.
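	// The block below retires 17 instructions (mov + cmp, five emulated
	// jne's, and five mov/cpuid pairs) and 5 branches, matching
	// EXPECTED_INSTR and EXPECTED_BRNCH defined above.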
	asm volatile(
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"label:\n"
		:
		:
		: "eax", "ebx", "ecx", "edx");

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	stop_event(&brnch_cnt);
	stop_event(&instr_cnt);

	// Check that the end count - start count is at least the expected
	// number of instructions and branches.
	report(instr_cnt.count - instr_start >= EXPECTED_INSTR,
	       "instruction count");
	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
	       "branch count");
	// Additionally check that those counters overflowed properly.
	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
	report(status & 1, "branch counter overflow");
	report(status & 2, "instruction counter overflow");

	report_prefix_pop();
}

static void check_counters(void)
{
	if (is_fep_available())
		check_emulated_instr();

	check_gp_counters();
	check_fixed_counters();
	check_rdpmc();
	check_counters_many();
	check_counter_overflow();
	check_gp_counter_cmask();
	check_running_counter_wrmsr();
}
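
/*
 * Full-width counter writes: the legacy MSR_IA32_PERFCTRn interface
 * sign-extends bit 31, while the MSR_IA32_PMCn aliases (advertised by
 * PERF_CAPABILITIES bit 13, PMU_CAP_FW_WRITES) accept values up to the
 * full GP counter width and #GP on anything wider.
 */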
static void do_unsupported_width_counter_write(void *index)
{
	wrmsr(MSR_IA32_PMC0 + *((int *) index), 0xffffff0123456789ull);
}

static void check_gp_counters_write_width(void)
{
	u64 val_64 = 0xffffff0123456789ull;
	u64 val_32 = val_64 & ((1ull << 32) - 1);
	u64 val_max_width = val_64 & ((1ull << pmu_gp_counter_width()) - 1);
	int nr_gp_counters = pmu_nr_gp_counters();
	int i;

	/*
	 * MSR_IA32_PERFCTRn supports 64-bit writes,
	 * but only the lowest 32 bits are valid.
	 */
	for (i = 0; i < nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PERFCTR0 + i, val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_64);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
	}

	/*
	 * MSR_IA32_PMCn supports writing values up to GP counter width,
	 * and only the lowest bits of GP counter width are valid.
	 */
	for (i = 0; i < nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PMC0 + i, val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);

		wrmsr(MSR_IA32_PMC0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_max_width);

		report(test_for_exception(GP_VECTOR,
			do_unsupported_width_counter_write, &i),
		       "writing unsupported width to MSR_IA32_PMC%d raises #GP", i);
	}
}

/*
 * Per the SDM, reference cycles are currently implemented using the
 * core crystal clock, TSC, or bus clock. Calibrate to the TSC
 * frequency to set reasonable expectations.
 */
static void set_ref_cycle_expectations(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[2].unit_sel,
	};
	uint64_t tsc_delta;
	uint64_t t0, t1, t2, t3;

	/* Bit 2 enumerates the availability of reference cycles events. */
	if (!pmu_nr_gp_counters() || !pmu_gp_counter_is_available(2))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	t0 = fenced_rdtsc();
	start_event(&cnt);
	t1 = fenced_rdtsc();

	/*
	 * This loop has to run long enough to dominate the VM-exit
	 * costs for playing with the PMU MSRs on start and stop.
	 *
	 * On a 2.6GHz Ice Lake, with the TSC frequency at 104 times
	 * the core crystal clock, this function calculated a guest
	 * TSC : ref cycles ratio of around 105 with ECX initialized
	 * to one billion.
	 */
	asm volatile("loop ." : "+c"((int){1000000000ull}));

	t2 = fenced_rdtsc();
	stop_event(&cnt);
	t3 = fenced_rdtsc();

	tsc_delta = ((t2 - t1) + (t3 - t0)) / 2;

	if (!tsc_delta)
		return;

	gp_events[2].min = (gp_events[2].min * cnt.count) / tsc_delta;
	gp_events[2].max = (gp_events[2].max * cnt.count) / tsc_delta;
}

int main(int ac, char **av)
{
	setup_vm();
	handle_irq(PC_VECTOR, cnt_overflow);
	buf = malloc(N*64);

	if (!pmu_version()) {
		report_skip("No pmu is detected!");
		return report_summary();
	}

	if (pmu_version() == 1) {
		report_skip("PMU version 1 is not supported.");
		return report_summary();
	}

	set_ref_cycle_expectations();

	printf("PMU version: %d\n", pmu_version());
	printf("GP counters: %d\n", pmu_nr_gp_counters());
	printf("GP counter width: %d\n", pmu_gp_counter_width());
	printf("Mask length: %d\n", pmu_gp_counter_mask_length());
	printf("Fixed counters: %d\n", pmu_nr_fixed_counters());
	printf("Fixed counter width: %d\n", pmu_fixed_counter_width());

	apic_write(APIC_LVTPC, PC_VECTOR);

	check_counters();

	if (this_cpu_perf_capabilities() & PMU_CAP_FW_WRITES) {
		gp_counter_base = MSR_IA32_PMC0;
		report_prefix_push("full-width writes");
		check_counters();
		check_gp_counters_write_width();
		report_prefix_pop();
	}

	return report_summary();
}