xref: /kvm-unit-tests/x86/pmu.c (revision 414ee7d1d108779ba95f89a837d9a85d52ffdc9c)

#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/pmu.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
#include "x86/isr.h"
#include "alloc.h"

#include "libcflat.h"
#include <stdint.h>

#define N 1000000

// These values match the number of instructions and branches in the
// assembly block in check_emulated_instr().
#define EXPECTED_INSTR 17
#define EXPECTED_BRNCH 5

typedef struct {
	uint32_t ctr;
	uint32_t config;
	uint64_t count;
	int idx;
} pmu_counter_t;

struct pmu_event {
	const char *name;
	uint32_t unit_sel;
	int min;
	int max;
} gp_events[] = {
	{"core cycles", 0x003c, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"ref cycles", 0x013c, 1*N, 30*N},
	{"llc references", 0x4f2e, 1, 2*N},
	{"llc misses", 0x412e, 1, 1*N},
	{"branches", 0x00c4, 1*N, 1.1*N},
	{"branch misses", 0x00c5, 0, 0.1*N},
}, fixed_events[] = {
	{"fixed 1", MSR_CORE_PERF_FIXED_CTR0, 10*N, 10.2*N},
	{"fixed 2", MSR_CORE_PERF_FIXED_CTR0 + 1, 1*N, 30*N},
	{"fixed 3", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
};

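/*
 * Base MSR for the GP counters.  main() switches this to MSR_IA32_PMC0 when
 * full-width writes are supported, so the tests run against both aliases.
 */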
static u64 gp_counter_base = MSR_IA32_PERFCTR0;

char *buf;

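/*
 * Busy-loop workload measured by the tests below: each of the N iterations
 * executes 10 instructions (load, add, 7 nops, loop) and one branch, which is
 * what the gp_events min/max bounds above are scaled against.
 */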
static inline void loop(void)
{
	unsigned long tmp, tmp2, tmp3;

	asm volatile("1: mov (%1), %2; add $64, %1; nop; nop; nop; nop; nop; nop; nop; loop 1b"
			: "=c"(tmp), "=r"(tmp2), "=r"(tmp3): "0"(N), "1"(buf));
}

volatile uint64_t irq_received;

static void cnt_overflow(isr_regs_t *regs)
{
	irq_received++;
	apic_write(APIC_EOI, 0);
}

static bool check_irq(void)
{
	int i;
	irq_received = 0;
	irq_enable();
	for (i = 0; i < 100000 && !irq_received; i++)
		asm volatile("pause");
	irq_disable();
	return irq_received;
}

static bool is_gp(pmu_counter_t *evt)
{
	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0 ||
		evt->ctr >= MSR_IA32_PMC0;
}

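/*
 * Translate a counter MSR into its bit position in the global control/status
 * MSRs: GP counters map to their counter index, fixed counters start at
 * FIXED_CNT_INDEX (bit 32 of IA32_PERF_GLOBAL_CTRL).
 */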
static int event_to_global_idx(pmu_counter_t *cnt)
{
	return cnt->ctr - (is_gp(cnt) ? gp_counter_base :
		(MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX));
}

static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
{
	if (is_gp(cnt)) {
		int i;

		for (i = 0; i < ARRAY_SIZE(gp_events); i++)
			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
				return &gp_events[i];
	} else
		return &fixed_events[cnt->ctr - MSR_CORE_PERF_FIXED_CTR0];

	return NULL;
}

static void global_enable(pmu_counter_t *cnt)
{
	cnt->idx = event_to_global_idx(cnt);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
			(1ull << cnt->idx));
}

static void global_disable(pmu_counter_t *cnt)
{
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
			~(1ull << cnt->idx));
}

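/*
 * Program and start one counter.  GP counters are armed through their
 * PERFEVTSELx MSR; fixed counters are armed through a 4-bit field per counter
 * (OS, USR and PMI-enable bits) in IA32_FIXED_CTR_CTRL.
 */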
static void __start_event(pmu_counter_t *evt, uint64_t count)
{
	evt->count = count;
	wrmsr(evt->ctr, evt->count);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
				evt->config | EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		uint32_t usrospmi = 0;

		if (evt->config & EVNTSEL_OS)
			usrospmi |= (1 << 0);
		if (evt->config & EVNTSEL_USR)
			usrospmi |= (1 << 1);
		if (evt->config & EVNTSEL_INT)
			usrospmi |= (1 << 3); // PMI on overflow
		ctrl = (ctrl & ~(0xf << shift)) | (usrospmi << shift);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl);
	}
	global_enable(evt);
	apic_write(APIC_LVTPC, PMI_VECTOR);
}

static void start_event(pmu_counter_t *evt)
{
	__start_event(evt, 0);
}

static void stop_event(pmu_counter_t *evt)
{
	global_disable(evt);
	if (is_gp(evt))
		wrmsr(MSR_P6_EVNTSEL0 + event_to_global_idx(evt),
				evt->config & ~EVNTSEL_EN);
	else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl & ~(0xf << shift));
	}
	evt->count = rdmsr(evt->ctr);
}

static noinline void measure_many(pmu_counter_t *evt, int count)
{
	int i;
	for (i = 0; i < count; i++)
		start_event(&evt[i]);
	loop();
	for (i = 0; i < count; i++)
		stop_event(&evt[i]);
}

static void measure_one(pmu_counter_t *evt)
{
	measure_many(evt, 1);
}

static noinline void __measure(pmu_counter_t *evt, uint64_t count)
{
	__start_event(evt, count);
	loop();
	stop_event(evt);
}

static bool verify_event(uint64_t count, struct pmu_event *e)
{
	// printf("%d <= %ld <= %d\n", e->min, count, e->max);
	return count >= e->min && count <= e->max;
}

static bool verify_counter(pmu_counter_t *cnt)
{
	return verify_event(cnt->count, get_counter_event(cnt));
}

static void check_gp_counter(struct pmu_event *evt)
{
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel,
	};
	int i;

	for (i = 0; i < pmu.nr_gp_counters; i++, cnt.ctr++) {
		measure_one(&cnt);
		report(verify_event(cnt.count, evt), "%s-%d", evt->name, i);
	}
}

static void check_gp_counters(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gp_events); i++)
		if (pmu_gp_counter_is_available(i))
			check_gp_counter(&gp_events[i]);
		else
			printf("GP event '%s' is disabled\n",
					gp_events[i].name);
}

static void check_fixed_counters(void)
{
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR,
	};
	int i;

	for (i = 0; i < pmu.nr_fixed_counters; i++) {
		cnt.ctr = fixed_events[i].unit_sel;
		measure_one(&cnt);
		report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d", i);
	}
}

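/*
 * Program an event on every available GP counter and every fixed counter at
 * once, run the workload a single time, and verify all counts together.
 */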
static void check_counters_many(void)
{
	pmu_counter_t cnt[48];	/* sized to hold all GP plus fixed counters */
	int i, n;

	for (i = 0, n = 0; n < pmu.nr_gp_counters; i++) {
		if (!pmu_gp_counter_is_available(i))
			continue;

		cnt[n].ctr = gp_counter_base + n;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
			gp_events[i % ARRAY_SIZE(gp_events)].unit_sel;
		n++;
	}
	for (i = 0; i < pmu.nr_fixed_counters; i++) {
		cnt[n].ctr = fixed_events[i].unit_sel;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
		n++;
	}

	measure_many(cnt, n);

	for (i = 0; i < n; i++)
		if (!verify_counter(&cnt[i]))
			break;

	report(i == n, "all counters");
}

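/*
 * Run the workload once to learn roughly how many events it generates, then
 * return the preset that makes the counter wrap: starting from (1 - count),
 * a second identical run overflows the counter and leaves it at ~1.
 */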
static uint64_t measure_for_overflow(pmu_counter_t *cnt)
{
	__measure(cnt, 0);
	/*
	 * To generate overflow, i.e. roll over to '0', the initial count just
	 * needs to be preset to the negative expected count.  However, as per
	 * Intel's SDM, the preset count needs to be incremented by 1 to ensure
	 * the overflow interrupt is generated immediately instead of possibly
	 * waiting for the overflow to propagate through the counter.
	 */
	assert(cnt->count > 1);
	return 1 - cnt->count;
}

static void check_counter_overflow(void)
{
	uint64_t overflow_preset;
	int i;
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	overflow_preset = measure_for_overflow(&cnt);

	/* clear status before test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	report_prefix_push("overflow");

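	/*
	 * The extra iteration (i == nr_gp_counters) retargets the test at
	 * fixed counter 0; odd iterations also set EVNTSEL_INT so that PMI
	 * delivery on overflow is checked alongside the status bits.
	 */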
	for (i = 0; i < pmu.nr_gp_counters + 1; i++, cnt.ctr++) {
		uint64_t status;
		int idx;

		cnt.count = overflow_preset;
		if (gp_counter_base == MSR_IA32_PMC0)
			cnt.count &= (1ull << pmu.gp_counter_width) - 1;

		if (i == pmu.nr_gp_counters) {
			cnt.ctr = fixed_events[0].unit_sel;
			cnt.count = measure_for_overflow(&cnt);
			cnt.count &= (1ull << pmu.fixed_counter_width) - 1;
		}

		if (i % 2)
			cnt.config |= EVNTSEL_INT;
		else
			cnt.config &= ~EVNTSEL_INT;
		idx = event_to_global_idx(&cnt);
		__measure(&cnt, cnt.count);
		report(cnt.count == 1, "cntr-%d", i);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
		report(status & (1ull << idx), "status-%d", i);
		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
		report(!(status & (1ull << idx)), "status clear-%d", i);
		report(check_irq() == (i % 2), "irq-%d", i);
	}

	report_prefix_pop();
}

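/*
 * With a counter mask (CMASK) of 2, the counter increments only on cycles in
 * which at least two qualifying events occur, so the resulting count must fall
 * well below the normal per-run instruction minimum.
 */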
static void check_gp_counter_cmask(void)
{
	pmu_counter_t cnt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	cnt.config |= (0x2 << EVNTSEL_CMASK_SHIFT);
	measure_one(&cnt);
	report(cnt.count < gp_events[1].min, "cmask");
}

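/*
 * RDPMC with bit 31 of the index set requests a "fast" read that returns only
 * the low 32 bits of the counter; bit 30 selects the fixed-counter range.  If
 * the fast mode is unsupported the read raises #GP, which the caller treats as
 * a skip.
 */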
static void do_rdpmc_fast(void *ptr)
{
	pmu_counter_t *cnt = ptr;
	uint32_t idx = (uint32_t)cnt->idx | (1u << 31);

	if (!is_gp(cnt))
		idx |= 1 << 30;

	cnt->count = rdpmc(idx);
}

static void check_rdpmc(void)
{
	uint64_t val = 0xff0123456789ull;
	bool exc;
	int i;

	report_prefix_push("rdpmc");

	for (i = 0; i < pmu.nr_gp_counters; i++) {
		uint64_t x;
		pmu_counter_t cnt = {
			.ctr = gp_counter_base + i,
			.idx = i
		};

		/*
		 * Without full-width writes, only the low 32 bits are writable,
		 * and the value is sign-extended.
		 */
		if (gp_counter_base == MSR_IA32_PERFCTR0)
			x = (uint64_t)(int64_t)(int32_t)val;
		else
			x = (uint64_t)(int64_t)val;

		/* Mask according to the number of supported bits */
		x &= (1ull << pmu.gp_counter_width) - 1;

		wrmsr(gp_counter_base + i, val);
		report(rdpmc(i) == x, "cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fast-%d", i);
		else
			report(cnt.count == (u32)val, "fast-%d", i);
	}
	for (i = 0; i < pmu.nr_fixed_counters; i++) {
		uint64_t x = val & ((1ull << pmu.fixed_counter_width) - 1);
		pmu_counter_t cnt = {
			.ctr = MSR_CORE_PERF_FIXED_CTR0 + i,
			.idx = i
		};

		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, x);
		report(rdpmc(i | (1 << 30)) == x, "fixed cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fixed fast-%d", i);
		else
			report(cnt.count == (u32)x, "fixed fast-%d", i);
	}

	report_prefix_pop();
}

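/*
 * Verify that writing a counter while it is running takes effect: zeroing the
 * counter mid-run must keep the final count below the workload minimum, and
 * writing -1 must make it overflow and set the global status bit.
 */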
static void check_running_counter_wrmsr(void)
{
	uint64_t status;
	uint64_t count;
	pmu_counter_t evt = {
		.ctr = gp_counter_base,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};

	report_prefix_push("running counter wrmsr");

	start_event(&evt);
	loop();
	wrmsr(gp_counter_base, 0);
	stop_event(&evt);
	report(evt.count < gp_events[1].min, "cntr");

	/* clear status before overflow test */
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	start_event(&evt);

	count = -1;
	if (gp_counter_base == MSR_IA32_PMC0)
		count &= (1ull << pmu.gp_counter_width) - 1;

	wrmsr(gp_counter_base, count);

	loop();
	stop_event(&evt);
	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
	report(status & 1, "status");

	report_prefix_pop();
}

static void check_emulated_instr(void)
{
	uint64_t status, instr_start, brnch_start;
	pmu_counter_t brnch_cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		/* branch instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[5].unit_sel,
	};
	pmu_counter_t instr_cnt = {
		.ctr = MSR_IA32_PERFCTR0 + 1,
		/* instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};
	report_prefix_push("emulated instruction");

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	start_event(&brnch_cnt);
	start_event(&instr_cnt);

	brnch_start = -EXPECTED_BRNCH;
	instr_start = -EXPECTED_INSTR;
	wrmsr(MSR_IA32_PERFCTR0, brnch_start);
	wrmsr(MSR_IA32_PERFCTR0 + 1, instr_start);
	// KVM_FEP is a magic prefix that forces emulation so
	// 'KVM_FEP "jne label\n"' just counts as a single instruction.
	asm volatile(
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"label:\n"
		:
		:
		: "eax", "ebx", "ecx", "edx");

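	// Freeze both counters before the counts are read back and reported.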
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	stop_event(&brnch_cnt);
	stop_event(&instr_cnt);

	// Check that the end count - start count is at least the expected
	// number of instructions and branches.
	report(instr_cnt.count - instr_start >= EXPECTED_INSTR,
	       "instruction count");
	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
	       "branch count");
	// Additionally check that those counters overflowed properly.
	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
	report(status & 1, "branch counter overflow");
	report(status & 2, "instruction counter overflow");

	report_prefix_pop();
}

static void check_counters(void)
{
	if (is_fep_available())
		check_emulated_instr();

	check_gp_counters();
	check_fixed_counters();
	check_rdpmc();
	check_counters_many();
	check_counter_overflow();
	check_gp_counter_cmask();
	check_running_counter_wrmsr();
}

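/*
 * Writing a value wider than the supported counter width to MSR_IA32_PMCn must
 * raise #GP; invoked below via test_for_exception().
 */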
static void do_unsupported_width_counter_write(void *index)
{
	wrmsr(MSR_IA32_PMC0 + *((int *) index), 0xffffff0123456789ull);
}

static void check_gp_counters_write_width(void)
{
	u64 val_64 = 0xffffff0123456789ull;
	u64 val_32 = val_64 & ((1ull << 32) - 1);
	u64 val_max_width = val_64 & ((1ull << pmu.gp_counter_width) - 1);
	int i;

	/*
	 * MSR_IA32_PERFCTRn supports 64-bit writes,
	 * but only the lowest 32 bits are valid.
	 */
	for (i = 0; i < pmu.nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PERFCTR0 + i, val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_64);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
	}

	/*
	 * MSR_IA32_PMCn supports full-width writes: values up to the GP
	 * counter width are accepted and read back unmodified; wider values
	 * raise #GP.
	 */
	for (i = 0; i < pmu.nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PMC0 + i, val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);

		wrmsr(MSR_IA32_PMC0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_max_width);

		report(test_for_exception(GP_VECTOR,
			do_unsupported_width_counter_write, &i),
		"writing unsupported width to MSR_IA32_PMC%d raises #GP", i);
	}
}

/*
 * Per the SDM, reference cycles are currently implemented using the
 * core crystal clock, TSC, or bus clock. Calibrate to the TSC
 * frequency to set reasonable expectations.
 */
static void set_ref_cycle_expectations(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[2].unit_sel,
	};
	uint64_t tsc_delta;
	uint64_t t0, t1, t2, t3;

	/* Bit 2 enumerates the availability of reference cycles events. */
	if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	t0 = fenced_rdtsc();
	start_event(&cnt);
	t1 = fenced_rdtsc();

	/*
	 * This loop has to run long enough to dominate the VM-exit
	 * costs for playing with the PMU MSRs on start and stop.
	 *
	 * On a 2.6GHz Ice Lake, with the TSC frequency at 104 times
	 * the core crystal clock, this function calculated a guest
	 * TSC : ref cycles ratio of around 105 with ECX initialized
	 * to one billion.
	 */
	asm volatile("loop ." : "+c"((int){1000000000ull}));

	t2 = fenced_rdtsc();
	stop_event(&cnt);
	t3 = fenced_rdtsc();

	tsc_delta = ((t2 - t1) + (t3 - t0)) / 2;

	if (!tsc_delta)
		return;

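	/* Scale the ref-cycles bounds by the measured ref-cycles : TSC ratio. */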
	gp_events[2].min = (gp_events[2].min * cnt.count) / tsc_delta;
	gp_events[2].max = (gp_events[2].max * cnt.count) / tsc_delta;
}

static void check_invalid_rdpmc_gp(void)
{
	uint64_t val;

	report(rdpmc_safe(64, &val) == GP_VECTOR,
	       "Expected #GP on RDPMC(64)");
}

int main(int ac, char **av)
{
	setup_vm();
	handle_irq(PMI_VECTOR, cnt_overflow);
	buf = malloc(N*64);

	check_invalid_rdpmc_gp();

	if (!pmu.version) {
		report_skip("No Intel Arch PMU is detected!");
		return report_summary();
	}

	if (pmu.version == 1) {
		report_skip("PMU version 1 is not supported.");
		return report_summary();
	}

	set_ref_cycle_expectations();

	printf("PMU version:         %d\n", pmu.version);
	printf("GP counters:         %d\n", pmu.nr_gp_counters);
	printf("GP counter width:    %d\n", pmu.gp_counter_width);
	printf("Mask length:         %d\n", pmu.gp_counter_mask_length);
	printf("Fixed counters:      %d\n", pmu.nr_fixed_counters);
	printf("Fixed counter width: %d\n", pmu.fixed_counter_width);

	apic_write(APIC_LVTPC, PMI_VECTOR);

	check_counters();

	if (pmu_has_full_writes()) {
		gp_counter_base = MSR_IA32_PMC0;
		report_prefix_push("full-width writes");
		check_counters();
		check_gp_counters_write_width();
		report_prefix_pop();
	}

	return report_summary();
}