xref: /kvm-unit-tests/x86/pmu.c (revision f2a56148889b3536092090bfb5bc8b6104709630)

#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/pmu.h"
#include "x86/apic-defs.h"
#include "x86/apic.h"
#include "x86/desc.h"
#include "x86/isr.h"
#include "vmalloc.h"
#include "alloc.h"

#include "libcflat.h"
#include <stdint.h>

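/* Iteration count of the measurement loop; all event min/max bounds scale with N. */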
#define N 1000000

// These values match the number of instructions and branches in the
// assembly block in check_emulated_instr().
#define EXPECTED_INSTR 17
#define EXPECTED_BRNCH 5

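/*
 * One counter under test: the counter MSR address, its bit index in the
 * global control/status MSRs, the event-select configuration, and the
 * value read back when the event is stopped.
 */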
typedef struct {
	uint32_t ctr;
	uint32_t idx;
	uint64_t config;
	uint64_t count;
} pmu_counter_t;

struct pmu_event {
	const char *name;
	uint32_t unit_sel;
	int min;
	int max;
} intel_gp_events[] = {
	{"core cycles", 0x003c, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"ref cycles", 0x013c, 1*N, 30*N},
	{"llc references", 0x4f2e, 1, 2*N},
	{"llc misses", 0x412e, 1, 1*N},
	{"branches", 0x00c4, 1*N, 1.1*N},
	{"branch misses", 0x00c5, 0, 0.1*N},
}, amd_gp_events[] = {
	{"core cycles", 0x0076, 1*N, 50*N},
	{"instructions", 0x00c0, 10*N, 10.2*N},
	{"branches", 0x00c2, 1*N, 1.1*N},
	{"branch misses", 0x00c3, 0, 0.1*N},
}, fixed_events[] = {
	{"fixed 0", MSR_CORE_PERF_FIXED_CTR0, 10*N, 10.2*N},
	{"fixed 1", MSR_CORE_PERF_FIXED_CTR0 + 1, 1*N, 30*N},
	{"fixed 2", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
};

char *buf;

static struct pmu_event *gp_events;
static unsigned int gp_events_size;
static unsigned int fixed_counters_num;

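/*
 * The measurement workload: N iterations of a fixed mov/add/nop/loop
 * sequence walking through buf.  Each iteration retires 10 instructions,
 * exactly one of which is a branch, which is what the "instructions" and
 * "branches" bounds in the event tables are calibrated against.
 */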
static inline void loop(void)
{
	unsigned long tmp, tmp2, tmp3;

	asm volatile("1: mov (%1), %2; add $64, %1; nop; nop; nop; nop; nop; nop; nop; loop 1b"
			: "=c"(tmp), "=r"(tmp2), "=r"(tmp3): "0"(N), "1"(buf));

}

volatile uint64_t irq_received;

static void cnt_overflow(isr_regs_t *regs)
{
	irq_received++;
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
	apic_write(APIC_EOI, 0);
}

static bool check_irq(void)
{
	int i;
	irq_received = 0;
	sti();
	for (i = 0; i < 100000 && !irq_received; i++)
		asm volatile("pause");
	cli();
	return irq_received;
}

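/*
 * On Intel, fixed counters occupy MSR_CORE_PERF_FIXED_CTR0..n, below the
 * full-width MSR_IA32_PMC0 aliases; anything outside that window is a
 * general-purpose counter.  AMD has no fixed counters.
 */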
static bool is_gp(pmu_counter_t *evt)
{
	if (!pmu.is_intel)
		return true;

	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0 ||
		evt->ctr >= MSR_IA32_PMC0;
}

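/*
 * Map a counter MSR to its bit position in the global control/status MSRs.
 * On AMD family 15h the event-select and counter MSRs are interleaved
 * (CTL0, CTR0, CTL1, CTR1, ...), hence the divide by 2.
 */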
static int event_to_global_idx(pmu_counter_t *cnt)
{
	if (pmu.is_intel)
		return cnt->ctr - (is_gp(cnt) ? pmu.msr_gp_counter_base :
			(MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX));

	if (pmu.msr_gp_counter_base == MSR_F15H_PERF_CTR0)
		return (cnt->ctr - pmu.msr_gp_counter_base) / 2;
	else
		return cnt->ctr - pmu.msr_gp_counter_base;
}

static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
{
	if (is_gp(cnt)) {
		int i;

		for (i = 0; i < gp_events_size; i++)
			if (gp_events[i].unit_sel == (cnt->config & 0xffff))
				return &gp_events[i];
	} else {
		unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0;

		if (idx < ARRAY_SIZE(fixed_events))
			return &fixed_events[idx];
	}

	return NULL;
}

static void global_enable(pmu_counter_t *cnt)
{
	if (!this_cpu_has_perf_global_ctrl())
		return;

	cnt->idx = event_to_global_idx(cnt);
	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) | BIT_ULL(cnt->idx));
}

static void global_disable(pmu_counter_t *cnt)
{
	if (!this_cpu_has_perf_global_ctrl())
		return;

	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) & ~BIT_ULL(cnt->idx));
}

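/*
 * Program and start a counter: preset the count, enable the event in the
 * event-select MSR (GP) or in the per-counter field of FIXED_CTR_CTRL
 * (fixed: bit 0 = OS, bit 1 = USR, bit 3 = PMI), set the global enable
 * bit, and unmask the PMI vector in the local APIC.
 */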
static void __start_event(pmu_counter_t *evt, uint64_t count)
{
	evt->count = count;
	wrmsr(evt->ctr, evt->count);
	if (is_gp(evt)) {
		wrmsr(MSR_GP_EVENT_SELECTx(event_to_global_idx(evt)),
		      evt->config | EVNTSEL_EN);
	} else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		uint32_t usrospmi = 0;

		if (evt->config & EVNTSEL_OS)
			usrospmi |= (1 << 0);
		if (evt->config & EVNTSEL_USR)
			usrospmi |= (1 << 1);
		if (evt->config & EVNTSEL_INT)
			usrospmi |= (1 << 3); // PMI on overflow
		ctrl = (ctrl & ~(0xf << shift)) | (usrospmi << shift);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl);
	}
	global_enable(evt);
	apic_write(APIC_LVTPC, PMI_VECTOR);
}

static void start_event(pmu_counter_t *evt)
{
	__start_event(evt, 0);
}

static void stop_event(pmu_counter_t *evt)
{
	global_disable(evt);
	if (is_gp(evt)) {
		wrmsr(MSR_GP_EVENT_SELECTx(event_to_global_idx(evt)),
		      evt->config & ~EVNTSEL_EN);
	} else {
		uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL);
		int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4;
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl & ~(0xf << shift));
	}
	evt->count = rdmsr(evt->ctr);
}

static noinline void measure_many(pmu_counter_t *evt, int count)
{
	int i;
	for (i = 0; i < count; i++)
		start_event(&evt[i]);
	loop();
	for (i = 0; i < count; i++)
		stop_event(&evt[i]);
}

static void measure_one(pmu_counter_t *evt)
{
	measure_many(evt, 1);
}

static noinline void __measure(pmu_counter_t *evt, uint64_t count)
{
	__start_event(evt, count);
	loop();
	stop_event(evt);
}

static bool verify_event(uint64_t count, struct pmu_event *e)
{
	bool pass;

	if (!e)
		return false;

	pass = count >= e->min && count <= e->max;
	if (!pass)
		printf("FAIL: %d <= %"PRId64" <= %d\n", e->min, count, e->max);

	return pass;
}

static bool verify_counter(pmu_counter_t *cnt)
{
	return verify_event(cnt->count, get_counter_event(cnt));
}

static void check_gp_counter(struct pmu_event *evt)
{
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel,
	};
	int i;

	for (i = 0; i < pmu.nr_gp_counters; i++) {
		cnt.ctr = MSR_GP_COUNTERx(i);
		measure_one(&cnt);
		report(verify_event(cnt.count, evt), "%s-%d", evt->name, i);
	}
}

static void check_gp_counters(void)
{
	int i;

	for (i = 0; i < gp_events_size; i++)
		if (pmu_gp_counter_is_available(i))
			check_gp_counter(&gp_events[i]);
		else
			printf("GP event '%s' is disabled\n",
					gp_events[i].name);
}

static void check_fixed_counters(void)
{
	pmu_counter_t cnt = {
		.config = EVNTSEL_OS | EVNTSEL_USR,
	};
	int i;

	for (i = 0; i < fixed_counters_num; i++) {
		cnt.ctr = fixed_events[i].unit_sel;
		measure_one(&cnt);
		report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d", i);
	}
}

static void check_counters_many(void)
{
	pmu_counter_t cnt[48];
	int i, n;

	for (i = 0, n = 0; n < pmu.nr_gp_counters; i++) {
		if (!pmu_gp_counter_is_available(i))
			continue;

		cnt[n].ctr = MSR_GP_COUNTERx(n);
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
			gp_events[i % gp_events_size].unit_sel;
		n++;
	}
	for (i = 0; i < fixed_counters_num; i++) {
		cnt[n].ctr = fixed_events[i].unit_sel;
		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR;
		n++;
	}

	assert(n <= ARRAY_SIZE(cnt));
	measure_many(cnt, n);

	for (i = 0; i < n; i++)
		if (!verify_counter(&cnt[i]))
			break;

	report(i == n, "all counters");
}

static uint64_t measure_for_overflow(pmu_counter_t *cnt)
{
	__measure(cnt, 0);
	/*
	 * To generate overflow, i.e. roll over to '0', the initial count just
	 * needs to be preset to the negative expected count.  However, as per
	 * Intel's SDM, the preset count needs to be incremented by 1 to ensure
	 * the overflow interrupt is generated immediately instead of possibly
	 * waiting for the overflow to propagate through the counter.
	 */
	assert(cnt->count > 1);
	return 1 - cnt->count;
}

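/*
 * Preset each counter so that it overflows during loop(), then verify the
 * final count, the GLOBAL_STATUS overflow bit, that the bit can be cleared,
 * and that a PMI fires iff EVNTSEL_INT was set (odd iterations).  The extra
 * last iteration retargets fixed counter 0 on Intel.
 */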
static void check_counter_overflow(void)
{
	uint64_t overflow_preset;
	int i;
	pmu_counter_t cnt = {
		.ctr = MSR_GP_COUNTERx(0),
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	overflow_preset = measure_for_overflow(&cnt);

	/* clear status before test */
	if (this_cpu_has_perf_global_status())
		pmu_clear_global_status();

	report_prefix_push("overflow");

	for (i = 0; i < pmu.nr_gp_counters + 1; i++) {
		uint64_t status;
		int idx;

		cnt.count = overflow_preset;
		if (pmu_use_full_writes())
			cnt.count &= (1ull << pmu.gp_counter_width) - 1;

		if (i == pmu.nr_gp_counters) {
			if (!pmu.is_intel)
				break;

			cnt.ctr = fixed_events[0].unit_sel;
			cnt.count = measure_for_overflow(&cnt);
			cnt.count &= (1ull << pmu.gp_counter_width) - 1;
		} else {
			cnt.ctr = MSR_GP_COUNTERx(i);
		}

		if (i % 2)
			cnt.config |= EVNTSEL_INT;
		else
			cnt.config &= ~EVNTSEL_INT;
		idx = event_to_global_idx(&cnt);
		__measure(&cnt, cnt.count);
		if (pmu.is_intel)
			report(cnt.count == 1, "cntr-%d", i);
		else
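			/* AMD counters wrap at 48 bits: accept all ones or a small post-wrap count. */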
			report(cnt.count == 0xffffffffffff || cnt.count < 7, "cntr-%d", i);

		if (!this_cpu_has_perf_global_status())
			continue;

		status = rdmsr(pmu.msr_global_status);
		report(status & (1ull << idx), "status-%d", i);
		wrmsr(pmu.msr_global_status_clr, status);
		status = rdmsr(pmu.msr_global_status);
		report(!(status & (1ull << idx)), "status clear-%d", i);
		report(check_irq() == (i % 2), "irq-%d", i);
	}

	report_prefix_pop();
}

static void check_gp_counter_cmask(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_GP_COUNTERx(0),
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
	};
	cnt.config |= (0x2 << EVNTSEL_CMASK_SHIFT);
	measure_one(&cnt);
	report(cnt.count < gp_events[1].min, "cmask");
}

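/*
 * RDPMC with ECX bit 31 set is "fast" mode, which returns only the low
 * 32 bits of the counter; bit 30 selects the fixed-counter space.  Fast
 * mode is not architecturally guaranteed, so a #GP here is treated as
 * "not supported" rather than as a failure.
 */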
static void do_rdpmc_fast(void *ptr)
{
	pmu_counter_t *cnt = ptr;
	uint32_t idx = (uint32_t)cnt->idx | (1u << 31);

	if (!is_gp(cnt))
		idx |= 1 << 30;

	cnt->count = rdpmc(idx);
}

static void check_rdpmc(void)
{
	uint64_t val = 0xff0123456789ull;
	bool exc;
	int i;

	report_prefix_push("rdpmc");

	for (i = 0; i < pmu.nr_gp_counters; i++) {
		uint64_t x;
		pmu_counter_t cnt = {
			.ctr = MSR_GP_COUNTERx(i),
			.idx = i
		};

		/*
		 * Without full-width writes, only the low 32 bits are writable,
		 * and the value is sign-extended.
		 */
		if (pmu.msr_gp_counter_base == MSR_IA32_PERFCTR0)
			x = (uint64_t)(int64_t)(int32_t)val;
		else
			x = (uint64_t)(int64_t)val;

		/* Mask according to the number of supported bits */
		x &= (1ull << pmu.gp_counter_width) - 1;

		wrmsr(MSR_GP_COUNTERx(i), val);
		report(rdpmc(i) == x, "cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fast-%d", i);
		else
			report(cnt.count == (u32)val, "fast-%d", i);
	}
	for (i = 0; i < fixed_counters_num; i++) {
		uint64_t x = val & ((1ull << pmu.fixed_counter_width) - 1);
		pmu_counter_t cnt = {
			.ctr = MSR_CORE_PERF_FIXED_CTR0 + i,
			.idx = i
		};

		wrmsr(MSR_PERF_FIXED_CTRx(i), x);
		report(rdpmc(i | (1 << 30)) == x, "fixed cntr-%d", i);

		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
		if (exc)
			report_skip("fixed fast-%d", i);
		else
			report(cnt.count == (u32)x, "fixed fast-%d", i);
	}

	report_prefix_pop();
}

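/*
 * Verify that writes to a counter take effect while it is running: zeroing
 * the counter mid-measurement must keep the final count below the event's
 * minimum, and writing -1 must make the very next event overflow into
 * GLOBAL_STATUS bit 0.
 */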
static void check_running_counter_wrmsr(void)
{
	uint64_t status;
	uint64_t count;
	pmu_counter_t evt = {
		.ctr = MSR_GP_COUNTERx(0),
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};

	report_prefix_push("running counter wrmsr");

	start_event(&evt);
	loop();
	wrmsr(MSR_GP_COUNTERx(0), 0);
	stop_event(&evt);
	report(evt.count < gp_events[1].min, "cntr");

	/* clear status before overflow test */
	if (this_cpu_has_perf_global_status())
		pmu_clear_global_status();

	start_event(&evt);

	count = -1;
	if (pmu_use_full_writes())
		count &= (1ull << pmu.gp_counter_width) - 1;

	wrmsr(MSR_GP_COUNTERx(0), count);

	loop();
	stop_event(&evt);

	if (this_cpu_has_perf_global_status()) {
		status = rdmsr(pmu.msr_global_status);
		report(status & 1, "status msr bit");
	}

	report_prefix_pop();
}

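/*
 * Count instructions and branches across a block that mixes natively
 * executed and KVM-emulated instructions.  Both counters are preset to
 * overflow exactly when the expected counts retire, so GLOBAL_STATUS can
 * be checked as well.  Requires KVM's forced-emulation prefix (KVM_FEP).
 */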
static void check_emulated_instr(void)
{
	uint64_t status, instr_start, brnch_start;
	uint64_t gp_counter_mask = (1ull << pmu.gp_counter_width) - 1;
	unsigned int branch_idx = pmu.is_intel ? 5 : 2;
	pmu_counter_t brnch_cnt = {
		.ctr = MSR_GP_COUNTERx(0),
		/* branch instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[branch_idx].unit_sel,
	};
	pmu_counter_t instr_cnt = {
		.ctr = MSR_GP_COUNTERx(1),
		/* instructions */
		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
	};
	report_prefix_push("emulated instruction");

	if (this_cpu_has_perf_global_status())
		pmu_clear_global_status();

	start_event(&brnch_cnt);
	start_event(&instr_cnt);

	brnch_start = -EXPECTED_BRNCH;
	instr_start = -EXPECTED_INSTR;
	wrmsr(MSR_GP_COUNTERx(0), brnch_start & gp_counter_mask);
	wrmsr(MSR_GP_COUNTERx(1), instr_start & gp_counter_mask);
	// KVM_FEP is a magic prefix that forces emulation so
	// 'KVM_FEP "jne label\n"' just counts as a single instruction.
	asm volatile(
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		KVM_FEP "jne label\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"label:\n"
		:
		:
		: "eax", "ebx", "ecx", "edx");

	if (this_cpu_has_perf_global_ctrl())
		wrmsr(pmu.msr_global_ctl, 0);

	stop_event(&brnch_cnt);
	stop_event(&instr_cnt);

	// Check that the end count - start count is at least the expected
	// number of instructions and branches.
	report(instr_cnt.count - instr_start >= EXPECTED_INSTR,
	       "instruction count");
	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
	       "branch count");
	if (this_cpu_has_perf_global_status()) {
		// Additionally check that those counters overflowed properly.
		status = rdmsr(pmu.msr_global_status);
		report(status & 1, "branch counter overflow");
		report(status & 2, "instruction counter overflow");
	}

	report_prefix_pop();
}

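/*
 * Cycle events filtered to transactional regions: EVNTSEL bit 32 (IN_TX)
 * counts cycles inside a transaction, and bit 33 (IN_TXCP) additionally
 * excludes cycles from aborted transactions; IN_TXCP is only supported
 * on GP counter 2.
 */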
#define XBEGIN_STARTED (~0u)
static void check_tsx_cycles(void)
{
	pmu_counter_t cnt;
	unsigned int i, ret = 0;

	if (!this_cpu_has(X86_FEATURE_RTM))
		return;

	report_prefix_push("TSX cycles");

	for (i = 0; i < pmu.nr_gp_counters; i++) {
		cnt.ctr = MSR_GP_COUNTERx(i);

		if (i == 2) {
			/* Transactional cycles committed only on gp counter 2 */
			cnt.config = EVNTSEL_OS | EVNTSEL_USR | 0x30000003c;
		} else {
			/* Transactional cycles */
			cnt.config = EVNTSEL_OS | EVNTSEL_USR | 0x10000003c;
		}

		start_event(&cnt);

		asm volatile("xbegin 1f\n\t"
				"1:\n\t"
				: "+a" (ret) :: "memory");

		/* Generate a non-canonical #GP to trigger ABORT. */
		if (ret == XBEGIN_STARTED)
			*(int *)NONCANONICAL = 0;

		stop_event(&cnt);

		report(cnt.count > 0, "gp cntr-%d with a value of %" PRId64 "", i, cnt.count);
	}

	report_prefix_pop();
}

static void warm_up(void)
{
	int i;

	/*
	 * The cycles event is always measured first, while the cache is still
	 * cold, which can push the measured cycle count past the pre-defined
	 * upper boundary and cause a false positive.  To avoid this, run the
	 * workload a few times before the real verification.
	 */
	for (i = 0; i < 10; i++)
		loop();
}

static void check_counters(void)
{
	if (is_fep_available())
		check_emulated_instr();

	warm_up();
	check_gp_counters();
	check_fixed_counters();
	check_rdpmc();
	check_counters_many();
	check_counter_overflow();
	check_gp_counter_cmask();
	check_running_counter_wrmsr();
	check_tsx_cycles();
}

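/* Writing bits above the enumerated counter width to MSR_IA32_PMCn must #GP. */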
static void do_unsupported_width_counter_write(void *index)
{
	wrmsr(MSR_IA32_PMC0 + *((int *) index), 0xffffff0123456789ull);
}

static void check_gp_counters_write_width(void)
{
	u64 val_64 = 0xffffff0123456789ull;
	u64 val_32 = val_64 & ((1ull << 32) - 1);
	u64 val_max_width = val_64 & ((1ull << pmu.gp_counter_width) - 1);
	int i;

	/*
	 * MSR_IA32_PERFCTRn supports 64-bit writes,
	 * but only the lowest 32 bits are valid.
	 */
	for (i = 0; i < pmu.nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PERFCTR0 + i, val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);

		wrmsr(MSR_IA32_PERFCTR0 + i, val_64);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
	}

	/*
	 * MSR_IA32_PMCn supports writing values up to GP counter width,
	 * and only the lowest bits of GP counter width are valid.
	 */
	for (i = 0; i < pmu.nr_gp_counters; i++) {
		wrmsr(MSR_IA32_PMC0 + i, val_32);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);

		wrmsr(MSR_IA32_PMC0 + i, val_max_width);
		assert(rdmsr(MSR_IA32_PMC0 + i) == val_max_width);
		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_max_width);

		report(test_for_exception(GP_VECTOR,
			do_unsupported_width_counter_write, &i),
		"writing unsupported width to MSR_IA32_PMC%d raises #GP", i);
	}
}

/*
 * Per the SDM, reference cycles are currently implemented using the
 * core crystal clock, TSC, or bus clock. Calibrate to the TSC
 * frequency to set reasonable expectations.
 */
static void set_ref_cycle_expectations(void)
{
	pmu_counter_t cnt = {
		.ctr = MSR_IA32_PERFCTR0,
		.config = EVNTSEL_OS | EVNTSEL_USR | intel_gp_events[2].unit_sel,
	};
	uint64_t tsc_delta;
	uint64_t t0, t1, t2, t3;

	/* Bit 2 enumerates the availability of reference cycles events. */
	if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
		return;

	if (this_cpu_has_perf_global_ctrl())
		wrmsr(pmu.msr_global_ctl, 0);

	t0 = fenced_rdtsc();
	start_event(&cnt);
	t1 = fenced_rdtsc();

	/*
	 * This loop has to run long enough to dominate the VM-exit
	 * costs for playing with the PMU MSRs on start and stop.
	 *
	 * On a 2.6GHz Ice Lake, with the TSC frequency at 104 times
	 * the core crystal clock, this function calculated a guest
	 * TSC : ref cycles ratio of around 105 with ECX initialized
	 * to one billion.
	 */
	asm volatile("loop ." : "+c"((int){1000000000ull}));

	t2 = fenced_rdtsc();
	stop_event(&cnt);
	t3 = fenced_rdtsc();

	tsc_delta = ((t2 - t1) + (t3 - t0)) / 2;

	if (!tsc_delta)
		return;

	intel_gp_events[2].min = (intel_gp_events[2].min * cnt.count) / tsc_delta;
	intel_gp_events[2].max = (intel_gp_events[2].max * cnt.count) / tsc_delta;
}

static void check_invalid_rdpmc_gp(void)
{
	uint64_t val;

	report(rdpmc_safe(64, &val) == GP_VECTOR,
	       "Expected #GP on RDPMC(64)");
}

int main(int ac, char **av)
{
	setup_vm();
	handle_irq(PMI_VECTOR, cnt_overflow);
	buf = malloc(N*64);

	check_invalid_rdpmc_gp();

	if (pmu.is_intel) {
		if (!pmu.version) {
			report_skip("No Intel Arch PMU is detected!");
			return report_summary();
		}
		gp_events = (struct pmu_event *)intel_gp_events;
		gp_events_size = ARRAY_SIZE(intel_gp_events);
		report_prefix_push("Intel");
		set_ref_cycle_expectations();
	} else {
		gp_events_size = ARRAY_SIZE(amd_gp_events);
		gp_events = (struct pmu_event *)amd_gp_events;
		report_prefix_push("AMD");
	}

	printf("PMU version:         %d\n", pmu.version);
	printf("GP counters:         %d\n", pmu.nr_gp_counters);
	printf("GP counter width:    %d\n", pmu.gp_counter_width);
	printf("Mask length:         %d\n", pmu.gp_counter_mask_length);
	printf("Fixed counters:      %d\n", pmu.nr_fixed_counters);
	printf("Fixed counter width: %d\n", pmu.fixed_counter_width);

	fixed_counters_num = MIN(pmu.nr_fixed_counters, ARRAY_SIZE(fixed_events));
	if (pmu.nr_fixed_counters > ARRAY_SIZE(fixed_events))
		report_info("Fixed counters number %d > defined fixed events %u.  "
			    "Please update test case.", pmu.nr_fixed_counters,
			    (uint32_t)ARRAY_SIZE(fixed_events));

	apic_write(APIC_LVTPC, PMI_VECTOR);

	check_counters();

	if (pmu_has_full_writes()) {
		pmu.msr_gp_counter_base = MSR_IA32_PMC0;

		report_prefix_push("full-width writes");
		check_counters();
		check_gp_counters_write_width();
		report_prefix_pop();
	}

	if (!pmu.is_intel) {
		report_prefix_push("K7");
		pmu.nr_gp_counters = AMD64_NUM_COUNTERS;
		pmu.msr_gp_counter_base = MSR_K7_PERFCTR0;
		pmu.msr_gp_event_select_base = MSR_K7_EVNTSEL0;
		check_counters();
		report_prefix_pop();
	}

	return report_summary();
}