#include "x86/msr.h"
#include "x86/processor.h"
#include "x86/pmu.h"
#include "x86/isr.h"
#include "x86/apic.h"
#include "x86/apic-defs.h"
#include "x86/desc.h"
#include "alloc.h"

#include "vm.h"
#include "processor.h"
#include "vmalloc.h"
#include "alloc_page.h"

/* Bits [63:48] of format_size give the size of the current record in bytes. */
#define	RECORD_SIZE_OFFSET	48

static unsigned int max_nr_gp_events;
static unsigned long *ds_buffer;
static unsigned long *pebs_buffer;
static u64 ctr_start_val;
static bool has_baseline;

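/*
 * Layout of the DS save area (see the SDM's "Debug Store Area" section).
 * Only the PEBS fields are exercised by this test; the BTS fields are
 * left zeroed.
 */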
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[64];
};

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

struct lbr_entry {
	u64 from;
	u64 to;
	u64 info;
};

enum pmc_type {
	GP = 0,
	FIXED,
};

static uint32_t intel_arch_events[] = {
	0x00c4, /* PERF_COUNT_HW_BRANCH_INSTRUCTIONS */
	0x00c5, /* PERF_COUNT_HW_BRANCH_MISSES */
	0x0300, /* PERF_COUNT_HW_REF_CPU_CYCLES */
	0x003c, /* PERF_COUNT_HW_CPU_CYCLES */
	0x00c0, /* PERF_COUNT_HW_INSTRUCTIONS */
	0x013c, /* PERF_COUNT_HW_BUS_CYCLES */
	0x4f2e, /* PERF_COUNT_HW_CACHE_REFERENCES */
	0x412e, /* PERF_COUNT_HW_CACHE_MISSES */
};

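/*
 * Adaptive PEBS data configurations to exercise, one per optional record
 * group: memory info, GPRs, XMM registers, and LBR entries.
 */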
static u64 pebs_data_cfgs[] = {
	PEBS_DATACFG_MEMINFO,
	PEBS_DATACFG_GP,
	PEBS_DATACFG_XMMS,
	PEBS_DATACFG_LBRS | ((MAX_NUM_LBR_ENTRY - 1) << PEBS_DATACFG_LBR_SHIFT),
};

/* Iterating over every counter value would waste time; pick a few typical values. */
static u64 counter_start_values[] = {
	/* cases where the PEBS counter doesn't overflow at all */
	0,
	0xfffffffffff0,
	/* normal counter overflow, so PEBS records are generated */
	0xfffffffffffe,
	/* test whether emulated instructions should trigger PEBS */
	0xffffffffffff,
};

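/*
 * A PEBS record always carries the basic group; with the baseline
 * (adaptive PEBS) feature, PEBS_DATA_CFG selects which optional groups
 * are appended, so the expected record size depends on the config.
 */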
static unsigned int get_adaptive_pebs_record_size(u64 pebs_data_cfg)
{
	unsigned int sz = sizeof(struct pebs_basic);

	if (!has_baseline)
		return sz;

	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
		sz += sizeof(struct pebs_meminfo);
	if (pebs_data_cfg & PEBS_DATACFG_GP)
		sz += sizeof(struct pebs_gprs);
	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
		sz += sizeof(struct pebs_xmm);
	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
		sz += MAX_NUM_LBR_ENTRY * sizeof(struct lbr_entry);

	return sz;
}

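/* PMI handler: records are validated after the fact, so just ack the interrupt. */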
static void cnt_overflow(isr_regs_t *regs)
{
	apic_write(APIC_EOI, 0);
}

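/*
 * A small branch-and-CPUID instruction mix used to advance the counters
 * under test once PEBS is enabled.
 */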
static inline void workload(void)
{
	asm volatile(
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		"jne label2\n"
		"jne label2\n"
		"jne label2\n"
		"jne label2\n"
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		"jne label2\n"
		"jne label2\n"
		"jne label2\n"
		"jne label2\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"label2:\n"
		:
		:
		: "eax", "ebx", "ecx", "edx");
}

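/*
 * Identical to workload(); a separate copy is used by the multi-counter
 * test, presumably so the two phases sample records from distinct IPs.
 */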
static inline void workload2(void)
{
	asm volatile(
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		"jne label3\n"
		"jne label3\n"
		"jne label3\n"
		"jne label3\n"
		"mov $0x0, %%eax\n"
		"cmp $0x0, %%eax\n"
		"jne label3\n"
		"jne label3\n"
		"jne label3\n"
		"jne label3\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"mov $0xa, %%eax\n"
		"cpuid\n"
		"label3:\n"
		:
		:
		: "eax", "ebx", "ecx", "edx");
}

static void alloc_buffers(void)
{
	ds_buffer = alloc_page();
	force_4k_page(ds_buffer);
	memset(ds_buffer, 0x0, PAGE_SIZE);

	pebs_buffer = alloc_page();
	force_4k_page(pebs_buffer);
	memset(pebs_buffer, 0x0, PAGE_SIZE);
}

static void free_buffers(void)
{
	if (ds_buffer)
		free_page(ds_buffer);

	if (pebs_buffer)
		free_page(pebs_buffer);
}

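/*
 * Program the DS area and enable PEBS on every counter in @bitmask:
 * point the DS management area at pebs_buffer, arm the interrupt
 * threshold after a single record, load each selected counter with
 * ctr_start_val, then turn everything on via PEBS_ENABLE and GLOBAL_CTRL.
 */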
static void pebs_enable(u64 bitmask, u64 pebs_data_cfg)
{
	struct debug_store *ds;
	u64 baseline_extra_ctrl = 0, fixed_ctr_ctrl = 0;
	unsigned int idx;

	if (has_baseline)
		wrmsr(MSR_PEBS_DATA_CFG, pebs_data_cfg);

	ds = (struct debug_store *)ds_buffer;
	ds->pebs_index = ds->pebs_buffer_base = (unsigned long)pebs_buffer;
	ds->pebs_absolute_maximum = (unsigned long)pebs_buffer + PAGE_SIZE;
	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		get_adaptive_pebs_record_size(pebs_data_cfg);

	for (idx = 0; idx < pmu.nr_fixed_counters; idx++) {
		if (!(BIT_ULL(FIXED_CNT_INDEX + idx) & bitmask))
			continue;
		if (has_baseline)
			baseline_extra_ctrl = BIT(FIXED_CNT_INDEX + idx * 4);
		wrmsr(MSR_PERF_FIXED_CTRx(idx), ctr_start_val);
		fixed_ctr_ctrl |= (0xbULL << (idx * 4) | baseline_extra_ctrl);
	}
	if (fixed_ctr_ctrl)
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, fixed_ctr_ctrl);

	for (idx = 0; idx < max_nr_gp_events; idx++) {
		if (!(BIT_ULL(idx) & bitmask))
			continue;
		if (has_baseline)
			baseline_extra_ctrl = ICL_EVENTSEL_ADAPTIVE;
		wrmsr(MSR_GP_EVENT_SELECTx(idx), EVNTSEL_EN | EVNTSEL_OS | EVNTSEL_USR |
						 intel_arch_events[idx] | baseline_extra_ctrl);
		wrmsr(MSR_GP_COUNTERx(idx), ctr_start_val);
	}

	wrmsr(MSR_IA32_DS_AREA, (unsigned long)ds_buffer);
	wrmsr(MSR_IA32_PEBS_ENABLE, bitmask);
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, bitmask);
}

static void reset_pebs(void)
{
	memset(ds_buffer, 0x0, PAGE_SIZE);
	memset(pebs_buffer, 0x0, PAGE_SIZE);
	wrmsr(MSR_IA32_PEBS_ENABLE, 0);
	wrmsr(MSR_IA32_DS_AREA, 0);
	if (has_baseline)
		wrmsr(MSR_PEBS_DATA_CFG, 0);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));

	pmu_reset_all_counters();
}

static void pebs_disable(unsigned int idx)
{
	/*
	 * If only the PEBS_ENABLE bit is cleared, the counter keeps
	 * incrementing. If it overflows in that tiny window, a normal
	 * counter IRQ is raised instead of a PEBS record. Exercise both
	 * orderings to cover this case.
	 */
	if (idx % 2)
		wrmsr(MSR_IA32_PEBS_ENABLE, 0);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

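/*
 * Walk the records written to pebs_buffer and check each one against the
 * enabled-counter bitmask, the expected record size, and the programmed
 * PEBS_DATA_CFG (mirrored in bits [47:0] of format_size).
 */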
static void check_pebs_records(u64 bitmask, u64 pebs_data_cfg)
{
	struct pebs_basic *pebs_rec = (struct pebs_basic *)pebs_buffer;
	struct debug_store *ds = (struct debug_store *)ds_buffer;
	unsigned int pebs_record_size = get_adaptive_pebs_record_size(pebs_data_cfg);
	unsigned int count = 0;
	bool expected, pebs_idx_match, pebs_size_match, data_cfg_match;
	void *cur_record;

	expected = (ds->pebs_index == ds->pebs_buffer_base) && !pebs_rec->format_size;
	if (!(rdmsr(MSR_CORE_PERF_GLOBAL_STATUS) & GLOBAL_STATUS_BUFFER_OVF)) {
		report(expected, "No OVF irq, no PEBS records.");
		return;
	}

	if (expected) {
		report(!expected, "An OVF irq, but no PEBS records.");
		return;
	}

	expected = ds->pebs_index >= ds->pebs_interrupt_threshold;
	cur_record = (void *)pebs_buffer;
	do {
		pebs_rec = (struct pebs_basic *)cur_record;
		pebs_record_size = pebs_rec->format_size >> RECORD_SIZE_OFFSET;
		pebs_idx_match =
			pebs_rec->applicable_counters & bitmask;
		pebs_size_match =
			pebs_record_size == get_adaptive_pebs_record_size(pebs_data_cfg);
		data_cfg_match =
			(pebs_rec->format_size & GENMASK_ULL(47, 0)) == pebs_data_cfg;
		expected = pebs_idx_match && pebs_size_match && data_cfg_match;
		report(expected,
		       "PEBS record (written seq %u) is verified (including size, counters and cfg).", count);
		cur_record = cur_record + pebs_record_size;
		count++;
	} while (expected && (void *)cur_record < (void *)ds->pebs_index);

	if (!expected) {
		if (!pebs_idx_match)
			printf("FAIL: The applicable_counters (0x%lx) doesn't match the pmc_bitmask (0x%lx).\n",
			       pebs_rec->applicable_counters, bitmask);
		if (!pebs_size_match)
			printf("FAIL: The pebs_record_size (%u) doesn't match MSR_PEBS_DATA_CFG (%u).\n",
			       pebs_record_size, get_adaptive_pebs_record_size(pebs_data_cfg));
		if (!data_cfg_match)
			printf("FAIL: The pebs_data_cfg (0x%lx) doesn't match MSR_PEBS_DATA_CFG (0x%lx).\n",
			       pebs_rec->format_size & GENMASK_ULL(47, 0), pebs_data_cfg);
	}
}

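/* Run one reset/enable/workload/disable/check cycle for a single counter. */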
static void check_one_counter(enum pmc_type type,
			      unsigned int idx, u64 pebs_data_cfg)
{
	u64 pebs_bit = BIT_ULL(type == FIXED ? FIXED_CNT_INDEX + idx : idx);

	report_prefix_pushf("%s counter %d (0x%lx)",
			    type == FIXED ? "Extended Fixed" : "GP", idx, ctr_start_val);
	reset_pebs();
	pebs_enable(pebs_bit, pebs_data_cfg);
	workload();
	pebs_disable(idx);
	check_pebs_records(pebs_bit, pebs_data_cfg);
	report_prefix_pop();
}

/* More than one PEBS record will be generated. */
static void check_multiple_counters(u64 bitmask, u64 pebs_data_cfg)
{
	reset_pebs();
	pebs_enable(bitmask, pebs_data_cfg);
	workload2();
	pebs_disable(0);
	check_pebs_records(bitmask, pebs_data_cfg);
}

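/*
 * Cover each fixed counter, each GP counter, and finally a combined mask
 * of all fixed counters plus every other GP counter.
 */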
static void check_pebs_counters(u64 pebs_data_cfg)
{
	unsigned int idx;
	u64 bitmask = 0;

	for (idx = 0; idx < pmu.nr_fixed_counters; idx++)
		check_one_counter(FIXED, idx, pebs_data_cfg);

	for (idx = 0; idx < max_nr_gp_events; idx++)
		check_one_counter(GP, idx, pebs_data_cfg);

	for (idx = 0; idx < pmu.nr_fixed_counters; idx++)
		bitmask |= BIT_ULL(FIXED_CNT_INDEX + idx);
	for (idx = 0; idx < max_nr_gp_events; idx += 2)
		bitmask |= BIT_ULL(idx);
	report_prefix_pushf("Multiple (0x%lx)", bitmask);
	check_multiple_counters(bitmask, pebs_data_cfg);
	report_prefix_pop();
}

/*
 * Known reasons for missing PEBS records:
 *	1. The selected event does not support PEBS;
 *	2. From a core PMU perspective, the vCPU and pCPU models are not the same;
 *	3. The guest counter has not yet overflowed, or has been cross-mapped
 *	   by the host.
 */
int main(int ac, char **av)
{
	unsigned int i, j;

	setup_vm();

	max_nr_gp_events = MIN(pmu.nr_gp_counters, ARRAY_SIZE(intel_arch_events));

	printf("PMU version: %d\n", pmu.version);

	has_baseline = pmu_has_pebs_baseline();
	if (pmu_has_full_writes())
		pmu_activate_full_writes();

	if (!pmu.is_intel) {
		report_skip("PEBS requires Intel ICX or later, non-Intel detected");
		return report_summary();
	} else if (!pmu_has_pebs()) {
		report_skip("PEBS requires PMU version 2+, reported version is %d", pmu.version);
		return report_summary();
	} else if (!pmu_pebs_format()) {
		report_skip("PEBS not enumerated in PERF_CAPABILITIES");
		return report_summary();
	} else if (rdmsr(MSR_IA32_MISC_ENABLE) & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL) {
		report_skip("PEBS unavailable according to MISC_ENABLE");
		return report_summary();
	}

	printf("PEBS format: %d\n", pmu_pebs_format());
	printf("PEBS GP counters: %d\n", pmu.nr_gp_counters);
	printf("PEBS Fixed counters: %d\n", pmu.nr_fixed_counters);
	printf("PEBS baseline (Adaptive PEBS): %d\n", has_baseline);

	handle_irq(PMI_VECTOR, cnt_overflow);
	alloc_buffers();

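	/*
	 * Run the full counter matrix for each start value; with the
	 * baseline feature, repeat it for each adaptive configuration.
	 */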
	for (i = 0; i < ARRAY_SIZE(counter_start_values); i++) {
		ctr_start_val = counter_start_values[i];
		check_pebs_counters(0);
		if (!has_baseline)
			continue;

		for (j = 0; j < ARRAY_SIZE(pebs_data_cfgs); j++) {
			report_prefix_pushf("Adaptive (0x%lx)", pebs_data_cfgs[j]);
			check_pebs_counters(pebs_data_cfgs[j]);
			report_prefix_pop();
		}
	}

	free_buffers();

	return report_summary();
}