xref: /kvm-unit-tests/arm/pmu.c (revision 2c96b77ec9d3b1fcec7525174e23a6240ee05949)
1 /*
2  * Test the ARM Performance Monitors Unit (PMU).
3  *
4  * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
5  * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU Lesser General Public License version 2.1 and
9  * only version 2.1 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
14  * for more details.
15  */
16 #include "libcflat.h"
17 #include "errata.h"
18 #include "asm/barrier.h"
19 #include "asm/sysreg.h"
20 #include "asm/processor.h"
21 #include <bitops.h>
22 #include <asm/gic.h>
23 
24 #define PMU_PMCR_E         (1 << 0)
25 #define PMU_PMCR_P         (1 << 1)
26 #define PMU_PMCR_C         (1 << 2)
27 #define PMU_PMCR_D         (1 << 3)
28 #define PMU_PMCR_X         (1 << 4)
29 #define PMU_PMCR_DP        (1 << 5)
30 #define PMU_PMCR_LC        (1 << 6)
31 #define PMU_PMCR_N_SHIFT   11
32 #define PMU_PMCR_N_MASK    0x1f
33 #define PMU_PMCR_ID_SHIFT  16
34 #define PMU_PMCR_ID_MASK   0xff
35 #define PMU_PMCR_IMP_SHIFT 24
36 #define PMU_PMCR_IMP_MASK  0xff
37 
38 #define PMU_CYCLE_IDX      31
39 
40 #define NR_SAMPLES 10
41 
42 /* Some PMU events */
43 #define SW_INCR			0x0
44 #define INST_RETIRED		0x8
45 #define CPU_CYCLES		0x11
46 #define MEM_ACCESS		0x13
47 #define INST_PREC		0x1B
48 #define STALL_FRONTEND		0x23
49 #define STALL_BACKEND		0x24
50 #define CHAIN			0x1E
51 
52 #define COMMON_EVENTS_LOW	0x0
53 #define COMMON_EVENTS_HIGH	0x3F
54 #define EXT_COMMON_EVENTS_LOW	0x4000
55 #define EXT_COMMON_EVENTS_HIGH	0x403F
56 
57 #define ALL_SET			0xFFFFFFFF
58 #define ALL_CLEAR		0x0
59 #define PRE_OVERFLOW		0xFFFFFFF0
60 #define PRE_OVERFLOW2		0xFFFFFFDC
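/*
 * PRE_OVERFLOW leaves 16 increments (0x10) before a 32-bit counter wraps;
 * PRE_OVERFLOW2 leaves 36 (0x24), so one ~20-event measurement loop stays
 * below the 32-bit boundary while a second one crosses it.
 */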
61 
62 #define PMU_PPI			23
63 
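/*
 * Probed PMU characteristics: the PMUv3 version code read from the ID
 * register, the number of implemented event counters (PMCR.N) and the
 * read-only/RES0 part of PMCR that is preserved across writes.
 */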
64 struct pmu {
65 	unsigned int version;
66 	unsigned int nb_implemented_counters;
67 	uint32_t pmcr_ro;
68 };
69 
70 struct pmu_stats {
71 	unsigned long bitmap;
72 	uint32_t interrupts[32];
73 	bool unexpected;
74 };
75 
76 static struct pmu pmu;
77 
78 #if defined(__arm__)
79 #define ID_DFR0_PERFMON_SHIFT 24
80 #define ID_DFR0_PERFMON_MASK  0xf
81 
82 #define ID_DFR0_PMU_NOTIMPL	0b0000
83 #define ID_DFR0_PMU_V1		0b0001
84 #define ID_DFR0_PMU_V2		0b0010
85 #define ID_DFR0_PMU_V3		0b0011
86 #define ID_DFR0_PMU_V3_8_1	0b0100
87 #define ID_DFR0_PMU_V3_8_4	0b0101
88 #define ID_DFR0_PMU_V3_8_5	0b0110
89 #define ID_DFR0_PMU_IMPDEF	0b1111
90 
91 #define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
92 #define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
93 #define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
94 #define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
95 #define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
96 #define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
97 #define PMCCNTR64    __ACCESS_CP15_64(0, c9)
98 
99 static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
100 static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
101 static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
102 static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }
103 
104 static inline uint8_t get_pmu_version(void)
105 {
106 	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
107 }
108 
109 static inline uint64_t get_pmccntr(void)
110 {
111 	return read_sysreg(PMCCNTR32);
112 }
113 
114 static inline void set_pmccntr(uint64_t value)
115 {
116 	write_sysreg(value & 0xffffffff, PMCCNTR32);
117 }
118 
119 /* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */
120 static inline void set_pmccfiltr(uint32_t value)
121 {
122 	write_sysreg(PMU_CYCLE_IDX, PMSELR);
123 	write_sysreg(value, PMXEVTYPER);
124 	isb();
125 }
126 
127 /*
128  * Extra instructions inserted by the compiler would be difficult to compensate
129  * for, so hand assemble everything between, and including, the PMCR accesses
130  * to start and stop counting. isb instructions are inserted to make sure
131  * pmccntr read after this function returns the exact instructions executed in
132  * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop.
133  */
134 static inline void precise_instrs_loop(int loop, uint32_t pmcr)
135 {
136 	asm volatile(
137 	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"
138 	"	isb\n"
139 	"1:	subs	%[loop], %[loop], #1\n"
140 	"	bgt	1b\n"
141 	"	mcr	p15, 0, %[z], c9, c12, 0\n"
142 	"	isb\n"
143 	: [loop] "+r" (loop)
144 	: [pmcr] "r" (pmcr), [z] "r" (0)
145 	: "cc");
146 }
147 
148 /* event counter tests only implemented for aarch64 */
149 static void test_event_introspection(void) {}
150 static void test_event_counter_config(void) {}
151 static void test_basic_event_count(void) {}
152 static void test_mem_access(void) {}
153 static void test_sw_incr(void) {}
154 static void test_chained_counters(void) {}
155 static void test_chained_sw_incr(void) {}
156 static void test_chain_promotion(void) {}
157 static void test_overflow_interrupt(void) {}
158 
159 #elif defined(__aarch64__)
160 #define ID_AA64DFR0_PERFMON_SHIFT 8
161 #define ID_AA64DFR0_PERFMON_MASK  0xf
162 
163 #define ID_DFR0_PMU_NOTIMPL	0b0000
164 #define ID_DFR0_PMU_V3		0b0001
165 #define ID_DFR0_PMU_V3_8_1	0b0100
166 #define ID_DFR0_PMU_V3_8_4	0b0101
167 #define ID_DFR0_PMU_V3_8_5	0b0110
168 #define ID_DFR0_PMU_IMPDEF	0b1111
169 
170 static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
171 static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
172 static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
173 static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
174 static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
175 static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
176 static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }
177 
178 static inline uint8_t get_pmu_version(void)
179 {
180 	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;
181 	return ver;
182 }
183 
184 /*
185  * Extra instructions inserted by the compiler would be difficult to compensate
186  * for, so hand assemble everything between, and including, the PMCR accesses
187  * to start and stop counting. isb instructions are inserted to make sure
188  * pmccntr read after this function returns the exact instructions executed
189  * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
190  */
191 static inline void precise_instrs_loop(int loop, uint32_t pmcr)
192 {
193 	uint64_t pmcr64 = pmcr;
194 	asm volatile(
195 	"	msr	pmcr_el0, %[pmcr]\n"
196 	"	isb\n"
197 	"1:	subs	%w[loop], %w[loop], #1\n"
198 	"	b.gt	1b\n"
199 	"	msr	pmcr_el0, xzr\n"
200 	"	isb\n"
201 	: [loop] "+r" (loop)
202 	: [pmcr] "r" (pmcr64)
203 	: "cc");
204 }
205 
206 #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
207 #define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
208 #define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
209 
210 #define PMEVTYPER_EXCLUDE_EL1 BIT(31)
211 #define PMEVTYPER_EXCLUDE_EL0 BIT(30)
212 
213 static bool is_event_supported(uint32_t n, bool warn)
214 {
215 	uint64_t pmceid0 = read_sysreg(pmceid0_el0);
216 	uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0);
217 	bool supported;
218 	uint64_t reg;
219 
220 	/*
221 	 * The low 32-bits of PMCEID0/1 respectively describe
222 	 * event support for events 0-31/32-63. Their High
223 	 * 32-bits describe support for extended events
224 	 * starting at 0x4000, using the same split.
225 	 */
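	/* e.g. MEM_ACCESS (0x13) is bit 19 of PMCEID0_EL0, extended event 0x4001 is bit 33 of PMCEID0_EL0 */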
226 	assert((n >= COMMON_EVENTS_LOW  && n <= COMMON_EVENTS_HIGH) ||
227 	       (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH));
228 
229 	if (n <= COMMON_EVENTS_HIGH)
230 		reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32);
231 	else
232 		reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32);
233 
234 	supported = reg & (1UL << (n & 0x3F));
235 
236 	if (!supported && warn)
237 		report_info("event 0x%x is not supported", n);
238 	return supported;
239 }
240 
241 static void test_event_introspection(void)
242 {
243 	bool required_events;
244 
245 	if (!pmu.nb_implemented_counters) {
246 		report_skip("No event counter, skip ...");
247 		return;
248 	}
249 
250 	/* PMUv3 requires that an implementation include some common events */
251 	required_events = is_event_supported(SW_INCR, true) &&
252 			  is_event_supported(CPU_CYCLES, true) &&
253 			  (is_event_supported(INST_RETIRED, true) ||
254 			   is_event_supported(INST_PREC, true));
255 
256 	if (pmu.version >= ID_DFR0_PMU_V3_8_1) {
257 		required_events = required_events &&
258 				  is_event_supported(STALL_FRONTEND, true) &&
259 				  is_event_supported(STALL_BACKEND, true);
260 	}
261 
262 	report(required_events, "Check required events are implemented");
263 }
264 
265 /*
266  * Extra instructions inserted by the compiler would be difficult to compensate
267  * for, so hand assemble everything between, and including, the PMCR accesses
268  * to start and stop counting. isb instructions are inserted to make sure
269  * pmccntr read after this function returns the exact instructions executed
270  * in the controlled block. Loads @loop times the data at @address into x9.
271  */
272 static void mem_access_loop(void *addr, long loop, uint32_t pmcr)
273 {
274 	uint64_t pmcr64 = pmcr;
275 	asm volatile(
276 	"       msr     pmcr_el0, %[pmcr]\n"
277 	"       isb\n"
278 	"       mov     x10, %[loop]\n"
279 	"1:     sub     x10, x10, #1\n"
280 	"       ldr	x9, [%[addr]]\n"
281 	"       cmp     x10, #0x0\n"
282 	"       b.gt    1b\n"
283 	"       msr     pmcr_el0, xzr\n"
284 	"       isb\n"
285 	:
286 	: [addr] "r" (addr), [pmcr] "r" (pmcr64), [loop] "r" (loop)
287 	: "x9", "x10", "cc");
288 }
289 
290 static struct pmu_stats pmu_stats;
291 
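/*
 * PMU overflow interrupt handler: record which counters overflowed and how
 * often; any interrupt other than the PMU PPI marks the run as unexpected.
 */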
292 static void irq_handler(struct pt_regs *regs)
293 {
294 	uint32_t irqstat, irqnr;
295 
296 	irqstat = gic_read_iar();
297 	irqnr = gic_iar_irqnr(irqstat);
298 
299 	if (irqnr == PMU_PPI) {
300 		unsigned long overflows = read_sysreg(pmovsclr_el0);
301 		int i;
302 
303 		for (i = 0; i < 32; i++) {
304 			if (test_and_clear_bit(i, &overflows)) {
305 				pmu_stats.interrupts[i]++;
306 				pmu_stats.bitmap |= 1UL << i;
307 			}
308 		}
309 		write_sysreg(ALL_SET, pmovsclr_el0);
310 	} else {
311 		pmu_stats.unexpected = true;
312 	}
313 	gic_write_eoir(irqstat);
314 }
315 
316 static void pmu_reset_stats(void)
317 {
318 	int i;
319 
320 	for (i = 0; i < 32; i++)
321 		pmu_stats.interrupts[i] = 0;
322 
323 	pmu_stats.bitmap = 0;
324 	pmu_stats.unexpected = false;
325 }
326 
327 static void pmu_reset(void)
328 {
329 	/* reset all counters, counting disabled at PMCR level */
330 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
331 	/* Disable all counters */
332 	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
333 	/* clear overflow reg */
334 	write_sysreg(ALL_SET, pmovsclr_el0);
335 	/* disable overflow interrupts on all counters */
336 	write_sysreg(ALL_SET, pmintenclr_el1);
337 	pmu_reset_stats();
338 	isb();
339 }
340 
341 static void test_event_counter_config(void)
342 {
343 	int i;
344 
345 	if (!pmu.nb_implemented_counters) {
346 		report_skip("No event counter, skip ...");
347 		return;
348 	}
349 
350 	pmu_reset();
351 
352 	/*
353 	 * Test setting through PMSELR/PMXEVTYPER and PMEVTYPERn read,
354 	 * select counter 1
355 	 */
356 	write_sysreg(1, PMSELR_EL0);
357 	/* program this counter to count unsupported event */
358 	write_sysreg(0xEA, PMXEVTYPER_EL0);
359 	write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
360 	report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
361 		"PMESELR/PMXEVTYPER/PMEVTYPERn");
362 	report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
363 		"PMESELR/PMXEVCNTR/PMEVCNTRn");
364 
365 	/* look for an unsupported event in the range [0x0, 0x3F] to configure below */
366 	for (i = 0; i <= 0x3F; i++) {
367 		if (!is_event_supported(i, false))
368 			break;
369 	}
370 	if (i > 0x3F) {
371 		report_skip("pmevtyper: all events within [0x0, 0x3F] are supported");
372 		return;
373 	}
374 
375 	/* select counter 0 */
376 	write_sysreg(0, PMSELR_EL0);
377 	/* program this counter to count unsupported event */
378 	write_sysreg(i, PMXEVCNTR_EL0);
379 	/* read the counter value */
380 	read_sysreg(PMXEVCNTR_EL0);
381 	report(read_sysreg(PMXEVCNTR_EL0) == i,
382 		"read of a counter programmed with unsupported event");
383 }
384 
385 static bool satisfy_prerequisites(uint32_t *events, unsigned int nb_events)
386 {
387 	int i;
388 
389 	if (pmu.nb_implemented_counters < nb_events) {
390 		report_skip("Skip test as number of counters is too small (%d)",
391 			    pmu.nb_implemented_counters);
392 		return false;
393 	}
394 
395 	for (i = 0; i < nb_events; i++) {
396 		if (!is_event_supported(events[i], false)) {
397 			report_skip("Skip test as event 0x%x is not supported",
398 				    events[i]);
399 			return false;
400 		}
401 	}
402 	return true;
403 }
404 
405 static void test_basic_event_count(void)
406 {
407 	uint32_t implemented_counter_mask, non_implemented_counter_mask;
408 	uint32_t counter_mask;
409 	uint32_t events[] = {CPU_CYCLES, INST_RETIRED};
410 
411 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
412 		return;
413 
414 	implemented_counter_mask = BIT(pmu.nb_implemented_counters) - 1;
415 	non_implemented_counter_mask = ~(BIT(31) | implemented_counter_mask);
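	/* bit 31 is the cycle counter enable bit and is deliberately left out of both masks */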
416 	counter_mask = implemented_counter_mask | non_implemented_counter_mask;
417 
418 	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
419 	write_regn_el0(pmevtyper, 1, INST_RETIRED | PMEVTYPER_EXCLUDE_EL0);
420 
421 	/* disable all counters */
422 	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
423 	report(!read_sysreg_s(PMCNTENCLR_EL0) && !read_sysreg_s(PMCNTENSET_EL0),
424 		"pmcntenclr: disable all counters");
425 
426 	/*
427 	 * clear cycle and all event counters and allow counter enablement
428 	 * through PMCNTENSET. LC is RES1.
429 	 */
430 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
431 	isb();
432 	report(get_pmcr() == (pmu.pmcr_ro | PMU_PMCR_LC), "pmcr: reset counters");
433 
434 	/* Preset counter #0 to the pre-overflow value to trigger an overflow */
435 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
436 	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
437 		"counter #0 preset to pre-overflow value");
438 	report(!read_regn_el0(pmevcntr, 1), "counter #1 is 0");
439 
440 	/*
441 	 * Enable all implemented counters and also attempt to enable
442 	 * unimplemented counters. Counting is still disabled while PMCR.E is unset.
443 	 */
444 	write_sysreg_s(counter_mask, PMCNTENSET_EL0);
445 
446 	/* check only those implemented are enabled */
447 	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
448 		(read_sysreg_s(PMCNTENSET_EL0) == implemented_counter_mask),
449 		"pmcntenset: enabled implemented_counters");
450 
451 	/* Disable all counters but counters #0 and #1 */
452 	write_sysreg_s(~0x3, PMCNTENCLR_EL0);
453 	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
454 		(read_sysreg_s(PMCNTENSET_EL0) == 0x3),
455 		"pmcntenset: just enabled #0 and #1");
456 
457 	/* clear overflow register */
458 	write_sysreg(ALL_SET, pmovsclr_el0);
459 	report(!read_sysreg(pmovsclr_el0), "check overflow reg is 0");
460 
461 	/* disable overflow interrupts on all counters*/
462 	write_sysreg(ALL_SET, pmintenclr_el1);
463 	report(!read_sysreg(pmintenclr_el1),
464 		"pmintenclr_el1=0, all interrupts disabled");
465 
466 	/* enable overflow interrupts on all event counters */
467 	write_sysreg(implemented_counter_mask | non_implemented_counter_mask,
468 		     pmintenset_el1);
469 	report(read_sysreg(pmintenset_el1) == implemented_counter_mask,
470 		"overflow interrupts enabled on all implemented counters");
471 
472 	/* Set PMCR.E, execute asm code and unset PMCR.E */
473 	precise_instrs_loop(20, pmu.pmcr_ro | PMU_PMCR_E);
474 
475 	report_info("counter #0 is 0x%lx (CPU_CYCLES)",
476 		    read_regn_el0(pmevcntr, 0));
477 	report_info("counter #1 is 0x%lx (INST_RETIRED)",
478 		    read_regn_el0(pmevcntr, 1));
479 
480 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
481 	report(read_sysreg(pmovsclr_el0) & 0x1,
482 		"check overflow happened on #0 only");
483 }
484 
485 static void test_mem_access(void)
486 {
487 	void *addr = malloc(PAGE_SIZE);
488 	uint32_t events[] = {MEM_ACCESS, MEM_ACCESS};
489 
490 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
491 		return;
492 
493 	pmu_reset();
494 
495 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
496 	write_regn_el0(pmevtyper, 1, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
497 	write_sysreg_s(0x3, PMCNTENSET_EL0);
498 	isb();
499 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
500 	report_info("counter #0 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 0));
501 	report_info("counter #1 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 1));
502 	/* We may measure more than 20 mem accesses depending on the core */
503 	report((read_regn_el0(pmevcntr, 0) == read_regn_el0(pmevcntr, 1)) &&
504 	       (read_regn_el0(pmevcntr, 0) >= 20) && !read_sysreg(pmovsclr_el0),
505 	       "Ran 20 mem accesses");
506 
507 	pmu_reset();
508 
509 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
510 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
511 	write_sysreg_s(0x3, PMCNTENSET_EL0);
512 	isb();
513 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
514 	report(read_sysreg(pmovsclr_el0) == 0x3,
515 	       "Ran 20 mem accesses with expected overflows on both counters");
516 	report_info("cnt#0 = %ld cnt#1=%ld overflow=0x%lx",
517 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
518 			read_sysreg(pmovsclr_el0));
519 }
520 
521 static void test_sw_incr(void)
522 {
523 	uint32_t events[] = {SW_INCR, SW_INCR};
524 	int i;
525 
526 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
527 		return;
528 
529 	pmu_reset();
530 
531 	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
532 	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
533 	/* enable counters #0 and #1 */
534 	write_sysreg_s(0x3, PMCNTENSET_EL0);
535 
536 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
537 
538 	for (i = 0; i < 100; i++)
539 		write_sysreg(0x1, pmswinc_el0);
540 
541 	report_info("SW_INCR counter #0 has value %ld", read_regn_el0(pmevcntr, 0));
542 	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
543 		"PWSYNC does not increment if PMCR.E is unset");
544 
545 	pmu_reset();
546 
547 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
548 	write_sysreg_s(0x3, PMCNTENSET_EL0);
549 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
550 
551 	for (i = 0; i < 100; i++)
552 		write_sysreg(0x3, pmswinc_el0);
553 
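	/*
	 * Each write of 0x3 increments both counters; counter #0 starts at
	 * 0xFFFFFFF0, wraps after 16 increments and ends at 100 - 16 = 84.
	 */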
554 	report(read_regn_el0(pmevcntr, 0) == 84, "counter #0 after + 100 SW_INCR");
555 	report(read_regn_el0(pmevcntr, 1) == 100,
556 		"counter #1 after + 100 SW_INCR");
557 	report_info("counter values after 100 SW_INCR #0=%ld #1=%ld",
558 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
559 	report(read_sysreg(pmovsclr_el0) == 0x1,
560 		"overflow on counter #0 after 100 SW_INCR");
561 }
562 
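/*
 * With the CHAIN event (0x1E) programmed on an odd-numbered counter, that
 * counter increments each time the preceding even-numbered counter
 * overflows, so the pair acts as a single 64-bit counter whose value is
 * ((uint64_t)read_regn_el0(pmevcntr, 1) << 32) | read_regn_el0(pmevcntr, 0).
 */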
563 static void test_chained_counters(void)
564 {
565 	uint32_t events[] = {CPU_CYCLES, CHAIN};
566 
567 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
568 		return;
569 
570 	pmu_reset();
571 
572 	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
573 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
574 	/* enable counters #0 and #1 */
575 	write_sysreg_s(0x3, PMCNTENSET_EL0);
576 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
577 
578 	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
579 
580 	report(read_regn_el0(pmevcntr, 1) == 1, "CHAIN counter #1 incremented");
581 	report(!read_sysreg(pmovsclr_el0), "no overflow recorded for chained incr #1");
582 
583 	/* test 64b overflow */
584 
585 	pmu_reset();
586 	write_sysreg_s(0x3, PMCNTENSET_EL0);
587 
588 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
589 	write_regn_el0(pmevcntr, 1, 0x1);
590 	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
591 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
592 	report(read_regn_el0(pmevcntr, 1) == 2, "CHAIN counter #1 set to 2");
593 	report(!read_sysreg(pmovsclr_el0), "no overflow recorded for chained incr #2");
594 
595 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
596 	write_regn_el0(pmevcntr, 1, ALL_SET);
597 
598 	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
599 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
600 	report(!read_regn_el0(pmevcntr, 1), "CHAIN counter #1 wrapped");
601 	report(read_sysreg(pmovsclr_el0) == 0x2, "overflow on chain counter");
602 }
603 
604 static void test_chained_sw_incr(void)
605 {
606 	uint32_t events[] = {SW_INCR, CHAIN};
607 	int i;
608 
609 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
610 		return;
611 
612 	pmu_reset();
613 
614 	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
615 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
616 	/* enable counters #0 and #1 */
617 	write_sysreg_s(0x3, PMCNTENSET_EL0);
618 
619 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
620 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
621 	for (i = 0; i < 100; i++)
622 		write_sysreg(0x1, pmswinc_el0);
623 
624 	report(!read_sysreg(pmovsclr_el0) && (read_regn_el0(pmevcntr, 1) == 1),
625 		"no overflow and chain counter incremented after 100 SW_INCR/CHAIN");
626 	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
627 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
628 
629 	/* 64b SW_INCR and overflow on CHAIN counter */
630 	pmu_reset();
631 
632 	write_regn_el0(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
633 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
634 	write_regn_el0(pmevcntr, 1, ALL_SET);
635 	write_sysreg_s(0x3, PMCNTENSET_EL0);
636 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
637 	for (i = 0; i < 100; i++)
638 		write_sysreg(0x1, pmswinc_el0);
639 
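	/*
	 * Counter #0 wraps once (after 16 of the 100 increments) and ends at
	 * 84; that wrap bumps CHAIN counter #1, preset to ALL_SET, so #1 rolls
	 * over to 0 and only its overflow flag (bit 1) is expected to be set.
	 */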
640 	report((read_sysreg(pmovsclr_el0) == 0x2) &&
641 		(read_regn_el0(pmevcntr, 1) == 0) &&
642 		(read_regn_el0(pmevcntr, 0) == 84),
643 		"overflow on chain counter and expected values after 100 SW_INCR/CHAIN");
644 	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
645 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
646 }
647 
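/*
 * Exercise transitions between 32-bit and chained 64-bit configurations:
 * enable/disable either half of a CHAIN pair around an overflow and retype
 * counter #1 between CHAIN and a regular event.
 */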
648 static void test_chain_promotion(void)
649 {
650 	uint32_t events[] = {MEM_ACCESS, CHAIN};
651 	void *addr = malloc(PAGE_SIZE);
652 
653 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
654 		return;
655 
656 	/* Only enable CHAIN counter */
657 	pmu_reset();
658 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
659 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
660 	write_sysreg_s(0x2, PMCNTENSET_EL0);
661 	isb();
662 
663 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
664 	report(!read_regn_el0(pmevcntr, 0),
665 		"chain counter not counting if even counter is disabled");
666 
667 	/* Only enable even counter */
668 	pmu_reset();
669 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
670 	write_sysreg_s(0x1, PMCNTENSET_EL0);
671 	isb();
672 
673 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
674 	report(!read_regn_el0(pmevcntr, 1) && (read_sysreg(pmovsclr_el0) == 0x1),
675 		"odd counter did not increment on overflow if disabled");
676 	report_info("MEM_ACCESS counter #0 has value %ld",
677 		    read_regn_el0(pmevcntr, 0));
678 	report_info("CHAIN counter #1 has value %ld",
679 		    read_regn_el0(pmevcntr, 1));
680 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
681 
682 	/* start at 0xFFFFFFDC, +20 with CHAIN enabled, +20 with CHAIN disabled */
683 	pmu_reset();
684 	write_sysreg_s(0x3, PMCNTENSET_EL0);
685 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
686 	isb();
687 
688 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
689 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
690 		    read_regn_el0(pmevcntr, 0));
691 
692 	/* disable the CHAIN event */
693 	write_sysreg_s(0x2, PMCNTENCLR_EL0);
694 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
695 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
696 		    read_regn_el0(pmevcntr, 0));
697 	report(read_sysreg(pmovsclr_el0) == 0x1,
698 		"should have triggered an overflow on #0");
699 	report(!read_regn_el0(pmevcntr, 1),
700 		"CHAIN counter #1 shouldn't have incremented");
701 
702 	/* start at 0xFFFFFFDC, +20 with CHAIN disabled, +20 with CHAIN enabled */
703 
704 	pmu_reset();
705 	write_sysreg_s(0x1, PMCNTENSET_EL0);
706 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
707 	isb();
708 	report_info("counter #0 = 0x%lx, counter #1 = 0x%lx overflow=0x%lx",
709 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
710 		    read_sysreg(pmovsclr_el0));
711 
712 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
713 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
714 		    read_regn_el0(pmevcntr, 0));
715 
716 	/* enable the CHAIN event */
717 	write_sysreg_s(0x3, PMCNTENSET_EL0);
718 	isb();
719 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
720 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
721 		    read_regn_el0(pmevcntr, 0));
722 
723 	report((read_regn_el0(pmevcntr, 1) == 1) && !read_sysreg(pmovsclr_el0),
724 		"CHAIN counter enabled: CHAIN counter was incremented and no overflow");
725 
726 	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
727 		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));
728 
729 	/* start as MEM_ACCESS/CPU_CYCLES and move to CHAIN/MEM_ACCESS */
730 	pmu_reset();
731 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
732 	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
733 	write_sysreg_s(0x3, PMCNTENSET_EL0);
734 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
735 	isb();
736 
737 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
738 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
739 		    read_regn_el0(pmevcntr, 0));
740 
741 	/* reprogram counter #1 as CHAIN so that counter #0 becomes chained */
742 	write_sysreg_s(0x0, PMCNTENSET_EL0);
743 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
744 	write_sysreg_s(0x3, PMCNTENSET_EL0);
745 	write_regn_el0(pmevcntr, 1, 0x0);
746 
747 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
748 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
749 		    read_regn_el0(pmevcntr, 0));
750 
751 	report((read_regn_el0(pmevcntr, 1) == 1) && !read_sysreg(pmovsclr_el0),
752 		"32b->64b: CHAIN counter incremented and no overflow");
753 
754 	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
755 		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));
756 
757 	/* start as CHAIN/MEM_ACCESS and move to MEM_ACCESS/CPU_CYCLES */
758 	pmu_reset();
759 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
760 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
761 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
762 	write_sysreg_s(0x3, PMCNTENSET_EL0);
763 
764 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
765 	report_info("counter #0=0x%lx, counter #1=0x%lx",
766 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
767 
768 	write_sysreg_s(0x0, PMCNTENSET_EL0);
769 	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
770 	write_sysreg_s(0x3, PMCNTENSET_EL0);
771 
772 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
773 	report(read_sysreg(pmovsclr_el0) == 1,
774 		"overflow is expected on counter 0");
775 	report_info("counter #0=0x%lx, counter #1=0x%lx overflow=0x%lx",
776 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
777 			read_sysreg(pmovsclr_el0));
778 }
779 
780 static bool expect_interrupts(uint32_t bitmap)
781 {
782 	int i;
783 
784 	if (pmu_stats.bitmap != bitmap || pmu_stats.unexpected)
785 		return false;
786 
787 	for (i = 0; i < 32; i++) {
788 		if (test_and_clear_bit(i, &pmu_stats.bitmap))
789 			if (pmu_stats.interrupts[i] != 1)
790 				return false;
791 	}
792 	return true;
793 }
794 
795 static void test_overflow_interrupt(void)
796 {
797 	uint32_t events[] = {MEM_ACCESS, SW_INCR};
798 	void *addr = malloc(PAGE_SIZE);
799 	int i;
800 
801 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
802 		return;
803 
804 	gic_enable_defaults();
805 	install_irq_handler(EL1H_IRQ, irq_handler);
806 	local_irq_enable();
807 	gic_enable_irq(PMU_PPI);
808 
809 	pmu_reset();
810 
811 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
812 	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
813 	write_sysreg_s(0x3, PMCNTENSET_EL0);
814 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
815 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
816 	isb();
817 
818 	/* interrupts are disabled */
819 
820 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
821 	report(expect_interrupts(0), "no overflow interrupt after preset");
822 
823 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
824 	for (i = 0; i < 100; i++)
825 		write_sysreg(0x2, pmswinc_el0);
826 
827 	set_pmcr(pmu.pmcr_ro);
828 	report(expect_interrupts(0), "no overflow interrupt after counting");
829 
830 	/* enable interrupts */
831 
832 	pmu_reset_stats();
833 
834 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
835 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
836 	write_sysreg(ALL_SET, pmintenset_el1);
837 	isb();
838 
839 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
840 	for (i = 0; i < 100; i++)
841 		write_sysreg(0x3, pmswinc_el0);
842 
843 	mem_access_loop(addr, 200, pmu.pmcr_ro);
844 	report_info("overflow=0x%lx", read_sysreg(pmovsclr_el0));
845 	report(expect_interrupts(0x3),
846 		"overflow interrupts expected on #0 and #1");
847 
848 	/* promote to 64-bit (chained) counting */
849 
850 	pmu_reset_stats();
851 
852 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
853 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
854 	isb();
855 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
856 	report(expect_interrupts(0),
857 		"no overflow interrupt expected on 32b boundary");
858 
859 	/* overflow on odd counter */
860 	pmu_reset_stats();
861 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
862 	write_regn_el0(pmevcntr, 1, ALL_SET);
863 	isb();
864 	mem_access_loop(addr, 400, pmu.pmcr_ro | PMU_PMCR_E);
865 	report(expect_interrupts(0x2),
866 		"expect overflow interrupt on odd counter");
867 }
868 #endif
869 
870 /*
871  * Ensure that the cycle counter progresses between back-to-back reads.
872  */
873 static bool check_cycles_increase(void)
874 {
875 	bool success = true;
876 
877 	/* init before event access, this test only cares about cycle count */
878 	set_pmcntenset(1 << PMU_CYCLE_IDX);
879 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
880 
881 	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
882 
883 	for (int i = 0; i < NR_SAMPLES; i++) {
884 		uint64_t a, b;
885 
886 		a = get_pmccntr();
887 		b = get_pmccntr();
888 
889 		if (a >= b) {
890 			printf("Read %"PRId64" then %"PRId64".\n", a, b);
891 			success = false;
892 			break;
893 		}
894 	}
895 
896 	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
897 
898 	return success;
899 }
900 
901 /*
902  * Execute a known number of guest instructions. Only even instruction counts
903  * greater than or equal to 4 are supported by the in-line assembly code. The
904  * control register (PMCR_EL0) is initialized with the provided value (allowing
905  * for example for the cycle counter or event counters to be reset). At the end
906  * of the exact instruction loop, zero is written to PMCR_EL0 to disable
907  * counting, allowing the cycle counter or event counters to be read at the
908  * leisure of the calling code.
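 * For example, measure_instrs(8, pmcr) calls precise_instrs_loop() with
 * loop = (8 - 2) / 2 = 3, giving 2 + 2*3 = 8 counted instructions.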
909  */
910 static void measure_instrs(int num, uint32_t pmcr)
911 {
912 	int loop = (num - 2) / 2;
913 
914 	assert(num >= 4 && ((num - 2) % 2 == 0));
915 	precise_instrs_loop(loop, pmcr);
916 }
917 
918 /*
919  * Measure cycle counts for various known instruction counts. Ensure that the
920  * cycle counter progresses (similar to check_cycles_increase() but with more
921  * instructions and using reset and stop controls). If supplied a positive,
922  * nonzero CPI parameter, it also strictly checks that every measurement matches
923  * it. Strict CPI checking is used to test -icount mode.
924  */
925 static bool check_cpi(int cpi)
926 {
927 	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;
928 
929 	/* init before event access, this test only cares about cycle count */
930 	set_pmcntenset(1 << PMU_CYCLE_IDX);
931 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
932 
933 	if (cpi > 0)
934 		printf("Checking for CPI=%d.\n", cpi);
935 	printf("instrs : cycles0 cycles1 ...\n");
936 
937 	for (unsigned int i = 4; i < 300; i += 32) {
938 		uint64_t avg, sum = 0;
939 
940 		printf("%4d:", i);
941 		for (int j = 0; j < NR_SAMPLES; j++) {
942 			uint64_t cycles;
943 
944 			set_pmccntr(0);
945 			measure_instrs(i, pmcr);
946 			cycles = get_pmccntr();
947 			printf(" %4"PRId64"", cycles);
948 
949 			if (!cycles) {
950 				printf("\ncycles not incrementing!\n");
951 				return false;
952 			} else if (cpi > 0 && cycles != i * cpi) {
953 				printf("\nunexpected cycle count received!\n");
954 				return false;
955 			} else if ((cycles >> 32) != 0) {
956 				/* The cycles taken by the loop above should
957 				 * fit in 32 bits easily. We check the upper
958 				 * 32 bits of the cycle counter to make sure
959 			 * there is no surprise. */
960 				printf("\ncycle count bigger than 32bit!\n");
961 				return false;
962 			}
963 
964 			sum += cycles;
965 		}
966 		avg = sum / NR_SAMPLES;
967 		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
968 		       (avg >= i) ? "cpi" : "ipc",
969 		       (avg >= i) ? avg / i : i / avg);
970 	}
971 
972 	return true;
973 }
974 
975 static void pmccntr64_test(void)
976 {
977 #ifdef __arm__
978 	if (pmu.version == ID_DFR0_PMU_V3) {
979 		if (ERRATA(9e3f7a296940)) {
980 			write_sysreg(0xdead, PMCCNTR64);
981 			report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
982 		} else
983 			report_skip("Skipping unsafe pmccntr64 test. Set ERRATA_9e3f7a296940=y to enable.");
984 	}
985 #endif
986 }
987 
988 /* Return FALSE if no PMU found, otherwise return TRUE */
989 static bool pmu_probe(void)
990 {
991 	uint32_t pmcr;
992 	uint8_t implementer;
993 
994 	pmu.version = get_pmu_version();
995 	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
996 		return false;
997 
998 	report_info("PMU version: 0x%x", pmu.version);
999 
1000 	pmcr = get_pmcr();
1001 	implementer = (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK;
1002 	report_info("PMU implementer/ID code: %#"PRIx32"(\"%c\")/%#"PRIx32,
1003 		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
1004 		    implementer ? implementer : ' ',
1005 		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);
1006 
1007 	/* store the read-only and RES0 PMCR fields (everything above the low control bits) */
1008 	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
1009 	pmu.nb_implemented_counters =
1010 		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
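	/* PMCR.N covers only the event counters; the cycle counter (index 31) is separate */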
1011 	report_info("Implements %d event counters",
1012 		    pmu.nb_implemented_counters);
1013 
1014 	return true;
1015 }
1016 
1017 int main(int argc, char *argv[])
1018 {
1019 	int cpi = 0;
1020 
1021 	if (!pmu_probe()) {
1022 		printf("No PMU found, test skipped...\n");
1023 		return report_summary();
1024 	}
1025 
1026 	if (argc < 2)
1027 		report_abort("no test specified");
1028 
1029 	report_prefix_push("pmu");
1030 
1031 	if (strcmp(argv[1], "cycle-counter") == 0) {
1032 		report_prefix_push(argv[1]);
1033 		if (argc > 2)
1034 			cpi = atol(argv[2]);
1035 		report(check_cycles_increase(),
1036 		       "Monotonically increasing cycle count");
1037 		report(check_cpi(cpi), "Cycle/instruction ratio");
1038 		pmccntr64_test();
1039 		report_prefix_pop();
1040 	} else if (strcmp(argv[1], "pmu-event-introspection") == 0) {
1041 		report_prefix_push(argv[1]);
1042 		test_event_introspection();
1043 		report_prefix_pop();
1044 	} else if (strcmp(argv[1], "pmu-event-counter-config") == 0) {
1045 		report_prefix_push(argv[1]);
1046 		test_event_counter_config();
1047 		report_prefix_pop();
1048 	} else if (strcmp(argv[1], "pmu-basic-event-count") == 0) {
1049 		report_prefix_push(argv[1]);
1050 		test_basic_event_count();
1051 		report_prefix_pop();
1052 	} else if (strcmp(argv[1], "pmu-mem-access") == 0) {
1053 		report_prefix_push(argv[1]);
1054 		test_mem_access();
1055 		report_prefix_pop();
1056 	} else if (strcmp(argv[1], "pmu-sw-incr") == 0) {
1057 		report_prefix_push(argv[1]);
1058 		test_sw_incr();
1059 		report_prefix_pop();
1060 	} else if (strcmp(argv[1], "pmu-chained-counters") == 0) {
1061 		report_prefix_push(argv[1]);
1062 		test_chained_counters();
1063 		report_prefix_pop();
1064 	} else if (strcmp(argv[1], "pmu-chained-sw-incr") == 0) {
1065 		report_prefix_push(argv[1]);
1066 		test_chained_sw_incr();
1067 		report_prefix_pop();
1068 	} else if (strcmp(argv[1], "pmu-chain-promotion") == 0) {
1069 		report_prefix_push(argv[1]);
1070 		test_chain_promotion();
1071 		report_prefix_pop();
1072 	} else if (strcmp(argv[1], "pmu-overflow-interrupt") == 0) {
1073 		report_prefix_push(argv[1]);
1074 		test_overflow_interrupt();
1075 		report_prefix_pop();
1076 	} else {
1077 		report_abort("Unknown sub-test '%s'", argv[1]);
1078 	}
1079 
1080 	return report_summary();
1081 }
1082