xref: /kvm-unit-tests/arm/pmu.c (revision d6d3a3bd85095f4554d0ea01fe0c88b99a4413f9)
1 /*
2  * Test the ARM Performance Monitors Unit (PMU).
3  *
4  * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
5  * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU Lesser General Public License version 2.1 and
9  * only version 2.1 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
14  * for more details.
15  */
16 #include "libcflat.h"
17 #include "errata.h"
18 #include "asm/barrier.h"
19 #include "asm/sysreg.h"
20 #include "asm/processor.h"
21 #include <bitops.h>
22 #include <asm/gic.h>
23 
24 #define PMU_PMCR_E         (1 << 0)
25 #define PMU_PMCR_P         (1 << 1)
26 #define PMU_PMCR_C         (1 << 2)
27 #define PMU_PMCR_D         (1 << 3)
28 #define PMU_PMCR_X         (1 << 4)
29 #define PMU_PMCR_DP        (1 << 5)
30 #define PMU_PMCR_LC        (1 << 6)
31 #define PMU_PMCR_N_SHIFT   11
32 #define PMU_PMCR_N_MASK    0x1f
33 #define PMU_PMCR_ID_SHIFT  16
34 #define PMU_PMCR_ID_MASK   0xff
35 #define PMU_PMCR_IMP_SHIFT 24
36 #define PMU_PMCR_IMP_MASK  0xff
37 
38 #define PMU_CYCLE_IDX      31
39 
40 #define NR_SAMPLES 10
41 
42 /* Some PMU events */
43 #define SW_INCR			0x0
44 #define INST_RETIRED		0x8
45 #define CPU_CYCLES		0x11
46 #define MEM_ACCESS		0x13
47 #define INST_PREC		0x1B
48 #define STALL_FRONTEND		0x23
49 #define STALL_BACKEND		0x24
50 #define CHAIN			0x1E
51 
52 #define COMMON_EVENTS_LOW	0x0
53 #define COMMON_EVENTS_HIGH	0x3F
54 #define EXT_COMMON_EVENTS_LOW	0x4000
55 #define EXT_COMMON_EVENTS_HIGH	0x403F
56 
57 #define ALL_SET			0xFFFFFFFF
58 #define ALL_CLEAR		0x0
59 #define PRE_OVERFLOW		0xFFFFFFF0
60 #define PRE_OVERFLOW2		0xFFFFFFDC
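/*
 * PRE_OVERFLOW leaves 16 increments before a 32-bit counter wraps and
 * PRE_OVERFLOW2 leaves 36, so a single 20-access loop in the chain
 * promotion tests below stays under the overflow boundary while a
 * second loop crosses it.
 */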
61 
62 #define PMU_PPI			23
63 
64 struct pmu {
65 	unsigned int version;
66 	unsigned int nb_implemented_counters;
67 	uint32_t pmcr_ro;
68 };
69 
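/*
 * Per-counter overflow interrupt statistics: irq_handler() records which
 * counters overflowed in @bitmap and how many times each fired in
 * @interrupts, and flags any non-PMU interrupt in @unexpected;
 * expect_interrupts() checks them.
 */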
70 struct pmu_stats {
71 	unsigned long bitmap;
72 	uint32_t interrupts[32];
73 	bool unexpected;
74 };
75 
76 static struct pmu pmu;
77 
78 #if defined(__arm__)
79 #define ID_DFR0_PERFMON_SHIFT 24
80 #define ID_DFR0_PERFMON_MASK  0xf
81 
82 #define ID_DFR0_PMU_NOTIMPL	0b0000
83 #define ID_DFR0_PMU_V1		0b0001
84 #define ID_DFR0_PMU_V2		0b0010
85 #define ID_DFR0_PMU_V3		0b0011
86 #define ID_DFR0_PMU_V3_8_1	0b0100
87 #define ID_DFR0_PMU_V3_8_4	0b0101
88 #define ID_DFR0_PMU_V3_8_5	0b0110
89 #define ID_DFR0_PMU_IMPDEF	0b1111
90 
91 #define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
92 #define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
93 #define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
94 #define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
95 #define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
96 #define PMCNTENCLR   __ACCESS_CP15(c9, 0, c12, 2)
97 #define PMOVSR       __ACCESS_CP15(c9, 0, c12, 3)
98 #define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
99 #define PMINTENCLR   __ACCESS_CP15(c9, 0, c14, 2)
100 #define PMCCNTR64    __ACCESS_CP15_64(0, c9)
101 
102 static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
103 static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
104 static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
105 static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }
106 
107 static inline uint8_t get_pmu_version(void)
108 {
109 	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
110 }
111 
112 static inline uint64_t get_pmccntr(void)
113 {
114 	return read_sysreg(PMCCNTR32);
115 }
116 
117 static inline void set_pmccntr(uint64_t value)
118 {
119 	write_sysreg(value & 0xffffffff, PMCCNTR32);
120 }
121 
122 /* On ARMv7, PMCCFILTR is accessed through PMXEVTYPER with PMSELR set to 31 */
123 static inline void set_pmccfiltr(uint32_t value)
124 {
125 	write_sysreg(PMU_CYCLE_IDX, PMSELR);
126 	write_sysreg(value, PMXEVTYPER);
127 	isb();
128 }
129 
130 /*
131  * Extra instructions inserted by the compiler would be difficult to compensate
132  * for, so hand assemble everything between, and including, the PMCR accesses
133  * to start and stop counting. isb instructions were inserted to make sure
134  * pmccntr read after this function returns the exact instructions executed in
135  * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop.
136  */
137 static inline void precise_instrs_loop(int loop, uint32_t pmcr)
138 {
139 	asm volatile(
140 	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"
141 	"	isb\n"
142 	"1:	subs	%[loop], %[loop], #1\n"
143 	"	bgt	1b\n"
144 	"	mcr	p15, 0, %[z], c9, c12, 0\n"
145 	"	isb\n"
146 	: [loop] "+r" (loop)
147 	: [pmcr] "r" (pmcr), [z] "r" (0)
148 	: "cc");
149 }
150 
151 static void pmu_reset(void)
152 {
153 	/* reset all counters, counting disabled at PMCR level */
154 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
155 	/* Disable all counters */
156 	write_sysreg(ALL_SET, PMCNTENCLR);
157 	/* clear overflow reg */
158 	write_sysreg(ALL_SET, PMOVSR);
159 	/* disable overflow interrupts on all counters */
160 	write_sysreg(ALL_SET, PMINTENCLR);
161 	isb();
162 }
163 
164 /* event counter tests only implemented for aarch64 */
165 static void test_event_introspection(void) {}
166 static void test_event_counter_config(void) {}
167 static void test_basic_event_count(void) {}
168 static void test_mem_access(void) {}
169 static void test_sw_incr(void) {}
170 static void test_chained_counters(void) {}
171 static void test_chained_sw_incr(void) {}
172 static void test_chain_promotion(void) {}
173 static void test_overflow_interrupt(void) {}
174 
175 #elif defined(__aarch64__)
176 #define ID_AA64DFR0_PERFMON_SHIFT 8
177 #define ID_AA64DFR0_PERFMON_MASK  0xf
178 
179 #define ID_DFR0_PMU_NOTIMPL	0b0000
180 #define ID_DFR0_PMU_V3		0b0001
181 #define ID_DFR0_PMU_V3_8_1	0b0100
182 #define ID_DFR0_PMU_V3_8_4	0b0101
183 #define ID_DFR0_PMU_V3_8_5	0b0110
184 #define ID_DFR0_PMU_IMPDEF	0b1111
185 
186 static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
187 static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
188 static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
189 static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
190 static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
191 static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
192 static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }
193 
194 static inline uint8_t get_pmu_version(void)
195 {
196 	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;
197 	return ver;
198 }
199 
200 /*
201  * Extra instructions inserted by the compiler would be difficult to compensate
202  * for, so hand assemble everything between, and including, the PMCR accesses
203  * to start and stop counting. isb instructions are inserted to make sure
204  * pmccntr read after this function returns the exact instructions executed
205  * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
206  */
207 static inline void precise_instrs_loop(int loop, uint32_t pmcr)
208 {
209 	uint64_t pmcr64 = pmcr;
210 	asm volatile(
211 	"	msr	pmcr_el0, %[pmcr]\n"
212 	"	isb\n"
213 	"1:	subs	%w[loop], %w[loop], #1\n"
214 	"	b.gt	1b\n"
215 	"	msr	pmcr_el0, xzr\n"
216 	"	isb\n"
217 	: [loop] "+r" (loop)
218 	: [pmcr] "r" (pmcr64)
219 	: "cc");
220 }
221 
222 #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
223 #define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
224 #define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
225 
226 #define PMEVTYPER_EXCLUDE_EL1 BIT(31)
227 #define PMEVTYPER_EXCLUDE_EL0 BIT(30)
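/*
 * These map to PMEVTYPERn_EL0.P (bit 31) and PMEVTYPERn_EL0.U (bit 30):
 * when set, the P bit excludes EL1 and the U bit excludes EL0 from
 * counting. The tests below set only EXCLUDE_EL0 so the EL1 test code
 * itself is counted.
 */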
228 
229 static bool is_event_supported(uint32_t n, bool warn)
230 {
231 	uint64_t pmceid0 = read_sysreg(pmceid0_el0);
232 	uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0);
233 	bool supported;
234 	uint64_t reg;
235 
236 	/*
237 	 * The low 32 bits of PMCEID0/1 respectively describe
238 	 * event support for events 0-31/32-63. Their high
239 	 * 32 bits describe support for the extended events
240 	 * starting at 0x4000, using the same split.
241 	 */
242 	assert((n >= COMMON_EVENTS_LOW  && n <= COMMON_EVENTS_HIGH) ||
243 	       (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH));
244 
245 	if (n <= COMMON_EVENTS_HIGH)
246 		reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32);
247 	else
248 		reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32);
249 
250 	supported =  reg & (1UL << (n & 0x3F));
251 
252 	if (!supported && warn)
253 		report_info("event 0x%x is not supported", n);
254 	return supported;
255 }
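/*
 * A few examples of the mapping implemented above:
 *   MEM_ACCESS (0x13)      -> PMCEID0_EL0 bit 19
 *   STALL_FRONTEND (0x23)  -> PMCEID1_EL0 bit 3
 *   extended event 0x4001  -> PMCEID0_EL0 bit 33
 */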
256 
257 static void test_event_introspection(void)
258 {
259 	bool required_events;
260 
261 	if (!pmu.nb_implemented_counters) {
262 		report_skip("No event counter, skip ...");
263 		return;
264 	}
265 
266 	/* PMUv3 requires that an implementation includes some common events */
267 	required_events = is_event_supported(SW_INCR, true) &&
268 			  is_event_supported(CPU_CYCLES, true) &&
269 			  (is_event_supported(INST_RETIRED, true) ||
270 			   is_event_supported(INST_PREC, true));
271 
272 	if (pmu.version >= ID_DFR0_PMU_V3_8_1) {
273 		required_events = required_events &&
274 				  is_event_supported(STALL_FRONTEND, true) &&
275 				  is_event_supported(STALL_BACKEND, true);
276 	}
277 
278 	report(required_events, "Check required events are implemented");
279 }
280 
281 /*
282  * Extra instructions inserted by the compiler would be difficult to compensate
283  * for, so hand assemble everything between, and including, the PMCR accesses
284  * to start and stop counting. isb instructions are inserted to make sure
285  * pmccntr read after this function returns the exact instructions executed
286  * in the controlled block. Loads @loop times the data at @address into x9.
287  */
288 static void mem_access_loop(void *addr, long loop, uint32_t pmcr)
289 {
290 	uint64_t pmcr64 = pmcr;
291 	asm volatile(
292 	"       msr     pmcr_el0, %[pmcr]\n"
293 	"       isb\n"
294 	"       mov     x10, %[loop]\n"
295 	"1:     sub     x10, x10, #1\n"
296 	"       ldr	x9, [%[addr]]\n"
297 	"       cmp     x10, #0x0\n"
298 	"       b.gt    1b\n"
299 	"       msr     pmcr_el0, xzr\n"
300 	"       isb\n"
301 	:
302 	: [addr] "r" (addr), [pmcr] "r" (pmcr64), [loop] "r" (loop)
303 	: "x9", "x10", "cc");
304 }
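/*
 * Each iteration issues one ldr, so a call with loop=20 is expected to
 * generate at least 20 MEM_ACCESS events; speculative or additional
 * accesses may add more, which is why callers only check a lower bound.
 */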
305 
306 static struct pmu_stats pmu_stats;
307 
308 static void irq_handler(struct pt_regs *regs)
309 {
310 	uint32_t irqstat, irqnr;
311 
312 	irqstat = gic_read_iar();
313 	irqnr = gic_iar_irqnr(irqstat);
314 
315 	if (irqnr == PMU_PPI) {
316 		unsigned long overflows = read_sysreg(pmovsclr_el0);
317 		int i;
318 
319 		for (i = 0; i < 32; i++) {
320 			if (test_and_clear_bit(i, &overflows)) {
321 				pmu_stats.interrupts[i]++;
322 				pmu_stats.bitmap |= 1 << i;
323 			}
324 		}
325 		write_sysreg(ALL_SET, pmovsclr_el0);
326 		isb();
327 	} else {
328 		pmu_stats.unexpected = true;
329 	}
330 	gic_write_eoir(irqstat);
331 }
332 
333 static void pmu_reset_stats(void)
334 {
335 	int i;
336 
337 	for (i = 0; i < 32; i++)
338 		pmu_stats.interrupts[i] = 0;
339 
340 	pmu_stats.bitmap = 0;
341 	pmu_stats.unexpected = false;
342 }
343 
344 static void pmu_reset(void)
345 {
346 	/* reset all counters, counting disabled at PMCR level */
347 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
348 	/* Disable all counters */
349 	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
350 	/* clear overflow reg */
351 	write_sysreg(ALL_SET, pmovsclr_el0);
352 	/* disable overflow interrupts on all counters */
353 	write_sysreg(ALL_SET, pmintenclr_el1);
354 	pmu_reset_stats();
355 	isb();
356 }
357 
358 static void test_event_counter_config(void)
359 {
360 	int i;
361 
362 	if (!pmu.nb_implemented_counters) {
363 		report_skip("No event counter, skip ...");
364 		return;
365 	}
366 
367 	pmu_reset();
368 
369 	/*
370 	 * Test setting through PMSELR/PMXEVTYPER and reading back through
371 	 * PMEVTYPERn/PMEVCNTRn; select counter 1
372 	 */
373 	write_sysreg(1, PMSELR_EL0);
374 	/* program this counter to count unsupported event */
375 	write_sysreg(0xEA, PMXEVTYPER_EL0);
376 	write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
377 	report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
378 		"PMSELR/PMXEVTYPER/PMEVTYPERn");
379 	report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
380 		"PMSELR/PMXEVCNTR/PMEVCNTRn");
381 
382 	/* look for an unsupported event in the range [0x0, 0x3F] */
383 	for (i = 0; i <= 0x3F; i++) {
384 		if (!is_event_supported(i, false))
385 			break;
386 	}
387 	if (i > 0x3F) {
388 		report_skip("pmevtyper: all events within [0x0, 0x3F] are supported");
389 		return;
390 	}
391 
392 	/* select counter 0 */
393 	write_sysreg(0, PMSELR_EL0);
394 	/* write the unsupported event number as the counter value */
395 	write_sysreg(i, PMXEVCNTR_EL0);
396 	/* read the counter value */
397 	read_sysreg(PMXEVCNTR_EL0);
398 	report(read_sysreg(PMXEVCNTR_EL0) == i,
399 		"read of a counter programmed with unsupported event");
400 }
401 
402 static bool satisfy_prerequisites(uint32_t *events, unsigned int nb_events)
403 {
404 	int i;
405 
406 	if (pmu.nb_implemented_counters < nb_events) {
407 		report_skip("Skip test as number of counters is too small (%d)",
408 			    pmu.nb_implemented_counters);
409 		return false;
410 	}
411 
412 	for (i = 0; i < nb_events; i++) {
413 		if (!is_event_supported(events[i], false)) {
414 			report_skip("Skip test as event 0x%x is not supported",
415 				    events[i]);
416 			return false;
417 		}
418 	}
419 	return true;
420 }
421 
422 static void test_basic_event_count(void)
423 {
424 	uint32_t implemented_counter_mask, non_implemented_counter_mask;
425 	uint32_t counter_mask;
426 	uint32_t events[] = {CPU_CYCLES, INST_RETIRED};
427 
428 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
429 		return;
430 
431 	implemented_counter_mask = BIT(pmu.nb_implemented_counters) - 1;
432 	non_implemented_counter_mask = ~(BIT(31) | implemented_counter_mask);
433 	counter_mask = implemented_counter_mask | non_implemented_counter_mask;
434 
435 	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
436 	write_regn_el0(pmevtyper, 1, INST_RETIRED | PMEVTYPER_EXCLUDE_EL0);
437 
438 	/* disable all counters */
439 	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
440 	report(!read_sysreg_s(PMCNTENCLR_EL0) && !read_sysreg_s(PMCNTENSET_EL0),
441 		"pmcntenclr: disable all counters");
442 
443 	/*
444 	 * clear cycle and all event counters and allow counter enablement
445 	 * through PMCNTENSET. LC is RES1.
446 	 */
447 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
448 	isb();
449 	report(get_pmcr() == (pmu.pmcr_ro | PMU_PMCR_LC), "pmcr: reset counters");
450 
451 	/* Preset counter #0 to the pre-overflow value to trigger an overflow */
452 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
453 	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
454 		"counter #0 preset to pre-overflow value");
455 	report(!read_regn_el0(pmevcntr, 1), "counter #1 is 0");
456 
457 	/*
458 	 * Enable all implemented counters and also attempt to enable
459 	 * unimplemented counters. Counting is still disabled since PMCR.E is unset
460 	 */
461 	write_sysreg_s(counter_mask, PMCNTENSET_EL0);
462 
463 	/* check only those implemented are enabled */
464 	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
465 		(read_sysreg_s(PMCNTENSET_EL0) == implemented_counter_mask),
466 		"pmcntenset: enabled implemented_counters");
467 
468 	/* Disable all counters but counters #0 and #1 */
469 	write_sysreg_s(~0x3, PMCNTENCLR_EL0);
470 	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
471 		(read_sysreg_s(PMCNTENSET_EL0) == 0x3),
472 		"pmcntenset: just enabled #0 and #1");
473 
474 	/* clear overflow register */
475 	write_sysreg(ALL_SET, pmovsclr_el0);
476 	report(!read_sysreg(pmovsclr_el0), "check overflow reg is 0");
477 
478 	/* disable overflow interrupts on all counters */
479 	write_sysreg(ALL_SET, pmintenclr_el1);
480 	report(!read_sysreg(pmintenclr_el1),
481 		"pmintenclr_el1=0, all interrupts disabled");
482 
483 	/* enable overflow interrupts on all event counters */
484 	write_sysreg(implemented_counter_mask | non_implemented_counter_mask,
485 		     pmintenset_el1);
486 	report(read_sysreg(pmintenset_el1) == implemented_counter_mask,
487 		"overflow interrupts enabled on all implemented counters");
488 
489 	/* Set PMCR.E, execute asm code and unset PMCR.E */
490 	precise_instrs_loop(20, pmu.pmcr_ro | PMU_PMCR_E);
491 
492 	report_info("counter #0 is 0x%lx (CPU_CYCLES)",
493 		    read_regn_el0(pmevcntr, 0));
494 	report_info("counter #1 is 0x%lx (INST_RETIRED)",
495 		    read_regn_el0(pmevcntr, 1));
496 
497 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
498 	report(read_sysreg(pmovsclr_el0) & 0x1,
499 		"check overflow happened on #0 only");
500 }
501 
502 static void test_mem_access(void)
503 {
504 	void *addr = malloc(PAGE_SIZE);
505 	uint32_t events[] = {MEM_ACCESS, MEM_ACCESS};
506 
507 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
508 		return;
509 
510 	pmu_reset();
511 
512 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
513 	write_regn_el0(pmevtyper, 1, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
514 	write_sysreg_s(0x3, PMCNTENSET_EL0);
515 	isb();
516 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
517 	report_info("counter #0 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 0));
518 	report_info("counter #1 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 1));
519 	/* We may measure more than 20 mem accesses depending on the core */
520 	report((read_regn_el0(pmevcntr, 0) == read_regn_el0(pmevcntr, 1)) &&
521 	       (read_regn_el0(pmevcntr, 0) >= 20) && !read_sysreg(pmovsclr_el0),
522 	       "Ran 20 mem accesses");
523 
524 	pmu_reset();
525 
526 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
527 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
528 	write_sysreg_s(0x3, PMCNTENSET_EL0);
529 	isb();
530 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
531 	report(read_sysreg(pmovsclr_el0) == 0x3,
532 	       "Ran 20 mem accesses with expected overflows on both counters");
533 	report_info("cnt#0 = %ld cnt#1=%ld overflow=0x%lx",
534 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
535 			read_sysreg(pmovsclr_el0));
536 }
537 
538 static void test_sw_incr(void)
539 {
540 	uint32_t events[] = {SW_INCR, SW_INCR};
541 	int i;
542 
543 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
544 		return;
545 
546 	pmu_reset();
547 
548 	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
549 	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
550 	/* enable counters #0 and #1 */
551 	write_sysreg_s(0x3, PMCNTENSET_EL0);
552 
553 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
554 	isb();
555 
556 	for (i = 0; i < 100; i++)
557 		write_sysreg(0x1, pmswinc_el0);
558 
559 	isb();
560 	report_info("SW_INCR counter #0 has value %ld", read_regn_el0(pmevcntr, 0));
561 	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
562 		"PMSWINC does not increment if PMCR.E is unset");
563 
564 	pmu_reset();
565 
566 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
567 	write_sysreg_s(0x3, PMCNTENSET_EL0);
568 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
569 	isb();
570 
571 	for (i = 0; i < 100; i++)
572 		write_sysreg(0x3, pmswinc_el0);
573 
574 	isb();
575 	report(read_regn_el0(pmevcntr, 0) == 84, "counter #0 after + 100 SW_INCR");
576 	report(read_regn_el0(pmevcntr, 1) == 100,
577 		"counter #1 after + 100 SW_INCR");
578 	report_info("counter values after 100 SW_INCR #0=%ld #1=%ld",
579 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
580 	report(read_sysreg(pmovsclr_el0) == 0x1,
581 		"overflow on counter #0 after 100 SW_INCR");
582 }
583 
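/*
 * The CHAIN event (0x1E), programmed on an odd-numbered counter, counts
 * once per overflow of the preceding even-numbered counter, effectively
 * pairing the two into a single 64-bit counter.
 */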
584 static void test_chained_counters(void)
585 {
586 	uint32_t events[] = {CPU_CYCLES, CHAIN};
587 
588 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
589 		return;
590 
591 	pmu_reset();
592 
593 	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
594 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
595 	/* enable counters #0 and #1 */
596 	write_sysreg_s(0x3, PMCNTENSET_EL0);
597 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
598 
599 	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
600 
601 	report(read_regn_el0(pmevcntr, 1) == 1, "CHAIN counter #1 incremented");
602 	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #1");
603 
604 	/* test 64b overflow */
605 
606 	pmu_reset();
607 	write_sysreg_s(0x3, PMCNTENSET_EL0);
608 
609 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
610 	write_regn_el0(pmevcntr, 1, 0x1);
611 	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
612 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
613 	report(read_regn_el0(pmevcntr, 1) == 2, "CHAIN counter #1 set to 2");
614 	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #2");
615 
616 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
617 	write_regn_el0(pmevcntr, 1, ALL_SET);
618 
619 	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
620 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
621 	report(!read_regn_el0(pmevcntr, 1), "CHAIN counter #1 wrapped");
622 	report(read_sysreg(pmovsclr_el0) == 0x3, "overflow on even and odd counters");
623 }
624 
625 static void test_chained_sw_incr(void)
626 {
627 	uint32_t events[] = {SW_INCR, CHAIN};
628 	int i;
629 
630 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
631 		return;
632 
633 	pmu_reset();
634 
635 	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
636 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
637 	/* enable counters #0 and #1 */
638 	write_sysreg_s(0x3, PMCNTENSET_EL0);
639 
640 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
641 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
642 	isb();
643 
644 	for (i = 0; i < 100; i++)
645 		write_sysreg(0x1, pmswinc_el0);
646 
647 	isb();
648 	report((read_sysreg(pmovsclr_el0) == 0x1) &&
649 		(read_regn_el0(pmevcntr, 1) == 1),
650 		"overflow and chain counter incremented after 100 SW_INCR/CHAIN");
651 	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
652 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
653 
654 	/* 64b SW_INCR and overflow on CHAIN counter */
655 	pmu_reset();
656 
657 	write_regn_el0(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
658 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
659 	write_regn_el0(pmevcntr, 1, ALL_SET);
660 	write_sysreg_s(0x3, PMCNTENSET_EL0);
661 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
662 	isb();
663 
664 	for (i = 0; i < 100; i++)
665 		write_sysreg(0x1, pmswinc_el0);
666 
667 	isb();
668 	report((read_sysreg(pmovsclr_el0) == 0x3) &&
669 		(read_regn_el0(pmevcntr, 1) == 0) &&
670 		(read_regn_el0(pmevcntr, 0) == 84),
671 		"expected overflows and values after 100 SW_INCR/CHAIN");
672 	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
673 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
674 }
675 
676 static void test_chain_promotion(void)
677 {
678 	uint32_t events[] = {MEM_ACCESS, CHAIN};
679 	void *addr = malloc(PAGE_SIZE);
680 
681 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
682 		return;
683 
684 	/* Only enable CHAIN counter */
685 	pmu_reset();
686 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
687 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
688 	write_sysreg_s(0x2, PMCNTENSET_EL0);
689 	isb();
690 
691 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
692 	report(!read_regn_el0(pmevcntr, 0),
693 		"chain counter not counting if even counter is disabled");
694 
695 	/* Only enable even counter */
696 	pmu_reset();
697 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
698 	write_sysreg_s(0x1, PMCNTENSET_EL0);
699 	isb();
700 
701 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
702 	report(!read_regn_el0(pmevcntr, 1) && (read_sysreg(pmovsclr_el0) == 0x1),
703 		"odd counter did not increment on overflow if disabled");
704 	report_info("MEM_ACCESS counter #0 has value %ld",
705 		    read_regn_el0(pmevcntr, 0));
706 	report_info("CHAIN counter #1 has value %ld",
707 		    read_regn_el0(pmevcntr, 1));
708 	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
709 
710 	/* start at 0xFFFFFFDC, +20 with CHAIN enabled, +20 with CHAIN disabled */
711 	pmu_reset();
712 	write_sysreg_s(0x3, PMCNTENSET_EL0);
713 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
714 	isb();
715 
716 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
717 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
718 		    read_regn_el0(pmevcntr, 0));
719 
720 	/* disable the CHAIN event */
721 	write_sysreg_s(0x2, PMCNTENCLR_EL0);
722 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
723 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
724 		    read_regn_el0(pmevcntr, 0));
725 	report(read_sysreg(pmovsclr_el0) == 0x1,
726 		"should have triggered an overflow on #0");
727 	report(!read_regn_el0(pmevcntr, 1),
728 		"CHAIN counter #1 shouldn't have incremented");
729 
730 	/* start at 0xFFFFFFDC, +20 with CHAIN disabled, +20 with CHAIN enabled */
731 
732 	pmu_reset();
733 	write_sysreg_s(0x1, PMCNTENSET_EL0);
734 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
735 	isb();
736 	report_info("counter #0 = 0x%lx, counter #1 = 0x%lx overflow=0x%lx",
737 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
738 		    read_sysreg(pmovsclr_el0));
739 
740 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
741 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
742 		    read_regn_el0(pmevcntr, 0));
743 
744 	/* enable the CHAIN event */
745 	write_sysreg_s(0x3, PMCNTENSET_EL0);
746 	isb();
747 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
748 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
749 		    read_regn_el0(pmevcntr, 0));
750 
751 	report((read_regn_el0(pmevcntr, 1) == 1) &&
752 		(read_sysreg(pmovsclr_el0) == 0x1),
753 		"CHAIN counter enabled: CHAIN counter was incremented and overflow recorded");
754 
755 	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
756 		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));
757 
758 	/* start as MEM_ACCESS/CPU_CYCLES and move to CHAIN/MEM_ACCESS */
759 	pmu_reset();
760 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
761 	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
762 	write_sysreg_s(0x3, PMCNTENSET_EL0);
763 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
764 	isb();
765 
766 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
767 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
768 		    read_regn_el0(pmevcntr, 0));
769 
770 	/* 0 becomes CHAINED */
771 	write_sysreg_s(0x0, PMCNTENSET_EL0);
772 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
773 	write_sysreg_s(0x3, PMCNTENSET_EL0);
774 	write_regn_el0(pmevcntr, 1, 0x0);
775 
776 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
777 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
778 		    read_regn_el0(pmevcntr, 0));
779 
780 	report((read_regn_el0(pmevcntr, 1) == 1) &&
781 		(read_sysreg(pmovsclr_el0) == 0x1),
782 		"32b->64b: CHAIN counter incremented and overflow recorded");
783 
784 	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
785 		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));
786 
787 	/* start as CHAIN/MEM_ACCESS and move to MEM_ACCESS/CPU_CYCLES */
788 	pmu_reset();
789 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
790 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
791 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
792 	write_sysreg_s(0x3, PMCNTENSET_EL0);
793 
794 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
795 	report_info("counter #0=0x%lx, counter #1=0x%lx",
796 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
797 
798 	write_sysreg_s(0x0, PMCNTENSET_EL0);
799 	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
800 	write_sysreg_s(0x3, PMCNTENSET_EL0);
801 
802 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
803 	report(read_sysreg(pmovsclr_el0) == 1,
804 		"overflow is expected on counter 0");
805 	report_info("counter #0=0x%lx, counter #1=0x%lx overflow=0x%lx",
806 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
807 			read_sysreg(pmovsclr_el0));
808 }
809 
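/*
 * Succeeds only if exactly the counters in @bitmap overflowed, each of
 * them exactly once, and no unexpected interrupt was taken. Note that
 * the check clears pmu_stats.bitmap as a side effect.
 */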
810 static bool expect_interrupts(uint32_t bitmap)
811 {
812 	int i;
813 
814 	if (pmu_stats.bitmap ^ bitmap || pmu_stats.unexpected)
815 		return false;
816 
817 	for (i = 0; i < 32; i++) {
818 		if (test_and_clear_bit(i, &pmu_stats.bitmap))
819 			if (pmu_stats.interrupts[i] != 1)
820 				return false;
821 	}
822 	return true;
823 }
824 
825 static void test_overflow_interrupt(void)
826 {
827 	uint32_t events[] = {MEM_ACCESS, SW_INCR};
828 	void *addr = malloc(PAGE_SIZE);
829 	int i;
830 
831 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
832 		return;
833 
834 	gic_enable_defaults();
835 	install_irq_handler(EL1H_IRQ, irq_handler);
836 	local_irq_enable();
837 	gic_enable_irq(PMU_PPI);
838 
839 	pmu_reset();
840 
841 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
842 	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
843 	write_sysreg_s(0x3, PMCNTENSET_EL0);
844 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
845 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
846 	isb();
847 
848 	/* interrupts are disabled (PMINTENSET_EL1 == 0) */
849 
850 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
851 	report(expect_interrupts(0), "no overflow interrupt after preset");
852 
853 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
854 	isb();
855 
856 	for (i = 0; i < 100; i++)
857 		write_sysreg(0x2, pmswinc_el0);
858 
859 	isb();
860 	set_pmcr(pmu.pmcr_ro);
861 	isb();
862 	report(expect_interrupts(0), "no overflow interrupt after counting");
863 
864 	/* enable interrupts (PMINTENSET_EL1 = ALL_SET) */
865 
866 	pmu_reset_stats();
867 
868 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
869 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
870 	write_sysreg(ALL_SET, pmintenset_el1);
871 	isb();
872 
873 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
874 	for (i = 0; i < 100; i++)
875 		write_sysreg(0x3, pmswinc_el0);
876 
877 	mem_access_loop(addr, 200, pmu.pmcr_ro);
878 	report_info("overflow=0x%lx", read_sysreg(pmovsclr_el0));
879 	report(expect_interrupts(0x3),
880 		"overflow interrupts expected on #0 and #1");
881 
882 	/* promote counter #0 to 64-bit by programming CHAIN on counter #1 */
883 
884 	pmu_reset_stats();
885 
886 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
887 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
888 	isb();
889 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
890 	report(expect_interrupts(0x1),
891 		"expect overflow interrupt on 32b boundary");
892 
893 	/* overflow on odd counter */
894 	pmu_reset_stats();
895 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
896 	write_regn_el0(pmevcntr, 1, ALL_SET);
897 	isb();
898 	mem_access_loop(addr, 400, pmu.pmcr_ro | PMU_PMCR_E);
899 	report(expect_interrupts(0x3),
900 		"expect overflow interrupt on even and odd counter");
901 }
902 #endif
903 
904 /*
905  * Ensure that the cycle counter progresses between back-to-back reads.
906  */
907 static bool check_cycles_increase(void)
908 {
909 	bool success = true;
910 
911 	/* init before event access, this test only cares about cycle count */
912 	pmu_reset();
913 	set_pmcntenset(1 << PMU_CYCLE_IDX);
914 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
915 
916 	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
917 	isb();
918 
919 	for (int i = 0; i < NR_SAMPLES; i++) {
920 		uint64_t a, b;
921 
922 		a = get_pmccntr();
923 		b = get_pmccntr();
924 
925 		if (a >= b) {
926 			printf("Read %"PRId64" then %"PRId64".\n", a, b);
927 			success = false;
928 			break;
929 		}
930 	}
931 
932 	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
933 	isb();
934 
935 	return success;
936 }
937 
938 /*
939  * Execute a known number of guest instructions. Only even instruction counts
940  * greater than or equal to 4 are supported by the in-line assembly code. The
941  * control register (PMCR_EL0) is initialized with the provided value (allowing
942  * for example for the cycle counter or event counters to be reset). At the end
943  * of the exact instruction loop, zero is written to PMCR_EL0 to disable
944  * counting, allowing the cycle counter or event counters to be read at the
945  * leisure of the calling code.
946  */
947 static void measure_instrs(int num, uint32_t pmcr)
948 {
949 	int loop = (num - 2) / 2;
950 
951 	assert(num >= 4 && ((num - 2) % 2 == 0));
952 	precise_instrs_loop(loop, pmcr);
953 }
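/*
 * For example, measure_instrs(4, pmcr) runs a single loop iteration,
 * i.e. 2 + 2*1 = 4 instructions in the measured block, so a caller can
 * do:
 *
 *	set_pmccntr(0);
 *	measure_instrs(4, pmcr);
 *	cycles = get_pmccntr();
 *
 * as check_cpi() below does for a range of instruction counts.
 */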
954 
955 /*
956  * Measure cycle counts for various known instruction counts. Ensure that the
957  * cycle counter progresses (similar to check_cycles_increase() but with more
958  * instructions and using reset and stop controls). If supplied a positive,
959  * nonzero CPI parameter, it also strictly checks that every measurement matches
960  * it. Strict CPI checking is used to test -icount mode.
961  */
962 static bool check_cpi(int cpi)
963 {
964 	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;
965 
966 	/* init before event access, this test only cares about cycle count */
967 	pmu_reset();
968 	set_pmcntenset(1 << PMU_CYCLE_IDX);
969 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
970 
971 	if (cpi > 0)
972 		printf("Checking for CPI=%d.\n", cpi);
973 	printf("instrs : cycles0 cycles1 ...\n");
974 
975 	for (unsigned int i = 4; i < 300; i += 32) {
976 		uint64_t avg, sum = 0;
977 
978 		printf("%4d:", i);
979 		for (int j = 0; j < NR_SAMPLES; j++) {
980 			uint64_t cycles;
981 
982 			set_pmccntr(0);
983 			measure_instrs(i, pmcr);
984 			cycles = get_pmccntr();
985 			printf(" %4"PRId64"", cycles);
986 
987 			if (!cycles) {
988 				printf("\ncycles not incrementing!\n");
989 				return false;
990 			} else if (cpi > 0 && cycles != i * cpi) {
991 				printf("\nunexpected cycle count received!\n");
992 				return false;
993 			} else if ((cycles >> 32) != 0) {
994 				/* The cycles taken by the loop above should
995 				 * fit in 32 bits easily. We check the upper
996 				 * 32 bits of the cycle counter to make sure
997 				 * there is no surprise. */
998 				printf("\ncycle count bigger than 32bit!\n");
999 				return false;
1000 			}
1001 
1002 			sum += cycles;
1003 		}
1004 		avg = sum / NR_SAMPLES;
1005 		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
1006 		       (avg >= i) ? "cpi" : "ipc",
1007 		       (avg >= i) ? avg / i : i / avg);
1008 	}
1009 
1010 	return true;
1011 }
1012 
1013 static void pmccntr64_test(void)
1014 {
1015 #ifdef __arm__
1016 	if (pmu.version == ID_DFR0_PMU_V3) {
1017 		if (ERRATA(9e3f7a296940)) {
1018 			write_sysreg(0xdead, PMCCNTR64);
1019 			report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
1020 		} else
1021 			report_skip("Skipping unsafe pmccntr64 test. Set ERRATA_9e3f7a296940=y to enable.");
1022 	}
1023 #endif
1024 }
1025 
1026 /* Return FALSE if no PMU found, otherwise return TRUE */
1027 static bool pmu_probe(void)
1028 {
1029 	uint32_t pmcr;
1030 	uint8_t implementer;
1031 
1032 	pmu.version = get_pmu_version();
1033 	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
1034 		return false;
1035 
1036 	report_info("PMU version: 0x%x", pmu.version);
1037 
1038 	pmcr = get_pmcr();
1039 	implementer = (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK;
1040 	report_info("PMU implementer/ID code: %#"PRIx32"(\"%c\")/%#"PRIx32,
1041 		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
1042 		    implementer ? implementer : ' ',
1043 		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);
1044 
1045 	/* store read-only and RES0 fields of the PMCR bottom-half */
1046 	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
1047 	pmu.nb_implemented_counters =
1048 		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
1049 	report_info("Implements %d event counters",
1050 		    pmu.nb_implemented_counters);
1051 
1052 	return true;
1053 }
1054 
1055 int main(int argc, char *argv[])
1056 {
1057 	int cpi = 0;
1058 
1059 	if (!pmu_probe()) {
1060 		printf("No PMU found, test skipped...\n");
1061 		return report_summary();
1062 	}
1063 
1064 	if (argc < 2)
1065 		report_abort("no test specified");
1066 
1067 	report_prefix_push("pmu");
1068 
1069 	if (strcmp(argv[1], "cycle-counter") == 0) {
1070 		report_prefix_push(argv[1]);
1071 		if (argc > 2)
1072 			cpi = atol(argv[2]);
1073 		report(check_cycles_increase(),
1074 		       "Monotonically increasing cycle count");
1075 		report(check_cpi(cpi), "Cycle/instruction ratio");
1076 		pmccntr64_test();
1077 		report_prefix_pop();
1078 	} else if (strcmp(argv[1], "pmu-event-introspection") == 0) {
1079 		report_prefix_push(argv[1]);
1080 		test_event_introspection();
1081 		report_prefix_pop();
1082 	} else if (strcmp(argv[1], "pmu-event-counter-config") == 0) {
1083 		report_prefix_push(argv[1]);
1084 		test_event_counter_config();
1085 		report_prefix_pop();
1086 	} else if (strcmp(argv[1], "pmu-basic-event-count") == 0) {
1087 		report_prefix_push(argv[1]);
1088 		test_basic_event_count();
1089 		report_prefix_pop();
1090 	} else if (strcmp(argv[1], "pmu-mem-access") == 0) {
1091 		report_prefix_push(argv[1]);
1092 		test_mem_access();
1093 		report_prefix_pop();
1094 	} else if (strcmp(argv[1], "pmu-sw-incr") == 0) {
1095 		report_prefix_push(argv[1]);
1096 		test_sw_incr();
1097 		report_prefix_pop();
1098 	} else if (strcmp(argv[1], "pmu-chained-counters") == 0) {
1099 		report_prefix_push(argv[1]);
1100 		test_chained_counters();
1101 		report_prefix_pop();
1102 	} else if (strcmp(argv[1], "pmu-chained-sw-incr") == 0) {
1103 		report_prefix_push(argv[1]);
1104 		test_chained_sw_incr();
1105 		report_prefix_pop();
1106 	} else if (strcmp(argv[1], "pmu-chain-promotion") == 0) {
1107 		report_prefix_push(argv[1]);
1108 		test_chain_promotion();
1109 		report_prefix_pop();
1110 	} else if (strcmp(argv[1], "pmu-overflow-interrupt") == 0) {
1111 		report_prefix_push(argv[1]);
1112 		test_overflow_interrupt();
1113 		report_prefix_pop();
1114 	} else {
1115 		report_abort("Unknown sub-test '%s'", argv[1]);
1116 	}
1117 
1118 	return report_summary();
1119 }
1120