xref: /kvm-unit-tests/arm/pmu.c (revision 4870738c4ac733c3936cc8f5387e0984fa237bbe)
14244065bSChristopher Covington /*
24244065bSChristopher Covington  * Test the ARM Performance Monitors Unit (PMU).
34244065bSChristopher Covington  *
44244065bSChristopher Covington  * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
54244065bSChristopher Covington  * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
64244065bSChristopher Covington  *
74244065bSChristopher Covington  * This program is free software; you can redistribute it and/or modify it
84244065bSChristopher Covington  * under the terms of the GNU Lesser General Public License version 2.1 and
94244065bSChristopher Covington  * only version 2.1 as published by the Free Software Foundation.
104244065bSChristopher Covington  *
114244065bSChristopher Covington  * This program is distributed in the hope that it will be useful, but WITHOUT
124244065bSChristopher Covington  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
134244065bSChristopher Covington  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
144244065bSChristopher Covington  * for more details.
154244065bSChristopher Covington  */
164244065bSChristopher Covington #include "libcflat.h"
174c357610SAndrew Jones #include "errata.h"
184244065bSChristopher Covington #include "asm/barrier.h"
194244065bSChristopher Covington #include "asm/sysreg.h"
204244065bSChristopher Covington #include "asm/processor.h"
21*4870738cSEric Auger #include <bitops.h>
224244065bSChristopher Covington 
23d81bb7a3SChristopher Covington #define PMU_PMCR_E         (1 << 0)
24d81bb7a3SChristopher Covington #define PMU_PMCR_C         (1 << 2)
25d81bb7a3SChristopher Covington #define PMU_PMCR_LC        (1 << 6)
264244065bSChristopher Covington #define PMU_PMCR_N_SHIFT   11
274244065bSChristopher Covington #define PMU_PMCR_N_MASK    0x1f
284244065bSChristopher Covington #define PMU_PMCR_ID_SHIFT  16
294244065bSChristopher Covington #define PMU_PMCR_ID_MASK   0xff
304244065bSChristopher Covington #define PMU_PMCR_IMP_SHIFT 24
314244065bSChristopher Covington #define PMU_PMCR_IMP_MASK  0xff
324244065bSChristopher Covington 
33d81bb7a3SChristopher Covington #define PMU_CYCLE_IDX      31
34d81bb7a3SChristopher Covington 
35d81bb7a3SChristopher Covington #define NR_SAMPLES 10
36d81bb7a3SChristopher Covington 
37*4870738cSEric Auger /* Some PMU events */
38*4870738cSEric Auger #define SW_INCR			0x0
39*4870738cSEric Auger #define INST_RETIRED		0x8
40*4870738cSEric Auger #define CPU_CYCLES		0x11
41*4870738cSEric Auger #define INST_PREC		0x1B
42*4870738cSEric Auger #define STALL_FRONTEND		0x23
43*4870738cSEric Auger #define STALL_BACKEND		0x24
44*4870738cSEric Auger 
45*4870738cSEric Auger #define COMMON_EVENTS_LOW	0x0
46*4870738cSEric Auger #define COMMON_EVENTS_HIGH	0x3F
47*4870738cSEric Auger #define EXT_COMMON_EVENTS_LOW	0x4000
48*4870738cSEric Auger #define EXT_COMMON_EVENTS_HIGH	0x403F
49*4870738cSEric Auger 
/*
 * Description of the PMU discovered at probe time; filled in by
 * pmu_probe() and consumed by the individual sub-tests.
 */
struct pmu {
	unsigned int version;			/* PerfMon field of ID_DFR0/ID_AA64DFR0 */
	unsigned int nb_implemented_counters;	/* PMCR.N: number of event counters */
	uint32_t pmcr_ro;			/* read-only/RES0 bits of PMCR (bits [31:8]) */
};

/* Single global instance shared by all tests */
static struct pmu pmu;
578f747a85SEric Auger 
584244065bSChristopher Covington #if defined(__arm__)
59098add54SAndrew Jones #define ID_DFR0_PERFMON_SHIFT 24
60098add54SAndrew Jones #define ID_DFR0_PERFMON_MASK  0xf
61098add54SAndrew Jones 
62784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
63784ee933SEric Auger #define ID_DFR0_PMU_V1		0b0001
64784ee933SEric Auger #define ID_DFR0_PMU_V2		0b0010
65784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0011
66784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
67784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
68784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
69784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
70784ee933SEric Auger 
714244065bSChristopher Covington #define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
724244065bSChristopher Covington #define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
73d81bb7a3SChristopher Covington #define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
74d81bb7a3SChristopher Covington #define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
75d81bb7a3SChristopher Covington #define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
76d81bb7a3SChristopher Covington #define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
77d81bb7a3SChristopher Covington #define PMCCNTR64    __ACCESS_CP15_64(0, c9)
784244065bSChristopher Covington 
/* Raw accessors for the ARMv7 (cp15) PMU registers */
static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }

/* Return the PerfMon field of ID_DFR0 (PMU architecture version) */
static inline uint8_t get_pmu_version(void)
{
	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
}
88098add54SAndrew Jones 
/* Read the 32-bit view of the cycle counter, zero-extended to 64 bits */
static inline uint64_t get_pmccntr(void)
{
	return read_sysreg(PMCCNTR32);
}

/* Write the cycle counter; only the low 32 bits are used on ARMv7 */
static inline void set_pmccntr(uint64_t value)
{
	write_sysreg(value & 0xffffffff, PMCCNTR32);
}

/* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */
static inline void set_pmccfiltr(uint32_t value)
{
	/* select the cycle counter, then program its type/filter register */
	write_sysreg(PMU_CYCLE_IDX, PMSELR);
	write_sysreg(value, PMXEVTYPER);
	isb();	/* make the filter change visible before counting starts */
}
1068f76a347SChristopher Covington 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions were inserted to make sure
 * pmccntr read after this function returns the exact instructions executed in
 * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	asm volatile(
	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"	/* PMCR = pmcr: start */
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	bgt	1b\n"
	"	mcr	p15, 0, %[z], c9, c12, 0\n"	/* PMCR = 0: stop */
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr), [z] "r" (0)
	: "cc");
}
127*4870738cSEric Auger 
/* event counter tests only implemented for aarch64; this is a no-op stub */
static void test_event_introspection(void) {}
130*4870738cSEric Auger 
1314244065bSChristopher Covington #elif defined(__aarch64__)
132098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_SHIFT 8
133098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_MASK  0xf
134098add54SAndrew Jones 
135784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
136784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0001
137784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
138784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
139784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
140784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
141784ee933SEric Auger 
/* Raw accessors for the AArch64 PMU system registers */
static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }
1498f76a347SChristopher Covington 
150098add54SAndrew Jones static inline uint8_t get_pmu_version(void)
151098add54SAndrew Jones {
152098add54SAndrew Jones 	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;
153784ee933SEric Auger 	return ver;
154098add54SAndrew Jones }
155098add54SAndrew Jones 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	asm volatile(
	"	msr	pmcr_el0, %[pmcr]\n"	/* PMCR_EL0 = pmcr: start */
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	b.gt	1b\n"
	"	msr	pmcr_el0, xzr\n"	/* PMCR_EL0 = 0: stop */
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr)
	: "cc");
}
176*4870738cSEric Auger 
177*4870738cSEric Auger #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
178*4870738cSEric Auger 
179*4870738cSEric Auger static bool is_event_supported(uint32_t n, bool warn)
180*4870738cSEric Auger {
181*4870738cSEric Auger 	uint64_t pmceid0 = read_sysreg(pmceid0_el0);
182*4870738cSEric Auger 	uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0);
183*4870738cSEric Auger 	bool supported;
184*4870738cSEric Auger 	uint64_t reg;
185*4870738cSEric Auger 
186*4870738cSEric Auger 	/*
187*4870738cSEric Auger 	 * The low 32-bits of PMCEID0/1 respectively describe
188*4870738cSEric Auger 	 * event support for events 0-31/32-63. Their High
189*4870738cSEric Auger 	 * 32-bits describe support for extended events
190*4870738cSEric Auger 	 * starting at 0x4000, using the same split.
191*4870738cSEric Auger 	 */
192*4870738cSEric Auger 	assert((n >= COMMON_EVENTS_LOW  && n <= COMMON_EVENTS_HIGH) ||
193*4870738cSEric Auger 	       (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH));
194*4870738cSEric Auger 
195*4870738cSEric Auger 	if (n <= COMMON_EVENTS_HIGH)
196*4870738cSEric Auger 		reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32);
197*4870738cSEric Auger 	else
198*4870738cSEric Auger 		reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32);
199*4870738cSEric Auger 
200*4870738cSEric Auger 	supported =  reg & (1UL << (n & 0x3F));
201*4870738cSEric Auger 
202*4870738cSEric Auger 	if (!supported && warn)
203*4870738cSEric Auger 		report_info("event 0x%x is not supported", n);
204*4870738cSEric Auger 	return supported;
205*4870738cSEric Auger }
206*4870738cSEric Auger 
207*4870738cSEric Auger static void test_event_introspection(void)
208*4870738cSEric Auger {
209*4870738cSEric Auger 	bool required_events;
210*4870738cSEric Auger 
211*4870738cSEric Auger 	if (!pmu.nb_implemented_counters) {
212*4870738cSEric Auger 		report_skip("No event counter, skip ...");
213*4870738cSEric Auger 		return;
214*4870738cSEric Auger 	}
215*4870738cSEric Auger 
216*4870738cSEric Auger 	/* PMUv3 requires an implementation includes some common events */
217*4870738cSEric Auger 	required_events = is_event_supported(SW_INCR, true) &&
218*4870738cSEric Auger 			  is_event_supported(CPU_CYCLES, true) &&
219*4870738cSEric Auger 			  (is_event_supported(INST_RETIRED, true) ||
220*4870738cSEric Auger 			   is_event_supported(INST_PREC, true));
221*4870738cSEric Auger 
222*4870738cSEric Auger 	if (pmu.version >= ID_DFR0_PMU_V3_8_1) {
223*4870738cSEric Auger 		required_events = required_events &&
224*4870738cSEric Auger 				  is_event_supported(STALL_FRONTEND, true) &&
225*4870738cSEric Auger 				  is_event_supported(STALL_BACKEND, true);
226*4870738cSEric Auger 	}
227*4870738cSEric Auger 
228*4870738cSEric Auger 	report(required_events, "Check required events are implemented");
229*4870738cSEric Auger }
230*4870738cSEric Auger 
2314244065bSChristopher Covington #endif
2324244065bSChristopher Covington 
2334244065bSChristopher Covington /*
234d81bb7a3SChristopher Covington  * Ensure that the cycle counter progresses between back-to-back reads.
235d81bb7a3SChristopher Covington  */
236d81bb7a3SChristopher Covington static bool check_cycles_increase(void)
237d81bb7a3SChristopher Covington {
238d81bb7a3SChristopher Covington 	bool success = true;
239d81bb7a3SChristopher Covington 
240d81bb7a3SChristopher Covington 	/* init before event access, this test only cares about cycle count */
241d81bb7a3SChristopher Covington 	set_pmcntenset(1 << PMU_CYCLE_IDX);
242d81bb7a3SChristopher Covington 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
243d81bb7a3SChristopher Covington 
244d81bb7a3SChristopher Covington 	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
245d81bb7a3SChristopher Covington 
246d81bb7a3SChristopher Covington 	for (int i = 0; i < NR_SAMPLES; i++) {
247d81bb7a3SChristopher Covington 		uint64_t a, b;
248d81bb7a3SChristopher Covington 
249d81bb7a3SChristopher Covington 		a = get_pmccntr();
250d81bb7a3SChristopher Covington 		b = get_pmccntr();
251d81bb7a3SChristopher Covington 
252d81bb7a3SChristopher Covington 		if (a >= b) {
253d81bb7a3SChristopher Covington 			printf("Read %"PRId64" then %"PRId64".\n", a, b);
254d81bb7a3SChristopher Covington 			success = false;
255d81bb7a3SChristopher Covington 			break;
256d81bb7a3SChristopher Covington 		}
257d81bb7a3SChristopher Covington 	}
258d81bb7a3SChristopher Covington 
259d81bb7a3SChristopher Covington 	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
260d81bb7a3SChristopher Covington 
261d81bb7a3SChristopher Covington 	return success;
262d81bb7a3SChristopher Covington }
263d81bb7a3SChristopher Covington 
/*
 * Execute a known number of guest instructions. Only even instruction counts
 * greater than or equal to 4 are supported by the in-line assembly code. The
 * control register (PMCR_EL0) is initialized with the provided value (allowing
 * for example for the cycle counter or event counters to be reset). At the end
 * of the exact instruction loop, zero is written to PMCR_EL0 to disable
 * counting, allowing the cycle counter or event counters to be read at the
 * leisure of the calling code.
 */
static void measure_instrs(int num, uint32_t pmcr)
{
	int iterations;

	/* the asm loop executes 2 fixed instrs plus 2 per iteration */
	assert(num >= 4 && ((num - 2) % 2 == 0));
	iterations = (num - 2) / 2;
	precise_instrs_loop(iterations, pmcr);
}
2808f76a347SChristopher Covington 
2818f76a347SChristopher Covington /*
2828f76a347SChristopher Covington  * Measure cycle counts for various known instruction counts. Ensure that the
2838f76a347SChristopher Covington  * cycle counter progresses (similar to check_cycles_increase() but with more
2848f76a347SChristopher Covington  * instructions and using reset and stop controls). If supplied a positive,
2858f76a347SChristopher Covington  * nonzero CPI parameter, it also strictly checks that every measurement matches
2868f76a347SChristopher Covington  * it. Strict CPI checking is used to test -icount mode.
2878f76a347SChristopher Covington  */
2888f76a347SChristopher Covington static bool check_cpi(int cpi)
2898f76a347SChristopher Covington {
2908f76a347SChristopher Covington 	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;
2918f76a347SChristopher Covington 
2928f76a347SChristopher Covington 	/* init before event access, this test only cares about cycle count */
2938f76a347SChristopher Covington 	set_pmcntenset(1 << PMU_CYCLE_IDX);
2948f76a347SChristopher Covington 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
2958f76a347SChristopher Covington 
2968f76a347SChristopher Covington 	if (cpi > 0)
2978f76a347SChristopher Covington 		printf("Checking for CPI=%d.\n", cpi);
2988f76a347SChristopher Covington 	printf("instrs : cycles0 cycles1 ...\n");
2998f76a347SChristopher Covington 
3008f76a347SChristopher Covington 	for (unsigned int i = 4; i < 300; i += 32) {
3018f76a347SChristopher Covington 		uint64_t avg, sum = 0;
3028f76a347SChristopher Covington 
3038f76a347SChristopher Covington 		printf("%4d:", i);
3048f76a347SChristopher Covington 		for (int j = 0; j < NR_SAMPLES; j++) {
3058f76a347SChristopher Covington 			uint64_t cycles;
3068f76a347SChristopher Covington 
3078f76a347SChristopher Covington 			set_pmccntr(0);
3088f76a347SChristopher Covington 			measure_instrs(i, pmcr);
3098f76a347SChristopher Covington 			cycles = get_pmccntr();
3108f76a347SChristopher Covington 			printf(" %4"PRId64"", cycles);
3118f76a347SChristopher Covington 
3128f76a347SChristopher Covington 			if (!cycles) {
3138f76a347SChristopher Covington 				printf("\ncycles not incrementing!\n");
3148f76a347SChristopher Covington 				return false;
3158f76a347SChristopher Covington 			} else if (cpi > 0 && cycles != i * cpi) {
3168f76a347SChristopher Covington 				printf("\nunexpected cycle count received!\n");
3178f76a347SChristopher Covington 				return false;
3188f76a347SChristopher Covington 			} else if ((cycles >> 32) != 0) {
3198f76a347SChristopher Covington 				/* The cycles taken by the loop above should
3208f76a347SChristopher Covington 				 * fit in 32 bits easily. We check the upper
3218f76a347SChristopher Covington 				 * 32 bits of the cycle counter to make sure
3228f76a347SChristopher Covington 				 * there is no supprise. */
3238f76a347SChristopher Covington 				printf("\ncycle count bigger than 32bit!\n");
3248f76a347SChristopher Covington 				return false;
3258f76a347SChristopher Covington 			}
3268f76a347SChristopher Covington 
3278f76a347SChristopher Covington 			sum += cycles;
3288f76a347SChristopher Covington 		}
3298f76a347SChristopher Covington 		avg = sum / NR_SAMPLES;
3308f76a347SChristopher Covington 		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
3318f76a347SChristopher Covington 		       (avg >= i) ? "cpi" : "ipc",
3328f76a347SChristopher Covington 		       (avg >= i) ? avg / i : i / avg);
3338f76a347SChristopher Covington 	}
3348f76a347SChristopher Covington 
3358f76a347SChristopher Covington 	return true;
3368f76a347SChristopher Covington }
3378f76a347SChristopher Covington 
/*
 * ARMv7-only: check that a value written through the 64-bit cycle
 * counter accessor (PMCCNTR64) reads back unchanged. Guarded by the
 * ERRATA_9e3f7a296940 switch since, per the skip message, the access
 * is unsafe on unfixed hosts — NOTE(review): exact host requirement
 * not visible here, see the errata documentation.
 */
static void pmccntr64_test(void)
{
#ifdef __arm__
	/* only meaningful when the PMU is exactly PMUv3 */
	if (pmu.version == ID_DFR0_PMU_V3) {
		if (ERRATA(9e3f7a296940)) {
			write_sysreg(0xdead, PMCCNTR64);
			report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
		} else
			report_skip("Skipping unsafe pmccntr64 test. Set ERRATA_9e3f7a296940=y to enable.");
	}
#endif
}
3504c357610SAndrew Jones 
3514244065bSChristopher Covington /* Return FALSE if no PMU found, otherwise return TRUE */
35223b8916bSThomas Huth static bool pmu_probe(void)
3534244065bSChristopher Covington {
354784ee933SEric Auger 	uint32_t pmcr = get_pmcr();
355eff8f161SEric Auger 
3568f747a85SEric Auger 	pmu.version = get_pmu_version();
357784ee933SEric Auger 	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
358eff8f161SEric Auger 		return false;
359eff8f161SEric Auger 
360784ee933SEric Auger 	report_info("PMU version: 0x%x", pmu.version);
361eff8f161SEric Auger 
362eff8f161SEric Auger 	pmcr = get_pmcr();
3638f747a85SEric Auger 	report_info("PMU implementer/ID code: %#x(\"%c\")/%#x",
364eff8f161SEric Auger 		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
365eff8f161SEric Auger 		    ((pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK) ? : ' ',
3668f747a85SEric Auger 		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);
3678f747a85SEric Auger 
3688f747a85SEric Auger 	/* store read-only and RES0 fields of the PMCR bottom-half*/
3698f747a85SEric Auger 	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
3708f747a85SEric Auger 	pmu.nb_implemented_counters =
3718f747a85SEric Auger 		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
3728f747a85SEric Auger 	report_info("Implements %d event counters",
3738f747a85SEric Auger 		    pmu.nb_implemented_counters);
374eff8f161SEric Auger 
375eff8f161SEric Auger 	return true;
3764244065bSChristopher Covington }
3774244065bSChristopher Covington 
/*
 * Entry point: probe the PMU, then dispatch on argv[1] to the requested
 * sub-test ("cycle-counter" with an optional CPI argument, or
 * "pmu-event-introspection"). Aborts on a missing or unknown sub-test.
 */
int main(int argc, char *argv[])
{
	int expected_cpi = 0;

	if (!pmu_probe()) {
		printf("No PMU found, test skipped...\n");
		return report_summary();
	}

	if (argc < 2)
		report_abort("no test specified");

	report_prefix_push("pmu");

	if (!strcmp(argv[1], "cycle-counter")) {
		report_prefix_push(argv[1]);
		/* optional second argument: strict CPI to enforce */
		if (argc > 2)
			expected_cpi = atol(argv[2]);
		report(check_cycles_increase(),
		       "Monotonically increasing cycle count");
		report(check_cpi(expected_cpi), "Cycle/instruction ratio");
		pmccntr64_test();
		report_prefix_pop();
	} else if (!strcmp(argv[1], "pmu-event-introspection")) {
		report_prefix_push(argv[1]);
		test_event_introspection();
		report_prefix_pop();
	} else {
		report_abort("Unknown sub-test '%s'", argv[1]);
	}

	return report_summary();
}
411