xref: /kvm-unit-tests/arm/pmu.c (revision 4ce2a80456240694a72b5b2ba74598cefc6f4d6f)
14244065bSChristopher Covington /*
24244065bSChristopher Covington  * Test the ARM Performance Monitors Unit (PMU).
34244065bSChristopher Covington  *
44244065bSChristopher Covington  * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
54244065bSChristopher Covington  * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
64244065bSChristopher Covington  *
74244065bSChristopher Covington  * This program is free software; you can redistribute it and/or modify it
84244065bSChristopher Covington  * under the terms of the GNU Lesser General Public License version 2.1 and
94244065bSChristopher Covington  * only version 2.1 as published by the Free Software Foundation.
104244065bSChristopher Covington  *
114244065bSChristopher Covington  * This program is distributed in the hope that it will be useful, but WITHOUT
124244065bSChristopher Covington  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
134244065bSChristopher Covington  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
144244065bSChristopher Covington  * for more details.
154244065bSChristopher Covington  */
164244065bSChristopher Covington #include "libcflat.h"
174c357610SAndrew Jones #include "errata.h"
184244065bSChristopher Covington #include "asm/barrier.h"
194244065bSChristopher Covington #include "asm/sysreg.h"
204244065bSChristopher Covington #include "asm/processor.h"
214870738cSEric Auger #include <bitops.h>
22*4ce2a804SEric Auger #include <asm/gic.h>
234244065bSChristopher Covington 
24d81bb7a3SChristopher Covington #define PMU_PMCR_E         (1 << 0)
25*4ce2a804SEric Auger #define PMU_PMCR_P         (1 << 1)
26d81bb7a3SChristopher Covington #define PMU_PMCR_C         (1 << 2)
27*4ce2a804SEric Auger #define PMU_PMCR_D         (1 << 3)
28*4ce2a804SEric Auger #define PMU_PMCR_X         (1 << 4)
29*4ce2a804SEric Auger #define PMU_PMCR_DP        (1 << 5)
30d81bb7a3SChristopher Covington #define PMU_PMCR_LC        (1 << 6)
314244065bSChristopher Covington #define PMU_PMCR_N_SHIFT   11
324244065bSChristopher Covington #define PMU_PMCR_N_MASK    0x1f
334244065bSChristopher Covington #define PMU_PMCR_ID_SHIFT  16
344244065bSChristopher Covington #define PMU_PMCR_ID_MASK   0xff
354244065bSChristopher Covington #define PMU_PMCR_IMP_SHIFT 24
364244065bSChristopher Covington #define PMU_PMCR_IMP_MASK  0xff
374244065bSChristopher Covington 
38d81bb7a3SChristopher Covington #define PMU_CYCLE_IDX      31
39d81bb7a3SChristopher Covington 
40d81bb7a3SChristopher Covington #define NR_SAMPLES 10
41d81bb7a3SChristopher Covington 
424870738cSEric Auger /* Some PMU events */
434870738cSEric Auger #define SW_INCR			0x0
444870738cSEric Auger #define INST_RETIRED		0x8
454870738cSEric Auger #define CPU_CYCLES		0x11
46*4ce2a804SEric Auger #define MEM_ACCESS		0x13
474870738cSEric Auger #define INST_PREC		0x1B
484870738cSEric Auger #define STALL_FRONTEND		0x23
494870738cSEric Auger #define STALL_BACKEND		0x24
504870738cSEric Auger 
514870738cSEric Auger #define COMMON_EVENTS_LOW	0x0
524870738cSEric Auger #define COMMON_EVENTS_HIGH	0x3F
534870738cSEric Auger #define EXT_COMMON_EVENTS_LOW	0x4000
544870738cSEric Auger #define EXT_COMMON_EVENTS_HIGH	0x403F
554870738cSEric Auger 
56*4ce2a804SEric Auger #define ALL_SET			0xFFFFFFFF
57*4ce2a804SEric Auger #define ALL_CLEAR		0x0
58*4ce2a804SEric Auger #define PRE_OVERFLOW		0xFFFFFFF0
59*4ce2a804SEric Auger 
/*
 * Runtime description of the PMU probed on this CPU:
 * @version: PerfMon version field read via get_pmu_version()
 * @nb_implemented_counters: number of implemented event counters
 *	(presumably derived from PMCR.N — the probe code is outside
 *	this view, confirm there)
 * @pmcr_ro: PMCR bits preserved/OR-ed into every PMCR write
 *	(see the set_pmcr() call sites below)
 */
struct pmu {
	unsigned int version;
	unsigned int nb_implemented_counters;
	uint32_t pmcr_ro;
};

/* Single global PMU description; filled in at init (not in this view). */
static struct pmu pmu;
678f747a85SEric Auger 
684244065bSChristopher Covington #if defined(__arm__)
69098add54SAndrew Jones #define ID_DFR0_PERFMON_SHIFT 24
70098add54SAndrew Jones #define ID_DFR0_PERFMON_MASK  0xf
71098add54SAndrew Jones 
72784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
73784ee933SEric Auger #define ID_DFR0_PMU_V1		0b0001
74784ee933SEric Auger #define ID_DFR0_PMU_V2		0b0010
75784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0011
76784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
77784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
78784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
79784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
80784ee933SEric Auger 
814244065bSChristopher Covington #define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
824244065bSChristopher Covington #define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
83d81bb7a3SChristopher Covington #define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
84d81bb7a3SChristopher Covington #define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
85d81bb7a3SChristopher Covington #define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
86d81bb7a3SChristopher Covington #define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
87d81bb7a3SChristopher Covington #define PMCCNTR64    __ACCESS_CP15_64(0, c9)
884244065bSChristopher Covington 
/* Thin 32-bit accessors for the CP15 PMU registers defined above. */
static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }
93d81bb7a3SChristopher Covington 
/* Return the PerfMon version field of ID_DFR0 (one of ID_DFR0_PMU_*). */
static inline uint8_t get_pmu_version(void)
{
	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
}
98098add54SAndrew Jones 
/* Read the 32-bit cycle counter, zero-extended into a uint64_t. */
static inline uint64_t get_pmccntr(void)
{
	return read_sysreg(PMCCNTR32);
}

/* Write the low 32 bits of @value into the cycle counter. */
static inline void set_pmccntr(uint64_t value)
{
	write_sysreg(value & 0xffffffff, PMCCNTR32);
}

/* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */
static inline void set_pmccfiltr(uint32_t value)
{
	/* Select the cycle counter first, then write its type/filter. */
	write_sysreg(PMU_CYCLE_IDX, PMSELR);
	write_sysreg(value, PMXEVTYPER);
	isb();
}
1168f76a347SChristopher Covington 
1178f76a347SChristopher Covington /*
1188f76a347SChristopher Covington  * Extra instructions inserted by the compiler would be difficult to compensate
1198f76a347SChristopher Covington  * for, so hand assemble everything between, and including, the PMCR accesses
1208f76a347SChristopher Covington  * to start and stop counting. isb instructions were inserted to make sure
1218f76a347SChristopher Covington  * pmccntr read after this function returns the exact instructions executed in
1228f76a347SChristopher Covington  * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop.
1238f76a347SChristopher Covington  */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	/*
	 * Write @pmcr to start counting, spin @loop times, then clear
	 * PMCR to stop; the isb's fence the counted window exactly.
	 */
	asm volatile(
	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	bgt	1b\n"
	"	mcr	p15, 0, %[z], c9, c12, 0\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr), [z] "r" (0)
	: "cc");
}
1374870738cSEric Auger 
/* Event counter tests are only implemented for aarch64; no-op stubs here. */
static void test_event_introspection(void) {}
static void test_event_counter_config(void) {}
static void test_basic_event_count(void) {}
static void test_mem_access(void) {}
1434870738cSEric Auger 
1444244065bSChristopher Covington #elif defined(__aarch64__)
145098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_SHIFT 8
146098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_MASK  0xf
147098add54SAndrew Jones 
148784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
149784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0001
150784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
151784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
152784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
153784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
154784ee933SEric Auger 
/* Thin accessors for the AArch64 PMU system registers. */
static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }
1628f76a347SChristopher Covington 
163098add54SAndrew Jones static inline uint8_t get_pmu_version(void)
164098add54SAndrew Jones {
165098add54SAndrew Jones 	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;
166784ee933SEric Auger 	return ver;
167098add54SAndrew Jones }
168098add54SAndrew Jones 
1698f76a347SChristopher Covington /*
1708f76a347SChristopher Covington  * Extra instructions inserted by the compiler would be difficult to compensate
1718f76a347SChristopher Covington  * for, so hand assemble everything between, and including, the PMCR accesses
1728f76a347SChristopher Covington  * to start and stop counting. isb instructions are inserted to make sure
1738f76a347SChristopher Covington  * pmccntr read after this function returns the exact instructions executed
1748f76a347SChristopher Covington  * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
1758f76a347SChristopher Covington  */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	/*
	 * Write @pmcr to start counting, spin @loop times, then zero
	 * PMCR_EL0 to stop; the isb's fence the counted window exactly.
	 */
	asm volatile(
	"	msr	pmcr_el0, %[pmcr]\n"
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	b.gt	1b\n"
	"	msr	pmcr_el0, xzr\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr)
	: "cc");
}
1894870738cSEric Auger 
1904870738cSEric Auger #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
191*4ce2a804SEric Auger #define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
192*4ce2a804SEric Auger #define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
193*4ce2a804SEric Auger 
194*4ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL1 BIT(31)
195*4ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL0 BIT(30)
1964870738cSEric Auger 
1974870738cSEric Auger static bool is_event_supported(uint32_t n, bool warn)
1984870738cSEric Auger {
1994870738cSEric Auger 	uint64_t pmceid0 = read_sysreg(pmceid0_el0);
2004870738cSEric Auger 	uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0);
2014870738cSEric Auger 	bool supported;
2024870738cSEric Auger 	uint64_t reg;
2034870738cSEric Auger 
2044870738cSEric Auger 	/*
2054870738cSEric Auger 	 * The low 32-bits of PMCEID0/1 respectively describe
2064870738cSEric Auger 	 * event support for events 0-31/32-63. Their High
2074870738cSEric Auger 	 * 32-bits describe support for extended events
2084870738cSEric Auger 	 * starting at 0x4000, using the same split.
2094870738cSEric Auger 	 */
2104870738cSEric Auger 	assert((n >= COMMON_EVENTS_LOW  && n <= COMMON_EVENTS_HIGH) ||
2114870738cSEric Auger 	       (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH));
2124870738cSEric Auger 
2134870738cSEric Auger 	if (n <= COMMON_EVENTS_HIGH)
2144870738cSEric Auger 		reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32);
2154870738cSEric Auger 	else
2164870738cSEric Auger 		reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32);
2174870738cSEric Auger 
2184870738cSEric Auger 	supported =  reg & (1UL << (n & 0x3F));
2194870738cSEric Auger 
2204870738cSEric Auger 	if (!supported && warn)
2214870738cSEric Auger 		report_info("event 0x%x is not supported", n);
2224870738cSEric Auger 	return supported;
2234870738cSEric Auger }
2244870738cSEric Auger 
2254870738cSEric Auger static void test_event_introspection(void)
2264870738cSEric Auger {
2274870738cSEric Auger 	bool required_events;
2284870738cSEric Auger 
2294870738cSEric Auger 	if (!pmu.nb_implemented_counters) {
2304870738cSEric Auger 		report_skip("No event counter, skip ...");
2314870738cSEric Auger 		return;
2324870738cSEric Auger 	}
2334870738cSEric Auger 
2344870738cSEric Auger 	/* PMUv3 requires an implementation includes some common events */
2354870738cSEric Auger 	required_events = is_event_supported(SW_INCR, true) &&
2364870738cSEric Auger 			  is_event_supported(CPU_CYCLES, true) &&
2374870738cSEric Auger 			  (is_event_supported(INST_RETIRED, true) ||
2384870738cSEric Auger 			   is_event_supported(INST_PREC, true));
2394870738cSEric Auger 
2404870738cSEric Auger 	if (pmu.version >= ID_DFR0_PMU_V3_8_1) {
2414870738cSEric Auger 		required_events = required_events &&
2424870738cSEric Auger 				  is_event_supported(STALL_FRONTEND, true) &&
2434870738cSEric Auger 				  is_event_supported(STALL_BACKEND, true);
2444870738cSEric Auger 	}
2454870738cSEric Auger 
2464870738cSEric Auger 	report(required_events, "Check required events are implemented");
2474870738cSEric Auger }
2484870738cSEric Auger 
249*4ce2a804SEric Auger /*
250*4ce2a804SEric Auger  * Extra instructions inserted by the compiler would be difficult to compensate
251*4ce2a804SEric Auger  * for, so hand assemble everything between, and including, the PMCR accesses
252*4ce2a804SEric Auger  * to start and stop counting. isb instructions are inserted to make sure
253*4ce2a804SEric Auger  * pmccntr read after this function returns the exact instructions executed
254*4ce2a804SEric Auger  * in the controlled block. Loads @loop times the data at @address into x9.
255*4ce2a804SEric Auger  */
static void mem_access_loop(void *addr, int loop, uint32_t pmcr)
{
	/*
	 * Start counting (@pmcr -> PMCR_EL0), perform @loop loads from
	 * @addr into x9, then stop by zeroing PMCR_EL0; the isb's fence
	 * the counted window exactly.
	 */
asm volatile(
	"       msr     pmcr_el0, %[pmcr]\n"
	"       isb\n"
	"       mov     x10, %[loop]\n"
	"1:     sub     x10, x10, #1\n"
	"       ldr	x9, [%[addr]]\n"
	"       cmp     x10, #0x0\n"
	"       b.gt    1b\n"
	"       msr     pmcr_el0, xzr\n"
	"       isb\n"
	:
	: [addr] "r" (addr), [pmcr] "r" (pmcr), [loop] "r" (loop)
	: "x9", "x10", "cc");
}
272*4ce2a804SEric Auger 
/*
 * Put the PMU into a quiescent, known state: all counters cleared and
 * disabled, overflow flags cleared, overflow interrupts masked.
 */
static void pmu_reset(void)
{
	/* reset all counters, counting disabled at PMCR level */
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	/* Disable all counters */
	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
	/* clear overflow reg */
	write_sysreg(ALL_SET, pmovsclr_el0);
	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET, pmintenclr_el1);
	isb();
}
285*4ce2a804SEric Auger 
/*
 * Exercise event counter configuration through the indirect
 * PMSELR/PMXEVTYPER/PMXEVCNTR interface and cross-check the values
 * through the direct PMEVTYPERn/PMEVCNTRn views.
 */
static void test_event_counter_config(void)
{
	int i;

	if (!pmu.nb_implemented_counters) {
		report_skip("No event counter, skip ...");
		return;
	}

	pmu_reset();

	/*
	 * Test setting through PMSELR/PMXEVTYPER and PMEVTYPERn read,
	 * select counter 1
	 */
	write_sysreg(1, PMSELR_EL0);
	/* program this counter to count an (unsupported) event, 0xEA */
	write_sysreg(0xEA, PMXEVTYPER_EL0);
	/* preload the selected counter with a known value */
	write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
	/* both must read back identically through the PMEVxxx1 views */
	report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
		"PMESELR/PMXEVTYPER/PMEVTYPERn");
	report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
		"PMESELR/PMXEVCNTR/PMEVCNTRn");

	/* try to configure an unsupported event within the range [0x0, 0x3F] */
	for (i = 0; i <= 0x3F; i++) {
		if (!is_event_supported(i, false))
			break;
	}
	if (i > 0x3F) {
		report_skip("pmevtyper: all events within [0x0, 0x3F] are supported");
		return;
	}

	/* select counter 0 */
	write_sysreg(0, PMSELR_EL0);
	/*
	 * Write the unsupported event id into the counter VALUE register.
	 * NOTE(review): the original comment claimed this programs the
	 * event type; a write to PMXEVTYPER_EL0 may have been intended —
	 * confirm against the test's intent.
	 */
	write_sysreg(i, PMXEVCNTR_EL0);
	/* read the counter value */
	read_sysreg(PMXEVCNTR_EL0);
	report(read_sysreg(PMXEVCNTR_EL0) == i,
		"read of a counter programmed with unsupported event");
}
329*4ce2a804SEric Auger 
330*4ce2a804SEric Auger static bool satisfy_prerequisites(uint32_t *events, unsigned int nb_events)
331*4ce2a804SEric Auger {
332*4ce2a804SEric Auger 	int i;
333*4ce2a804SEric Auger 
334*4ce2a804SEric Auger 	if (pmu.nb_implemented_counters < nb_events) {
335*4ce2a804SEric Auger 		report_skip("Skip test as number of counters is too small (%d)",
336*4ce2a804SEric Auger 			    pmu.nb_implemented_counters);
337*4ce2a804SEric Auger 		return false;
338*4ce2a804SEric Auger 	}
339*4ce2a804SEric Auger 
340*4ce2a804SEric Auger 	for (i = 0; i < nb_events; i++) {
341*4ce2a804SEric Auger 		if (!is_event_supported(events[i], false)) {
342*4ce2a804SEric Auger 			report_skip("Skip test as event 0x%x is not supported",
343*4ce2a804SEric Auger 				    events[i]);
344*4ce2a804SEric Auger 			return false;
345*4ce2a804SEric Auger 		}
346*4ce2a804SEric Auger 	}
347*4ce2a804SEric Auger 	return true;
348*4ce2a804SEric Auger }
349*4ce2a804SEric Auger 
/*
 * Basic event counting test: program counters #0 (CPU_CYCLES, preset
 * just below overflow) and #1 (INST_RETIRED), exercise the
 * enable/disable/reset controls, run a short measured loop and check
 * an overflow is flagged on counter #0.
 */
static void test_basic_event_count(void)
{
	uint32_t implemented_counter_mask, non_implemented_counter_mask;
	uint32_t counter_mask;
	uint32_t events[] = {CPU_CYCLES, INST_RETIRED};

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	/* Masks of implemented vs non-implemented event counters (bit 31
	 * is the cycle counter and belongs to neither set). */
	implemented_counter_mask = BIT(pmu.nb_implemented_counters) - 1;
	non_implemented_counter_mask = ~(BIT(31) | implemented_counter_mask);
	counter_mask = implemented_counter_mask | non_implemented_counter_mask;

	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, INST_RETIRED | PMEVTYPER_EXCLUDE_EL0);

	/* disable all counters */
	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
	report(!read_sysreg_s(PMCNTENCLR_EL0) && !read_sysreg_s(PMCNTENSET_EL0),
		"pmcntenclr: disable all counters");

	/*
	 * clear cycle and all event counters and allow counter enablement
	 * through PMCNTENSET. LC is RES1.
	 */
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	isb();
	report(get_pmcr() == (pmu.pmcr_ro | PMU_PMCR_LC), "pmcr: reset counters");

	/* Preset counter #0 to pre overflow value to trigger an overflow */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
		"counter #0 preset to pre-overflow value");
	report(!read_regn_el0(pmevcntr, 1), "counter #1 is 0");

	/*
	 * Enable all implemented counters and also attempt to enable
	 * not supported counters. Counting still is disabled by !PMCR.E
	 */
	write_sysreg_s(counter_mask, PMCNTENSET_EL0);

	/* check only those implemented are enabled */
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == implemented_counter_mask),
		"pmcntenset: enabled implemented_counters");

	/* Disable all counters but counters #0 and #1 */
	write_sysreg_s(~0x3, PMCNTENCLR_EL0);
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == 0x3),
		"pmcntenset: just enabled #0 and #1");

	/* clear overflow register */
	write_sysreg(ALL_SET, pmovsclr_el0);
	report(!read_sysreg(pmovsclr_el0), "check overflow reg is 0");

	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET, pmintenclr_el1);
	report(!read_sysreg(pmintenclr_el1),
		"pmintenclr_el1=0, all interrupts disabled");

	/* enable overflow interrupts on all event counters */
	write_sysreg(implemented_counter_mask | non_implemented_counter_mask,
		     pmintenset_el1);
	report(read_sysreg(pmintenset_el1) == implemented_counter_mask,
		"overflow interrupts enabled on all implemented counters");

	/* Set PMCR.E, execute asm code and unset PMCR.E */
	precise_instrs_loop(20, pmu.pmcr_ro | PMU_PMCR_E);

	report_info("counter #0 is 0x%lx (CPU_CYCLES)",
		    read_regn_el0(pmevcntr, 0));
	report_info("counter #1 is 0x%lx (INST_RETIRED)",
		    read_regn_el0(pmevcntr, 1));

	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	/* NOTE(review): this only checks bit 0 is set, not that the other
	 * overflow bits are clear — "only" is stronger than the check. */
	report(read_sysreg(pmovsclr_el0) & 0x1,
		"check overflow happened on #0 only");
}
429*4ce2a804SEric Auger 
430*4ce2a804SEric Auger static void test_mem_access(void)
431*4ce2a804SEric Auger {
432*4ce2a804SEric Auger 	void *addr = malloc(PAGE_SIZE);
433*4ce2a804SEric Auger 	uint32_t events[] = {MEM_ACCESS, MEM_ACCESS};
434*4ce2a804SEric Auger 
435*4ce2a804SEric Auger 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
436*4ce2a804SEric Auger 		return;
437*4ce2a804SEric Auger 
438*4ce2a804SEric Auger 	pmu_reset();
439*4ce2a804SEric Auger 
440*4ce2a804SEric Auger 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
441*4ce2a804SEric Auger 	write_regn_el0(pmevtyper, 1, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
442*4ce2a804SEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
443*4ce2a804SEric Auger 	isb();
444*4ce2a804SEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
445*4ce2a804SEric Auger 	report_info("counter #0 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 0));
446*4ce2a804SEric Auger 	report_info("counter #1 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 1));
447*4ce2a804SEric Auger 	/* We may measure more than 20 mem access depending on the core */
448*4ce2a804SEric Auger 	report((read_regn_el0(pmevcntr, 0) == read_regn_el0(pmevcntr, 1)) &&
449*4ce2a804SEric Auger 	       (read_regn_el0(pmevcntr, 0) >= 20) && !read_sysreg(pmovsclr_el0),
450*4ce2a804SEric Auger 	       "Ran 20 mem accesses");
451*4ce2a804SEric Auger 
452*4ce2a804SEric Auger 	pmu_reset();
453*4ce2a804SEric Auger 
454*4ce2a804SEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
455*4ce2a804SEric Auger 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
456*4ce2a804SEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
457*4ce2a804SEric Auger 	isb();
458*4ce2a804SEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
459*4ce2a804SEric Auger 	report(read_sysreg(pmovsclr_el0) == 0x3,
460*4ce2a804SEric Auger 	       "Ran 20 mem accesses with expected overflows on both counters");
461*4ce2a804SEric Auger 	report_info("cnt#0 = %ld cnt#1=%ld overflow=0x%lx",
462*4ce2a804SEric Auger 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
463*4ce2a804SEric Auger 			read_sysreg(pmovsclr_el0));
464*4ce2a804SEric Auger }
465*4ce2a804SEric Auger 
4664244065bSChristopher Covington #endif
4674244065bSChristopher Covington 
4684244065bSChristopher Covington /*
469d81bb7a3SChristopher Covington  * Ensure that the cycle counter progresses between back-to-back reads.
470d81bb7a3SChristopher Covington  */
471d81bb7a3SChristopher Covington static bool check_cycles_increase(void)
472d81bb7a3SChristopher Covington {
473d81bb7a3SChristopher Covington 	bool success = true;
474d81bb7a3SChristopher Covington 
475d81bb7a3SChristopher Covington 	/* init before event access, this test only cares about cycle count */
476d81bb7a3SChristopher Covington 	set_pmcntenset(1 << PMU_CYCLE_IDX);
477d81bb7a3SChristopher Covington 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
478d81bb7a3SChristopher Covington 
479d81bb7a3SChristopher Covington 	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
480d81bb7a3SChristopher Covington 
481d81bb7a3SChristopher Covington 	for (int i = 0; i < NR_SAMPLES; i++) {
482d81bb7a3SChristopher Covington 		uint64_t a, b;
483d81bb7a3SChristopher Covington 
484d81bb7a3SChristopher Covington 		a = get_pmccntr();
485d81bb7a3SChristopher Covington 		b = get_pmccntr();
486d81bb7a3SChristopher Covington 
487d81bb7a3SChristopher Covington 		if (a >= b) {
488d81bb7a3SChristopher Covington 			printf("Read %"PRId64" then %"PRId64".\n", a, b);
489d81bb7a3SChristopher Covington 			success = false;
490d81bb7a3SChristopher Covington 			break;
491d81bb7a3SChristopher Covington 		}
492d81bb7a3SChristopher Covington 	}
493d81bb7a3SChristopher Covington 
494d81bb7a3SChristopher Covington 	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
495d81bb7a3SChristopher Covington 
496d81bb7a3SChristopher Covington 	return success;
497d81bb7a3SChristopher Covington }
498d81bb7a3SChristopher Covington 
4998f76a347SChristopher Covington /*
5008f76a347SChristopher Covington  * Execute a known number of guest instructions. Only even instruction counts
5018f76a347SChristopher Covington  * greater than or equal to 4 are supported by the in-line assembly code. The
5028f76a347SChristopher Covington  * control register (PMCR_EL0) is initialized with the provided value (allowing
5038f76a347SChristopher Covington  * for example for the cycle counter or event counters to be reset). At the end
5048f76a347SChristopher Covington  * of the exact instruction loop, zero is written to PMCR_EL0 to disable
5058f76a347SChristopher Covington  * counting, allowing the cycle counter or event counters to be read at the
5068f76a347SChristopher Covington  * leisure of the calling code.
5078f76a347SChristopher Covington  */
5088f76a347SChristopher Covington static void measure_instrs(int num, uint32_t pmcr)
5098f76a347SChristopher Covington {
5108f76a347SChristopher Covington 	int loop = (num - 2) / 2;
5118f76a347SChristopher Covington 
5128f76a347SChristopher Covington 	assert(num >= 4 && ((num - 2) % 2 == 0));
5138f76a347SChristopher Covington 	precise_instrs_loop(loop, pmcr);
5148f76a347SChristopher Covington }
5158f76a347SChristopher Covington 
5168f76a347SChristopher Covington /*
5178f76a347SChristopher Covington  * Measure cycle counts for various known instruction counts. Ensure that the
5188f76a347SChristopher Covington  * cycle counter progresses (similar to check_cycles_increase() but with more
5198f76a347SChristopher Covington  * instructions and using reset and stop controls). If supplied a positive,
5208f76a347SChristopher Covington  * nonzero CPI parameter, it also strictly checks that every measurement matches
5218f76a347SChristopher Covington  * it. Strict CPI checking is used to test -icount mode.
5228f76a347SChristopher Covington  */
static bool check_cpi(int cpi)
{
	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;

	/* init before event access, this test only cares about cycle count */
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	if (cpi > 0)
		printf("Checking for CPI=%d.\n", cpi);
	printf("instrs : cycles0 cycles1 ...\n");

	/* Sample several instruction counts, NR_SAMPLES runs each. */
	for (unsigned int i = 4; i < 300; i += 32) {
		uint64_t avg, sum = 0;

		printf("%4d:", i);
		for (int j = 0; j < NR_SAMPLES; j++) {
			uint64_t cycles;

			set_pmccntr(0);
			measure_instrs(i, pmcr);
			cycles = get_pmccntr();
			printf(" %4"PRId64"", cycles);

			if (!cycles) {
				printf("\ncycles not incrementing!\n");
				return false;
			} else if (cpi > 0 && cycles != i * cpi) {
				/* strict check, used with -icount */
				printf("\nunexpected cycle count received!\n");
				return false;
			} else if ((cycles >> 32) != 0) {
				/* The cycles taken by the loop above should
				 * fit in 32 bits easily. We check the upper
				 * 32 bits of the cycle counter to make sure
				 * there is no surprise. */
				printf("\ncycle count bigger than 32bit!\n");
				return false;
			}

			sum += cycles;
		}
		avg = sum / NR_SAMPLES;
		/* Print whichever of CPI or IPC is >= 1 for this sample. */
		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
		       (avg >= i) ? "cpi" : "ipc",
		       (avg >= i) ? avg / i : i / avg);
	}

	return true;
}
5728f76a347SChristopher Covington 
/*
 * On 32-bit ARM with a PMUv3, verify that a value written through the
 * 64-bit PMCCNTR64 accessor reads back intact. Gated behind the
 * ERRATA_9e3f7a296940 switch because the access is unsafe on broken hosts.
 */
static void pmccntr64_test(void)
{
#ifdef __arm__
	if (pmu.version != ID_DFR0_PMU_V3)
		return;

	if (!ERRATA(9e3f7a296940)) {
		report_skip("Skipping unsafe pmccntr64 test. Set ERRATA_9e3f7a296940=y to enable.");
		return;
	}

	write_sysreg(0xdead, PMCCNTR64);
	report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
#endif
}
5854c357610SAndrew Jones 
5864244065bSChristopher Covington /* Return FALSE if no PMU found, otherwise return TRUE */
58723b8916bSThomas Huth static bool pmu_probe(void)
5884244065bSChristopher Covington {
589784ee933SEric Auger 	uint32_t pmcr = get_pmcr();
590eff8f161SEric Auger 
5918f747a85SEric Auger 	pmu.version = get_pmu_version();
592784ee933SEric Auger 	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
593eff8f161SEric Auger 		return false;
594eff8f161SEric Auger 
595784ee933SEric Auger 	report_info("PMU version: 0x%x", pmu.version);
596eff8f161SEric Auger 
597eff8f161SEric Auger 	pmcr = get_pmcr();
5988f747a85SEric Auger 	report_info("PMU implementer/ID code: %#x(\"%c\")/%#x",
599eff8f161SEric Auger 		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
600eff8f161SEric Auger 		    ((pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK) ? : ' ',
6018f747a85SEric Auger 		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);
6028f747a85SEric Auger 
6038f747a85SEric Auger 	/* store read-only and RES0 fields of the PMCR bottom-half*/
6048f747a85SEric Auger 	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
6058f747a85SEric Auger 	pmu.nb_implemented_counters =
6068f747a85SEric Auger 		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
6078f747a85SEric Auger 	report_info("Implements %d event counters",
6088f747a85SEric Auger 		    pmu.nb_implemented_counters);
609eff8f161SEric Auger 
610eff8f161SEric Auger 	return true;
6114244065bSChristopher Covington }
6124244065bSChristopher Covington 
/*
 * Entry point: probe for a PMU, then dispatch to the sub-test named by
 * argv[1]. "cycle-counter" optionally takes a CPI value in argv[2] for
 * strict CPI checking (used with -icount).
 */
int main(int argc, char *argv[])
{
	const char *test;
	int cpi = 0;

	if (!pmu_probe()) {
		printf("No PMU found, test skipped...\n");
		return report_summary();
	}

	if (argc < 2)
		report_abort("no test specified");

	test = argv[1];
	report_prefix_push("pmu");

	if (!strcmp(test, "cycle-counter")) {
		report_prefix_push(test);
		if (argc > 2)
			cpi = atol(argv[2]);
		report(check_cycles_increase(),
		       "Monotonically increasing cycle count");
		report(check_cpi(cpi), "Cycle/instruction ratio");
		pmccntr64_test();
		report_prefix_pop();
	} else if (!strcmp(test, "pmu-event-introspection")) {
		report_prefix_push(test);
		test_event_introspection();
		report_prefix_pop();
	} else if (!strcmp(test, "pmu-event-counter-config")) {
		report_prefix_push(test);
		test_event_counter_config();
		report_prefix_pop();
	} else if (!strcmp(test, "pmu-basic-event-count")) {
		report_prefix_push(test);
		test_basic_event_count();
		report_prefix_pop();
	} else if (!strcmp(test, "pmu-mem-access")) {
		report_prefix_push(test);
		test_mem_access();
		report_prefix_pop();
	} else {
		report_abort("Unknown sub-test '%s'", test);
	}

	return report_summary();
}
658