xref: /kvm-unit-tests/arm/pmu.c (revision a7509187facb9a0c0f1880bdc57d539ee18475c0)
14244065bSChristopher Covington /*
24244065bSChristopher Covington  * Test the ARM Performance Monitors Unit (PMU).
34244065bSChristopher Covington  *
44244065bSChristopher Covington  * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
54244065bSChristopher Covington  * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
64244065bSChristopher Covington  *
74244065bSChristopher Covington  * This program is free software; you can redistribute it and/or modify it
84244065bSChristopher Covington  * under the terms of the GNU Lesser General Public License version 2.1 and
94244065bSChristopher Covington  * only version 2.1 as published by the Free Software Foundation.
104244065bSChristopher Covington  *
114244065bSChristopher Covington  * This program is distributed in the hope that it will be useful, but WITHOUT
124244065bSChristopher Covington  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
134244065bSChristopher Covington  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
144244065bSChristopher Covington  * for more details.
154244065bSChristopher Covington  */
164244065bSChristopher Covington #include "libcflat.h"
174c357610SAndrew Jones #include "errata.h"
184244065bSChristopher Covington #include "asm/barrier.h"
194244065bSChristopher Covington #include "asm/sysreg.h"
204244065bSChristopher Covington #include "asm/processor.h"
214870738cSEric Auger #include <bitops.h>
224ce2a804SEric Auger #include <asm/gic.h>
234244065bSChristopher Covington 
24d81bb7a3SChristopher Covington #define PMU_PMCR_E         (1 << 0)
254ce2a804SEric Auger #define PMU_PMCR_P         (1 << 1)
26d81bb7a3SChristopher Covington #define PMU_PMCR_C         (1 << 2)
274ce2a804SEric Auger #define PMU_PMCR_D         (1 << 3)
284ce2a804SEric Auger #define PMU_PMCR_X         (1 << 4)
294ce2a804SEric Auger #define PMU_PMCR_DP        (1 << 5)
30d81bb7a3SChristopher Covington #define PMU_PMCR_LC        (1 << 6)
31036369c5SRicardo Koller #define PMU_PMCR_LP        (1 << 7)
324244065bSChristopher Covington #define PMU_PMCR_N_SHIFT   11
334244065bSChristopher Covington #define PMU_PMCR_N_MASK    0x1f
344244065bSChristopher Covington #define PMU_PMCR_ID_SHIFT  16
354244065bSChristopher Covington #define PMU_PMCR_ID_MASK   0xff
364244065bSChristopher Covington #define PMU_PMCR_IMP_SHIFT 24
374244065bSChristopher Covington #define PMU_PMCR_IMP_MASK  0xff
384244065bSChristopher Covington 
39d81bb7a3SChristopher Covington #define PMU_CYCLE_IDX      31
40d81bb7a3SChristopher Covington 
41d81bb7a3SChristopher Covington #define NR_SAMPLES 10
42d81bb7a3SChristopher Covington 
434870738cSEric Auger /* Some PMU events */
444870738cSEric Auger #define SW_INCR			0x0
454870738cSEric Auger #define INST_RETIRED		0x8
464870738cSEric Auger #define CPU_CYCLES		0x11
474ce2a804SEric Auger #define MEM_ACCESS		0x13
484870738cSEric Auger #define INST_PREC		0x1B
494870738cSEric Auger #define STALL_FRONTEND		0x23
504870738cSEric Auger #define STALL_BACKEND		0x24
5166fee034SEric Auger #define CHAIN			0x1E
524870738cSEric Auger 
534870738cSEric Auger #define COMMON_EVENTS_LOW	0x0
544870738cSEric Auger #define COMMON_EVENTS_HIGH	0x3F
554870738cSEric Auger #define EXT_COMMON_EVENTS_LOW	0x4000
564870738cSEric Auger #define EXT_COMMON_EVENTS_HIGH	0x403F
574870738cSEric Auger 
587d1f853aSRicardo Koller #define ALL_SET_32			0x00000000FFFFFFFFULL
5939d1347aSRicardo Koller #define ALL_CLEAR		0x0000000000000000ULL
607d1f853aSRicardo Koller #define PRE_OVERFLOW_32		0x00000000FFFFFFF0ULL
617d1f853aSRicardo Koller #define PRE_OVERFLOW2_32	0x00000000FFFFFFDCULL
62036369c5SRicardo Koller #define PRE_OVERFLOW_64		0xFFFFFFFFFFFFFFF0ULL
63036369c5SRicardo Koller 
64036369c5SRicardo Koller #define PRE_OVERFLOW(__overflow_at_64bits)				\
65036369c5SRicardo Koller 	(__overflow_at_64bits ? PRE_OVERFLOW_64 : PRE_OVERFLOW_32)
664ce2a804SEric Auger 
674f5ef94fSEric Auger #define PMU_PPI			23
684f5ef94fSEric Auger 
/* PMU implementation details probed once at init and used by all tests. */
struct pmu {
	unsigned int version;			/* ID_DFR0_PMU_* / PMUVer encoding */
	unsigned int nb_implemented_counters;	/* number of event counters */
	uint32_t pmcr_ro;			/* presumably the read-only PMCR bits — confirm at probe site */
};
748f747a85SEric Auger 
/* Overflow-interrupt bookkeeping, filled in by the PMU IRQ handler. */
struct pmu_stats {
	unsigned long bitmap;		/* set of counter indexes that overflowed */
	uint32_t interrupts[32];	/* per-counter overflow interrupt count */
	bool unexpected;		/* an IRQ other than the PMU PPI was taken */
};
804f5ef94fSEric Auger 
818f747a85SEric Auger static struct pmu pmu;
828f747a85SEric Auger 
834244065bSChristopher Covington #if defined(__arm__)
84098add54SAndrew Jones #define ID_DFR0_PERFMON_SHIFT 24
85098add54SAndrew Jones #define ID_DFR0_PERFMON_MASK  0xf
86098add54SAndrew Jones 
87784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
88784ee933SEric Auger #define ID_DFR0_PMU_V1		0b0001
89784ee933SEric Auger #define ID_DFR0_PMU_V2		0b0010
90784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0011
91784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
92784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
93784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
94784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
95784ee933SEric Auger 
964244065bSChristopher Covington #define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
974244065bSChristopher Covington #define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
98d81bb7a3SChristopher Covington #define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
99d81bb7a3SChristopher Covington #define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
100d81bb7a3SChristopher Covington #define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
101a7326740SRicardo Koller #define PMCNTENCLR   __ACCESS_CP15(c9, 0, c12, 2)
102a7326740SRicardo Koller #define PMOVSR       __ACCESS_CP15(c9, 0, c12, 3)
103d81bb7a3SChristopher Covington #define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
104a7326740SRicardo Koller #define PMINTENCLR   __ACCESS_CP15(c9, 0, c14, 2)
105d81bb7a3SChristopher Covington #define PMCCNTR64    __ACCESS_CP15_64(0, c9)
1064244065bSChristopher Covington 
/* AArch32 CP15 accessors for the PMU and the debug feature register. */
static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }
111d81bb7a3SChristopher Covington 
112098add54SAndrew Jones static inline uint8_t get_pmu_version(void)
113098add54SAndrew Jones {
114098add54SAndrew Jones 	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
115098add54SAndrew Jones }
116098add54SAndrew Jones 
/* Read the cycle counter through its 32-bit CP15 encoding. */
static inline uint64_t get_pmccntr(void)
{
	return read_sysreg(PMCCNTR32);
}

/* Write the low 32 bits of @value into the cycle counter. */
static inline void set_pmccntr(uint64_t value)
{
	write_sysreg(value & 0xffffffff, PMCCNTR32);
}
1268f76a347SChristopher Covington 
/* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */
static inline void set_pmccfiltr(uint32_t value)
{
	/* Select the cycle counter, then program its type/filter register. */
	write_sysreg(PMU_CYCLE_IDX, PMSELR);
	write_sysreg(value, PMXEVTYPER);
	isb();
}
1348f76a347SChristopher Covington 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions were inserted to make sure
 * pmccntr read after this function returns the exact instructions executed in
 * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	/* Write @pmcr to start counting, spin @loop times, then write 0 to stop. */
	asm volatile(
	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	bgt	1b\n"
	"	mcr	p15, 0, %[z], c9, c12, 0\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr), [z] "r" (0)
	: "cc");
}
1554870738cSEric Auger 
/* Put the PMU into a known quiescent state (AArch32 variant). */
static void pmu_reset(void)
{
	/* reset all counters, counting disabled at PMCR level*/
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	/* Disable all counters */
	write_sysreg(ALL_SET_32, PMCNTENCLR);
	/* clear overflow reg */
	write_sysreg(ALL_SET_32, PMOVSR);
	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET_32, PMINTENCLR);
	isb();
}
168a7326740SRicardo Koller 
/*
 * Event counter tests are only implemented for aarch64; these empty
 * stubs let the AArch32 build reference the same test names.
 */
static void test_event_introspection(void) {}
static void test_event_counter_config(void) {}
static void test_basic_event_count(bool overflow_at_64bits) {}
static void test_mem_access(bool overflow_at_64bits) {}
static void test_sw_incr(bool overflow_at_64bits) {}
static void test_chained_counters(bool unused) {}
static void test_chained_sw_incr(bool unused) {}
static void test_chain_promotion(bool unused) {}
static void test_overflow_interrupt(bool overflow_at_64bits) {}
1794870738cSEric Auger 
1804244065bSChristopher Covington #elif defined(__aarch64__)
181098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_SHIFT 8
182098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_MASK  0xf
183098add54SAndrew Jones 
184784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
185784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0001
186784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
187784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
188784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
189784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
190784ee933SEric Auger 
/* AArch64 PMU system register accessors. */
static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }
1988f76a347SChristopher Covington 
199098add54SAndrew Jones static inline uint8_t get_pmu_version(void)
200098add54SAndrew Jones {
201098add54SAndrew Jones 	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;
202784ee933SEric Auger 	return ver;
203098add54SAndrew Jones }
204098add54SAndrew Jones 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	/* msr takes a 64-bit GPR, so widen @pmcr before the asm block */
	uint64_t pmcr64 = pmcr;
	asm volatile(
	"	msr	pmcr_el0, %[pmcr]\n"
	"	isb\n"
	"1:	subs	%w[loop], %w[loop], #1\n"
	"	b.gt	1b\n"
	"	msr	pmcr_el0, xzr\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr64)
	: "cc");
}
2264870738cSEric Auger 
2274870738cSEric Auger #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
2284ce2a804SEric Auger #define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
2294ce2a804SEric Auger #define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
2304ce2a804SEric Auger 
2314ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL1 BIT(31)
2324ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL0 BIT(30)
2334870738cSEric Auger 
2344870738cSEric Auger static bool is_event_supported(uint32_t n, bool warn)
2354870738cSEric Auger {
2364870738cSEric Auger 	uint64_t pmceid0 = read_sysreg(pmceid0_el0);
2374870738cSEric Auger 	uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0);
2384870738cSEric Auger 	bool supported;
2394870738cSEric Auger 	uint64_t reg;
2404870738cSEric Auger 
2414870738cSEric Auger 	/*
2424870738cSEric Auger 	 * The low 32-bits of PMCEID0/1 respectively describe
2434870738cSEric Auger 	 * event support for events 0-31/32-63. Their High
2444870738cSEric Auger 	 * 32-bits describe support for extended events
2454870738cSEric Auger 	 * starting at 0x4000, using the same split.
2464870738cSEric Auger 	 */
2474870738cSEric Auger 	assert((n >= COMMON_EVENTS_LOW  && n <= COMMON_EVENTS_HIGH) ||
2484870738cSEric Auger 	       (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH));
2494870738cSEric Auger 
2504870738cSEric Auger 	if (n <= COMMON_EVENTS_HIGH)
2514870738cSEric Auger 		reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32);
2524870738cSEric Auger 	else
2534870738cSEric Auger 		reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32);
2544870738cSEric Auger 
2554870738cSEric Auger 	supported =  reg & (1UL << (n & 0x3F));
2564870738cSEric Auger 
2574870738cSEric Auger 	if (!supported && warn)
2584870738cSEric Auger 		report_info("event 0x%x is not supported", n);
2594870738cSEric Auger 	return supported;
2604870738cSEric Auger }
2614870738cSEric Auger 
2624870738cSEric Auger static void test_event_introspection(void)
2634870738cSEric Auger {
2644870738cSEric Auger 	bool required_events;
2654870738cSEric Auger 
2664870738cSEric Auger 	if (!pmu.nb_implemented_counters) {
2674870738cSEric Auger 		report_skip("No event counter, skip ...");
2684870738cSEric Auger 		return;
2694870738cSEric Auger 	}
2704870738cSEric Auger 
2714870738cSEric Auger 	/* PMUv3 requires an implementation includes some common events */
2724870738cSEric Auger 	required_events = is_event_supported(SW_INCR, true) &&
2734870738cSEric Auger 			  is_event_supported(CPU_CYCLES, true) &&
2744870738cSEric Auger 			  (is_event_supported(INST_RETIRED, true) ||
2754870738cSEric Auger 			   is_event_supported(INST_PREC, true));
2764870738cSEric Auger 
2774870738cSEric Auger 	if (pmu.version >= ID_DFR0_PMU_V3_8_1) {
2784870738cSEric Auger 		required_events = required_events &&
2794870738cSEric Auger 				  is_event_supported(STALL_FRONTEND, true) &&
2804870738cSEric Auger 				  is_event_supported(STALL_BACKEND, true);
2814870738cSEric Auger 	}
2824870738cSEric Auger 
2834870738cSEric Auger 	report(required_events, "Check required events are implemented");
2844870738cSEric Auger }
2854870738cSEric Auger 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Loads @loop times the data at @address into x9.
 */
static void mem_access_loop(void *addr, long loop, uint32_t pmcr)
{
	/* msr takes a 64-bit GPR, so widen @pmcr before the asm block */
	uint64_t pmcr64 = pmcr;
asm volatile(
	"       msr     pmcr_el0, %[pmcr]\n"
	"       isb\n"
	"       mov     x10, %[loop]\n"
	"1:     sub     x10, x10, #1\n"
	"       ldr	x9, [%[addr]]\n"
	"       cmp     x10, #0x0\n"
	"       b.gt    1b\n"
	"       msr     pmcr_el0, xzr\n"
	"       isb\n"
	:
	: [addr] "r" (addr), [pmcr] "r" (pmcr64), [loop] "r" (loop)
	: "x9", "x10", "cc");
}
3104ce2a804SEric Auger 
3114f5ef94fSEric Auger static struct pmu_stats pmu_stats;
3124f5ef94fSEric Auger 
3134f5ef94fSEric Auger static void irq_handler(struct pt_regs *regs)
3144f5ef94fSEric Auger {
3154f5ef94fSEric Auger 	uint32_t irqstat, irqnr;
3164f5ef94fSEric Auger 
3174f5ef94fSEric Auger 	irqstat = gic_read_iar();
3184f5ef94fSEric Auger 	irqnr = gic_iar_irqnr(irqstat);
3194f5ef94fSEric Auger 
3204f5ef94fSEric Auger 	if (irqnr == PMU_PPI) {
3214f5ef94fSEric Auger 		unsigned long overflows = read_sysreg(pmovsclr_el0);
3224f5ef94fSEric Auger 		int i;
3234f5ef94fSEric Auger 
3244f5ef94fSEric Auger 		for (i = 0; i < 32; i++) {
3254f5ef94fSEric Auger 			if (test_and_clear_bit(i, &overflows)) {
3264f5ef94fSEric Auger 				pmu_stats.interrupts[i]++;
3274f5ef94fSEric Auger 				pmu_stats.bitmap |= 1 << i;
3284f5ef94fSEric Auger 			}
3294f5ef94fSEric Auger 		}
3307d1f853aSRicardo Koller 		write_sysreg(ALL_SET_32, pmovsclr_el0);
331e0a6e56bSRicardo Koller 		isb();
3324f5ef94fSEric Auger 	} else {
3334f5ef94fSEric Auger 		pmu_stats.unexpected = true;
3344f5ef94fSEric Auger 	}
3354f5ef94fSEric Auger 	gic_write_eoir(irqstat);
3364f5ef94fSEric Auger }
3374f5ef94fSEric Auger 
3384f5ef94fSEric Auger static void pmu_reset_stats(void)
3394f5ef94fSEric Auger {
3404f5ef94fSEric Auger 	int i;
3414f5ef94fSEric Auger 
3424f5ef94fSEric Auger 	for (i = 0; i < 32; i++)
3434f5ef94fSEric Auger 		pmu_stats.interrupts[i] = 0;
3444f5ef94fSEric Auger 
3454f5ef94fSEric Auger 	pmu_stats.bitmap = 0;
3464f5ef94fSEric Auger 	pmu_stats.unexpected = false;
3474f5ef94fSEric Auger }
3484f5ef94fSEric Auger 
/* Put the PMU into a known quiescent state and clear the IRQ stats. */
static void pmu_reset(void)
{
	/* reset all counters, counting disabled at PMCR level*/
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	/* Disable all counters */
	write_sysreg_s(ALL_SET_32, PMCNTENCLR_EL0);
	/* clear overflow reg */
	write_sysreg(ALL_SET_32, pmovsclr_el0);
	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET_32, pmintenclr_el1);
	pmu_reset_stats();
	isb();
}
3624ce2a804SEric Auger 
/*
 * Exercise event counter configuration through the indirect
 * PMSELR/PMXEVTYPER/PMXEVCNTR interface and check it is reflected in
 * the direct PMEVTYPERn/PMEVCNTRn views, including for an event the
 * implementation does not support.
 */
static void test_event_counter_config(void)
{
	int i;

	if (!pmu.nb_implemented_counters) {
		report_skip("No event counter, skip ...");
		return;
	}

	pmu_reset();

	/*
	 * Test setting through PMSELR/PMXEVTYPER and PMEVTYPERn read,
	 * select counter 1 (the direct reads below use index 1)
	 */
	write_sysreg(1, PMSELR_EL0);
	/* program this counter to count unsupported event */
	write_sysreg(0xEA, PMXEVTYPER_EL0);
	write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
	report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
		"PMESELR/PMXEVTYPER/PMEVTYPERn");
	report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
		"PMESELR/PMXEVCNTR/PMEVCNTRn");

	/* try to configure an unsupported event within the range [0x0, 0x3F] */
	for (i = 0; i <= 0x3F; i++) {
		if (!is_event_supported(i, false))
			break;
	}
	if (i > 0x3F) {
		report_skip("pmevtyper: all events within [0x0, 0x3F] are supported");
		return;
	}

	/* select counter 0 */
	write_sysreg(0, PMSELR_EL0);
	/*
	 * program this counter to count unsupported event
	 * NOTE(review): this writes event number @i into PMXEVCNTR (the
	 * counter *value* register), not PMXEVTYPER; the comment and the
	 * report string suggest PMXEVTYPER may have been intended —
	 * confirm against upstream before relying on this check.
	 */
	write_sysreg(i, PMXEVCNTR_EL0);
	/* read the counter value */
	read_sysreg(PMXEVCNTR_EL0);
	report(read_sysreg(PMXEVCNTR_EL0) == i,
		"read of a counter programmed with unsupported event");
}
4064ce2a804SEric Auger 
4074ce2a804SEric Auger static bool satisfy_prerequisites(uint32_t *events, unsigned int nb_events)
4084ce2a804SEric Auger {
4094ce2a804SEric Auger 	int i;
4104ce2a804SEric Auger 
4114ce2a804SEric Auger 	if (pmu.nb_implemented_counters < nb_events) {
4124ce2a804SEric Auger 		report_skip("Skip test as number of counters is too small (%d)",
4134ce2a804SEric Auger 			    pmu.nb_implemented_counters);
4144ce2a804SEric Auger 		return false;
4154ce2a804SEric Auger 	}
4164ce2a804SEric Auger 
4174ce2a804SEric Auger 	for (i = 0; i < nb_events; i++) {
4184ce2a804SEric Auger 		if (!is_event_supported(events[i], false)) {
4194ce2a804SEric Auger 			report_skip("Skip test as event 0x%x is not supported",
4204ce2a804SEric Auger 				    events[i]);
4214ce2a804SEric Auger 			return false;
4224ce2a804SEric Auger 		}
4234ce2a804SEric Auger 	}
4244ce2a804SEric Auger 	return true;
4254ce2a804SEric Auger }
4264ce2a804SEric Auger 
42739d1347aSRicardo Koller static uint64_t pmevcntr_mask(void)
42839d1347aSRicardo Koller {
42939d1347aSRicardo Koller 	/*
43039d1347aSRicardo Koller 	 * Bits [63:0] are always incremented for 64-bit counters,
43139d1347aSRicardo Koller 	 * even if the PMU is configured to generate an overflow at
43239d1347aSRicardo Koller 	 * bits [31:0]
43339d1347aSRicardo Koller 	 *
43439d1347aSRicardo Koller 	 * For more details see the AArch64.IncrementEventCounter()
43539d1347aSRicardo Koller 	 * pseudo-code in the ARM ARM DDI 0487I.a, section J1.1.1.
43639d1347aSRicardo Koller 	 */
43739d1347aSRicardo Koller 	if (pmu.version >= ID_DFR0_PMU_V3_8_5)
43839d1347aSRicardo Koller 		return ~0;
43939d1347aSRicardo Koller 
44039d1347aSRicardo Koller 	return (uint32_t)~0;
44139d1347aSRicardo Koller }
44239d1347aSRicardo Koller 
443041df25bSRicardo Koller static bool check_overflow_prerequisites(bool overflow_at_64bits)
444041df25bSRicardo Koller {
445041df25bSRicardo Koller 	if (overflow_at_64bits && pmu.version < ID_DFR0_PMU_V3_8_5) {
446041df25bSRicardo Koller 		report_skip("Skip test as 64 overflows need FEAT_PMUv3p5");
447041df25bSRicardo Koller 		return false;
448041df25bSRicardo Koller 	}
449041df25bSRicardo Koller 
450041df25bSRicardo Koller 	return true;
451041df25bSRicardo Koller }
452041df25bSRicardo Koller 
/*
 * Walk the basic event counting protocol: program CPU_CYCLES and
 * INST_RETIRED on counters #0/#1, check the enable/disable, overflow
 * and interrupt-enable registers behave, then run a known instruction
 * loop with counter #0 preset near overflow and verify only #0
 * overflows.
 */
static void test_basic_event_count(bool overflow_at_64bits)
{
	uint32_t implemented_counter_mask, non_implemented_counter_mask;
	uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits);
	uint64_t pmcr_lp = overflow_at_64bits ? PMU_PMCR_LP : 0;
	uint32_t events[] = {CPU_CYCLES, INST_RETIRED};
	uint32_t counter_mask;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) ||
	    !check_overflow_prerequisites(overflow_at_64bits))
		return;

	implemented_counter_mask = BIT(pmu.nb_implemented_counters) - 1;
	non_implemented_counter_mask = ~(BIT(31) | implemented_counter_mask);
	counter_mask = implemented_counter_mask | non_implemented_counter_mask;

	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, INST_RETIRED | PMEVTYPER_EXCLUDE_EL0);

	/* disable all counters */
	write_sysreg_s(ALL_SET_32, PMCNTENCLR_EL0);
	report(!read_sysreg_s(PMCNTENCLR_EL0) && !read_sysreg_s(PMCNTENSET_EL0),
		"pmcntenclr: disable all counters");

	/*
	 * clear cycle and all event counters and allow counter enablement
	 * through PMCNTENSET. LC is RES1.
	 */
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P | pmcr_lp);
	isb();
	report(get_pmcr() == (pmu.pmcr_ro | PMU_PMCR_LC | pmcr_lp), "pmcr: reset counters");

	/* Preset counter #0 to pre overflow value to trigger an overflow */
	write_regn_el0(pmevcntr, 0, pre_overflow);
	report(read_regn_el0(pmevcntr, 0) == pre_overflow,
		"counter #0 preset to pre-overflow value");
	report(!read_regn_el0(pmevcntr, 1), "counter #1 is 0");

	/*
	 * Enable all implemented counters and also attempt to enable
	 * not supported counters. Counting still is disabled by !PMCR.E
	 */
	write_sysreg_s(counter_mask, PMCNTENSET_EL0);

	/* check only those implemented are enabled */
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == implemented_counter_mask),
		"pmcntenset: enabled implemented_counters");

	/* Disable all counters but counters #0 and #1 */
	write_sysreg_s(~0x3, PMCNTENCLR_EL0);
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == 0x3),
		"pmcntenset: just enabled #0 and #1");

	/* clear overflow register */
	write_sysreg(ALL_SET_32, pmovsclr_el0);
	report(!read_sysreg(pmovsclr_el0), "check overflow reg is 0");

	/* disable overflow interrupts on all counters*/
	write_sysreg(ALL_SET_32, pmintenclr_el1);
	report(!read_sysreg(pmintenclr_el1),
		"pmintenclr_el1=0, all interrupts disabled");

	/* enable overflow interrupts on all event counters */
	write_sysreg(implemented_counter_mask | non_implemented_counter_mask,
		     pmintenset_el1);
	report(read_sysreg(pmintenset_el1) == implemented_counter_mask,
		"overflow interrupts enabled on all implemented counters");

	/* Set PMCR.E, execute asm code and unset PMCR.E */
	precise_instrs_loop(20, pmu.pmcr_ro | PMU_PMCR_E);

	report_info("counter #0 is 0x%lx (CPU_CYCLES)",
		    read_regn_el0(pmevcntr, 0));
	report_info("counter #1 is 0x%lx (INST_RETIRED)",
		    read_regn_el0(pmevcntr, 1));

	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_sysreg(pmovsclr_el0) & 0x1,
		"check overflow happened on #0 only");
}
5354ce2a804SEric Auger 
/*
 * Verify basic MEM_ACCESS event counting and overflow reporting.
 *
 * Two counters are programmed with the same MEM_ACCESS event; after a
 * fixed access loop they must hold identical values and no overflow
 * bit may be set. A second run presets both counters just below their
 * overflow boundary and expects PMOVSCLR to report both overflows.
 *
 * @overflow_at_64bits: when true, PMCR.LP is set so counters overflow
 * at 64 bits instead of 32 (PRE_OVERFLOW() picks the matching preset).
 */
static void test_mem_access(bool overflow_at_64bits)
{
	void *addr = malloc(PAGE_SIZE);
	uint32_t events[] = {MEM_ACCESS, MEM_ACCESS};
	uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits);
	uint64_t pmcr_lp = overflow_at_64bits ? PMU_PMCR_LP : 0;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) ||
	    !check_overflow_prerequisites(overflow_at_64bits))
		return;

	pmu_reset();

	/* count EL1-only MEM_ACCESS on counters #0 and #1, then enable both */
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	isb();
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	report_info("counter #0 is 0x%lx (MEM_ACCESS)", read_regn_el0(pmevcntr, 0));
	report_info("counter #1 is 0x%lx (MEM_ACCESS)", read_regn_el0(pmevcntr, 1));
	/* We may measure more than 20 mem access depending on the core */
	report((read_regn_el0(pmevcntr, 0) == read_regn_el0(pmevcntr, 1)) &&
	       (read_regn_el0(pmevcntr, 0) >= 20) && !read_sysreg(pmovsclr_el0),
	       "Ran 20 mem accesses");

	pmu_reset();

	/*
	 * Preset both counters close enough to the overflow boundary that
	 * 20 accesses must wrap them. NOTE(review): the event types set
	 * above are assumed to survive pmu_reset() — confirm against its
	 * definition.
	 */
	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_regn_el0(pmevcntr, 1, pre_overflow);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	isb();
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	report(read_sysreg(pmovsclr_el0) == 0x3,
	       "Ran 20 mem accesses with expected overflows on both counters");
	report_info("cnt#0=0x%lx cnt#1=0x%lx overflow=0x%lx",
			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
			read_sysreg(pmovsclr_el0));
}
5744ce2a804SEric Auger 
/*
 * Exercise the SW_INCR (software increment) event.
 *
 * First checks that writing PMSWINC_EL0 does not advance the counter
 * while PMCR.E is clear; then, with counting enabled, performs 100
 * software increments on both counters and checks the resulting values
 * and the overflow bit on counter #0 (preset near its boundary).
 *
 * @overflow_at_64bits: when true, PMCR.LP selects 64-bit overflow.
 */
static void test_sw_incr(bool overflow_at_64bits)
{
	uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits);
	uint64_t pmcr_lp = overflow_at_64bits ? PMU_PMCR_LP : 0;
	uint32_t events[] = {SW_INCR, SW_INCR};
	/* expected value of counter #0: preset + 100 increments, wrapped */
	uint64_t cntr0 = (pre_overflow + 100) & pmevcntr_mask();
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) ||
	    !check_overflow_prerequisites(overflow_at_64bits))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	write_regn_el0(pmevcntr, 0, pre_overflow);
	isb();

	/* PMCR.E is still clear here: these writes must be ignored */
	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report_info("SW_INCR counter #0 has value 0x%lx", read_regn_el0(pmevcntr, 0));
	/* NOTE(review): "PWSYNC" below looks like a typo for PMSWINC */
	report(read_regn_el0(pmevcntr, 0) == pre_overflow,
		"PWSYNC does not increment if PMCR.E is unset");

	pmu_reset();

	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	isb();

	/* bit mask 0x3 increments both counters per write */
	for (i = 0; i < 100; i++)
		write_sysreg(0x3, pmswinc_el0);

	isb();
	report(read_regn_el0(pmevcntr, 0) == cntr0, "counter #0 after + 100 SW_INCR");
	report(read_regn_el0(pmevcntr, 1) == 100, "counter #1 after + 100 SW_INCR");
	report_info("counter values after 100 SW_INCR #0=0x%lx #1=0x%lx",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
	report(read_sysreg(pmovsclr_el0) == 0x1,
		"overflow on counter #0 after 100 SW_INCR");
}
62366fee034SEric Auger 
/*
 * Verify CHAIN event behavior: an odd counter programmed with CHAIN
 * increments when the preceding even counter overflows, forming a
 * 64-bit pair. Checks single increment, increment on top of an
 * existing value, and wrap of the odd counter itself (which must set
 * the odd overflow bit as well).
 */
static void test_chained_counters(bool unused)
{
	uint32_t events[] = {CPU_CYCLES, CHAIN};
	uint64_t all_set = pmevcntr_mask();

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	/* preset #0 so the cycle loop overflows it at 32 bits */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);

	report(read_regn_el0(pmevcntr, 1) == 1, "CHAIN counter #1 incremented");
	/* only the even counter's overflow bit is expected */
	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #1");

	/* test 64b overflow */

	pmu_reset();
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	/* CHAIN counter starts at 1 and must step to 2 on overflow of #0 */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	write_regn_el0(pmevcntr, 1, 0x1);
	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_regn_el0(pmevcntr, 1) == 2, "CHAIN counter #1 set to 2");
	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #2");

	/* saturate the CHAIN counter so its own increment wraps it to 0 */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	write_regn_el0(pmevcntr, 1, all_set);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_regn_el0(pmevcntr, 1) == 0, "CHAIN counter #1 wrapped");
	report(read_sysreg(pmovsclr_el0) == 0x3, "overflow on even and odd counters");
}
66566fee034SEric Auger 
/*
 * Combine SW_INCR on the even counter with a CHAIN event on the odd
 * counter: software increments that overflow #0 must advance #1.
 * The second phase additionally saturates #1 so the chained increment
 * wraps it, setting both overflow bits.
 */
static void test_chained_sw_incr(bool unused)
{
	uint32_t events[] = {SW_INCR, CHAIN};
	/* expected wrapped values after preset + 100 increments */
	uint64_t cntr0 = (PRE_OVERFLOW_32 + 100) & pmevcntr_mask();
	uint64_t cntr1 = (ALL_SET_32 + 1) & pmevcntr_mask();
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report((read_sysreg(pmovsclr_el0) == 0x1) &&
		(read_regn_el0(pmevcntr, 1) == 1),
		"overflow and chain counter incremented after 100 SW_INCR/CHAIN");
	report_info("overflow=0x%lx, #0=0x%lx #1=0x%lx", read_sysreg(pmovsclr_el0),
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));

	/* 64b SW_INCR and overflow on CHAIN counter*/
	pmu_reset();

	write_regn_el0(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	/* #1 saturated: the chained increment must wrap it to cntr1 */
	write_regn_el0(pmevcntr, 1, ALL_SET_32);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report((read_sysreg(pmovsclr_el0) == 0x3) &&
	       (read_regn_el0(pmevcntr, 0) == cntr0) &&
	       (read_regn_el0(pmevcntr, 1) == cntr1),
	       "expected overflows and values after 100 SW_INCR/CHAIN");
	report_info("overflow=0x%lx, #0=0x%lx #1=0x%lx", read_sysreg(pmovsclr_el0),
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
}
718bb9a5adcSEric Auger 
/*
 * Check transitions between 32-bit counting and CHAIN (64-bit) pairs:
 * enabling/disabling either half of the pair mid-run, and retyping the
 * odd counter between CHAIN and a regular event. Each scenario resets
 * the PMU, runs a fixed MEM_ACCESS loop and checks counter #1 and the
 * overflow status against the expected promotion/demotion behavior.
 */
static void test_chain_promotion(bool unused)
{
	uint32_t events[] = {MEM_ACCESS, CHAIN};
	void *addr = malloc(PAGE_SIZE);

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	/* Only enable CHAIN counter */
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x2, PMCNTENSET_EL0);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report(!read_regn_el0(pmevcntr, 0),
		"chain counter not counting if even counter is disabled");

	/* Only enable even counter */
	pmu_reset();
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	write_sysreg_s(0x1, PMCNTENSET_EL0);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report(!read_regn_el0(pmevcntr, 1) && (read_sysreg(pmovsclr_el0) == 0x1),
		"odd counter did not increment on overflow if disabled");
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));
	report_info("CHAIN counter #1 has value 0x%lx",
		    read_regn_el0(pmevcntr, 1));
	report_info("overflow counter 0x%lx", read_sysreg(pmovsclr_el0));

	/* start at 0xFFFFFFDC, +20 with CHAIN enabled, +20 with CHAIN disabled */
	pmu_reset();
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	/* disable the CHAIN event */
	write_sysreg_s(0x2, PMCNTENCLR_EL0);
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));
	/* #0 now overflows as a plain 32-bit counter */
	report(read_sysreg(pmovsclr_el0) == 0x1,
		"should have triggered an overflow on #0");
	report(!read_regn_el0(pmevcntr, 1),
		"CHAIN counter #1 shouldn't have incremented");

	/* start at 0xFFFFFFDC, +20 with CHAIN disabled, +20 with CHAIN enabled */

	pmu_reset();
	write_sysreg_s(0x1, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	isb();
	report_info("counter #0 = 0x%lx, counter #1 = 0x%lx overflow=0x%lx",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
		    read_sysreg(pmovsclr_el0));

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	/* enable the CHAIN event */
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	isb();
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	report((read_regn_el0(pmevcntr, 1) == 1) &&
		(read_sysreg(pmovsclr_el0) == 0x1),
		"CHAIN counter enabled: CHAIN counter was incremented and overflow");

	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));

	/* start as MEM_ACCESS/CPU_CYCLES and move to CHAIN/MEM_ACCESS */
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	/* 0 becomes CHAINED */
	write_sysreg_s(0x0, PMCNTENSET_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 1, 0x0);

	/* NOTE(review): no isb() before the loop here, unlike prior sections — confirm intentional */
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	report((read_regn_el0(pmevcntr, 1) == 1) &&
		(read_sysreg(pmovsclr_el0) == 0x1),
		"32b->64b: CHAIN counter incremented and overflow");

	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));

	/* start as CHAIN/MEM_ACCESS and move to MEM_ACCESS/CPU_CYCLES */
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("counter #0=0x%lx, counter #1=0x%lx",
			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));

	/* demote: retype #1 from CHAIN to CPU_CYCLES with counters disabled */
	write_sysreg_s(0x0, PMCNTENSET_EL0);
	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report(read_sysreg(pmovsclr_el0) == 1,
		"overflow is expected on counter 0");
	report_info("counter #0=0x%lx, counter #1=0x%lx overflow=0x%lx",
			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
			read_sysreg(pmovsclr_el0));
}
852ca42f29aSEric Auger 
8534f5ef94fSEric Auger static bool expect_interrupts(uint32_t bitmap)
8544f5ef94fSEric Auger {
8554f5ef94fSEric Auger 	int i;
8564f5ef94fSEric Auger 
8574f5ef94fSEric Auger 	if (pmu_stats.bitmap ^ bitmap || pmu_stats.unexpected)
8584f5ef94fSEric Auger 		return false;
8594f5ef94fSEric Auger 
8604f5ef94fSEric Auger 	for (i = 0; i < 32; i++) {
8614f5ef94fSEric Auger 		if (test_and_clear_bit(i, &pmu_stats.bitmap))
8624f5ef94fSEric Auger 			if (pmu_stats.interrupts[i] != 1)
8634f5ef94fSEric Auger 				return false;
8644f5ef94fSEric Auger 	}
8654f5ef94fSEric Auger 	return true;
8664f5ef94fSEric Auger }
8674f5ef94fSEric Auger 
/*
 * Verify overflow interrupt delivery through the GIC (PPI 23):
 * no interrupt while PMINTENSET_EL1 is clear, interrupts on both
 * counters once enabled, and CHAIN-pair interrupt behavior (including
 * that chained counters do not advance when PMCR.LP == 1).
 *
 * @overflow_at_64bits: when true, PMCR.LP selects 64-bit overflow.
 */
static void test_overflow_interrupt(bool overflow_at_64bits)
{
	uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits);
	uint64_t all_set = pmevcntr_mask();
	uint64_t pmcr_lp = overflow_at_64bits ? PMU_PMCR_LP : 0;
	uint32_t events[] = {MEM_ACCESS, SW_INCR};
	void *addr = malloc(PAGE_SIZE);
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) ||
	    !check_overflow_prerequisites(overflow_at_64bits))
		return;

	/* route the PMU overflow PPI (23) to our IRQ handler */
	gic_enable_defaults();
	install_irq_handler(EL1H_IRQ, irq_handler);
	local_irq_enable();
	gic_enable_irq(23);

	pmu_reset();

	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_regn_el0(pmevcntr, 1, pre_overflow);
	isb();

	/* interrupts are disabled (PMINTENSET_EL1 == 0) */

	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	report(expect_interrupts(0), "no overflow interrupt after preset");

	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	isb();

	/* overflow #1 via software increment, still no interrupt expected */
	for (i = 0; i < 100; i++)
		write_sysreg(0x2, pmswinc_el0);

	isb();
	set_pmcr(pmu.pmcr_ro);
	isb();
	report(expect_interrupts(0), "no overflow interrupt after counting");

	/* enable interrupts (PMINTENSET_EL1 <= ALL_SET_32) */

	pmu_reset_stats();

	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_regn_el0(pmevcntr, 1, pre_overflow);
	write_sysreg(ALL_SET_32, pmintenset_el1);
	isb();

	/* overflow both counters, then run with counting disabled to drain */
	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	for (i = 0; i < 100; i++)
		write_sysreg(0x3, pmswinc_el0);

	mem_access_loop(addr, 200, pmu.pmcr_ro);
	report_info("overflow=0x%lx", read_sysreg(pmovsclr_el0));
	report(expect_interrupts(0x3),
		"overflow interrupts expected on #0 and #1");

	/*
	 * promote to 64-b:
	 *
	 * This only applies to the !overflow_at_64bits case, as
	 * overflow_at_64bits doesn't implement CHAIN events. The
	 * overflow_at_64bits case just checks that chained counters are
	 * not incremented when PMCR.LP == 1.
	 */

	pmu_reset_stats();

	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, pre_overflow);
	isb();
	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	report(expect_interrupts(0x1), "expect overflow interrupt");

	/* overflow on odd counter */
	pmu_reset_stats();
	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_regn_el0(pmevcntr, 1, all_set);
	isb();
	mem_access_loop(addr, 400, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	if (overflow_at_64bits) {
		/* with PMCR.LP the CHAIN-typed odd counter must stay put */
		report(expect_interrupts(0x1),
		       "expect overflow interrupt on even counter");
		report(read_regn_el0(pmevcntr, 1) == all_set,
		       "Odd counter did not change");
	} else {
		report(expect_interrupts(0x3),
		       "expect overflow interrupt on even and odd counter");
		report(read_regn_el0(pmevcntr, 1) != all_set,
		       "Odd counter wrapped");
	}
}
9644244065bSChristopher Covington #endif
9654244065bSChristopher Covington 
/*
 * Ensure that the cycle counter progresses between back-to-back reads.
 * Enables only the cycle counter (64-bit, long-cycle mode) and takes
 * NR_SAMPLES read pairs; each second read must be strictly greater
 * than the first. Returns true when all pairs increased.
 */
static bool check_cycles_increase(void)
{
	bool success = true;

	/* init before event access, this test only cares about cycle count */
	pmu_reset();
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	/* LC: long cycle count; C: reset cycle counter; E: enable */
	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
	isb();

	for (int i = 0; i < NR_SAMPLES; i++) {
		uint64_t a, b;

		a = get_pmccntr();
		b = get_pmccntr();

		if (a >= b) {
			printf("Read %"PRId64" then %"PRId64".\n", a, b);
			success = false;
			break;
		}
	}

	/* stop counting before returning */
	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
	isb();

	return success;
}
999d81bb7a3SChristopher Covington 
/*
 * Execute a known number of guest instructions. Only even instruction
 * counts >= 4 are supported by the in-line assembly loop. PMCR_EL0 is
 * initialized with @pmcr (allowing e.g. the cycle or event counters to
 * be reset); at the end of the loop zero is written to PMCR_EL0 to stop
 * counting, so the counters can be read afterwards at leisure.
 */
static void measure_instrs(int num, uint32_t pmcr)
{
	assert(num >= 4 && !((num - 2) % 2));

	/* each loop iteration accounts for two instructions, plus two more */
	precise_instrs_loop((num - 2) / 2, pmcr);
}
10168f76a347SChristopher Covington 
/*
 * Measure cycle counts for various known instruction counts. Ensure that the
 * cycle counter progresses (similar to check_cycles_increase() but with more
 * instructions and using reset and stop controls). If supplied a positive,
 * nonzero CPI parameter, it also strictly checks that every measurement matches
 * it. Strict CPI checking is used to test -icount mode.
 */
static bool check_cpi(int cpi)
{
	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;

	/* init before event access, this test only cares about cycle count */
	pmu_reset();
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	if (cpi > 0)
		printf("Checking for CPI=%d.\n", cpi);
	printf("instrs : cycles0 cycles1 ...\n");

	/* sample instruction counts from 4 upwards in steps of 32 */
	for (unsigned int i = 4; i < 300; i += 32) {
		uint64_t avg, sum = 0;

		printf("%4d:", i);
		for (int j = 0; j < NR_SAMPLES; j++) {
			uint64_t cycles;

			/* reset the cycle counter, then run exactly i instructions */
			set_pmccntr(0);
			measure_instrs(i, pmcr);
			cycles = get_pmccntr();
			printf(" %4"PRId64"", cycles);

			if (!cycles) {
				printf("\ncycles not incrementing!\n");
				return false;
			} else if (cpi > 0 && cycles != i * cpi) {
				printf("\nunexpected cycle count received!\n");
				return false;
			} else if ((cycles >> 32) != 0) {
				/* The cycles taken by the loop above should
				 * fit in 32 bits easily. We check the upper
				 * 32 bits of the cycle counter to make sure
				 * there is no surprise. */
				printf("\ncycle count bigger than 32bit!\n");
				return false;
			}

			sum += cycles;
		}
		/* report the average and the dominant direction of the ratio */
		avg = sum / NR_SAMPLES;
		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
		       (avg >= i) ? "cpi" : "ipc",
		       (avg >= i) ? avg / i : i / avg);
	}

	return true;
}
10748f76a347SChristopher Covington 
/*
 * On 32-bit hosts with a PMUv3, check that the 64-bit PMCCNTR access
 * (PMCCNTR64) reads back a written value. Gated behind the
 * ERRATA_9e3f7a296940 switch because the access is unsafe on
 * unpatched hosts; a no-op on arm64 builds.
 */
static void pmccntr64_test(void)
{
#ifdef __arm__
	if (pmu.version == ID_DFR0_PMU_V3) {
		if (ERRATA(9e3f7a296940)) {
			write_sysreg(0xdead, PMCCNTR64);
			report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
		} else
			report_skip("Skipping unsafe pmccntr64 test. Set ERRATA_9e3f7a296940=y to enable.");
	}
#endif
}
10874c357610SAndrew Jones 
10884244065bSChristopher Covington /* Return FALSE if no PMU found, otherwise return TRUE */
108923b8916bSThomas Huth static bool pmu_probe(void)
10904244065bSChristopher Covington {
10911e4f5392SAlexandru Elisei 	uint32_t pmcr;
109246ca10f4SAlexandru Elisei 	uint8_t implementer;
1093eff8f161SEric Auger 
10948f747a85SEric Auger 	pmu.version = get_pmu_version();
1095784ee933SEric Auger 	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
1096eff8f161SEric Auger 		return false;
1097eff8f161SEric Auger 
1098784ee933SEric Auger 	report_info("PMU version: 0x%x", pmu.version);
1099eff8f161SEric Auger 
11001e4f5392SAlexandru Elisei 	pmcr = get_pmcr();
110146ca10f4SAlexandru Elisei 	implementer = (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK;
110246ca10f4SAlexandru Elisei 	report_info("PMU implementer/ID code: %#"PRIx32"(\"%c\")/%#"PRIx32,
1103eff8f161SEric Auger 		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
110446ca10f4SAlexandru Elisei 		    implementer ? implementer : ' ',
11058f747a85SEric Auger 		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);
11068f747a85SEric Auger 
11078f747a85SEric Auger 	/* store read-only and RES0 fields of the PMCR bottom-half*/
11088f747a85SEric Auger 	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
11098f747a85SEric Auger 	pmu.nb_implemented_counters =
11108f747a85SEric Auger 		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
11118f747a85SEric Auger 	report_info("Implements %d event counters",
11128f747a85SEric Auger 		    pmu.nb_implemented_counters);
1113eff8f161SEric Auger 
1114eff8f161SEric Auger 	return true;
11154244065bSChristopher Covington }
11164244065bSChristopher Covington 
/*
 * Run @test with @arg under a two-level report prefix: @name/@prefix.
 * The prefixes are popped again once the test returns.
 */
static void run_test(const char *name, const char *prefix,
		     void (*test)(bool), void *arg)
{
	report_prefix_push(name);
	report_prefix_push(prefix);
	test(arg);
	report_prefix_pop();
	report_prefix_pop();
}
1128041df25bSRicardo Koller 
/*
 * Run an event-counter sub-test once, labelled with whether counter
 * overflow is taken at 32 or 64 bits.
 *
 * @name: sub-test name used as the outer report prefix (not modified,
 *        hence const-qualified; callers passing char * are unaffected)
 * @test: test body, receives the overflow_at_64bits flag
 * @overflow_at_64bits: true to exercise 64-bit overflow behaviour
 */
static void run_event_test(const char *name, void (*test)(bool),
			   bool overflow_at_64bits)
{
	const char *prefix = overflow_at_64bits ? "64-bit overflows"
						: "32-bit overflows";

	/*
	 * The flag travels through run_test's void * argument; widen to
	 * unsigned long first so the integer-to-pointer cast is
	 * pointer-sized and warning-free on LP64 targets.
	 */
	run_test(name, prefix, test, (void *)(unsigned long)overflow_at_64bits);
}
1137041df25bSRicardo Koller 
11388f76a347SChristopher Covington int main(int argc, char *argv[])
11394244065bSChristopher Covington {
11408f76a347SChristopher Covington 	int cpi = 0;
11418f76a347SChristopher Covington 
11424244065bSChristopher Covington 	if (!pmu_probe()) {
11434244065bSChristopher Covington 		printf("No PMU found, test skipped...\n");
11444244065bSChristopher Covington 		return report_summary();
11454244065bSChristopher Covington 	}
11464244065bSChristopher Covington 
114757ec1086SEric Auger 	if (argc < 2)
114857ec1086SEric Auger 		report_abort("no test specified");
114957ec1086SEric Auger 
11504244065bSChristopher Covington 	report_prefix_push("pmu");
11514244065bSChristopher Covington 
115257ec1086SEric Auger 	if (strcmp(argv[1], "cycle-counter") == 0) {
115357ec1086SEric Auger 		report_prefix_push(argv[1]);
115457ec1086SEric Auger 		if (argc > 2)
115557ec1086SEric Auger 			cpi = atol(argv[2]);
1156a299895bSThomas Huth 		report(check_cycles_increase(),
1157a299895bSThomas Huth 		       "Monotonically increasing cycle count");
1158a299895bSThomas Huth 		report(check_cpi(cpi), "Cycle/instruction ratio");
11594c357610SAndrew Jones 		pmccntr64_test();
116057ec1086SEric Auger 		report_prefix_pop();
11614870738cSEric Auger 	} else if (strcmp(argv[1], "pmu-event-introspection") == 0) {
11624870738cSEric Auger 		report_prefix_push(argv[1]);
11634870738cSEric Auger 		test_event_introspection();
11644870738cSEric Auger 		report_prefix_pop();
11654ce2a804SEric Auger 	} else if (strcmp(argv[1], "pmu-event-counter-config") == 0) {
11664ce2a804SEric Auger 		report_prefix_push(argv[1]);
11674ce2a804SEric Auger 		test_event_counter_config();
11684ce2a804SEric Auger 		report_prefix_pop();
11694ce2a804SEric Auger 	} else if (strcmp(argv[1], "pmu-basic-event-count") == 0) {
1170041df25bSRicardo Koller 		run_event_test(argv[1], test_basic_event_count, false);
1171036369c5SRicardo Koller 		run_event_test(argv[1], test_basic_event_count, true);
11724ce2a804SEric Auger 	} else if (strcmp(argv[1], "pmu-mem-access") == 0) {
1173041df25bSRicardo Koller 		run_event_test(argv[1], test_mem_access, false);
1174036369c5SRicardo Koller 		run_event_test(argv[1], test_mem_access, true);
1175bb9a5adcSEric Auger 	} else if (strcmp(argv[1], "pmu-sw-incr") == 0) {
1176041df25bSRicardo Koller 		run_event_test(argv[1], test_sw_incr, false);
1177036369c5SRicardo Koller 		run_event_test(argv[1], test_sw_incr, true);
117866fee034SEric Auger 	} else if (strcmp(argv[1], "pmu-chained-counters") == 0) {
1179041df25bSRicardo Koller 		run_event_test(argv[1], test_chained_counters, false);
118066fee034SEric Auger 	} else if (strcmp(argv[1], "pmu-chained-sw-incr") == 0) {
1181041df25bSRicardo Koller 		run_event_test(argv[1], test_chained_sw_incr, false);
1182ca42f29aSEric Auger 	} else if (strcmp(argv[1], "pmu-chain-promotion") == 0) {
1183041df25bSRicardo Koller 		run_event_test(argv[1], test_chain_promotion, false);
11844f5ef94fSEric Auger 	} else if (strcmp(argv[1], "pmu-overflow-interrupt") == 0) {
1185041df25bSRicardo Koller 		run_event_test(argv[1], test_overflow_interrupt, false);
1186036369c5SRicardo Koller 		run_event_test(argv[1], test_overflow_interrupt, true);
118757ec1086SEric Auger 	} else {
118857ec1086SEric Auger 		report_abort("Unknown sub-test '%s'", argv[1]);
118957ec1086SEric Auger 	}
11904c357610SAndrew Jones 
11914244065bSChristopher Covington 	return report_summary();
11924244065bSChristopher Covington }
1193