xref: /kvm-unit-tests/arm/pmu.c (revision e0a6e56b6314bda7f5e9292dc4a978b92074f5eb)
14244065bSChristopher Covington /*
24244065bSChristopher Covington  * Test the ARM Performance Monitors Unit (PMU).
34244065bSChristopher Covington  *
44244065bSChristopher Covington  * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
54244065bSChristopher Covington  * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
64244065bSChristopher Covington  *
74244065bSChristopher Covington  * This program is free software; you can redistribute it and/or modify it
84244065bSChristopher Covington  * under the terms of the GNU Lesser General Public License version 2.1 and
94244065bSChristopher Covington  * only version 2.1 as published by the Free Software Foundation.
104244065bSChristopher Covington  *
114244065bSChristopher Covington  * This program is distributed in the hope that it will be useful, but WITHOUT
124244065bSChristopher Covington  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
134244065bSChristopher Covington  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
144244065bSChristopher Covington  * for more details.
154244065bSChristopher Covington  */
164244065bSChristopher Covington #include "libcflat.h"
174c357610SAndrew Jones #include "errata.h"
184244065bSChristopher Covington #include "asm/barrier.h"
194244065bSChristopher Covington #include "asm/sysreg.h"
204244065bSChristopher Covington #include "asm/processor.h"
214870738cSEric Auger #include <bitops.h>
224ce2a804SEric Auger #include <asm/gic.h>
234244065bSChristopher Covington 
24d81bb7a3SChristopher Covington #define PMU_PMCR_E         (1 << 0)
254ce2a804SEric Auger #define PMU_PMCR_P         (1 << 1)
26d81bb7a3SChristopher Covington #define PMU_PMCR_C         (1 << 2)
274ce2a804SEric Auger #define PMU_PMCR_D         (1 << 3)
284ce2a804SEric Auger #define PMU_PMCR_X         (1 << 4)
294ce2a804SEric Auger #define PMU_PMCR_DP        (1 << 5)
30d81bb7a3SChristopher Covington #define PMU_PMCR_LC        (1 << 6)
314244065bSChristopher Covington #define PMU_PMCR_N_SHIFT   11
324244065bSChristopher Covington #define PMU_PMCR_N_MASK    0x1f
334244065bSChristopher Covington #define PMU_PMCR_ID_SHIFT  16
344244065bSChristopher Covington #define PMU_PMCR_ID_MASK   0xff
354244065bSChristopher Covington #define PMU_PMCR_IMP_SHIFT 24
364244065bSChristopher Covington #define PMU_PMCR_IMP_MASK  0xff
374244065bSChristopher Covington 
38d81bb7a3SChristopher Covington #define PMU_CYCLE_IDX      31
39d81bb7a3SChristopher Covington 
40d81bb7a3SChristopher Covington #define NR_SAMPLES 10
41d81bb7a3SChristopher Covington 
424870738cSEric Auger /* Some PMU events */
434870738cSEric Auger #define SW_INCR			0x0
444870738cSEric Auger #define INST_RETIRED		0x8
454870738cSEric Auger #define CPU_CYCLES		0x11
464ce2a804SEric Auger #define MEM_ACCESS		0x13
474870738cSEric Auger #define INST_PREC		0x1B
484870738cSEric Auger #define STALL_FRONTEND		0x23
494870738cSEric Auger #define STALL_BACKEND		0x24
5066fee034SEric Auger #define CHAIN			0x1E
514870738cSEric Auger 
524870738cSEric Auger #define COMMON_EVENTS_LOW	0x0
534870738cSEric Auger #define COMMON_EVENTS_HIGH	0x3F
544870738cSEric Auger #define EXT_COMMON_EVENTS_LOW	0x4000
554870738cSEric Auger #define EXT_COMMON_EVENTS_HIGH	0x403F
564870738cSEric Auger 
574ce2a804SEric Auger #define ALL_SET			0xFFFFFFFF
584ce2a804SEric Auger #define ALL_CLEAR		0x0
594ce2a804SEric Auger #define PRE_OVERFLOW		0xFFFFFFF0
60ca42f29aSEric Auger #define PRE_OVERFLOW2		0xFFFFFFDC
614ce2a804SEric Auger 
624f5ef94fSEric Auger #define PMU_PPI			23
634f5ef94fSEric Auger 
/* Characteristics of the PMU under test. */
struct pmu {
	unsigned int version;			/* PMU arch version, ID_DFR0_PMU_* encoding */
	unsigned int nb_implemented_counters;	/* number of implemented event counters */
	uint32_t pmcr_ro;			/* PMCR bits this code treats as read-only */
};
698f747a85SEric Auger 
/* Overflow-interrupt statistics collected by irq_handler(). */
struct pmu_stats {
	unsigned long bitmap;		/* counters that overflowed at least once */
	uint32_t interrupts[32];	/* overflow interrupt count per counter index */
	bool unexpected;		/* an interrupt other than PMU_PPI was taken */
};
754f5ef94fSEric Auger 
768f747a85SEric Auger static struct pmu pmu;
778f747a85SEric Auger 
784244065bSChristopher Covington #if defined(__arm__)
79098add54SAndrew Jones #define ID_DFR0_PERFMON_SHIFT 24
80098add54SAndrew Jones #define ID_DFR0_PERFMON_MASK  0xf
81098add54SAndrew Jones 
82784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
83784ee933SEric Auger #define ID_DFR0_PMU_V1		0b0001
84784ee933SEric Auger #define ID_DFR0_PMU_V2		0b0010
85784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0011
86784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
87784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
88784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
89784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
90784ee933SEric Auger 
914244065bSChristopher Covington #define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
924244065bSChristopher Covington #define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
93d81bb7a3SChristopher Covington #define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
94d81bb7a3SChristopher Covington #define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
95d81bb7a3SChristopher Covington #define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
96d81bb7a3SChristopher Covington #define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
97d81bb7a3SChristopher Covington #define PMCCNTR64    __ACCESS_CP15_64(0, c9)
984244065bSChristopher Covington 
/* Read ID_DFR0, which carries the PerfMon (PMU version) field on ARMv7. */
static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
/* PMCR (PMU control register) accessors. */
static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
/* Enable the counters whose bits are set in @v (PMCNTENSET). */
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }
103d81bb7a3SChristopher Covington 
/*
 * Return the PMU version field (ID_DFR0.PerfMon), one of the
 * ID_DFR0_PMU_* encodings above.
 */
static inline uint8_t get_pmu_version(void)
{
	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
}
108098add54SAndrew Jones 
/* Read the cycle counter through its 32-bit view (PMCCNTR). */
static inline uint64_t get_pmccntr(void)
{
	return read_sysreg(PMCCNTR32);
}
113d81bb7a3SChristopher Covington 
/* Write the low 32 bits of @value to the cycle counter's 32-bit view. */
static inline void set_pmccntr(uint64_t value)
{
	write_sysreg(value & 0xffffffff, PMCCNTR32);
}
1188f76a347SChristopher Covington 
/* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */
static inline void set_pmccfiltr(uint32_t value)
{
	/* Select the cycle counter (index 31), then program its event/filter. */
	write_sysreg(PMU_CYCLE_IDX, PMSELR);
	write_sysreg(value, PMXEVTYPER);
	isb();
}
1268f76a347SChristopher Covington 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions were inserted to make sure
 * pmccntr read after this function returns the exact instructions executed in
 * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop.
 *
 * @loop: number of iterations of the two-instruction subs/bgt loop
 * @pmcr: PMCR value written to start counting; PMCR is zeroed at the end
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	asm volatile(
	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	bgt	1b\n"
	"	mcr	p15, 0, %[z], c9, c12, 0\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr), [z] "r" (0)
	: "cc");
}
1474870738cSEric Auger 
/*
 * Event counter tests are only implemented for aarch64; on arm these
 * empty stubs keep the rest of the file buildable.
 */
static void test_event_introspection(void) {}
static void test_event_counter_config(void) {}
static void test_basic_event_count(void) {}
static void test_mem_access(void) {}
static void test_sw_incr(void) {}
static void test_chained_counters(void) {}
static void test_chained_sw_incr(void) {}
static void test_chain_promotion(void) {}
static void test_overflow_interrupt(void) {}
1584870738cSEric Auger 
1594244065bSChristopher Covington #elif defined(__aarch64__)
160098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_SHIFT 8
161098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_MASK  0xf
162098add54SAndrew Jones 
163784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL	0b0000
164784ee933SEric Auger #define ID_DFR0_PMU_V3		0b0001
165784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1	0b0100
166784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4	0b0101
167784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5	0b0110
168784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF	0b1111
169784ee933SEric Auger 
/* Raw accessors for the aarch64 PMU system registers. */
static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
/* Enable the counters whose bits are set in @v. */
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
/* Program the cycle counter's event filter. */
static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }
1778f76a347SChristopher Covington 
178098add54SAndrew Jones static inline uint8_t get_pmu_version(void)
179098add54SAndrew Jones {
180098add54SAndrew Jones 	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;
181784ee933SEric Auger 	return ver;
182098add54SAndrew Jones }
183098add54SAndrew Jones 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
 *
 * @loop: number of iterations of the two-instruction subs/b.gt loop
 * @pmcr: PMCR_EL0 value written to start counting; PMCR_EL0 is zeroed at
 *        the end
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	/* msr takes a 64-bit source register */
	uint64_t pmcr64 = pmcr;
	asm volatile(
	"	msr	pmcr_el0, %[pmcr]\n"
	"	isb\n"
	"1:	subs	%w[loop], %w[loop], #1\n"
	"	b.gt	1b\n"
	"	msr	pmcr_el0, xzr\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr64)
	: "cc");
}
2054870738cSEric Auger 
2064870738cSEric Auger #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
2074ce2a804SEric Auger #define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
2084ce2a804SEric Auger #define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
2094ce2a804SEric Auger 
2104ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL1 BIT(31)
2114ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL0 BIT(30)
2124870738cSEric Auger 
2134870738cSEric Auger static bool is_event_supported(uint32_t n, bool warn)
2144870738cSEric Auger {
2154870738cSEric Auger 	uint64_t pmceid0 = read_sysreg(pmceid0_el0);
2164870738cSEric Auger 	uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0);
2174870738cSEric Auger 	bool supported;
2184870738cSEric Auger 	uint64_t reg;
2194870738cSEric Auger 
2204870738cSEric Auger 	/*
2214870738cSEric Auger 	 * The low 32-bits of PMCEID0/1 respectively describe
2224870738cSEric Auger 	 * event support for events 0-31/32-63. Their High
2234870738cSEric Auger 	 * 32-bits describe support for extended events
2244870738cSEric Auger 	 * starting at 0x4000, using the same split.
2254870738cSEric Auger 	 */
2264870738cSEric Auger 	assert((n >= COMMON_EVENTS_LOW  && n <= COMMON_EVENTS_HIGH) ||
2274870738cSEric Auger 	       (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH));
2284870738cSEric Auger 
2294870738cSEric Auger 	if (n <= COMMON_EVENTS_HIGH)
2304870738cSEric Auger 		reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32);
2314870738cSEric Auger 	else
2324870738cSEric Auger 		reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32);
2334870738cSEric Auger 
2344870738cSEric Auger 	supported =  reg & (1UL << (n & 0x3F));
2354870738cSEric Auger 
2364870738cSEric Auger 	if (!supported && warn)
2374870738cSEric Auger 		report_info("event 0x%x is not supported", n);
2384870738cSEric Auger 	return supported;
2394870738cSEric Auger }
2404870738cSEric Auger 
2414870738cSEric Auger static void test_event_introspection(void)
2424870738cSEric Auger {
2434870738cSEric Auger 	bool required_events;
2444870738cSEric Auger 
2454870738cSEric Auger 	if (!pmu.nb_implemented_counters) {
2464870738cSEric Auger 		report_skip("No event counter, skip ...");
2474870738cSEric Auger 		return;
2484870738cSEric Auger 	}
2494870738cSEric Auger 
2504870738cSEric Auger 	/* PMUv3 requires an implementation includes some common events */
2514870738cSEric Auger 	required_events = is_event_supported(SW_INCR, true) &&
2524870738cSEric Auger 			  is_event_supported(CPU_CYCLES, true) &&
2534870738cSEric Auger 			  (is_event_supported(INST_RETIRED, true) ||
2544870738cSEric Auger 			   is_event_supported(INST_PREC, true));
2554870738cSEric Auger 
2564870738cSEric Auger 	if (pmu.version >= ID_DFR0_PMU_V3_8_1) {
2574870738cSEric Auger 		required_events = required_events &&
2584870738cSEric Auger 				  is_event_supported(STALL_FRONTEND, true) &&
2594870738cSEric Auger 				  is_event_supported(STALL_BACKEND, true);
2604870738cSEric Auger 	}
2614870738cSEric Auger 
2624870738cSEric Auger 	report(required_events, "Check required events are implemented");
2634870738cSEric Auger }
2644870738cSEric Auger 
/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Loads @loop times the data at @address into x9.
 * PMCR_EL0 is set to @pmcr on entry and zeroed on exit.
 */
static void mem_access_loop(void *addr, long loop, uint32_t pmcr)
{
	/* msr takes a 64-bit source register */
	uint64_t pmcr64 = pmcr;
asm volatile(
	"       msr     pmcr_el0, %[pmcr]\n"
	"       isb\n"
	"       mov     x10, %[loop]\n"
	"1:     sub     x10, x10, #1\n"
	"       ldr	x9, [%[addr]]\n"
	"       cmp     x10, #0x0\n"
	"       b.gt    1b\n"
	"       msr     pmcr_el0, xzr\n"
	"       isb\n"
	:
	: [addr] "r" (addr), [pmcr] "r" (pmcr64), [loop] "r" (loop)
	: "x9", "x10", "cc");
}
2894ce2a804SEric Auger 
2904f5ef94fSEric Auger static struct pmu_stats pmu_stats;
2914f5ef94fSEric Auger 
/*
 * IRQ handler: tally PMU overflow interrupts into pmu_stats and flag
 * any interrupt that is not the PMU PPI as unexpected.
 */
static void irq_handler(struct pt_regs *regs)
{
	uint32_t irqstat, irqnr;

	/* Acknowledge the interrupt and extract its number */
	irqstat = gic_read_iar();
	irqnr = gic_iar_irqnr(irqstat);

	if (irqnr == PMU_PPI) {
		unsigned long overflows = read_sysreg(pmovsclr_el0);
		int i;

		/* Record each counter whose overflow flag is set */
		for (i = 0; i < 32; i++) {
			if (test_and_clear_bit(i, &overflows)) {
				pmu_stats.interrupts[i]++;
				pmu_stats.bitmap |= 1 << i;
			}
		}
		/* Clear all overflow flags before completing the interrupt */
		write_sysreg(ALL_SET, pmovsclr_el0);
		isb();
	} else {
		pmu_stats.unexpected = true;
	}
	gic_write_eoir(irqstat);
}
3164f5ef94fSEric Auger 
3174f5ef94fSEric Auger static void pmu_reset_stats(void)
3184f5ef94fSEric Auger {
3194f5ef94fSEric Auger 	int i;
3204f5ef94fSEric Auger 
3214f5ef94fSEric Auger 	for (i = 0; i < 32; i++)
3224f5ef94fSEric Auger 		pmu_stats.interrupts[i] = 0;
3234f5ef94fSEric Auger 
3244f5ef94fSEric Auger 	pmu_stats.bitmap = 0;
3254f5ef94fSEric Auger 	pmu_stats.unexpected = false;
3264f5ef94fSEric Auger }
3274f5ef94fSEric Auger 
/*
 * Bring the PMU back to a quiescent state: all counters reset and
 * disabled, overflow flags and overflow interrupts cleared, software
 * statistics zeroed.
 */
static void pmu_reset(void)
{
	/* reset all counters, counting disabled at PMCR level*/
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	/* Disable all counters */
	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
	/* clear overflow reg */
	write_sysreg(ALL_SET, pmovsclr_el0);
	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET, pmintenclr_el1);
	pmu_reset_stats();
	isb();
}
3414ce2a804SEric Auger 
/*
 * Check event counter configuration through the banked
 * PMSELR/PMXEVTYPER/PMXEVCNTR interface and the behavior of a counter
 * associated with an unsupported event.
 */
static void test_event_counter_config(void)
{
	int i;

	if (!pmu.nb_implemented_counters) {
		report_skip("No event counter, skip ...");
		return;
	}

	pmu_reset();

	/*
	 * Test setting through PMESELR/PMXEVTYPER and PMEVTYPERn read,
	 * select counter 1 (the original comment said counter 0, but the
	 * write below and the read_regn_el0(..., 1) checks use counter 1)
	 */
	write_sysreg(1, PMSELR_EL0);
	/* program this counter to count unsupported event */
	write_sysreg(0xEA, PMXEVTYPER_EL0);
	write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
	report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
		"PMESELR/PMXEVTYPER/PMEVTYPERn");
	report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
		"PMESELR/PMXEVCNTR/PMEVCNTRn");

	/* try to configure an unsupported event within the range [0x0, 0x3F] */
	for (i = 0; i <= 0x3F; i++) {
		if (!is_event_supported(i, false))
			break;
	}
	if (i > 0x3F) {
		report_skip("pmevtyper: all events within [0x0, 0x3F] are supported");
		return;
	}

	/* select counter 0 */
	write_sysreg(0, PMSELR_EL0);
	/*
	 * NOTE(review): the original comment says "program this counter to
	 * count unsupported event", but the write below targets
	 * PMXEVCNTR_EL0 (the counter value), not PMXEVTYPER_EL0 (the event
	 * type) -- confirm whether PMXEVTYPER_EL0 was intended here.
	 */
	write_sysreg(i, PMXEVCNTR_EL0);
	/* read the counter value */
	read_sysreg(PMXEVCNTR_EL0);
	report(read_sysreg(PMXEVCNTR_EL0) == i,
		"read of a counter programmed with unsupported event");
}
3854ce2a804SEric Auger 
3864ce2a804SEric Auger static bool satisfy_prerequisites(uint32_t *events, unsigned int nb_events)
3874ce2a804SEric Auger {
3884ce2a804SEric Auger 	int i;
3894ce2a804SEric Auger 
3904ce2a804SEric Auger 	if (pmu.nb_implemented_counters < nb_events) {
3914ce2a804SEric Auger 		report_skip("Skip test as number of counters is too small (%d)",
3924ce2a804SEric Auger 			    pmu.nb_implemented_counters);
3934ce2a804SEric Auger 		return false;
3944ce2a804SEric Auger 	}
3954ce2a804SEric Auger 
3964ce2a804SEric Auger 	for (i = 0; i < nb_events; i++) {
3974ce2a804SEric Auger 		if (!is_event_supported(events[i], false)) {
3984ce2a804SEric Auger 			report_skip("Skip test as event 0x%x is not supported",
3994ce2a804SEric Auger 				    events[i]);
4004ce2a804SEric Auger 			return false;
4014ce2a804SEric Auger 		}
4024ce2a804SEric Auger 	}
4034ce2a804SEric Auger 	return true;
4044ce2a804SEric Auger }
4054ce2a804SEric Auger 
/*
 * Exercise the basic counter control interface: enable/disable through
 * PMCNTENSET/CLR (including attempts on unimplemented counters),
 * overflow flag and overflow interrupt enable registers, and check a
 * counter preset close to its overflow value actually overflows.
 */
static void test_basic_event_count(void)
{
	uint32_t implemented_counter_mask, non_implemented_counter_mask;
	uint32_t counter_mask;
	uint32_t events[] = {CPU_CYCLES, INST_RETIRED};

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	/* Bit 31 is the cycle counter; the rest split by PMCR.N */
	implemented_counter_mask = BIT(pmu.nb_implemented_counters) - 1;
	non_implemented_counter_mask = ~(BIT(31) | implemented_counter_mask);
	counter_mask = implemented_counter_mask | non_implemented_counter_mask;

	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, INST_RETIRED | PMEVTYPER_EXCLUDE_EL0);

	/* disable all counters */
	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
	report(!read_sysreg_s(PMCNTENCLR_EL0) && !read_sysreg_s(PMCNTENSET_EL0),
		"pmcntenclr: disable all counters");

	/*
	 * clear cycle and all event counters and allow counter enablement
	 * through PMCNTENSET. LC is RES1.
	 */
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	isb();
	report(get_pmcr() == (pmu.pmcr_ro | PMU_PMCR_LC), "pmcr: reset counters");

	/* Preset counter #0 to pre overflow value to trigger an overflow */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
		"counter #0 preset to pre-overflow value");
	report(!read_regn_el0(pmevcntr, 1), "counter #1 is 0");

	/*
	 * Enable all implemented counters and also attempt to enable
	 * not supported counters. Counting still is disabled by !PMCR.E
	 */
	write_sysreg_s(counter_mask, PMCNTENSET_EL0);

	/* check only those implemented are enabled */
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == implemented_counter_mask),
		"pmcntenset: enabled implemented_counters");

	/* Disable all counters but counters #0 and #1 */
	write_sysreg_s(~0x3, PMCNTENCLR_EL0);
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == 0x3),
		"pmcntenset: just enabled #0 and #1");

	/* clear overflow register */
	write_sysreg(ALL_SET, pmovsclr_el0);
	report(!read_sysreg(pmovsclr_el0), "check overflow reg is 0");

	/* disable overflow interrupts on all counters*/
	write_sysreg(ALL_SET, pmintenclr_el1);
	report(!read_sysreg(pmintenclr_el1),
		"pmintenclr_el1=0, all interrupts disabled");

	/* enable overflow interrupts on all event counters */
	write_sysreg(implemented_counter_mask | non_implemented_counter_mask,
		     pmintenset_el1);
	report(read_sysreg(pmintenset_el1) == implemented_counter_mask,
		"overflow interrupts enabled on all implemented counters");

	/* Set PMCR.E, execute asm code and unset PMCR.E */
	precise_instrs_loop(20, pmu.pmcr_ro | PMU_PMCR_E);

	report_info("counter #0 is 0x%lx (CPU_CYCLES)",
		    read_regn_el0(pmevcntr, 0));
	report_info("counter #1 is 0x%lx (INST_RETIRED)",
		    read_regn_el0(pmevcntr, 1));

	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_sysreg(pmovsclr_el0) & 0x1,
		"check overflow happened on #0 only");
}
4854ce2a804SEric Auger 
4864ce2a804SEric Auger static void test_mem_access(void)
4874ce2a804SEric Auger {
4884ce2a804SEric Auger 	void *addr = malloc(PAGE_SIZE);
4894ce2a804SEric Auger 	uint32_t events[] = {MEM_ACCESS, MEM_ACCESS};
4904ce2a804SEric Auger 
4914ce2a804SEric Auger 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
4924ce2a804SEric Auger 		return;
4934ce2a804SEric Auger 
4944ce2a804SEric Auger 	pmu_reset();
4954ce2a804SEric Auger 
4964ce2a804SEric Auger 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
4974ce2a804SEric Auger 	write_regn_el0(pmevtyper, 1, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
4984ce2a804SEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
4994ce2a804SEric Auger 	isb();
5004ce2a804SEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
5014ce2a804SEric Auger 	report_info("counter #0 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 0));
5024ce2a804SEric Auger 	report_info("counter #1 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 1));
5034ce2a804SEric Auger 	/* We may measure more than 20 mem access depending on the core */
5044ce2a804SEric Auger 	report((read_regn_el0(pmevcntr, 0) == read_regn_el0(pmevcntr, 1)) &&
5054ce2a804SEric Auger 	       (read_regn_el0(pmevcntr, 0) >= 20) && !read_sysreg(pmovsclr_el0),
5064ce2a804SEric Auger 	       "Ran 20 mem accesses");
5074ce2a804SEric Auger 
5084ce2a804SEric Auger 	pmu_reset();
5094ce2a804SEric Auger 
5104ce2a804SEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
5114ce2a804SEric Auger 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
5124ce2a804SEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
5134ce2a804SEric Auger 	isb();
5144ce2a804SEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
5154ce2a804SEric Auger 	report(read_sysreg(pmovsclr_el0) == 0x3,
5164ce2a804SEric Auger 	       "Ran 20 mem accesses with expected overflows on both counters");
5174ce2a804SEric Auger 	report_info("cnt#0 = %ld cnt#1=%ld overflow=0x%lx",
5184ce2a804SEric Auger 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
5194ce2a804SEric Auger 			read_sysreg(pmovsclr_el0));
5204ce2a804SEric Auger }
5214ce2a804SEric Auger 
522bb9a5adcSEric Auger static void test_sw_incr(void)
523bb9a5adcSEric Auger {
524bb9a5adcSEric Auger 	uint32_t events[] = {SW_INCR, SW_INCR};
525bb9a5adcSEric Auger 	int i;
526bb9a5adcSEric Auger 
527bb9a5adcSEric Auger 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
528bb9a5adcSEric Auger 		return;
529bb9a5adcSEric Auger 
530bb9a5adcSEric Auger 	pmu_reset();
531bb9a5adcSEric Auger 
532bb9a5adcSEric Auger 	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
533bb9a5adcSEric Auger 	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
534bb9a5adcSEric Auger 	/* enable counters #0 and #1 */
535bb9a5adcSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
536bb9a5adcSEric Auger 
537bb9a5adcSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
538*e0a6e56bSRicardo Koller 	isb();
539bb9a5adcSEric Auger 
540bb9a5adcSEric Auger 	for (i = 0; i < 100; i++)
541bb9a5adcSEric Auger 		write_sysreg(0x1, pmswinc_el0);
542bb9a5adcSEric Auger 
543*e0a6e56bSRicardo Koller 	isb();
544bb9a5adcSEric Auger 	report_info("SW_INCR counter #0 has value %ld", read_regn_el0(pmevcntr, 0));
545bb9a5adcSEric Auger 	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
546bb9a5adcSEric Auger 		"PWSYNC does not increment if PMCR.E is unset");
547bb9a5adcSEric Auger 
548bb9a5adcSEric Auger 	pmu_reset();
549bb9a5adcSEric Auger 
550bb9a5adcSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
551bb9a5adcSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
552bb9a5adcSEric Auger 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
553*e0a6e56bSRicardo Koller 	isb();
554bb9a5adcSEric Auger 
555bb9a5adcSEric Auger 	for (i = 0; i < 100; i++)
556bb9a5adcSEric Auger 		write_sysreg(0x3, pmswinc_el0);
557bb9a5adcSEric Auger 
558*e0a6e56bSRicardo Koller 	isb();
559bb9a5adcSEric Auger 	report(read_regn_el0(pmevcntr, 0)  == 84, "counter #1 after + 100 SW_INCR");
560bb9a5adcSEric Auger 	report(read_regn_el0(pmevcntr, 1)  == 100,
561bb9a5adcSEric Auger 		"counter #0 after + 100 SW_INCR");
562bb9a5adcSEric Auger 	report_info("counter values after 100 SW_INCR #0=%ld #1=%ld",
563bb9a5adcSEric Auger 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
564bb9a5adcSEric Auger 	report(read_sysreg(pmovsclr_el0) == 0x1,
56566fee034SEric Auger 		"overflow on counter #0 after 100 SW_INCR");
56666fee034SEric Auger }
56766fee034SEric Auger 
/*
 * Check CHAIN event behavior: counter #1, programmed with the CHAIN
 * event, increments on each overflow of counter #0 without raising an
 * overflow of its own until the 64-bit pair wraps.
 */
static void test_chained_counters(void)
{
	uint32_t events[] = {CPU_CYCLES, CHAIN};

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	/* preset counter #0 so the loop below makes it overflow */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);

	report(read_regn_el0(pmevcntr, 1) == 1, "CHAIN counter #1 incremented");
	report(!read_sysreg(pmovsclr_el0), "no overflow recorded for chained incr #1");

	/* test 64b overflow */

	pmu_reset();
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	/* start the chain counter at 1: overflow of #0 moves it to 2 */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, 0x1);
	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_regn_el0(pmevcntr, 1) == 2, "CHAIN counter #1 set to 2");
	report(!read_sysreg(pmovsclr_el0), "no overflow recorded for chained incr #2");

	/* chain counter at ALL_SET: next overflow of #0 must wrap it */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, ALL_SET);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(!read_regn_el0(pmevcntr, 1), "CHAIN counter #1 wrapped");
	report(read_sysreg(pmovsclr_el0) == 0x2, "overflow on chain counter");
}
60866fee034SEric Auger 
60966fee034SEric Auger static void test_chained_sw_incr(void)
61066fee034SEric Auger {
61166fee034SEric Auger 	uint32_t events[] = {SW_INCR, CHAIN};
61266fee034SEric Auger 	int i;
61366fee034SEric Auger 
61466fee034SEric Auger 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
61566fee034SEric Auger 		return;
61666fee034SEric Auger 
61766fee034SEric Auger 	pmu_reset();
61866fee034SEric Auger 
61966fee034SEric Auger 	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
62066fee034SEric Auger 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
62166fee034SEric Auger 	/* enable counters #0 and #1 */
62266fee034SEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
62366fee034SEric Auger 
62466fee034SEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
62566fee034SEric Auger 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
626*e0a6e56bSRicardo Koller 	isb();
627*e0a6e56bSRicardo Koller 
62866fee034SEric Auger 	for (i = 0; i < 100; i++)
62966fee034SEric Auger 		write_sysreg(0x1, pmswinc_el0);
63066fee034SEric Auger 
631*e0a6e56bSRicardo Koller 	isb();
63266fee034SEric Auger 	report(!read_sysreg(pmovsclr_el0) && (read_regn_el0(pmevcntr, 1) == 1),
63366fee034SEric Auger 		"no overflow and chain counter incremented after 100 SW_INCR/CHAIN");
63466fee034SEric Auger 	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
63566fee034SEric Auger 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
63666fee034SEric Auger 
63766fee034SEric Auger 	/* 64b SW_INCR and overflow on CHAIN counter*/
63866fee034SEric Auger 	pmu_reset();
63966fee034SEric Auger 
64066fee034SEric Auger 	write_regn_el0(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
64166fee034SEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
64266fee034SEric Auger 	write_regn_el0(pmevcntr, 1, ALL_SET);
64366fee034SEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
64466fee034SEric Auger 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
645*e0a6e56bSRicardo Koller 	isb();
646*e0a6e56bSRicardo Koller 
64766fee034SEric Auger 	for (i = 0; i < 100; i++)
64866fee034SEric Auger 		write_sysreg(0x1, pmswinc_el0);
64966fee034SEric Auger 
650*e0a6e56bSRicardo Koller 	isb();
65166fee034SEric Auger 	report((read_sysreg(pmovsclr_el0) == 0x2) &&
65266fee034SEric Auger 		(read_regn_el0(pmevcntr, 1) == 0) &&
65366fee034SEric Auger 		(read_regn_el0(pmevcntr, 0) == 84),
65466fee034SEric Auger 		"overflow on chain counter and expected values after 100 SW_INCR/CHAIN");
65566fee034SEric Auger 	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
65666fee034SEric Auger 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
657bb9a5adcSEric Auger }
658bb9a5adcSEric Auger 
659ca42f29aSEric Auger static void test_chain_promotion(void)
660ca42f29aSEric Auger {
661ca42f29aSEric Auger 	uint32_t events[] = {MEM_ACCESS, CHAIN};
662ca42f29aSEric Auger 	void *addr = malloc(PAGE_SIZE);
663ca42f29aSEric Auger 
664ca42f29aSEric Auger 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
665ca42f29aSEric Auger 		return;
666ca42f29aSEric Auger 
667ca42f29aSEric Auger 	/* Only enable CHAIN counter */
668ca42f29aSEric Auger 	pmu_reset();
669ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
670ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
671ca42f29aSEric Auger 	write_sysreg_s(0x2, PMCNTENSET_EL0);
672ca42f29aSEric Auger 	isb();
673ca42f29aSEric Auger 
674ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
675ca42f29aSEric Auger 	report(!read_regn_el0(pmevcntr, 0),
676ca42f29aSEric Auger 		"chain counter not counting if even counter is disabled");
677ca42f29aSEric Auger 
678ca42f29aSEric Auger 	/* Only enable even counter */
679ca42f29aSEric Auger 	pmu_reset();
680ca42f29aSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
681ca42f29aSEric Auger 	write_sysreg_s(0x1, PMCNTENSET_EL0);
682ca42f29aSEric Auger 	isb();
683ca42f29aSEric Auger 
684ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
685ca42f29aSEric Auger 	report(!read_regn_el0(pmevcntr, 1) && (read_sysreg(pmovsclr_el0) == 0x1),
686ca42f29aSEric Auger 		"odd counter did not increment on overflow if disabled");
687ca42f29aSEric Auger 	report_info("MEM_ACCESS counter #0 has value %ld",
688ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0));
689ca42f29aSEric Auger 	report_info("CHAIN counter #1 has value %ld",
690ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 1));
691ca42f29aSEric Auger 	report_info("overflow counter %ld", read_sysreg(pmovsclr_el0));
692ca42f29aSEric Auger 
693ca42f29aSEric Auger 	/* start at 0xFFFFFFDC, +20 with CHAIN enabled, +20 with CHAIN disabled */
694ca42f29aSEric Auger 	pmu_reset();
695ca42f29aSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
696ca42f29aSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
697ca42f29aSEric Auger 	isb();
698ca42f29aSEric Auger 
699ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
700ca42f29aSEric Auger 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
701ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0));
702ca42f29aSEric Auger 
703ca42f29aSEric Auger 	/* disable the CHAIN event */
704ca42f29aSEric Auger 	write_sysreg_s(0x2, PMCNTENCLR_EL0);
705ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
706ca42f29aSEric Auger 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
707ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0));
708ca42f29aSEric Auger 	report(read_sysreg(pmovsclr_el0) == 0x1,
709ca42f29aSEric Auger 		"should have triggered an overflow on #0");
710ca42f29aSEric Auger 	report(!read_regn_el0(pmevcntr, 1),
711ca42f29aSEric Auger 		"CHAIN counter #1 shouldn't have incremented");
712ca42f29aSEric Auger 
713ca42f29aSEric Auger 	/* start at 0xFFFFFFDC, +20 with CHAIN disabled, +20 with CHAIN enabled */
714ca42f29aSEric Auger 
715ca42f29aSEric Auger 	pmu_reset();
716ca42f29aSEric Auger 	write_sysreg_s(0x1, PMCNTENSET_EL0);
717ca42f29aSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
718ca42f29aSEric Auger 	isb();
719ca42f29aSEric Auger 	report_info("counter #0 = 0x%lx, counter #1 = 0x%lx overflow=0x%lx",
720ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
721ca42f29aSEric Auger 		    read_sysreg(pmovsclr_el0));
722ca42f29aSEric Auger 
723ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
724ca42f29aSEric Auger 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
725ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0));
726ca42f29aSEric Auger 
727ca42f29aSEric Auger 	/* enable the CHAIN event */
728ca42f29aSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
729ca42f29aSEric Auger 	isb();
730ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
731ca42f29aSEric Auger 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
732ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0));
733ca42f29aSEric Auger 
734ca42f29aSEric Auger 	report((read_regn_el0(pmevcntr, 1) == 1) && !read_sysreg(pmovsclr_el0),
735ca42f29aSEric Auger 		"CHAIN counter enabled: CHAIN counter was incremented and no overflow");
736ca42f29aSEric Auger 
737ca42f29aSEric Auger 	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
738ca42f29aSEric Auger 		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));
739ca42f29aSEric Auger 
740ca42f29aSEric Auger 	/* start as MEM_ACCESS/CPU_CYCLES and move to CHAIN/MEM_ACCESS */
741ca42f29aSEric Auger 	pmu_reset();
742ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
743ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
744ca42f29aSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
745ca42f29aSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
746ca42f29aSEric Auger 	isb();
747ca42f29aSEric Auger 
748ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
749ca42f29aSEric Auger 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
750ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0));
751ca42f29aSEric Auger 
752ca42f29aSEric Auger 	/* 0 becomes CHAINED */
753ca42f29aSEric Auger 	write_sysreg_s(0x0, PMCNTENSET_EL0);
754ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
755ca42f29aSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
756ca42f29aSEric Auger 	write_regn_el0(pmevcntr, 1, 0x0);
757ca42f29aSEric Auger 
758ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
759ca42f29aSEric Auger 	report_info("MEM_ACCESS counter #0 has value 0x%lx",
760ca42f29aSEric Auger 		    read_regn_el0(pmevcntr, 0));
761ca42f29aSEric Auger 
762ca42f29aSEric Auger 	report((read_regn_el0(pmevcntr, 1) == 1) && !read_sysreg(pmovsclr_el0),
763ca42f29aSEric Auger 		"32b->64b: CHAIN counter incremented and no overflow");
764ca42f29aSEric Auger 
765ca42f29aSEric Auger 	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
766ca42f29aSEric Auger 		read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));
767ca42f29aSEric Auger 
768ca42f29aSEric Auger 	/* start as CHAIN/MEM_ACCESS and move to MEM_ACCESS/CPU_CYCLES */
769ca42f29aSEric Auger 	pmu_reset();
770ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
771ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
772ca42f29aSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
773ca42f29aSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
774ca42f29aSEric Auger 
775ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
776ca42f29aSEric Auger 	report_info("counter #0=0x%lx, counter #1=0x%lx",
777ca42f29aSEric Auger 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
778ca42f29aSEric Auger 
779ca42f29aSEric Auger 	write_sysreg_s(0x0, PMCNTENSET_EL0);
780ca42f29aSEric Auger 	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
781ca42f29aSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
782ca42f29aSEric Auger 
783ca42f29aSEric Auger 	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
784ca42f29aSEric Auger 	report(read_sysreg(pmovsclr_el0) == 1,
785ca42f29aSEric Auger 		"overflow is expected on counter 0");
786ca42f29aSEric Auger 	report_info("counter #0=0x%lx, counter #1=0x%lx overflow=0x%lx",
787ca42f29aSEric Auger 			read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
788ca42f29aSEric Auger 			read_sysreg(pmovsclr_el0));
789ca42f29aSEric Auger }
790ca42f29aSEric Auger 
7914f5ef94fSEric Auger static bool expect_interrupts(uint32_t bitmap)
7924f5ef94fSEric Auger {
7934f5ef94fSEric Auger 	int i;
7944f5ef94fSEric Auger 
7954f5ef94fSEric Auger 	if (pmu_stats.bitmap ^ bitmap || pmu_stats.unexpected)
7964f5ef94fSEric Auger 		return false;
7974f5ef94fSEric Auger 
7984f5ef94fSEric Auger 	for (i = 0; i < 32; i++) {
7994f5ef94fSEric Auger 		if (test_and_clear_bit(i, &pmu_stats.bitmap))
8004f5ef94fSEric Auger 			if (pmu_stats.interrupts[i] != 1)
8014f5ef94fSEric Auger 				return false;
8024f5ef94fSEric Auger 	}
8034f5ef94fSEric Auger 	return true;
8044f5ef94fSEric Auger }
8054f5ef94fSEric Auger 
8064f5ef94fSEric Auger static void test_overflow_interrupt(void)
8074f5ef94fSEric Auger {
8084f5ef94fSEric Auger 	uint32_t events[] = {MEM_ACCESS, SW_INCR};
8094f5ef94fSEric Auger 	void *addr = malloc(PAGE_SIZE);
8104f5ef94fSEric Auger 	int i;
8114f5ef94fSEric Auger 
8124f5ef94fSEric Auger 	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
8134f5ef94fSEric Auger 		return;
8144f5ef94fSEric Auger 
8154f5ef94fSEric Auger 	gic_enable_defaults();
8164f5ef94fSEric Auger 	install_irq_handler(EL1H_IRQ, irq_handler);
8174f5ef94fSEric Auger 	local_irq_enable();
8184f5ef94fSEric Auger 	gic_enable_irq(23);
8194f5ef94fSEric Auger 
8204f5ef94fSEric Auger 	pmu_reset();
8214f5ef94fSEric Auger 
8224f5ef94fSEric Auger 	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
8234f5ef94fSEric Auger 	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
8244f5ef94fSEric Auger 	write_sysreg_s(0x3, PMCNTENSET_EL0);
8254f5ef94fSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
8264f5ef94fSEric Auger 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
8274f5ef94fSEric Auger 	isb();
8284f5ef94fSEric Auger 
8294f5ef94fSEric Auger 	/* interrupts are disabled */
8304f5ef94fSEric Auger 
8314f5ef94fSEric Auger 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
8324f5ef94fSEric Auger 	report(expect_interrupts(0), "no overflow interrupt after preset");
8334f5ef94fSEric Auger 
8344f5ef94fSEric Auger 	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
835*e0a6e56bSRicardo Koller 	isb();
836*e0a6e56bSRicardo Koller 
8374f5ef94fSEric Auger 	for (i = 0; i < 100; i++)
8384f5ef94fSEric Auger 		write_sysreg(0x2, pmswinc_el0);
8394f5ef94fSEric Auger 
840*e0a6e56bSRicardo Koller 	isb();
8414f5ef94fSEric Auger 	set_pmcr(pmu.pmcr_ro);
842*e0a6e56bSRicardo Koller 	isb();
8434f5ef94fSEric Auger 	report(expect_interrupts(0), "no overflow interrupt after counting");
8444f5ef94fSEric Auger 
8454f5ef94fSEric Auger 	/* enable interrupts */
8464f5ef94fSEric Auger 
8474f5ef94fSEric Auger 	pmu_reset_stats();
8484f5ef94fSEric Auger 
8494f5ef94fSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
8504f5ef94fSEric Auger 	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
8514f5ef94fSEric Auger 	write_sysreg(ALL_SET, pmintenset_el1);
8524f5ef94fSEric Auger 	isb();
8534f5ef94fSEric Auger 
8544f5ef94fSEric Auger 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
8554f5ef94fSEric Auger 	for (i = 0; i < 100; i++)
8564f5ef94fSEric Auger 		write_sysreg(0x3, pmswinc_el0);
8574f5ef94fSEric Auger 
8584f5ef94fSEric Auger 	mem_access_loop(addr, 200, pmu.pmcr_ro);
8594f5ef94fSEric Auger 	report_info("overflow=0x%lx", read_sysreg(pmovsclr_el0));
8604f5ef94fSEric Auger 	report(expect_interrupts(0x3),
8614f5ef94fSEric Auger 		"overflow interrupts expected on #0 and #1");
8624f5ef94fSEric Auger 
8634f5ef94fSEric Auger 	/* promote to 64-b */
8644f5ef94fSEric Auger 
8654f5ef94fSEric Auger 	pmu_reset_stats();
8664f5ef94fSEric Auger 
8674f5ef94fSEric Auger 	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
8684f5ef94fSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
8694f5ef94fSEric Auger 	isb();
8704f5ef94fSEric Auger 	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
8714f5ef94fSEric Auger 	report(expect_interrupts(0),
8724f5ef94fSEric Auger 		"no overflow interrupt expected on 32b boundary");
8734f5ef94fSEric Auger 
8744f5ef94fSEric Auger 	/* overflow on odd counter */
8754f5ef94fSEric Auger 	pmu_reset_stats();
8764f5ef94fSEric Auger 	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
8774f5ef94fSEric Auger 	write_regn_el0(pmevcntr, 1, ALL_SET);
8784f5ef94fSEric Auger 	isb();
8794f5ef94fSEric Auger 	mem_access_loop(addr, 400, pmu.pmcr_ro | PMU_PMCR_E);
8804f5ef94fSEric Auger 	report(expect_interrupts(0x2),
8814f5ef94fSEric Auger 		"expect overflow interrupt on odd counter");
8824f5ef94fSEric Auger }
8834244065bSChristopher Covington #endif
8844244065bSChristopher Covington 
8854244065bSChristopher Covington /*
886d81bb7a3SChristopher Covington  * Ensure that the cycle counter progresses between back-to-back reads.
887d81bb7a3SChristopher Covington  */
888d81bb7a3SChristopher Covington static bool check_cycles_increase(void)
889d81bb7a3SChristopher Covington {
890d81bb7a3SChristopher Covington 	bool success = true;
891d81bb7a3SChristopher Covington 
892d81bb7a3SChristopher Covington 	/* init before event access, this test only cares about cycle count */
893d81bb7a3SChristopher Covington 	set_pmcntenset(1 << PMU_CYCLE_IDX);
894d81bb7a3SChristopher Covington 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
895d81bb7a3SChristopher Covington 
896d81bb7a3SChristopher Covington 	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
897*e0a6e56bSRicardo Koller 	isb();
898d81bb7a3SChristopher Covington 
899d81bb7a3SChristopher Covington 	for (int i = 0; i < NR_SAMPLES; i++) {
900d81bb7a3SChristopher Covington 		uint64_t a, b;
901d81bb7a3SChristopher Covington 
902d81bb7a3SChristopher Covington 		a = get_pmccntr();
903d81bb7a3SChristopher Covington 		b = get_pmccntr();
904d81bb7a3SChristopher Covington 
905d81bb7a3SChristopher Covington 		if (a >= b) {
906d81bb7a3SChristopher Covington 			printf("Read %"PRId64" then %"PRId64".\n", a, b);
907d81bb7a3SChristopher Covington 			success = false;
908d81bb7a3SChristopher Covington 			break;
909d81bb7a3SChristopher Covington 		}
910d81bb7a3SChristopher Covington 	}
911d81bb7a3SChristopher Covington 
912d81bb7a3SChristopher Covington 	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
913*e0a6e56bSRicardo Koller 	isb();
914d81bb7a3SChristopher Covington 
915d81bb7a3SChristopher Covington 	return success;
916d81bb7a3SChristopher Covington }
917d81bb7a3SChristopher Covington 
9188f76a347SChristopher Covington /*
9198f76a347SChristopher Covington  * Execute a known number of guest instructions. Only even instruction counts
9208f76a347SChristopher Covington  * greater than or equal to 4 are supported by the in-line assembly code. The
9218f76a347SChristopher Covington  * control register (PMCR_EL0) is initialized with the provided value (allowing
9228f76a347SChristopher Covington  * for example for the cycle counter or event counters to be reset). At the end
9238f76a347SChristopher Covington  * of the exact instruction loop, zero is written to PMCR_EL0 to disable
9248f76a347SChristopher Covington  * counting, allowing the cycle counter or event counters to be read at the
9258f76a347SChristopher Covington  * leisure of the calling code.
9268f76a347SChristopher Covington  */
9278f76a347SChristopher Covington static void measure_instrs(int num, uint32_t pmcr)
9288f76a347SChristopher Covington {
9298f76a347SChristopher Covington 	int loop = (num - 2) / 2;
9308f76a347SChristopher Covington 
9318f76a347SChristopher Covington 	assert(num >= 4 && ((num - 2) % 2 == 0));
9328f76a347SChristopher Covington 	precise_instrs_loop(loop, pmcr);
9338f76a347SChristopher Covington }
9348f76a347SChristopher Covington 
9358f76a347SChristopher Covington /*
9368f76a347SChristopher Covington  * Measure cycle counts for various known instruction counts. Ensure that the
9378f76a347SChristopher Covington  * cycle counter progresses (similar to check_cycles_increase() but with more
9388f76a347SChristopher Covington  * instructions and using reset and stop controls). If supplied a positive,
9398f76a347SChristopher Covington  * nonzero CPI parameter, it also strictly checks that every measurement matches
9408f76a347SChristopher Covington  * it. Strict CPI checking is used to test -icount mode.
9418f76a347SChristopher Covington  */
9428f76a347SChristopher Covington static bool check_cpi(int cpi)
9438f76a347SChristopher Covington {
9448f76a347SChristopher Covington 	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;
9458f76a347SChristopher Covington 
9468f76a347SChristopher Covington 	/* init before event access, this test only cares about cycle count */
9478f76a347SChristopher Covington 	set_pmcntenset(1 << PMU_CYCLE_IDX);
9488f76a347SChristopher Covington 	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */
9498f76a347SChristopher Covington 
9508f76a347SChristopher Covington 	if (cpi > 0)
9518f76a347SChristopher Covington 		printf("Checking for CPI=%d.\n", cpi);
9528f76a347SChristopher Covington 	printf("instrs : cycles0 cycles1 ...\n");
9538f76a347SChristopher Covington 
9548f76a347SChristopher Covington 	for (unsigned int i = 4; i < 300; i += 32) {
9558f76a347SChristopher Covington 		uint64_t avg, sum = 0;
9568f76a347SChristopher Covington 
9578f76a347SChristopher Covington 		printf("%4d:", i);
9588f76a347SChristopher Covington 		for (int j = 0; j < NR_SAMPLES; j++) {
9598f76a347SChristopher Covington 			uint64_t cycles;
9608f76a347SChristopher Covington 
9618f76a347SChristopher Covington 			set_pmccntr(0);
9628f76a347SChristopher Covington 			measure_instrs(i, pmcr);
9638f76a347SChristopher Covington 			cycles = get_pmccntr();
9648f76a347SChristopher Covington 			printf(" %4"PRId64"", cycles);
9658f76a347SChristopher Covington 
9668f76a347SChristopher Covington 			if (!cycles) {
9678f76a347SChristopher Covington 				printf("\ncycles not incrementing!\n");
9688f76a347SChristopher Covington 				return false;
9698f76a347SChristopher Covington 			} else if (cpi > 0 && cycles != i * cpi) {
9708f76a347SChristopher Covington 				printf("\nunexpected cycle count received!\n");
9718f76a347SChristopher Covington 				return false;
9728f76a347SChristopher Covington 			} else if ((cycles >> 32) != 0) {
9738f76a347SChristopher Covington 				/* The cycles taken by the loop above should
9748f76a347SChristopher Covington 				 * fit in 32 bits easily. We check the upper
9758f76a347SChristopher Covington 				 * 32 bits of the cycle counter to make sure
9768f76a347SChristopher Covington 				 * there is no supprise. */
9778f76a347SChristopher Covington 				printf("\ncycle count bigger than 32bit!\n");
9788f76a347SChristopher Covington 				return false;
9798f76a347SChristopher Covington 			}
9808f76a347SChristopher Covington 
9818f76a347SChristopher Covington 			sum += cycles;
9828f76a347SChristopher Covington 		}
9838f76a347SChristopher Covington 		avg = sum / NR_SAMPLES;
9848f76a347SChristopher Covington 		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
9858f76a347SChristopher Covington 		       (avg >= i) ? "cpi" : "ipc",
9868f76a347SChristopher Covington 		       (avg >= i) ? avg / i : i / avg);
9878f76a347SChristopher Covington 	}
9888f76a347SChristopher Covington 
9898f76a347SChristopher Covington 	return true;
9908f76a347SChristopher Covington }
9918f76a347SChristopher Covington 
9924c357610SAndrew Jones static void pmccntr64_test(void)
9934c357610SAndrew Jones {
9944c357610SAndrew Jones #ifdef __arm__
995784ee933SEric Auger 	if (pmu.version == ID_DFR0_PMU_V3) {
9964c357610SAndrew Jones 		if (ERRATA(9e3f7a296940)) {
9974c357610SAndrew Jones 			write_sysreg(0xdead, PMCCNTR64);
998a299895bSThomas Huth 			report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
9994c357610SAndrew Jones 		} else
10004c357610SAndrew Jones 			report_skip("Skipping unsafe pmccntr64 test. Set ERRATA_9e3f7a296940=y to enable.");
10014c357610SAndrew Jones 	}
10024c357610SAndrew Jones #endif
10034c357610SAndrew Jones }
10044c357610SAndrew Jones 
10054244065bSChristopher Covington /* Return FALSE if no PMU found, otherwise return TRUE */
100623b8916bSThomas Huth static bool pmu_probe(void)
10074244065bSChristopher Covington {
10081e4f5392SAlexandru Elisei 	uint32_t pmcr;
100946ca10f4SAlexandru Elisei 	uint8_t implementer;
1010eff8f161SEric Auger 
10118f747a85SEric Auger 	pmu.version = get_pmu_version();
1012784ee933SEric Auger 	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
1013eff8f161SEric Auger 		return false;
1014eff8f161SEric Auger 
1015784ee933SEric Auger 	report_info("PMU version: 0x%x", pmu.version);
1016eff8f161SEric Auger 
10171e4f5392SAlexandru Elisei 	pmcr = get_pmcr();
101846ca10f4SAlexandru Elisei 	implementer = (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK;
101946ca10f4SAlexandru Elisei 	report_info("PMU implementer/ID code: %#"PRIx32"(\"%c\")/%#"PRIx32,
1020eff8f161SEric Auger 		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
102146ca10f4SAlexandru Elisei 		    implementer ? implementer : ' ',
10228f747a85SEric Auger 		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);
10238f747a85SEric Auger 
10248f747a85SEric Auger 	/* store read-only and RES0 fields of the PMCR bottom-half*/
10258f747a85SEric Auger 	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
10268f747a85SEric Auger 	pmu.nb_implemented_counters =
10278f747a85SEric Auger 		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
10288f747a85SEric Auger 	report_info("Implements %d event counters",
10298f747a85SEric Auger 		    pmu.nb_implemented_counters);
1030eff8f161SEric Auger 
1031eff8f161SEric Auger 	return true;
10324244065bSChristopher Covington }
10334244065bSChristopher Covington 
10348f76a347SChristopher Covington int main(int argc, char *argv[])
10354244065bSChristopher Covington {
10368f76a347SChristopher Covington 	int cpi = 0;
10378f76a347SChristopher Covington 
10384244065bSChristopher Covington 	if (!pmu_probe()) {
10394244065bSChristopher Covington 		printf("No PMU found, test skipped...\n");
10404244065bSChristopher Covington 		return report_summary();
10414244065bSChristopher Covington 	}
10424244065bSChristopher Covington 
104357ec1086SEric Auger 	if (argc < 2)
104457ec1086SEric Auger 		report_abort("no test specified");
104557ec1086SEric Auger 
10464244065bSChristopher Covington 	report_prefix_push("pmu");
10474244065bSChristopher Covington 
104857ec1086SEric Auger 	if (strcmp(argv[1], "cycle-counter") == 0) {
104957ec1086SEric Auger 		report_prefix_push(argv[1]);
105057ec1086SEric Auger 		if (argc > 2)
105157ec1086SEric Auger 			cpi = atol(argv[2]);
1052a299895bSThomas Huth 		report(check_cycles_increase(),
1053a299895bSThomas Huth 		       "Monotonically increasing cycle count");
1054a299895bSThomas Huth 		report(check_cpi(cpi), "Cycle/instruction ratio");
10554c357610SAndrew Jones 		pmccntr64_test();
105657ec1086SEric Auger 		report_prefix_pop();
10574870738cSEric Auger 	} else if (strcmp(argv[1], "pmu-event-introspection") == 0) {
10584870738cSEric Auger 		report_prefix_push(argv[1]);
10594870738cSEric Auger 		test_event_introspection();
10604870738cSEric Auger 		report_prefix_pop();
10614ce2a804SEric Auger 	} else if (strcmp(argv[1], "pmu-event-counter-config") == 0) {
10624ce2a804SEric Auger 		report_prefix_push(argv[1]);
10634ce2a804SEric Auger 		test_event_counter_config();
10644ce2a804SEric Auger 		report_prefix_pop();
10654ce2a804SEric Auger 	} else if (strcmp(argv[1], "pmu-basic-event-count") == 0) {
10664ce2a804SEric Auger 		report_prefix_push(argv[1]);
10674ce2a804SEric Auger 		test_basic_event_count();
10684ce2a804SEric Auger 		report_prefix_pop();
10694ce2a804SEric Auger 	} else if (strcmp(argv[1], "pmu-mem-access") == 0) {
10704ce2a804SEric Auger 		report_prefix_push(argv[1]);
10714ce2a804SEric Auger 		test_mem_access();
10724ce2a804SEric Auger 		report_prefix_pop();
1073bb9a5adcSEric Auger 	} else if (strcmp(argv[1], "pmu-sw-incr") == 0) {
1074bb9a5adcSEric Auger 		report_prefix_push(argv[1]);
1075bb9a5adcSEric Auger 		test_sw_incr();
1076bb9a5adcSEric Auger 		report_prefix_pop();
107766fee034SEric Auger 	} else if (strcmp(argv[1], "pmu-chained-counters") == 0) {
107866fee034SEric Auger 		report_prefix_push(argv[1]);
107966fee034SEric Auger 		test_chained_counters();
108066fee034SEric Auger 		report_prefix_pop();
108166fee034SEric Auger 	} else if (strcmp(argv[1], "pmu-chained-sw-incr") == 0) {
108266fee034SEric Auger 		report_prefix_push(argv[1]);
108366fee034SEric Auger 		test_chained_sw_incr();
108466fee034SEric Auger 		report_prefix_pop();
1085ca42f29aSEric Auger 	} else if (strcmp(argv[1], "pmu-chain-promotion") == 0) {
1086ca42f29aSEric Auger 		report_prefix_push(argv[1]);
1087ca42f29aSEric Auger 		test_chain_promotion();
1088ca42f29aSEric Auger 		report_prefix_pop();
10894f5ef94fSEric Auger 	} else if (strcmp(argv[1], "pmu-overflow-interrupt") == 0) {
10904f5ef94fSEric Auger 		report_prefix_push(argv[1]);
10914f5ef94fSEric Auger 		test_overflow_interrupt();
10924f5ef94fSEric Auger 		report_prefix_pop();
109357ec1086SEric Auger 	} else {
109457ec1086SEric Auger 		report_abort("Unknown sub-test '%s'", argv[1]);
109557ec1086SEric Auger 	}
10964c357610SAndrew Jones 
10974244065bSChristopher Covington 	return report_summary();
10984244065bSChristopher Covington }
1099