14244065bSChristopher Covington /* 24244065bSChristopher Covington * Test the ARM Performance Monitors Unit (PMU). 34244065bSChristopher Covington * 44244065bSChristopher Covington * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 54244065bSChristopher Covington * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com> 64244065bSChristopher Covington * 74244065bSChristopher Covington * This program is free software; you can redistribute it and/or modify it 84244065bSChristopher Covington * under the terms of the GNU Lesser General Public License version 2.1 and 94244065bSChristopher Covington * only version 2.1 as published by the Free Software Foundation. 104244065bSChristopher Covington * 114244065bSChristopher Covington * This program is distributed in the hope that it will be useful, but WITHOUT 124244065bSChristopher Covington * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 134244065bSChristopher Covington * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License 144244065bSChristopher Covington * for more details. 
154244065bSChristopher Covington */ 164244065bSChristopher Covington #include "libcflat.h" 174c357610SAndrew Jones #include "errata.h" 184244065bSChristopher Covington #include "asm/barrier.h" 194244065bSChristopher Covington #include "asm/sysreg.h" 204244065bSChristopher Covington #include "asm/processor.h" 214870738cSEric Auger #include <bitops.h> 224ce2a804SEric Auger #include <asm/gic.h> 234244065bSChristopher Covington 24d81bb7a3SChristopher Covington #define PMU_PMCR_E (1 << 0) 254ce2a804SEric Auger #define PMU_PMCR_P (1 << 1) 26d81bb7a3SChristopher Covington #define PMU_PMCR_C (1 << 2) 274ce2a804SEric Auger #define PMU_PMCR_D (1 << 3) 284ce2a804SEric Auger #define PMU_PMCR_X (1 << 4) 294ce2a804SEric Auger #define PMU_PMCR_DP (1 << 5) 30d81bb7a3SChristopher Covington #define PMU_PMCR_LC (1 << 6) 31036369c5SRicardo Koller #define PMU_PMCR_LP (1 << 7) 324244065bSChristopher Covington #define PMU_PMCR_N_SHIFT 11 334244065bSChristopher Covington #define PMU_PMCR_N_MASK 0x1f 344244065bSChristopher Covington #define PMU_PMCR_ID_SHIFT 16 354244065bSChristopher Covington #define PMU_PMCR_ID_MASK 0xff 364244065bSChristopher Covington #define PMU_PMCR_IMP_SHIFT 24 374244065bSChristopher Covington #define PMU_PMCR_IMP_MASK 0xff 384244065bSChristopher Covington 39d81bb7a3SChristopher Covington #define PMU_CYCLE_IDX 31 40d81bb7a3SChristopher Covington 41d81bb7a3SChristopher Covington #define NR_SAMPLES 10 42d81bb7a3SChristopher Covington 434870738cSEric Auger /* Some PMU events */ 444870738cSEric Auger #define SW_INCR 0x0 454870738cSEric Auger #define INST_RETIRED 0x8 464870738cSEric Auger #define CPU_CYCLES 0x11 474ce2a804SEric Auger #define MEM_ACCESS 0x13 484870738cSEric Auger #define INST_PREC 0x1B 494870738cSEric Auger #define STALL_FRONTEND 0x23 504870738cSEric Auger #define STALL_BACKEND 0x24 5166fee034SEric Auger #define CHAIN 0x1E 524870738cSEric Auger 534870738cSEric Auger #define COMMON_EVENTS_LOW 0x0 544870738cSEric Auger #define COMMON_EVENTS_HIGH 0x3F 
554870738cSEric Auger #define EXT_COMMON_EVENTS_LOW 0x4000 564870738cSEric Auger #define EXT_COMMON_EVENTS_HIGH 0x403F 574870738cSEric Auger 587d1f853aSRicardo Koller #define ALL_SET_32 0x00000000FFFFFFFFULL 593c23bf40SEric Auger #define ALL_SET_64 0xFFFFFFFFFFFFFFFFULL 603c23bf40SEric Auger 613c23bf40SEric Auger #define ALL_SET(__overflow_at_64bits) \ 623c23bf40SEric Auger (__overflow_at_64bits ? ALL_SET_64 : ALL_SET_32) 633c23bf40SEric Auger 6439d1347aSRicardo Koller #define ALL_CLEAR 0x0000000000000000ULL 657d1f853aSRicardo Koller #define PRE_OVERFLOW_32 0x00000000FFFFFFF0ULL 66036369c5SRicardo Koller #define PRE_OVERFLOW_64 0xFFFFFFFFFFFFFFF0ULL 673c125accSEric Auger #define COUNT 250 683c125accSEric Auger #define MARGIN 100 6991806724SEric Auger /* 7091806724SEric Auger * PRE_OVERFLOW2 is set so that 1st @COUNT iterations do not 7191806724SEric Auger * produce 32b overflow and 2nd @COUNT iterations do. To accommodate 7291806724SEric Auger * for some observed variability we take into account a given @MARGIN 7391806724SEric Auger */ 7491806724SEric Auger #define PRE_OVERFLOW2_32 (ALL_SET_32 - COUNT - MARGIN) 753c23bf40SEric Auger #define PRE_OVERFLOW2_64 (ALL_SET_64 - COUNT - MARGIN) 763c23bf40SEric Auger 773c23bf40SEric Auger #define PRE_OVERFLOW2(__overflow_at_64bits) \ 783c23bf40SEric Auger (__overflow_at_64bits ? PRE_OVERFLOW2_64 : PRE_OVERFLOW2_32) 79036369c5SRicardo Koller 80036369c5SRicardo Koller #define PRE_OVERFLOW(__overflow_at_64bits) \ 81036369c5SRicardo Koller (__overflow_at_64bits ? 
PRE_OVERFLOW_64 : PRE_OVERFLOW_32) 824ce2a804SEric Auger 834f5ef94fSEric Auger #define PMU_PPI 23 844f5ef94fSEric Auger 858f747a85SEric Auger struct pmu { 868f747a85SEric Auger unsigned int version; 878f747a85SEric Auger unsigned int nb_implemented_counters; 888f747a85SEric Auger uint32_t pmcr_ro; 898f747a85SEric Auger }; 908f747a85SEric Auger 914f5ef94fSEric Auger struct pmu_stats { 924f5ef94fSEric Auger unsigned long bitmap; 934f5ef94fSEric Auger uint32_t interrupts[32]; 944f5ef94fSEric Auger bool unexpected; 954f5ef94fSEric Auger }; 964f5ef94fSEric Auger 978f747a85SEric Auger static struct pmu pmu; 988f747a85SEric Auger 994244065bSChristopher Covington #if defined(__arm__) 100098add54SAndrew Jones #define ID_DFR0_PERFMON_SHIFT 24 101098add54SAndrew Jones #define ID_DFR0_PERFMON_MASK 0xf 102098add54SAndrew Jones 103784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL 0b0000 104784ee933SEric Auger #define ID_DFR0_PMU_V1 0b0001 105784ee933SEric Auger #define ID_DFR0_PMU_V2 0b0010 106784ee933SEric Auger #define ID_DFR0_PMU_V3 0b0011 107784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1 0b0100 108784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4 0b0101 109784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5 0b0110 110784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF 0b1111 111784ee933SEric Auger 1124244065bSChristopher Covington #define PMCR __ACCESS_CP15(c9, 0, c12, 0) 1134244065bSChristopher Covington #define ID_DFR0 __ACCESS_CP15(c0, 0, c1, 2) 114d81bb7a3SChristopher Covington #define PMSELR __ACCESS_CP15(c9, 0, c12, 5) 115d81bb7a3SChristopher Covington #define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1) 116d81bb7a3SChristopher Covington #define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1) 117a7326740SRicardo Koller #define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2) 118a7326740SRicardo Koller #define PMOVSR __ACCESS_CP15(c9, 0, c12, 3) 119d81bb7a3SChristopher Covington #define PMCCNTR32 __ACCESS_CP15(c9, 0, c13, 0) 120a7326740SRicardo Koller #define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2) 
121d81bb7a3SChristopher Covington #define PMCCNTR64 __ACCESS_CP15_64(0, c9) 1224244065bSChristopher Covington 1234244065bSChristopher Covington static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); } 1244244065bSChristopher Covington static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); } 125d81bb7a3SChristopher Covington static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); } 126d81bb7a3SChristopher Covington static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); } 127d81bb7a3SChristopher Covington 128098add54SAndrew Jones static inline uint8_t get_pmu_version(void) 129098add54SAndrew Jones { 130098add54SAndrew Jones return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK; 131098add54SAndrew Jones } 132098add54SAndrew Jones 133d81bb7a3SChristopher Covington static inline uint64_t get_pmccntr(void) 134d81bb7a3SChristopher Covington { 135d81bb7a3SChristopher Covington return read_sysreg(PMCCNTR32); 136d81bb7a3SChristopher Covington } 137d81bb7a3SChristopher Covington 1388f76a347SChristopher Covington static inline void set_pmccntr(uint64_t value) 1398f76a347SChristopher Covington { 1408f76a347SChristopher Covington write_sysreg(value & 0xffffffff, PMCCNTR32); 1418f76a347SChristopher Covington } 1428f76a347SChristopher Covington 143d81bb7a3SChristopher Covington /* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */ 144d81bb7a3SChristopher Covington static inline void set_pmccfiltr(uint32_t value) 145d81bb7a3SChristopher Covington { 146d81bb7a3SChristopher Covington write_sysreg(PMU_CYCLE_IDX, PMSELR); 147d81bb7a3SChristopher Covington write_sysreg(value, PMXEVTYPER); 148d81bb7a3SChristopher Covington isb(); 149d81bb7a3SChristopher Covington } 1508f76a347SChristopher Covington 1518f76a347SChristopher Covington /* 1528f76a347SChristopher Covington * Extra instructions inserted by the compiler would be difficult to compensate 1538f76a347SChristopher Covington * for, so hand assemble 
everything between, and including, the PMCR accesses 1548f76a347SChristopher Covington * to start and stop counting. isb instructions were inserted to make sure 1558f76a347SChristopher Covington * pmccntr read after this function returns the exact instructions executed in 1568f76a347SChristopher Covington * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop. 1578f76a347SChristopher Covington */ 1588f76a347SChristopher Covington static inline void precise_instrs_loop(int loop, uint32_t pmcr) 1598f76a347SChristopher Covington { 1608f76a347SChristopher Covington asm volatile( 1618f76a347SChristopher Covington " mcr p15, 0, %[pmcr], c9, c12, 0\n" 1628f76a347SChristopher Covington " isb\n" 1638f76a347SChristopher Covington "1: subs %[loop], %[loop], #1\n" 1648f76a347SChristopher Covington " bgt 1b\n" 1658f76a347SChristopher Covington " mcr p15, 0, %[z], c9, c12, 0\n" 1668f76a347SChristopher Covington " isb\n" 1678f76a347SChristopher Covington : [loop] "+r" (loop) 1688f76a347SChristopher Covington : [pmcr] "r" (pmcr), [z] "r" (0) 1698f76a347SChristopher Covington : "cc"); 1708f76a347SChristopher Covington } 1714870738cSEric Auger 172a7326740SRicardo Koller static void pmu_reset(void) 173a7326740SRicardo Koller { 174a7326740SRicardo Koller /* reset all counters, counting disabled at PMCR level*/ 175a7326740SRicardo Koller set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P); 176a7326740SRicardo Koller /* Disable all counters */ 1777d1f853aSRicardo Koller write_sysreg(ALL_SET_32, PMCNTENCLR); 178a7326740SRicardo Koller /* clear overflow reg */ 1797d1f853aSRicardo Koller write_sysreg(ALL_SET_32, PMOVSR); 180a7326740SRicardo Koller /* disable overflow interrupts on all counters */ 1817d1f853aSRicardo Koller write_sysreg(ALL_SET_32, PMINTENCLR); 182a7326740SRicardo Koller isb(); 183a7326740SRicardo Koller } 184a7326740SRicardo Koller 1854870738cSEric Auger /* event counter tests only implemented for aarch64 */ 1864870738cSEric Auger static void 
test_event_introspection(void) {} 1874ce2a804SEric Auger static void test_event_counter_config(void) {} 188041df25bSRicardo Koller static void test_basic_event_count(bool overflow_at_64bits) {} 1893c23bf40SEric Auger static void test_mem_access_reliability(bool overflow_at_64bits) {} 190041df25bSRicardo Koller static void test_mem_access(bool overflow_at_64bits) {} 191041df25bSRicardo Koller static void test_sw_incr(bool overflow_at_64bits) {} 192041df25bSRicardo Koller static void test_chained_counters(bool unused) {} 193041df25bSRicardo Koller static void test_chained_sw_incr(bool unused) {} 194041df25bSRicardo Koller static void test_chain_promotion(bool unused) {} 195041df25bSRicardo Koller static void test_overflow_interrupt(bool overflow_at_64bits) {} 1964870738cSEric Auger 1974244065bSChristopher Covington #elif defined(__aarch64__) 198098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_SHIFT 8 199098add54SAndrew Jones #define ID_AA64DFR0_PERFMON_MASK 0xf 200098add54SAndrew Jones 201784ee933SEric Auger #define ID_DFR0_PMU_NOTIMPL 0b0000 202784ee933SEric Auger #define ID_DFR0_PMU_V3 0b0001 203784ee933SEric Auger #define ID_DFR0_PMU_V3_8_1 0b0100 204784ee933SEric Auger #define ID_DFR0_PMU_V3_8_4 0b0101 205784ee933SEric Auger #define ID_DFR0_PMU_V3_8_5 0b0110 206784ee933SEric Auger #define ID_DFR0_PMU_IMPDEF 0b1111 207784ee933SEric Auger 208098add54SAndrew Jones static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); } 2094244065bSChristopher Covington static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); } 210d81bb7a3SChristopher Covington static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); } 211d81bb7a3SChristopher Covington static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); } 2128f76a347SChristopher Covington static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); } 213d81bb7a3SChristopher Covington static inline void set_pmcntenset(uint32_t v) { 
write_sysreg(v, pmcntenset_el0); } 214d81bb7a3SChristopher Covington static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); } 2158f76a347SChristopher Covington 216098add54SAndrew Jones static inline uint8_t get_pmu_version(void) 217098add54SAndrew Jones { 218098add54SAndrew Jones uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK; 219784ee933SEric Auger return ver; 220098add54SAndrew Jones } 221098add54SAndrew Jones 2228f76a347SChristopher Covington /* 2238f76a347SChristopher Covington * Extra instructions inserted by the compiler would be difficult to compensate 2248f76a347SChristopher Covington * for, so hand assemble everything between, and including, the PMCR accesses 2258f76a347SChristopher Covington * to start and stop counting. isb instructions are inserted to make sure 2268f76a347SChristopher Covington * pmccntr read after this function returns the exact instructions executed 2278f76a347SChristopher Covington * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop. 
2288f76a347SChristopher Covington */ 2298f76a347SChristopher Covington static inline void precise_instrs_loop(int loop, uint32_t pmcr) 2308f76a347SChristopher Covington { 2319e186511SThomas Huth uint64_t pmcr64 = pmcr; 2328f76a347SChristopher Covington asm volatile( 2338f76a347SChristopher Covington " msr pmcr_el0, %[pmcr]\n" 2348f76a347SChristopher Covington " isb\n" 2359e186511SThomas Huth "1: subs %w[loop], %w[loop], #1\n" 2368f76a347SChristopher Covington " b.gt 1b\n" 2378f76a347SChristopher Covington " msr pmcr_el0, xzr\n" 2388f76a347SChristopher Covington " isb\n" 2398f76a347SChristopher Covington : [loop] "+r" (loop) 2409e186511SThomas Huth : [pmcr] "r" (pmcr64) 2418f76a347SChristopher Covington : "cc"); 2428f76a347SChristopher Covington } 2434870738cSEric Auger 2444870738cSEric Auger #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7) 2454ce2a804SEric Auger #define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1) 2464ce2a804SEric Auger #define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2) 2474ce2a804SEric Auger 2484ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL1 BIT(31) 2494ce2a804SEric Auger #define PMEVTYPER_EXCLUDE_EL0 BIT(30) 2504870738cSEric Auger 2514870738cSEric Auger static bool is_event_supported(uint32_t n, bool warn) 2524870738cSEric Auger { 2534870738cSEric Auger uint64_t pmceid0 = read_sysreg(pmceid0_el0); 2544870738cSEric Auger uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0); 2554870738cSEric Auger bool supported; 2564870738cSEric Auger uint64_t reg; 2574870738cSEric Auger 2584870738cSEric Auger /* 2594870738cSEric Auger * The low 32-bits of PMCEID0/1 respectively describe 2604870738cSEric Auger * event support for events 0-31/32-63. Their High 2614870738cSEric Auger * 32-bits describe support for extended events 2624870738cSEric Auger * starting at 0x4000, using the same split. 
2634870738cSEric Auger */ 2644870738cSEric Auger assert((n >= COMMON_EVENTS_LOW && n <= COMMON_EVENTS_HIGH) || 2654870738cSEric Auger (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH)); 2664870738cSEric Auger 2674870738cSEric Auger if (n <= COMMON_EVENTS_HIGH) 2684870738cSEric Auger reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32); 2694870738cSEric Auger else 2704870738cSEric Auger reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32); 2714870738cSEric Auger 2724870738cSEric Auger supported = reg & (1UL << (n & 0x3F)); 2734870738cSEric Auger 2744870738cSEric Auger if (!supported && warn) 2754870738cSEric Auger report_info("event 0x%x is not supported", n); 2764870738cSEric Auger return supported; 2774870738cSEric Auger } 2784870738cSEric Auger 2794870738cSEric Auger static void test_event_introspection(void) 2804870738cSEric Auger { 2814870738cSEric Auger bool required_events; 2824870738cSEric Auger 2834870738cSEric Auger if (!pmu.nb_implemented_counters) { 2844870738cSEric Auger report_skip("No event counter, skip ..."); 2854870738cSEric Auger return; 2864870738cSEric Auger } 2874870738cSEric Auger 2884870738cSEric Auger /* PMUv3 requires an implementation includes some common events */ 2894870738cSEric Auger required_events = is_event_supported(SW_INCR, true) && 2904870738cSEric Auger is_event_supported(CPU_CYCLES, true) && 2914870738cSEric Auger (is_event_supported(INST_RETIRED, true) || 2924870738cSEric Auger is_event_supported(INST_PREC, true)); 2934870738cSEric Auger 2944870738cSEric Auger if (pmu.version >= ID_DFR0_PMU_V3_8_1) { 2954870738cSEric Auger required_events = required_events && 2964870738cSEric Auger is_event_supported(STALL_FRONTEND, true) && 2974870738cSEric Auger is_event_supported(STALL_BACKEND, true); 2984870738cSEric Auger } 2994870738cSEric Auger 3004870738cSEric Auger report(required_events, "Check required events are implemented"); 3014870738cSEric Auger } 3024870738cSEric Auger 3034ce2a804SEric 
Auger /* 3044ce2a804SEric Auger * Extra instructions inserted by the compiler would be difficult to compensate 3054ce2a804SEric Auger * for, so hand assemble everything between, and including, the PMCR accesses 3064ce2a804SEric Auger * to start and stop counting. isb instructions are inserted to make sure 3074ce2a804SEric Auger * pmccntr read after this function returns the exact instructions executed 3084ce2a804SEric Auger * in the controlled block. Loads @loop times the data at @address into x9. 3094ce2a804SEric Auger */ 3109e186511SThomas Huth static void mem_access_loop(void *addr, long loop, uint32_t pmcr) 3114ce2a804SEric Auger { 3129e186511SThomas Huth uint64_t pmcr64 = pmcr; 3134ce2a804SEric Auger asm volatile( 3149f2f6819SEric Auger " dsb ish\n" 3154ce2a804SEric Auger " msr pmcr_el0, %[pmcr]\n" 3164ce2a804SEric Auger " isb\n" 3179f2f6819SEric Auger " dsb ish\n" 3184ce2a804SEric Auger " mov x10, %[loop]\n" 3194ce2a804SEric Auger "1: sub x10, x10, #1\n" 3204ce2a804SEric Auger " ldr x9, [%[addr]]\n" 3214ce2a804SEric Auger " cmp x10, #0x0\n" 3224ce2a804SEric Auger " b.gt 1b\n" 3239f2f6819SEric Auger " dsb ish\n" 3244ce2a804SEric Auger " msr pmcr_el0, xzr\n" 3254ce2a804SEric Auger " isb\n" 3264ce2a804SEric Auger : 3279e186511SThomas Huth : [addr] "r" (addr), [pmcr] "r" (pmcr64), [loop] "r" (loop) 3284ce2a804SEric Auger : "x9", "x10", "cc"); 3294ce2a804SEric Auger } 3304ce2a804SEric Auger 331*60f5b29eSEric Auger static volatile struct pmu_stats pmu_stats; 3324f5ef94fSEric Auger 3334f5ef94fSEric Auger static void irq_handler(struct pt_regs *regs) 3344f5ef94fSEric Auger { 3354f5ef94fSEric Auger uint32_t irqstat, irqnr; 3364f5ef94fSEric Auger 3374f5ef94fSEric Auger irqstat = gic_read_iar(); 3384f5ef94fSEric Auger irqnr = gic_iar_irqnr(irqstat); 3394f5ef94fSEric Auger 3404f5ef94fSEric Auger if (irqnr == PMU_PPI) { 3414f5ef94fSEric Auger unsigned long overflows = read_sysreg(pmovsclr_el0); 3424f5ef94fSEric Auger int i; 3434f5ef94fSEric Auger 3444f5ef94fSEric Auger 
for (i = 0; i < 32; i++) { 3454f5ef94fSEric Auger if (test_and_clear_bit(i, &overflows)) { 3464f5ef94fSEric Auger pmu_stats.interrupts[i]++; 3474f5ef94fSEric Auger pmu_stats.bitmap |= 1 << i; 3484f5ef94fSEric Auger } 3494f5ef94fSEric Auger } 3507d1f853aSRicardo Koller write_sysreg(ALL_SET_32, pmovsclr_el0); 351e0a6e56bSRicardo Koller isb(); 3524f5ef94fSEric Auger } else { 3534f5ef94fSEric Auger pmu_stats.unexpected = true; 3544f5ef94fSEric Auger } 3554f5ef94fSEric Auger gic_write_eoir(irqstat); 3564f5ef94fSEric Auger } 3574f5ef94fSEric Auger 3584f5ef94fSEric Auger static void pmu_reset_stats(void) 3594f5ef94fSEric Auger { 3604f5ef94fSEric Auger int i; 3614f5ef94fSEric Auger 3624f5ef94fSEric Auger for (i = 0; i < 32; i++) 3634f5ef94fSEric Auger pmu_stats.interrupts[i] = 0; 3644f5ef94fSEric Auger 3654f5ef94fSEric Auger pmu_stats.bitmap = 0; 3664f5ef94fSEric Auger pmu_stats.unexpected = false; 3674f5ef94fSEric Auger } 3684f5ef94fSEric Auger 3694ce2a804SEric Auger static void pmu_reset(void) 3704ce2a804SEric Auger { 3714ce2a804SEric Auger /* reset all counters, counting disabled at PMCR level*/ 3724ce2a804SEric Auger set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P); 3734ce2a804SEric Auger /* Disable all counters */ 3747d1f853aSRicardo Koller write_sysreg_s(ALL_SET_32, PMCNTENCLR_EL0); 3754ce2a804SEric Auger /* clear overflow reg */ 3767d1f853aSRicardo Koller write_sysreg(ALL_SET_32, pmovsclr_el0); 3774ce2a804SEric Auger /* disable overflow interrupts on all counters */ 3787d1f853aSRicardo Koller write_sysreg(ALL_SET_32, pmintenclr_el1); 3794f5ef94fSEric Auger pmu_reset_stats(); 3804ce2a804SEric Auger isb(); 3814ce2a804SEric Auger } 3824ce2a804SEric Auger 3834ce2a804SEric Auger static void test_event_counter_config(void) 3844ce2a804SEric Auger { 3854ce2a804SEric Auger int i; 3864ce2a804SEric Auger 3874ce2a804SEric Auger if (!pmu.nb_implemented_counters) { 3884ce2a804SEric Auger report_skip("No event counter, skip ..."); 3894ce2a804SEric Auger return; 
3904ce2a804SEric Auger } 3914ce2a804SEric Auger 3924ce2a804SEric Auger pmu_reset(); 3934ce2a804SEric Auger 3944ce2a804SEric Auger /* 3954ce2a804SEric Auger * Test setting through PMESELR/PMXEVTYPER and PMEVTYPERn read, 3964ce2a804SEric Auger * select counter 0 3974ce2a804SEric Auger */ 3984ce2a804SEric Auger write_sysreg(1, PMSELR_EL0); 3994ce2a804SEric Auger /* program this counter to count unsupported event */ 4004ce2a804SEric Auger write_sysreg(0xEA, PMXEVTYPER_EL0); 4014ce2a804SEric Auger write_sysreg(0xdeadbeef, PMXEVCNTR_EL0); 4024ce2a804SEric Auger report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA, 4034ce2a804SEric Auger "PMESELR/PMXEVTYPER/PMEVTYPERn"); 4044ce2a804SEric Auger report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef), 4054ce2a804SEric Auger "PMESELR/PMXEVCNTR/PMEVCNTRn"); 4064ce2a804SEric Auger 4074ce2a804SEric Auger /* try to configure an unsupported event within the range [0x0, 0x3F] */ 4084ce2a804SEric Auger for (i = 0; i <= 0x3F; i++) { 4094ce2a804SEric Auger if (!is_event_supported(i, false)) 4104ce2a804SEric Auger break; 4114ce2a804SEric Auger } 4124ce2a804SEric Auger if (i > 0x3F) { 4134ce2a804SEric Auger report_skip("pmevtyper: all events within [0x0, 0x3F] are supported"); 4144ce2a804SEric Auger return; 4154ce2a804SEric Auger } 4164ce2a804SEric Auger 4174ce2a804SEric Auger /* select counter 0 */ 4184ce2a804SEric Auger write_sysreg(0, PMSELR_EL0); 4194ce2a804SEric Auger /* program this counter to count unsupported event */ 4204ce2a804SEric Auger write_sysreg(i, PMXEVCNTR_EL0); 4214ce2a804SEric Auger /* read the counter value */ 4224ce2a804SEric Auger read_sysreg(PMXEVCNTR_EL0); 4234ce2a804SEric Auger report(read_sysreg(PMXEVCNTR_EL0) == i, 4244ce2a804SEric Auger "read of a counter programmed with unsupported event"); 4254ce2a804SEric Auger } 4264ce2a804SEric Auger 4274ce2a804SEric Auger static bool satisfy_prerequisites(uint32_t *events, unsigned int nb_events) 4284ce2a804SEric Auger { 4294ce2a804SEric Auger int i; 4304ce2a804SEric Auger 
4314ce2a804SEric Auger if (pmu.nb_implemented_counters < nb_events) { 4324ce2a804SEric Auger report_skip("Skip test as number of counters is too small (%d)", 4334ce2a804SEric Auger pmu.nb_implemented_counters); 4344ce2a804SEric Auger return false; 4354ce2a804SEric Auger } 4364ce2a804SEric Auger 4374ce2a804SEric Auger for (i = 0; i < nb_events; i++) { 4384ce2a804SEric Auger if (!is_event_supported(events[i], false)) { 4394ce2a804SEric Auger report_skip("Skip test as event 0x%x is not supported", 4404ce2a804SEric Auger events[i]); 4414ce2a804SEric Auger return false; 4424ce2a804SEric Auger } 4434ce2a804SEric Auger } 4444ce2a804SEric Auger return true; 4454ce2a804SEric Auger } 4464ce2a804SEric Auger 44739d1347aSRicardo Koller static uint64_t pmevcntr_mask(void) 44839d1347aSRicardo Koller { 44939d1347aSRicardo Koller /* 45039d1347aSRicardo Koller * Bits [63:0] are always incremented for 64-bit counters, 45139d1347aSRicardo Koller * even if the PMU is configured to generate an overflow at 45239d1347aSRicardo Koller * bits [31:0] 45339d1347aSRicardo Koller * 45439d1347aSRicardo Koller * For more details see the AArch64.IncrementEventCounter() 45539d1347aSRicardo Koller * pseudo-code in the ARM ARM DDI 0487I.a, section J1.1.1. 
45639d1347aSRicardo Koller */ 45739d1347aSRicardo Koller if (pmu.version >= ID_DFR0_PMU_V3_8_5) 45839d1347aSRicardo Koller return ~0; 45939d1347aSRicardo Koller 46039d1347aSRicardo Koller return (uint32_t)~0; 46139d1347aSRicardo Koller } 46239d1347aSRicardo Koller 463041df25bSRicardo Koller static bool check_overflow_prerequisites(bool overflow_at_64bits) 464041df25bSRicardo Koller { 465041df25bSRicardo Koller if (overflow_at_64bits && pmu.version < ID_DFR0_PMU_V3_8_5) { 466041df25bSRicardo Koller report_skip("Skip test as 64 overflows need FEAT_PMUv3p5"); 467041df25bSRicardo Koller return false; 468041df25bSRicardo Koller } 469041df25bSRicardo Koller 470041df25bSRicardo Koller return true; 471041df25bSRicardo Koller } 472041df25bSRicardo Koller 473041df25bSRicardo Koller static void test_basic_event_count(bool overflow_at_64bits) 4744ce2a804SEric Auger { 4754ce2a804SEric Auger uint32_t implemented_counter_mask, non_implemented_counter_mask; 476036369c5SRicardo Koller uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits); 477036369c5SRicardo Koller uint64_t pmcr_lp = overflow_at_64bits ? 
PMU_PMCR_LP : 0; 4784ce2a804SEric Auger uint32_t events[] = {CPU_CYCLES, INST_RETIRED}; 479036369c5SRicardo Koller uint32_t counter_mask; 4804ce2a804SEric Auger 481041df25bSRicardo Koller if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) || 482041df25bSRicardo Koller !check_overflow_prerequisites(overflow_at_64bits)) 4834ce2a804SEric Auger return; 4844ce2a804SEric Auger 4854ce2a804SEric Auger implemented_counter_mask = BIT(pmu.nb_implemented_counters) - 1; 4864ce2a804SEric Auger non_implemented_counter_mask = ~(BIT(31) | implemented_counter_mask); 4874ce2a804SEric Auger counter_mask = implemented_counter_mask | non_implemented_counter_mask; 4884ce2a804SEric Auger 4894ce2a804SEric Auger write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0); 4904ce2a804SEric Auger write_regn_el0(pmevtyper, 1, INST_RETIRED | PMEVTYPER_EXCLUDE_EL0); 4914ce2a804SEric Auger 4924ce2a804SEric Auger /* disable all counters */ 4937d1f853aSRicardo Koller write_sysreg_s(ALL_SET_32, PMCNTENCLR_EL0); 4944ce2a804SEric Auger report(!read_sysreg_s(PMCNTENCLR_EL0) && !read_sysreg_s(PMCNTENSET_EL0), 4954ce2a804SEric Auger "pmcntenclr: disable all counters"); 4964ce2a804SEric Auger 4974ce2a804SEric Auger /* 4984ce2a804SEric Auger * clear cycle and all event counters and allow counter enablement 4994ce2a804SEric Auger * through PMCNTENSET. LC is RES1. 
5004ce2a804SEric Auger */ 501036369c5SRicardo Koller set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P | pmcr_lp); 5024ce2a804SEric Auger isb(); 503036369c5SRicardo Koller report(get_pmcr() == (pmu.pmcr_ro | PMU_PMCR_LC | pmcr_lp), "pmcr: reset counters"); 5044ce2a804SEric Auger 5054ce2a804SEric Auger /* Preset counter #0 to pre overflow value to trigger an overflow */ 506036369c5SRicardo Koller write_regn_el0(pmevcntr, 0, pre_overflow); 507036369c5SRicardo Koller report(read_regn_el0(pmevcntr, 0) == pre_overflow, 5084ce2a804SEric Auger "counter #0 preset to pre-overflow value"); 5094ce2a804SEric Auger report(!read_regn_el0(pmevcntr, 1), "counter #1 is 0"); 5104ce2a804SEric Auger 5114ce2a804SEric Auger /* 5124ce2a804SEric Auger * Enable all implemented counters and also attempt to enable 5134ce2a804SEric Auger * not supported counters. Counting still is disabled by !PMCR.E 5144ce2a804SEric Auger */ 5154ce2a804SEric Auger write_sysreg_s(counter_mask, PMCNTENSET_EL0); 5164ce2a804SEric Auger 5174ce2a804SEric Auger /* check only those implemented are enabled */ 5184ce2a804SEric Auger report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) && 5194ce2a804SEric Auger (read_sysreg_s(PMCNTENSET_EL0) == implemented_counter_mask), 5204ce2a804SEric Auger "pmcntenset: enabled implemented_counters"); 5214ce2a804SEric Auger 5224ce2a804SEric Auger /* Disable all counters but counters #0 and #1 */ 5234ce2a804SEric Auger write_sysreg_s(~0x3, PMCNTENCLR_EL0); 5244ce2a804SEric Auger report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) && 5254ce2a804SEric Auger (read_sysreg_s(PMCNTENSET_EL0) == 0x3), 5264ce2a804SEric Auger "pmcntenset: just enabled #0 and #1"); 5274ce2a804SEric Auger 5284ce2a804SEric Auger /* clear overflow register */ 5297d1f853aSRicardo Koller write_sysreg(ALL_SET_32, pmovsclr_el0); 5304ce2a804SEric Auger report(!read_sysreg(pmovsclr_el0), "check overflow reg is 0"); 5314ce2a804SEric Auger 5324ce2a804SEric Auger /* 
disable overflow interrupts on all counters*/ 5337d1f853aSRicardo Koller write_sysreg(ALL_SET_32, pmintenclr_el1); 5344ce2a804SEric Auger report(!read_sysreg(pmintenclr_el1), 5354ce2a804SEric Auger "pmintenclr_el1=0, all interrupts disabled"); 5364ce2a804SEric Auger 5374ce2a804SEric Auger /* enable overflow interrupts on all event counters */ 5384ce2a804SEric Auger write_sysreg(implemented_counter_mask | non_implemented_counter_mask, 5394ce2a804SEric Auger pmintenset_el1); 5404ce2a804SEric Auger report(read_sysreg(pmintenset_el1) == implemented_counter_mask, 5414ce2a804SEric Auger "overflow interrupts enabled on all implemented counters"); 5424ce2a804SEric Auger 5434ce2a804SEric Auger /* Set PMCR.E, execute asm code and unset PMCR.E */ 5444ce2a804SEric Auger precise_instrs_loop(20, pmu.pmcr_ro | PMU_PMCR_E); 5454ce2a804SEric Auger 5464ce2a804SEric Auger report_info("counter #0 is 0x%lx (CPU_CYCLES)", 5474ce2a804SEric Auger read_regn_el0(pmevcntr, 0)); 5484ce2a804SEric Auger report_info("counter #1 is 0x%lx (INST_RETIRED)", 5494ce2a804SEric Auger read_regn_el0(pmevcntr, 1)); 5504ce2a804SEric Auger 5514ce2a804SEric Auger report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0)); 5520be21597SMatthias Rosenfelder report(read_sysreg(pmovsclr_el0) == 0x1, 5534ce2a804SEric Auger "check overflow happened on #0 only"); 5544ce2a804SEric Auger } 5554ce2a804SEric Auger 556041df25bSRicardo Koller static void test_mem_access(bool overflow_at_64bits) 5574ce2a804SEric Auger { 5584ce2a804SEric Auger void *addr = malloc(PAGE_SIZE); 5594ce2a804SEric Auger uint32_t events[] = {MEM_ACCESS, MEM_ACCESS}; 560036369c5SRicardo Koller uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits); 561036369c5SRicardo Koller uint64_t pmcr_lp = overflow_at_64bits ? 
PMU_PMCR_LP : 0; 5624ce2a804SEric Auger 563041df25bSRicardo Koller if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) || 564041df25bSRicardo Koller !check_overflow_prerequisites(overflow_at_64bits)) 5654ce2a804SEric Auger return; 5664ce2a804SEric Auger 5674ce2a804SEric Auger pmu_reset(); 5684ce2a804SEric Auger 5694ce2a804SEric Auger write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0); 5704ce2a804SEric Auger write_regn_el0(pmevtyper, 1, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0); 5714ce2a804SEric Auger write_sysreg_s(0x3, PMCNTENSET_EL0); 5724ce2a804SEric Auger isb(); 573036369c5SRicardo Koller mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp); 574a7509187SRicardo Koller report_info("counter #0 is 0x%lx (MEM_ACCESS)", read_regn_el0(pmevcntr, 0)); 575a7509187SRicardo Koller report_info("counter #1 is 0x%lx (MEM_ACCESS)", read_regn_el0(pmevcntr, 1)); 5764ce2a804SEric Auger /* We may measure more than 20 mem access depending on the core */ 5774ce2a804SEric Auger report((read_regn_el0(pmevcntr, 0) == read_regn_el0(pmevcntr, 1)) && 5784ce2a804SEric Auger (read_regn_el0(pmevcntr, 0) >= 20) && !read_sysreg(pmovsclr_el0), 5794ce2a804SEric Auger "Ran 20 mem accesses"); 5804ce2a804SEric Auger 5814ce2a804SEric Auger pmu_reset(); 5824ce2a804SEric Auger 583036369c5SRicardo Koller write_regn_el0(pmevcntr, 0, pre_overflow); 584036369c5SRicardo Koller write_regn_el0(pmevcntr, 1, pre_overflow); 5854ce2a804SEric Auger write_sysreg_s(0x3, PMCNTENSET_EL0); 5864ce2a804SEric Auger isb(); 587036369c5SRicardo Koller mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp); 5884ce2a804SEric Auger report(read_sysreg(pmovsclr_el0) == 0x3, 5894ce2a804SEric Auger "Ran 20 mem accesses with expected overflows on both counters"); 590a7509187SRicardo Koller report_info("cnt#0=0x%lx cnt#1=0x%lx overflow=0x%lx", 5914ce2a804SEric Auger read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1), 5924ce2a804SEric Auger read_sysreg(pmovsclr_el0)); 5934ce2a804SEric Auger } 

/*
 * Exercise the SW_INCR event via writes to PMSWINC_EL0:
 * - while PMCR.E is clear, software-increment writes must not change
 *   the counter;
 * - with PMCR.E set, 100 increments on counters #0/#1 must take the
 *   preset counter #0 past its overflow boundary (setting PMOVSCLR bit 0)
 *   and leave counter #1 at exactly 100.
 *
 * @overflow_at_64bits: when true, sets PMCR.LP for 64-bit overflow.
 */
static void test_sw_incr(bool overflow_at_64bits)
{
	uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits);
	uint64_t pmcr_lp = overflow_at_64bits ? PMU_PMCR_LP : 0;
	uint32_t events[] = {SW_INCR, SW_INCR};
	/* expected value of counter #0 after 100 increments past overflow */
	uint64_t cntr0 = (pre_overflow + 100) & pmevcntr_mask();
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) ||
	    !check_overflow_prerequisites(overflow_at_64bits))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	write_regn_el0(pmevcntr, 0, pre_overflow);
	isb();

	/* PMCR.E is still clear: these writes must be ignored */
	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report_info("SW_INCR counter #0 has value 0x%lx", read_regn_el0(pmevcntr, 0));
	report(read_regn_el0(pmevcntr, 0) == pre_overflow,
		"PWSYNC does not increment if PMCR.E is unset");

	pmu_reset();

	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	isb();

	/* increment both counters #0 and #1 on each write */
	for (i = 0; i < 100; i++)
		write_sysreg(0x3, pmswinc_el0);

	isb();
	report(read_regn_el0(pmevcntr, 0) == cntr0, "counter #0 after + 100 SW_INCR");
	report(read_regn_el0(pmevcntr, 1) == 100, "counter #1 after + 100 SW_INCR");
	report_info("counter values after 100 SW_INCR #0=0x%lx #1=0x%lx",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
	report(read_sysreg(pmovsclr_el0) == 0x1,
		"overflow on counter #0 after 100 SW_INCR");
}

/*
 * Enable a CHAIN pair. The order matters: the high (odd) counter is
 * enabled before the low (even) one so that no low-counter overflow can
 * occur while the chained half is still disabled.
 */
static void enable_chain_counter(int even)
{
	write_sysreg_s(BIT(even + 1), PMCNTENSET_EL0); /* Enable the high counter first */
	isb();
	write_sysreg_s(BIT(even), PMCNTENSET_EL0); /* Enable the low counter */
	isb();
}

/*
 * Disable a CHAIN pair, mirroring enable_chain_counter(): the low (even)
 * counter is stopped before the high (odd) one.
 */
static void disable_chain_counter(int even)
{
	write_sysreg_s(BIT(even), PMCNTENCLR_EL0); /* Disable the low counter first*/
	isb();
	write_sysreg_s(BIT(even + 1), PMCNTENCLR_EL0); /* Disable the high counter */
	isb();
}

/*
 * Check CHAIN event behaviour: counter #1 (CHAIN) increments whenever
 * the even counter #0 (CPU_CYCLES) overflows, and the overflow flag is
 * set for the even counter only — until the odd counter itself wraps,
 * in which case both overflow bits are expected.
 */
static void test_chained_counters(bool unused)
{
	uint32_t events[] = {CPU_CYCLES, CHAIN};
	uint64_t all_set = pmevcntr_mask();

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, CPU_CYCLES |
 PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	enable_chain_counter(0);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);

	/* one 32-bit overflow of #0 propagates one increment to #1 */
	report(read_regn_el0(pmevcntr, 1) == 1, "CHAIN counter #1 incremented");
	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #1");

	/* test 64b overflow */

	pmu_reset();

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	write_regn_el0(pmevcntr, 1, 0x1);
	enable_chain_counter(0);
	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_regn_el0(pmevcntr, 1) == 2, "CHAIN counter #1 set to 2");
	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #2");

	/* with #1 saturated, the chained increment must wrap it to 0 */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	write_regn_el0(pmevcntr, 1, all_set);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_regn_el0(pmevcntr, 1) == 0, "CHAIN counter #1 wrapped");
	report(read_sysreg(pmovsclr_el0) == 0x3, "overflow on even and odd counters");
}

/*
 * Same CHAIN checks as test_chained_counters(), but driving the even
 * counter with SW_INCR writes instead of a cycle-counting loop, so the
 * exact final values of both counters can be asserted.
 */
static void test_chained_sw_incr(bool unused)
{
	uint32_t events[] = {SW_INCR, CHAIN};
	uint64_t cntr0 = (PRE_OVERFLOW_32 + 100) & pmevcntr_mask();
	uint64_t cntr1 = (ALL_SET_32 + 1) & pmevcntr_mask();
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	enable_chain_counter(0);

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report((read_sysreg(pmovsclr_el0) == 0x1) &&
		(read_regn_el0(pmevcntr, 1) == 1),
		"overflow and chain counter incremented after 100 SW_INCR/CHAIN");
	report_info("overflow=0x%lx, #0=0x%lx #1=0x%lx", read_sysreg(pmovsclr_el0),
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));

	/* 64b SW_INCR and overflow on CHAIN counter*/
	pmu_reset();

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	write_regn_el0(pmevcntr, 1, ALL_SET_32);
	enable_chain_counter(0);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	/* chained increment wraps the saturated odd counter: both overflow */
	report((read_sysreg(pmovsclr_el0) == 0x3) &&
	       (read_regn_el0(pmevcntr, 0) == cntr0) &&
	       (read_regn_el0(pmevcntr, 1) == cntr1),
	       "expected overflows and values after 100 SW_INCR/CHAIN");
	report_info("overflow=0x%lx, #0=0x%lx #1=0x%lx", read_sysreg(pmovsclr_el0),
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
}

/* Dump counters #1/#0 and the overflow status, tagged with string __s */
#define PRINT_REGS(__s) \
	report_info("%s #1=0x%lx #0=0x%lx overflow=0x%lx", __s, \
		    read_regn_el0(pmevcntr, 1), \
		    read_regn_el0(pmevcntr, 0), \
		    read_sysreg(pmovsclr_el0))

/*
 * This test checks that a mem access loop featuring COUNT accesses
 * does not overflow with an init value of PRE_OVERFLOW2. It also
 * records the min/max access count to see how much the counting
 * is (un)reliable
 */
static void test_mem_access_reliability(bool overflow_at_64bits)
{
	uint32_t events[] = {MEM_ACCESS};
	void *addr = malloc(PAGE_SIZE);
	uint64_t cntr_val, num_events, max = 0, min = pmevcntr_mask();
	uint64_t pre_overflow2 = PRE_OVERFLOW2(overflow_at_64bits);
	uint64_t all_set = ALL_SET(overflow_at_64bits);
	uint64_t pmcr_lp = overflow_at_64bits ?
 PMU_PMCR_LP : 0;
	bool overflow = false;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) ||
	    !check_overflow_prerequisites(overflow_at_64bits))
		return;

	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	/* run the COUNT-access loop 100 times and track the count spread */
	for (int i = 0; i < 100; i++) {
		pmu_reset();
		write_regn_el0(pmevcntr, 0, pre_overflow2);
		write_sysreg_s(0x1, PMCNTENSET_EL0);
		isb();
		mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
		cntr_val = read_regn_el0(pmevcntr, 0);
		if (cntr_val >= pre_overflow2) {
			num_events = cntr_val - pre_overflow2;
		} else {
			/* unexpected counter overflow */
			num_events = cntr_val + all_set - pre_overflow2;
			overflow = true;
			report_info("iter=%d num_events=%ld min=%ld max=%ld overflow!!!",
				    i, num_events, min, max);
		}
		/* record extreme value */
		max = MAX(num_events, max);
		min = MIN(num_events, min);
	}
	report_info("overflow=%d min=%ld max=%ld expected=%d acceptable margin=%d",
		    overflow, min, max, COUNT, MARGIN);
	report(!overflow, "mem_access count is reliable");
}

/*
 * Six subtests covering transitions between plain 32-bit counting and
 * chained (CHAIN event) 64-bit counting, checking that CHAIN counters
 * only increment while properly enabled and that overflow flags land
 * on the expected counters across each reconfiguration.
 */
static void test_chain_promotion(bool unused)
{
	uint32_t events[] = {MEM_ACCESS, CHAIN};
	void *addr = malloc(PAGE_SIZE);

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	/* Only enable CHAIN counter */
	report_prefix_push("subtest1");
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x2, PMCNTENSET_EL0);
	isb();

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("post");
	report(!read_regn_el0(pmevcntr, 0),
	       "chain counter not counting if even counter is disabled");
	report_prefix_pop();

	/* Only enable even counter */
	report_prefix_push("subtest2");
	pmu_reset();
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW_32);
	write_sysreg_s(0x1, PMCNTENSET_EL0);
	isb();

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("post");
	report(!read_regn_el0(pmevcntr, 1) && (read_sysreg(pmovsclr_el0) == 0x1),
	       "odd counter did not increment on overflow if disabled");
	report_prefix_pop();

	/* 1st COUNT with CHAIN enabled, next COUNT with CHAIN disabled */
	report_prefix_push("subtest3");
	pmu_reset();
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	enable_chain_counter(0);
	PRINT_REGS("init");

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("After 1st loop");

	/* disable the CHAIN event */
	disable_chain_counter(0);
	write_sysreg_s(0x1, PMCNTENSET_EL0); /* Enable the low counter */
	isb();
	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("After 2nd loop");
	report(read_sysreg(pmovsclr_el0) == 0x1,
	       "should have triggered an overflow on #0");
	report(!read_regn_el0(pmevcntr, 1),
	       "CHAIN counter #1 shouldn't have incremented");
	report_prefix_pop();

	/* 1st COUNT with CHAIN disabled, next COUNT with CHAIN enabled */

	report_prefix_push("subtest4");
	pmu_reset();
	write_sysreg_s(0x1, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	isb();
	PRINT_REGS("init");

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("After 1st loop");

	/* Disable the low counter first and enable the chain counter */
	write_sysreg_s(0x1, PMCNTENCLR_EL0);
	isb();
	enable_chain_counter(0);

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);

	PRINT_REGS("After 2nd loop");

	report((read_regn_el0(pmevcntr, 1) == 1) &&
	       (read_sysreg(pmovsclr_el0) == 0x1),
	       "CHAIN counter enabled: CHAIN counter was incremented and overflow");
	report_prefix_pop();

	/* start as MEM_ACCESS/CPU_CYCLES and move to CHAIN/MEM_ACCESS */
	report_prefix_push("subtest5");
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	isb();
	PRINT_REGS("init");

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("After 1st loop");

	/* 0 becomes CHAINED */
	write_sysreg_s(0x3, PMCNTENCLR_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 1, 0x0);
	enable_chain_counter(0);

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("After 2nd loop");

	report((read_regn_el0(pmevcntr, 1) == 1) &&
	       (read_sysreg(pmovsclr_el0) == 0x1),
	       "32b->64b: CHAIN counter incremented and overflow");
	report_prefix_pop();

	/* start as CHAIN/MEM_ACCESS and move to MEM_ACCESS/CPU_CYCLES */
	report_prefix_push("subtest6");
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2_32);
	enable_chain_counter(0);
	PRINT_REGS("init");

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("After 1st loop");

	/* demote: #1 goes back to plain CPU_CYCLES counting */
	disable_chain_counter(0);
	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	mem_access_loop(addr, COUNT, pmu.pmcr_ro | PMU_PMCR_E);
	PRINT_REGS("After 2nd loop");
	report(read_sysreg(pmovsclr_el0) == 1,
	       "overflow is expected on counter 0");
	report_prefix_pop();
}

/*
 * Return true iff exactly the counters in @bitmap received exactly one
 * overflow interrupt each and no unexpected interrupt was recorded.
 * Note: consumes (clears) the bits of pmu_stats.bitmap it checks.
 */
static bool expect_interrupts(uint32_t bitmap)
{
	int i;

	if (pmu_stats.bitmap ^ bitmap || pmu_stats.unexpected)
		return false;

	for (i = 0; i < 32; i++) {
		if (test_and_clear_bit(i, &pmu_stats.bitmap))
			if (pmu_stats.interrupts[i] != 1)
				return false;
	}
	return true;
}

/*
 * Verify PMU overflow interrupt delivery: no interrupt while
 * PMINTENSET_EL1 is clear, one interrupt per overflowing counter once
 * enabled, and correct interrupt routing for chained counters
 * (or for PMCR.LP when @overflow_at_64bits is true).
 */
static void test_overflow_interrupt(bool overflow_at_64bits)
{
	uint64_t pre_overflow = PRE_OVERFLOW(overflow_at_64bits);
	uint64_t all_set = pmevcntr_mask();
	uint64_t pmcr_lp = overflow_at_64bits ?
 PMU_PMCR_LP : 0;
	uint32_t events[] = {MEM_ACCESS, SW_INCR};
	void *addr = malloc(PAGE_SIZE);
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)) ||
	    !check_overflow_prerequisites(overflow_at_64bits))
		return;

	/* route the PMU overflow PPI (IRQ 23 here) to our handler */
	gic_enable_defaults();
	install_irq_handler(EL1H_IRQ, irq_handler);
	local_irq_enable();
	gic_enable_irq(23);

	pmu_reset();

	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_regn_el0(pmevcntr, 1, pre_overflow);
	isb();

	/* interrupts are disabled (PMINTENSET_EL1 == 0) */

	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	report(expect_interrupts(0), "no overflow interrupt after preset");

	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x2, pmswinc_el0);

	isb();
	set_pmcr(pmu.pmcr_ro);
	isb();
	report(expect_interrupts(0), "no overflow interrupt after counting");

	/* enable interrupts (PMINTENSET_EL1 <= ALL_SET_32) */

	pmu_reset_stats();

	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_regn_el0(pmevcntr, 1, pre_overflow);
	/* clear stale overflow flags before arming the interrupts */
	write_sysreg(ALL_SET_32, pmovsclr_el0);
	write_sysreg(ALL_SET_32, pmintenset_el1);
	isb();

	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);

	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x3, pmswinc_el0);

	mem_access_loop(addr, 200, pmu.pmcr_ro);
	report_info("overflow=0x%lx", read_sysreg(pmovsclr_el0));
	report(expect_interrupts(0x3),
	       "overflow interrupts expected on #0 and #1");

	/*
	 * promote to 64-b:
	 *
	 * This only applies to the !overflow_at_64bits case, as
	 * overflow_at_64bits doesn't implement CHAIN events. The
	 * overflow_at_64bits case just checks that chained counters are
	 * not incremented when PMCR.LP == 1.
	 */

	pmu_reset_stats();

	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, pre_overflow);
	isb();
	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	report(expect_interrupts(0x1), "expect overflow interrupt");

	/* overflow on odd counter */
	pmu_reset_stats();
	write_regn_el0(pmevcntr, 0, pre_overflow);
	write_regn_el0(pmevcntr, 1, all_set);
	isb();
	mem_access_loop(addr, 400, pmu.pmcr_ro | PMU_PMCR_E | pmcr_lp);
	if (overflow_at_64bits) {
		/* LP set: no chaining, the odd counter must stay untouched */
		report(expect_interrupts(0x1),
		       "expect overflow interrupt on even counter");
		report(read_regn_el0(pmevcntr, 1) == all_set,
		       "Odd counter did not change");
	} else {
		report(expect_interrupts(0x3),
		       "expect overflow interrupt on even and odd counter");
		report(read_regn_el0(pmevcntr, 1) != all_set,
		       "Odd counter wrapped");
	}
}
#endif

/*
 * Ensure that the cycle counter progresses between back-to-back reads.
 */
static bool check_cycles_increase(void)
{
	bool success = true;

	/* init before event access, this test only cares about cycle count */
	pmu_reset();
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	/* LC: 64-bit cycle counter, C: reset it, E: enable counting */
	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
	isb();

	for (int i = 0; i < NR_SAMPLES; i++) {
		uint64_t a, b;

		a = get_pmccntr();
		b = get_pmccntr();

		/* the second read must return a strictly larger value */
		if (a >= b) {
			printf("Read %"PRId64" then %"PRId64".\n", a, b);
			success = false;
			break;
		}
	}

	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
	isb();

	return success;
}

/*
 * Execute a known number of guest instructions. Only even instruction counts
 * greater than or equal to 4 are supported by the in-line assembly code. The
 * control register (PMCR_EL0) is initialized with the provided value (allowing
 * for example for the cycle counter or event counters to be reset). At the end
 * of the exact instruction loop, zero is written to PMCR_EL0 to disable
 * counting, allowing the cycle counter or event counters to be read at the
 * leisure of the calling code.
 */
static void measure_instrs(int num, uint32_t pmcr)
{
	/* the loop body executes 2 instructions, plus 2 of setup/teardown */
	int loop = (num - 2) / 2;

	assert(num >= 4 && ((num - 2) % 2 == 0));
	precise_instrs_loop(loop, pmcr);
}

/*
 * Measure cycle counts for various known instruction counts. Ensure that the
 * cycle counter progresses (similar to check_cycles_increase() but with more
 * instructions and using reset and stop controls). If supplied a positive,
 * nonzero CPI parameter, it also strictly checks that every measurement matches
 * it. Strict CPI checking is used to test -icount mode.
 */
static bool check_cpi(int cpi)
{
	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;

	/* init before event access, this test only cares about cycle count */
	pmu_reset();
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	if (cpi > 0)
		printf("Checking for CPI=%d.\n", cpi);
	printf("instrs : cycles0 cycles1 ...\n");

	for (unsigned int i = 4; i < 300; i += 32) {
		uint64_t avg, sum = 0;

		printf("%4d:", i);
		for (int j = 0; j < NR_SAMPLES; j++) {
			uint64_t cycles;

			set_pmccntr(0);
			measure_instrs(i, pmcr);
			cycles = get_pmccntr();
			printf(" %4"PRId64"", cycles);

			if (!cycles) {
				printf("\ncycles not incrementing!\n");
				return false;
			} else if (cpi > 0 && cycles != i * cpi) {
				printf("\nunexpected cycle count received!\n");
				return false;
			} else if ((cycles >> 32) != 0) {
				/* The cycles taken by the loop above should
				 * fit in 32 bits easily. We check the upper
				 * 32 bits of the cycle counter to make sure
				 * there is no surprise. */
				printf("\ncycle count bigger than 32bit!\n");
				return false;
			}

			sum += cycles;
		}
		avg = sum / NR_SAMPLES;
		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
		       (avg >= i) ? "cpi" : "ipc",
		       (avg >= i) ? avg / i : i / avg);
	}

	return true;
}

/*
 * AArch32-only: check that the PMCCNTR64 accessor reads back a written
 * value on PMUv3. Gated on an erratum workaround being enabled, since
 * the access is unsafe on broken hosts.
 */
static void pmccntr64_test(void)
{
#ifdef __arm__
	if (pmu.version == ID_DFR0_PMU_V3) {
		if (ERRATA(9e3f7a296940)) {
			write_sysreg(0xdead, PMCCNTR64);
			report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
		} else
			report_skip("Skipping unsafe pmccntr64 test.
 Set ERRATA_9e3f7a296940=y to enable.");
	}
#endif
}

/* Return FALSE if no PMU found, otherwise return TRUE */
static bool pmu_probe(void)
{
	uint32_t pmcr;
	uint8_t implementer;

	pmu.version = get_pmu_version();
	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
		return false;

	report_info("PMU version: 0x%x", pmu.version);

	pmcr = get_pmcr();
	implementer = (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK;
	report_info("PMU implementer/ID code: %#"PRIx32"(\"%c\")/%#"PRIx32,
		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
		    implementer ? implementer : ' ',
		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);

	/* store read-only and RES0 fields of the PMCR bottom-half*/
	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
	pmu.nb_implemented_counters =
		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
	report_info("Implements %d event counters",
		    pmu.nb_implemented_counters);

	return true;
}

/*
 * Run @test(@arg) with "name/prefix" pushed onto the report prefix so
 * all its results are labelled consistently.
 */
static void run_test(const char *name, const char *prefix,
		     void (*test)(bool), void *arg)
{
	report_prefix_push(name);
	report_prefix_push(prefix);

	test(arg);

	report_prefix_pop();
	report_prefix_pop();
}

/*
 * Wrapper for event tests taking an overflow_at_64bits flag: labels the
 * run with the overflow width it exercises.
 */
static void run_event_test(char *name, void (*test)(bool),
			   bool overflow_at_64bits)
{
	const char *prefix = overflow_at_64bits ?
"64-bit overflows" 1222041df25bSRicardo Koller : "32-bit overflows"; 1223041df25bSRicardo Koller 1224041df25bSRicardo Koller run_test(name, prefix, test, (void *)overflow_at_64bits); 1225041df25bSRicardo Koller } 1226041df25bSRicardo Koller 12278f76a347SChristopher Covington int main(int argc, char *argv[]) 12284244065bSChristopher Covington { 12298f76a347SChristopher Covington int cpi = 0; 12308f76a347SChristopher Covington 12314244065bSChristopher Covington if (!pmu_probe()) { 12324244065bSChristopher Covington printf("No PMU found, test skipped...\n"); 12334244065bSChristopher Covington return report_summary(); 12344244065bSChristopher Covington } 12354244065bSChristopher Covington 123657ec1086SEric Auger if (argc < 2) 123757ec1086SEric Auger report_abort("no test specified"); 123857ec1086SEric Auger 12394244065bSChristopher Covington report_prefix_push("pmu"); 12404244065bSChristopher Covington 124157ec1086SEric Auger if (strcmp(argv[1], "cycle-counter") == 0) { 124257ec1086SEric Auger report_prefix_push(argv[1]); 124357ec1086SEric Auger if (argc > 2) 124457ec1086SEric Auger cpi = atol(argv[2]); 1245a299895bSThomas Huth report(check_cycles_increase(), 1246a299895bSThomas Huth "Monotonically increasing cycle count"); 1247a299895bSThomas Huth report(check_cpi(cpi), "Cycle/instruction ratio"); 12484c357610SAndrew Jones pmccntr64_test(); 124957ec1086SEric Auger report_prefix_pop(); 12504870738cSEric Auger } else if (strcmp(argv[1], "pmu-event-introspection") == 0) { 12514870738cSEric Auger report_prefix_push(argv[1]); 12524870738cSEric Auger test_event_introspection(); 12534870738cSEric Auger report_prefix_pop(); 12544ce2a804SEric Auger } else if (strcmp(argv[1], "pmu-event-counter-config") == 0) { 12554ce2a804SEric Auger report_prefix_push(argv[1]); 12564ce2a804SEric Auger test_event_counter_config(); 12574ce2a804SEric Auger report_prefix_pop(); 12584ce2a804SEric Auger } else if (strcmp(argv[1], "pmu-basic-event-count") == 0) { 1259041df25bSRicardo Koller 
run_event_test(argv[1], test_basic_event_count, false); 1260036369c5SRicardo Koller run_event_test(argv[1], test_basic_event_count, true); 12613c23bf40SEric Auger } else if (strcmp(argv[1], "pmu-mem-access-reliability") == 0) { 12623c23bf40SEric Auger run_event_test(argv[1], test_mem_access_reliability, false); 12633c23bf40SEric Auger run_event_test(argv[1], test_mem_access_reliability, true); 12644ce2a804SEric Auger } else if (strcmp(argv[1], "pmu-mem-access") == 0) { 1265041df25bSRicardo Koller run_event_test(argv[1], test_mem_access, false); 1266036369c5SRicardo Koller run_event_test(argv[1], test_mem_access, true); 1267bb9a5adcSEric Auger } else if (strcmp(argv[1], "pmu-sw-incr") == 0) { 1268041df25bSRicardo Koller run_event_test(argv[1], test_sw_incr, false); 1269036369c5SRicardo Koller run_event_test(argv[1], test_sw_incr, true); 127066fee034SEric Auger } else if (strcmp(argv[1], "pmu-chained-counters") == 0) { 1271041df25bSRicardo Koller run_event_test(argv[1], test_chained_counters, false); 127266fee034SEric Auger } else if (strcmp(argv[1], "pmu-chained-sw-incr") == 0) { 1273041df25bSRicardo Koller run_event_test(argv[1], test_chained_sw_incr, false); 1274ca42f29aSEric Auger } else if (strcmp(argv[1], "pmu-chain-promotion") == 0) { 1275041df25bSRicardo Koller run_event_test(argv[1], test_chain_promotion, false); 12764f5ef94fSEric Auger } else if (strcmp(argv[1], "pmu-overflow-interrupt") == 0) { 1277041df25bSRicardo Koller run_event_test(argv[1], test_overflow_interrupt, false); 1278036369c5SRicardo Koller run_event_test(argv[1], test_overflow_interrupt, true); 127957ec1086SEric Auger } else { 128057ec1086SEric Auger report_abort("Unknown sub-test '%s'", argv[1]); 128157ec1086SEric Auger } 12824c357610SAndrew Jones 12834244065bSChristopher Covington return report_summary(); 12844244065bSChristopher Covington } 1285