/*
 * Test the ARM Performance Monitors Unit (PMU).
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License version 2.1 and
 * only version 2.1 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
 * for more details.
 */
#include "libcflat.h"
#include "errata.h"
#include "asm/barrier.h"
#include "asm/sysreg.h"
#include "asm/processor.h"
#include <bitops.h>
#include <asm/gic.h>

#define PMU_PMCR_E         (1 << 0)
#define PMU_PMCR_P         (1 << 1)
#define PMU_PMCR_C         (1 << 2)
#define PMU_PMCR_D         (1 << 3)
#define PMU_PMCR_X         (1 << 4)
#define PMU_PMCR_DP        (1 << 5)
#define PMU_PMCR_LC        (1 << 6)
#define PMU_PMCR_N_SHIFT   11
#define PMU_PMCR_N_MASK    0x1f
#define PMU_PMCR_ID_SHIFT  16
#define PMU_PMCR_ID_MASK   0xff
#define PMU_PMCR_IMP_SHIFT 24
#define PMU_PMCR_IMP_MASK  0xff

#define PMU_CYCLE_IDX      31

#define NR_SAMPLES 10

/* Some PMU events */
#define SW_INCR			0x0
#define INST_RETIRED		0x8
#define CPU_CYCLES		0x11
#define MEM_ACCESS		0x13
#define INST_PREC		0x1B
#define STALL_FRONTEND		0x23
#define STALL_BACKEND		0x24
#define CHAIN			0x1E

#define COMMON_EVENTS_LOW	0x0
#define COMMON_EVENTS_HIGH	0x3F
#define EXT_COMMON_EVENTS_LOW	0x4000
#define EXT_COMMON_EVENTS_HIGH	0x403F

#define ALL_SET			0xFFFFFFFF
#define ALL_CLEAR		0x0
#define PRE_OVERFLOW		0xFFFFFFF0
#define PRE_OVERFLOW2		0xFFFFFFDC

#define PMU_PPI			23

struct pmu {
	unsigned int version;
	unsigned int nb_implemented_counters;
	uint32_t pmcr_ro;
};

struct pmu_stats {
	unsigned long bitmap;
	uint32_t interrupts[32];
	bool unexpected;
};

static struct pmu pmu;

#if defined(__arm__)
#define ID_DFR0_PERFMON_SHIFT 24
#define ID_DFR0_PERFMON_MASK  0xf

#define ID_DFR0_PMU_NOTIMPL	0b0000
#define ID_DFR0_PMU_V1		0b0001
#define ID_DFR0_PMU_V2		0b0010
#define ID_DFR0_PMU_V3		0b0011
#define ID_DFR0_PMU_V3_8_1	0b0100
#define ID_DFR0_PMU_V3_8_4	0b0101
#define ID_DFR0_PMU_V3_8_5	0b0110
#define ID_DFR0_PMU_IMPDEF	0b1111

#define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
#define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
#define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
#define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
#define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
#define PMCNTENCLR   __ACCESS_CP15(c9, 0, c12, 2)
#define PMOVSR       __ACCESS_CP15(c9, 0, c12, 3)
#define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
#define PMINTENCLR   __ACCESS_CP15(c9, 0, c14, 2)
#define PMCCNTR64    __ACCESS_CP15_64(0, c9)

static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }

static inline uint8_t get_pmu_version(void)
{
	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
}

static inline uint64_t get_pmccntr(void)
{
	return read_sysreg(PMCCNTR32);
}

static inline void set_pmccntr(uint64_t value)
{
	write_sysreg(value & 0xffffffff, PMCCNTR32);
}

/* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */
static inline void set_pmccfiltr(uint32_t value)
{
	write_sysreg(PMU_CYCLE_IDX, PMSELR);
	write_sysreg(value, PMXEVTYPER);
	isb();
}
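
/*
 * Note: on ARMv7 the cycle counter is accessible both as the 32-bit PMCCNTR
 * (c9, c13, 0) and, when PMUv3 is implemented, through the 64-bit CP15
 * accessor defined above as PMCCNTR64; pmccntr64_test() near the end of this
 * file exercises the 64-bit view.
 */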

/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions were inserted to make sure
 * pmccntr read after this function returns the exact instructions executed in
 * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	asm volatile(
	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	bgt	1b\n"
	"	mcr	p15, 0, %[z], c9, c12, 0\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr), [z] "r" (0)
	: "cc");
}

static void pmu_reset(void)
{
	/* reset all counters, counting disabled at PMCR level */
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	/* Disable all counters */
	write_sysreg(ALL_SET, PMCNTENCLR);
	/* clear overflow reg */
	write_sysreg(ALL_SET, PMOVSR);
	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET, PMINTENCLR);
	isb();
}

/* event counter tests only implemented for aarch64 */
static void test_event_introspection(void) {}
static void test_event_counter_config(void) {}
static void test_basic_event_count(void) {}
static void test_mem_access(void) {}
static void test_sw_incr(void) {}
static void test_chained_counters(void) {}
static void test_chained_sw_incr(void) {}
static void test_chain_promotion(void) {}
static void test_overflow_interrupt(void) {}

#elif defined(__aarch64__)
#define ID_AA64DFR0_PERFMON_SHIFT 8
#define ID_AA64DFR0_PERFMON_MASK  0xf

#define ID_DFR0_PMU_NOTIMPL	0b0000
#define ID_DFR0_PMU_V3		0b0001
#define ID_DFR0_PMU_V3_8_1	0b0100
#define ID_DFR0_PMU_V3_8_4	0b0101
#define ID_DFR0_PMU_V3_8_5	0b0110
#define ID_DFR0_PMU_IMPDEF	0b1111

static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }

static inline uint8_t get_pmu_version(void)
{
	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;
	return ver;
}

/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	uint64_t pmcr64 = pmcr;
	asm volatile(
	"	msr	pmcr_el0, %[pmcr]\n"
	"	isb\n"
	"1:	subs	%w[loop], %w[loop], #1\n"
	"	b.gt	1b\n"
	"	msr	pmcr_el0, xzr\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr64)
	: "cc");
}

#define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
#define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
#define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)

#define PMEVTYPER_EXCLUDE_EL1 BIT(31)
#define PMEVTYPER_EXCLUDE_EL0 BIT(30)

static bool is_event_supported(uint32_t n, bool warn)
{
	uint64_t pmceid0 = read_sysreg(pmceid0_el0);
	uint64_t pmceid1 = read_sysreg_s(PMCEID1_EL0);
	bool supported;
	uint64_t reg;

	/*
	 * The low 32 bits of PMCEID0/1 respectively describe
	 * event support for events 0-31/32-63. Their high
	 * 32 bits describe support for extended events
	 * starting at 0x4000, using the same split.
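	 * For example, CPU_CYCLES (0x11) is advertised by bit 17 of
	 * PMCEID0_EL0, and STALL_FRONTEND (0x23) by bit 3 of PMCEID1_EL0.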
	 */
	assert((n >= COMMON_EVENTS_LOW && n <= COMMON_EVENTS_HIGH) ||
	       (n >= EXT_COMMON_EVENTS_LOW && n <= EXT_COMMON_EVENTS_HIGH));

	if (n <= COMMON_EVENTS_HIGH)
		reg = lower_32_bits(pmceid0) | ((u64)lower_32_bits(pmceid1) << 32);
	else
		reg = upper_32_bits(pmceid0) | ((u64)upper_32_bits(pmceid1) << 32);

	supported = reg & (1UL << (n & 0x3F));

	if (!supported && warn)
		report_info("event 0x%x is not supported", n);
	return supported;
}

static void test_event_introspection(void)
{
	bool required_events;

	if (!pmu.nb_implemented_counters) {
		report_skip("No event counter, skip ...");
		return;
	}

	/* PMUv3 requires that an implementation include some common events */
	required_events = is_event_supported(SW_INCR, true) &&
			  is_event_supported(CPU_CYCLES, true) &&
			  (is_event_supported(INST_RETIRED, true) ||
			   is_event_supported(INST_PREC, true));

	if (pmu.version >= ID_DFR0_PMU_V3_8_1) {
		required_events = required_events &&
				  is_event_supported(STALL_FRONTEND, true) &&
				  is_event_supported(STALL_BACKEND, true);
	}

	report(required_events, "Check required events are implemented");
}

/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Loads @loop times the data at @addr into x9.
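 * Each loop iteration performs exactly one load, so a counter programmed
 * with the MEM_ACCESS event is expected to advance by at least @loop
 * (speculative accesses may make it count more).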
 */
static void mem_access_loop(void *addr, long loop, uint32_t pmcr)
{
	uint64_t pmcr64 = pmcr;
	asm volatile(
	"	msr	pmcr_el0, %[pmcr]\n"
	"	isb\n"
	"	mov	x10, %[loop]\n"
	"1:	sub	x10, x10, #1\n"
	"	ldr	x9, [%[addr]]\n"
	"	cmp	x10, #0x0\n"
	"	b.gt	1b\n"
	"	msr	pmcr_el0, xzr\n"
	"	isb\n"
	:
	: [addr] "r" (addr), [pmcr] "r" (pmcr64), [loop] "r" (loop)
	: "x9", "x10", "cc");
}

static struct pmu_stats pmu_stats;

static void irq_handler(struct pt_regs *regs)
{
	uint32_t irqstat, irqnr;

	irqstat = gic_read_iar();
	irqnr = gic_iar_irqnr(irqstat);

	if (irqnr == PMU_PPI) {
		unsigned long overflows = read_sysreg(pmovsclr_el0);
		int i;

		for (i = 0; i < 32; i++) {
			if (test_and_clear_bit(i, &overflows)) {
				pmu_stats.interrupts[i]++;
				pmu_stats.bitmap |= 1 << i;
			}
		}
		write_sysreg(ALL_SET, pmovsclr_el0);
		isb();
	} else {
		pmu_stats.unexpected = true;
	}
	gic_write_eoir(irqstat);
}

static void pmu_reset_stats(void)
{
	int i;

	for (i = 0; i < 32; i++)
		pmu_stats.interrupts[i] = 0;

	pmu_stats.bitmap = 0;
	pmu_stats.unexpected = false;
}

static void pmu_reset(void)
{
	/* reset all counters, counting disabled at PMCR level */
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	/* Disable all counters */
	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
	/* clear overflow reg */
	write_sysreg(ALL_SET, pmovsclr_el0);
	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET, pmintenclr_el1);
	pmu_reset_stats();
	isb();
}

static void test_event_counter_config(void)
{
	int i;

	if (!pmu.nb_implemented_counters) {
		report_skip("No event counter, skip ...");
		return;
	}

	pmu_reset();

	/*
	 * Test setting through PMSELR/PMXEVTYPER and PMEVTYPERn read,
	 * select counter 1
	 */
	write_sysreg(1, PMSELR_EL0);
	/* program this counter to count an unsupported event */
	write_sysreg(0xEA, PMXEVTYPER_EL0);
	write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
	report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
		"PMSELR/PMXEVTYPER/PMEVTYPERn");
	report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
		"PMSELR/PMXEVCNTR/PMEVCNTRn");

	/* try to find an unsupported event within the range [0x0, 0x3F] */
	for (i = 0; i <= 0x3F; i++) {
		if (!is_event_supported(i, false))
			break;
	}
	if (i > 0x3F) {
		report_skip("pmevtyper: all events within [0x0, 0x3F] are supported");
		return;
	}

	/* select counter 0 */
	write_sysreg(0, PMSELR_EL0);
	/* write the unsupported event number into the selected counter */
	write_sysreg(i, PMXEVCNTR_EL0);
	/* read the counter value */
	read_sysreg(PMXEVCNTR_EL0);
	report(read_sysreg(PMXEVCNTR_EL0) == i,
		"read of a counter programmed with unsupported event");
}

static bool satisfy_prerequisites(uint32_t *events, unsigned int nb_events)
{
	int i;

	if (pmu.nb_implemented_counters < nb_events) {
		report_skip("Skip test as number of counters is too small (%d)",
			    pmu.nb_implemented_counters);
		return false;
	}

	for (i = 0; i < nb_events; i++) {
		if (!is_event_supported(events[i], false)) {
			report_skip("Skip test as event 0x%x is not supported",
				    events[i]);
			return false;
		}
	}
	return true;
}

static void test_basic_event_count(void)
{
	uint32_t implemented_counter_mask, non_implemented_counter_mask;
	uint32_t counter_mask;
	uint32_t events[] = {CPU_CYCLES, INST_RETIRED};

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	implemented_counter_mask = BIT(pmu.nb_implemented_counters) - 1;
	non_implemented_counter_mask = ~(BIT(31) | implemented_counter_mask);
	counter_mask = implemented_counter_mask | non_implemented_counter_mask;

	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, INST_RETIRED | PMEVTYPER_EXCLUDE_EL0);

	/* disable all counters */
	write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
	report(!read_sysreg_s(PMCNTENCLR_EL0) && !read_sysreg_s(PMCNTENSET_EL0),
		"pmcntenclr: disable all counters");

	/*
	 * clear cycle and all event counters and allow counter enablement
	 * through PMCNTENSET. LC is RES1.
	 */
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
	isb();
	report(get_pmcr() == (pmu.pmcr_ro | PMU_PMCR_LC), "pmcr: reset counters");

	/* Preset counter #0 to the pre-overflow value to trigger an overflow */
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
		"counter #0 preset to pre-overflow value");
	report(!read_regn_el0(pmevcntr, 1), "counter #1 is 0");

	/*
	 * Enable all implemented counters and also attempt to enable
	 * unimplemented counters. Counting is still disabled because
	 * PMCR.E is clear.
	 */
	write_sysreg_s(counter_mask, PMCNTENSET_EL0);

	/* check only those implemented are enabled */
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == implemented_counter_mask),
		"pmcntenset: enabled implemented_counters");

	/* Disable all counters but counters #0 and #1 */
	write_sysreg_s(~0x3, PMCNTENCLR_EL0);
	report((read_sysreg_s(PMCNTENSET_EL0) == read_sysreg_s(PMCNTENCLR_EL0)) &&
		(read_sysreg_s(PMCNTENSET_EL0) == 0x3),
		"pmcntenset: just enabled #0 and #1");

	/* clear overflow register */
	write_sysreg(ALL_SET, pmovsclr_el0);
	report(!read_sysreg(pmovsclr_el0), "check overflow reg is 0");

	/* disable overflow interrupts on all counters */
	write_sysreg(ALL_SET, pmintenclr_el1);
	report(!read_sysreg(pmintenclr_el1),
		"pmintenclr_el1=0, all interrupts disabled");

	/* enable overflow interrupts on all event counters */
	write_sysreg(implemented_counter_mask | non_implemented_counter_mask,
		     pmintenset_el1);
	report(read_sysreg(pmintenset_el1) == implemented_counter_mask,
		"overflow interrupts enabled on all implemented counters");

	/* Set PMCR.E, execute asm code and unset PMCR.E */
	precise_instrs_loop(20, pmu.pmcr_ro | PMU_PMCR_E);

	report_info("counter #0 is 0x%lx (CPU_CYCLES)",
		    read_regn_el0(pmevcntr, 0));
	report_info("counter #1 is 0x%lx (INST_RETIRED)",
		    read_regn_el0(pmevcntr, 1));

	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
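	/*
	 * Only counter #0 was preset to PRE_OVERFLOW (0xFFFFFFF0), so the
	 * 20-instruction loop is expected to wrap it and latch bit 0 of the
	 * overflow status register, while counter #1 started from 0 and
	 * cannot have wrapped.
	 */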
	report(read_sysreg(pmovsclr_el0) & 0x1,
		"check overflow happened on #0 only");
}

static void test_mem_access(void)
{
	void *addr = malloc(PAGE_SIZE);
	uint32_t events[] = {MEM_ACCESS, MEM_ACCESS};

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	isb();
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("counter #0 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 0));
	report_info("counter #1 is %ld (MEM_ACCESS)", read_regn_el0(pmevcntr, 1));
	/* We may measure more than 20 mem accesses depending on the core */
	report((read_regn_el0(pmevcntr, 0) == read_regn_el0(pmevcntr, 1)) &&
	       (read_regn_el0(pmevcntr, 0) >= 20) && !read_sysreg(pmovsclr_el0),
	       "Ran 20 mem accesses");

	pmu_reset();

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	isb();
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report(read_sysreg(pmovsclr_el0) == 0x3,
	       "Ran 20 mem accesses with expected overflows on both counters");
	report_info("cnt#0 = %ld cnt#1=%ld overflow=0x%lx",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
		    read_sysreg(pmovsclr_el0));
}

static void test_sw_incr(void)
{
	uint32_t events[] = {SW_INCR, SW_INCR};
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report_info("SW_INCR counter #0 has value %ld", read_regn_el0(pmevcntr, 0));
	report(read_regn_el0(pmevcntr, 0) == PRE_OVERFLOW,
		"PMSWINC does not increment if PMCR.E is unset");

	pmu_reset();

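	/*
	 * Second scenario: with PMCR.E set, each write of 0x3 to PMSWINC_EL0
	 * increments both counters. Counter #0 starts at PRE_OVERFLOW
	 * (0xFFFFFFF0), so after 100 increments it is expected to wrap to 84
	 * and set its overflow flag, while counter #1 simply reaches 100.
	 */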
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x3, pmswinc_el0);

	isb();
	report(read_regn_el0(pmevcntr, 0) == 84, "counter #0 after + 100 SW_INCR");
	report(read_regn_el0(pmevcntr, 1) == 100,
		"counter #1 after + 100 SW_INCR");
	report_info("counter values after 100 SW_INCR #0=%ld #1=%ld",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
	report(read_sysreg(pmovsclr_el0) == 0x1,
		"overflow on counter #0 after 100 SW_INCR");
}

static void test_chained_counters(void)
{
	uint32_t events[] = {CPU_CYCLES, CHAIN};

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);

	report(read_regn_el0(pmevcntr, 1) == 1, "CHAIN counter #1 incremented");
	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #1");

	/* test 64b overflow */

	pmu_reset();
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, 0x1);
	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(read_regn_el0(pmevcntr, 1) == 2, "CHAIN counter #1 set to 2");
	report(read_sysreg(pmovsclr_el0) == 0x1, "overflow recorded for chained incr #2");

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, ALL_SET);

	precise_instrs_loop(22, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("overflow reg = 0x%lx", read_sysreg(pmovsclr_el0));
	report(!read_regn_el0(pmevcntr, 1), "CHAIN counter #1 wrapped");
	report(read_sysreg(pmovsclr_el0) == 0x3, "overflow on even and odd counters");
}

static void test_chained_sw_incr(void)
{
	uint32_t events[] = {SW_INCR, CHAIN};
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	pmu_reset();

	write_regn_el0(pmevtyper, 0, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	/* enable counters #0 and #1 */
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report((read_sysreg(pmovsclr_el0) == 0x1) &&
		(read_regn_el0(pmevcntr, 1) == 1),
		"overflow and chain counter incremented after 100 SW_INCR/CHAIN");
	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));

	/* 64b SW_INCR and overflow on CHAIN counter */
	pmu_reset();

	write_regn_el0(pmevtyper, 1, events[1] | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, ALL_SET);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x1, pmswinc_el0);

	isb();
	report((read_sysreg(pmovsclr_el0) == 0x3) &&
		(read_regn_el0(pmevcntr, 1) == 0) &&
		(read_regn_el0(pmevcntr, 0) == 84),
		"expected overflows and values after 100 SW_INCR/CHAIN");
	report_info("overflow=0x%lx, #0=%ld #1=%ld", read_sysreg(pmovsclr_el0),
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));
}

static void test_chain_promotion(void)
{
	uint32_t events[] = {MEM_ACCESS, CHAIN};
	void *addr = malloc(PAGE_SIZE);

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	/* Only enable CHAIN counter */
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x2, PMCNTENSET_EL0);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report(!read_regn_el0(pmevcntr, 0),
		"chain counter not counting if even counter is disabled");

	/* Only enable even counter */
	pmu_reset();
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
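	/*
	 * With only the even counter enabled, counter #0 overflows during the
	 * loop and must latch its overflow flag, but the disabled CHAIN
	 * counter #1 is expected to stay at 0.
	 */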
	write_sysreg_s(0x1, PMCNTENSET_EL0);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report(!read_regn_el0(pmevcntr, 1) && (read_sysreg(pmovsclr_el0) == 0x1),
		"odd counter did not increment on overflow if disabled");
	report_info("MEM_ACCESS counter #0 has value %ld",
		    read_regn_el0(pmevcntr, 0));
	report_info("CHAIN counter #1 has value %ld",
		    read_regn_el0(pmevcntr, 1));
	report_info("overflow counter %ld", read_sysreg(pmovsclr_el0));

	/* start at 0xFFFFFFDC, +20 with CHAIN enabled, +20 with CHAIN disabled */
	pmu_reset();
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	/* disable the CHAIN event */
	write_sysreg_s(0x2, PMCNTENCLR_EL0);
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));
	report(read_sysreg(pmovsclr_el0) == 0x1,
		"should have triggered an overflow on #0");
	report(!read_regn_el0(pmevcntr, 1),
		"CHAIN counter #1 shouldn't have incremented");

	/* start at 0xFFFFFFDC, +20 with CHAIN disabled, +20 with CHAIN enabled */

	pmu_reset();
	write_sysreg_s(0x1, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
	isb();
	report_info("counter #0 = 0x%lx, counter #1 = 0x%lx overflow=0x%lx",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
		    read_sysreg(pmovsclr_el0));

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	/* enable the CHAIN event */
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	isb();
	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	report((read_regn_el0(pmevcntr, 1) == 1) &&
		(read_sysreg(pmovsclr_el0) == 0x1),
		"CHAIN counter enabled: CHAIN counter was incremented and overflow");

	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
		    read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));

	/* start as MEM_ACCESS/CPU_CYCLES and move to CHAIN/MEM_ACCESS */
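	/*
	 * Reprogramming the odd counter to the CHAIN event mid-stream promotes
	 * the pair to a 64-bit counter: the next overflow of even counter #0
	 * is then expected both to set its overflow flag and to increment
	 * CHAIN counter #1.
	 */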
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
	isb();

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	/* 0 becomes CHAINED */
	write_sysreg_s(0x0, PMCNTENSET_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 1, 0x0);

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("MEM_ACCESS counter #0 has value 0x%lx",
		    read_regn_el0(pmevcntr, 0));

	report((read_regn_el0(pmevcntr, 1) == 1) &&
		(read_sysreg(pmovsclr_el0) == 0x1),
		"32b->64b: CHAIN counter incremented and overflow");

	report_info("CHAIN counter #1 = 0x%lx, overflow=0x%lx",
		    read_regn_el0(pmevcntr, 1), read_sysreg(pmovsclr_el0));

	/* start as CHAIN/MEM_ACCESS and move to MEM_ACCESS/CPU_CYCLES */
	pmu_reset();
	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW2);
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report_info("counter #0=0x%lx, counter #1=0x%lx",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1));

	write_sysreg_s(0x0, PMCNTENSET_EL0);
	write_regn_el0(pmevtyper, 1, CPU_CYCLES | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);

	mem_access_loop(addr, 20, pmu.pmcr_ro | PMU_PMCR_E);
	report(read_sysreg(pmovsclr_el0) == 1,
		"overflow is expected on counter 0");
	report_info("counter #0=0x%lx, counter #1=0x%lx overflow=0x%lx",
		    read_regn_el0(pmevcntr, 0), read_regn_el0(pmevcntr, 1),
		    read_sysreg(pmovsclr_el0));
}

static bool expect_interrupts(uint32_t bitmap)
{
	int i;

	if (pmu_stats.bitmap ^ bitmap || pmu_stats.unexpected)
		return false;

	for (i = 0; i < 32; i++) {
		if (test_and_clear_bit(i, &pmu_stats.bitmap))
			if (pmu_stats.interrupts[i] != 1)
				return false;
	}
	return true;
}

static void test_overflow_interrupt(void)
{
	uint32_t events[] = {MEM_ACCESS, SW_INCR};
	void *addr = malloc(PAGE_SIZE);
	int i;

	if (!satisfy_prerequisites(events, ARRAY_SIZE(events)))
		return;

	gic_enable_defaults();
	install_irq_handler(EL1H_IRQ, irq_handler);
	local_irq_enable();
	gic_enable_irq(23);

	pmu_reset();

	write_regn_el0(pmevtyper, 0, MEM_ACCESS | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevtyper, 1, SW_INCR | PMEVTYPER_EXCLUDE_EL0);
	write_sysreg_s(0x3, PMCNTENSET_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
	isb();

	/* interrupts are disabled (PMINTENSET_EL1 == 0) */

	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
	report(expect_interrupts(0), "no overflow interrupt after preset");

	set_pmcr(pmu.pmcr_ro | PMU_PMCR_E);
	isb();

	for (i = 0; i < 100; i++)
		write_sysreg(0x2, pmswinc_el0);

	isb();
	set_pmcr(pmu.pmcr_ro);
	isb();
	report(expect_interrupts(0), "no overflow interrupt after counting");

	/* enable interrupts (PMINTENSET_EL1 <= ALL_SET) */

	pmu_reset_stats();

	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	write_regn_el0(pmevcntr, 1, PRE_OVERFLOW);
	write_sysreg(ALL_SET, pmintenset_el1);
	isb();

	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
	for (i = 0; i < 100; i++)
		write_sysreg(0x3, pmswinc_el0);

	mem_access_loop(addr, 200, pmu.pmcr_ro);
	report_info("overflow=0x%lx", read_sysreg(pmovsclr_el0));
	report(expect_interrupts(0x3),
		"overflow interrupts expected on #0 and #1");

	/* promote to 64-b */

	pmu_reset_stats();

	write_regn_el0(pmevtyper, 1, CHAIN | PMEVTYPER_EXCLUDE_EL0);
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
	isb();
	mem_access_loop(addr, 200, pmu.pmcr_ro | PMU_PMCR_E);
	report(expect_interrupts(0x1),
		"expect overflow interrupt on 32b boundary");

	/* overflow on odd counter */
	pmu_reset_stats();
	write_regn_el0(pmevcntr, 0, PRE_OVERFLOW);
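	/*
	 * Preset CHAIN counter #1 to ALL_SET so that the overflow of counter
	 * #0 propagates through the 64-bit pair and wraps #1 as well:
	 * overflow interrupts are then expected on both counters.
	 */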
	write_regn_el0(pmevcntr, 1, ALL_SET);
	isb();
	mem_access_loop(addr, 400, pmu.pmcr_ro | PMU_PMCR_E);
	report(expect_interrupts(0x3),
		"expect overflow interrupt on even and odd counter");
}
#endif

/*
 * Ensure that the cycle counter progresses between back-to-back reads.
 */
static bool check_cycles_increase(void)
{
	bool success = true;

	/* init before event access, this test only cares about cycle count */
	pmu_reset();
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
	isb();

	for (int i = 0; i < NR_SAMPLES; i++) {
		uint64_t a, b;

		a = get_pmccntr();
		b = get_pmccntr();

		if (a >= b) {
			printf("Read %"PRId64" then %"PRId64".\n", a, b);
			success = false;
			break;
		}
	}

	set_pmcr(get_pmcr() & ~PMU_PMCR_E);
	isb();

	return success;
}

/*
 * Execute a known number of guest instructions. Only even instruction counts
 * greater than or equal to 4 are supported by the in-line assembly code. The
 * control register (PMCR_EL0) is initialized with the provided value (allowing
 * for example for the cycle counter or event counters to be reset). At the end
 * of the exact instruction loop, zero is written to PMCR_EL0 to disable
 * counting, allowing the cycle counter or event counters to be read at the
 * leisure of the calling code.
 */
static void measure_instrs(int num, uint32_t pmcr)
{
	int loop = (num - 2) / 2;

	assert(num >= 4 && ((num - 2) % 2 == 0));
	precise_instrs_loop(loop, pmcr);
}

/*
 * Measure cycle counts for various known instruction counts. Ensure that the
 * cycle counter progresses (similar to check_cycles_increase() but with more
 * instructions and using reset and stop controls). If supplied a positive,
 * nonzero CPI parameter, it also strictly checks that every measurement
 * matches it. Strict CPI checking is used to test -icount mode.
 */
static bool check_cpi(int cpi)
{
	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;

	/* init before event access, this test only cares about cycle count */
	pmu_reset();
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	if (cpi > 0)
		printf("Checking for CPI=%d.\n", cpi);
	printf("instrs : cycles0 cycles1 ...\n");

	for (unsigned int i = 4; i < 300; i += 32) {
		uint64_t avg, sum = 0;

		printf("%4d:", i);
		for (int j = 0; j < NR_SAMPLES; j++) {
			uint64_t cycles;

			set_pmccntr(0);
			measure_instrs(i, pmcr);
			cycles = get_pmccntr();
			printf(" %4"PRId64"", cycles);

			if (!cycles) {
				printf("\ncycles not incrementing!\n");
				return false;
			} else if (cpi > 0 && cycles != i * cpi) {
				printf("\nunexpected cycle count received!\n");
				return false;
			} else if ((cycles >> 32) != 0) {
				/*
				 * The cycles taken by the loop above should
				 * fit in 32 bits easily. We check the upper
				 * 32 bits of the cycle counter to make sure
				 * there is no surprise.
				 */
				printf("\ncycle count bigger than 32bit!\n");
				return false;
			}

			sum += cycles;
		}
		avg = sum / NR_SAMPLES;
		printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg,
			(avg >= i) ? "cpi" : "ipc",
			(avg >= i) ? avg / i : i / avg);
	}

	return true;
}

static void pmccntr64_test(void)
{
#ifdef __arm__
	if (pmu.version == ID_DFR0_PMU_V3) {
		if (ERRATA(9e3f7a296940)) {
			write_sysreg(0xdead, PMCCNTR64);
			report(read_sysreg(PMCCNTR64) == 0xdead, "pmccntr64");
		} else
			report_skip("Skipping unsafe pmccntr64 test. Set ERRATA_9e3f7a296940=y to enable.");
	}
#endif
}

/* Return FALSE if no PMU found, otherwise return TRUE */
static bool pmu_probe(void)
{
	uint32_t pmcr;
	uint8_t implementer;

	pmu.version = get_pmu_version();
	if (pmu.version == ID_DFR0_PMU_NOTIMPL || pmu.version == ID_DFR0_PMU_IMPDEF)
		return false;

	report_info("PMU version: 0x%x", pmu.version);

	pmcr = get_pmcr();
	implementer = (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK;
	report_info("PMU implementer/ID code: %#"PRIx32"(\"%c\")/%#"PRIx32,
		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
		    implementer ? implementer : ' ',
		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK);

	/* store read-only and RES0 fields of the PMCR bottom-half */
	pmu.pmcr_ro = pmcr & 0xFFFFFF00;
	pmu.nb_implemented_counters =
		(pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK;
	report_info("Implements %d event counters",
		    pmu.nb_implemented_counters);

	return true;
}

int main(int argc, char *argv[])
{
	int cpi = 0;

	if (!pmu_probe()) {
		printf("No PMU found, test skipped...\n");
		return report_summary();
	}

	if (argc < 2)
		report_abort("no test specified");

	report_prefix_push("pmu");

	if (strcmp(argv[1], "cycle-counter") == 0) {
		report_prefix_push(argv[1]);
		if (argc > 2)
			cpi = atol(argv[2]);
		report(check_cycles_increase(),
		       "Monotonically increasing cycle count");
		report(check_cpi(cpi), "Cycle/instruction ratio");
		pmccntr64_test();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-event-introspection") == 0) {
		report_prefix_push(argv[1]);
		test_event_introspection();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-event-counter-config") == 0) {
		report_prefix_push(argv[1]);
		test_event_counter_config();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-basic-event-count") == 0) {
		report_prefix_push(argv[1]);
		test_basic_event_count();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-mem-access") == 0) {
		report_prefix_push(argv[1]);
		test_mem_access();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-sw-incr") == 0) {
		report_prefix_push(argv[1]);
		test_sw_incr();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-chained-counters") == 0) {
		report_prefix_push(argv[1]);
		test_chained_counters();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-chained-sw-incr") == 0) {
		report_prefix_push(argv[1]);
		test_chained_sw_incr();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-chain-promotion") == 0) {
		report_prefix_push(argv[1]);
		test_chain_promotion();
		report_prefix_pop();
	} else if (strcmp(argv[1], "pmu-overflow-interrupt") == 0) {
		report_prefix_push(argv[1]);
		test_overflow_interrupt();
		report_prefix_pop();
	} else {
		report_abort("Unknown sub-test '%s'", argv[1]);
	}

	return report_summary();
}