// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and the event counters can be reset independently of each other.
 */

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
#define ARMV7_PERFCTR_ITLB_REFILL			0x02
#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
#define ARMV7_PERFCTR_DTLB_REFILL			0x05
#define ARMV7_PERFCTR_MEM_READ				0x06
#define ARMV7_PERFCTR_MEM_WRITE				0x07
#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
#define ARMV7_PERFCTR_EXC_TAKEN				0x09
#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
#define ARMV7_PERFCTR_CID_WRITE				0x0B

/*
 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 * It counts:
 *  - all (taken) branch instructions,
 *  - instructions that explicitly write the PC,
 *  - exception generating instructions.
 */
#define ARMV7_PERFCTR_PC_WRITE				0x0C
#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12

/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
#define ARMV7_PERFCTR_MEM_ACCESS			0x13
#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
#define ARMV7_PERFCTR_BUS_ACCESS			0x19
#define ARMV7_PERFCTR_MEM_ERROR				0x1A
#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
#define ARMV7_PERFCTR_BUS_CYCLES			0x1D

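/*
 * Note: 0xFF is not one of the architected event encodings (the architected
 * cycle-count event is CLOCK_CYCLES, 0x11). This driver uses it as an
 * internal marker meaning "place this event on the dedicated cycle counter"
 * when a counter index is chosen (outside this excerpt).
 */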
#define ARMV7_PERFCTR_CPU_CYCLES			0xFF

/* ARMv7 Cortex-A8 specific event types */
#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56

/* ARMv7 Cortex-A9 specific event types */
#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66

/* ARMv7 Cortex-A5 specific event types */
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3

/* ARMv7 Cortex-A15 specific event types */
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43

#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D

#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53

#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76

/* ARMv7 Cortex-A12 specific event types */
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41

#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51

#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76

#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7

/* ARMv7 Krait specific event types */
#define KRAIT_PMRESR0_GROUP0				0xcc
#define KRAIT_PMRESR1_GROUP0				0xd0
#define KRAIT_PMRESR2_GROUP0				0xd4
#define KRAIT_VPMRESR0_GROUP0				0xd8

#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010

#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210

/* ARMv7 Scorpion specific event types */
#define SCORPION_LPM0_GROUP0				0x4c
#define SCORPION_LPM1_GROUP0				0x50
#define SCORPION_LPM2_GROUP0				0x54
#define SCORPION_L2LPM_GROUP0				0x58
#define SCORPION_VLPM_GROUP0				0x5c

#define SCORPION_ICACHE_ACCESS				0x10053
#define SCORPION_ICACHE_MISS				0x10052

#define SCORPION_DTLB_ACCESS				0x12013
#define SCORPION_DTLB_MISS				0x12012

#define SCORPION_ITLB_MISS				0x12021
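/*
 * The Krait and Scorpion values above that exceed 0xff are not raw PMNC
 * event numbers: they encode a region/group/code selection for the Qualcomm
 * per-CPU PMRESR/VPMRESR registers and are decoded by the Krait- and
 * Scorpion-specific event setup code later in this file (not shown in this
 * excerpt).
 */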

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	/*
	 * The prefetch counters don't differentiate between the I side and the
	 * D side.
	 */
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,

	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Scorpion HW events mapping
 */
static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					    [PERF_COUNT_HW_CACHE_OP_MAX]
					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,
	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
	/*
	 * Only ITLB misses and DTLB refills are supported here. If users want
	 * the DTLB refill misses broken down further, a raw counter must be
	 * used.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

PMU_FORMAT_ATTR(event, "config:0-7");
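/*
 * The "event" format attribute above exposes config bits 0-7, so raw PMNC
 * event numbers can be requested from userspace, e.g. (assuming the PMU is
 * registered as "armv7_cortex_a9" on a Cortex-A9):
 *
 *   perf stat -e armv7_cortex_a9/event=0x68/ -a sleep 1
 */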

static struct attribute *armv7_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group armv7_pmu_format_attr_group = {
	.name = "format",
	.attrs = armv7_pmu_format_attrs,
};

#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
#define ARMV7_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
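/*
 * The extra ARMV7_EVENT_ATTR_RESOLVE() indirection forces the config macro
 * to be expanded before stringification, so for example
 * ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR) produces the sysfs
 * string "event=0x00" rather than "event=ARMV7_PERFCTR_PMNC_SW_INCR".
 */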

ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);

static struct attribute *armv7_pmuv1_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv1_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv1_event_attrs,
};

ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);

static struct attribute *armv7_pmuv2_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	&armv7_event_attr_mem_access.attr.attr,
	&armv7_event_attr_l1i_cache.attr.attr,
	&armv7_event_attr_l1d_cache_wb.attr.attr,
	&armv7_event_attr_l2d_cache.attr.attr,
	&armv7_event_attr_l2d_cache_refill.attr.attr,
	&armv7_event_attr_l2d_cache_wb.attr.attr,
	&armv7_event_attr_bus_access.attr.attr,
	&armv7_event_attr_memory_error.attr.attr,
	&armv7_event_attr_inst_spec.attr.attr,
	&armv7_event_attr_ttbr_write_retired.attr.attr,
	&armv7_event_attr_bus_cycles.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv2_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv2_event_attrs,
};

/*
 * Perf Events' indices
 */
#define	ARMV7_IDX_CYCLE_COUNTER	31
#define	ARMV7_IDX_COUNTER_MAX	31
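/*
 * The dedicated cycle counter is given the fixed index 31, matching its bit
 * position in the PMCNTENSET/CLR and PMOVSR registers (see the BIT(idx)
 * accessors below); event counters use indices 0..30.
 */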
/*
 * ARMv7 low level PMNC access
 */
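
/*
 * For reference, the CP15 PMU register encodings used by the raw mrc/mcr
 * accessors below:
 *   PMCR       = p15, 0, c9, c12, 0
 *   PMCNTENSET = p15, 0, c9, c12, 1
 *   PMCNTENCLR = p15, 0, c9, c12, 2
 *   PMOVSR     = p15, 0, c9, c12, 3
 *   PMSELR     = p15, 0, c9, c12, 5
 *   PMCCNTR    = p15, 0, c9, c13, 0
 *   PMXEVTYPER = p15, 0, c9, c13, 1
 *   PMXEVCNTR  = p15, 0, c9, c13, 2
 *   PMINTENSET = p15, 0, c9, c14, 1
 *   PMINTENCLR = p15, 0, c9, c14, 2
 */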

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
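/*
 * The number of event counters actually implemented is read from
 * PMCR[15:11] (the N field, via ARMV7_PMNC_N_SHIFT/N_MASK) when the PMU is
 * probed (outside this excerpt) and recorded in the arm_pmu counter mask.
 */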

/*
 * FLAG: counters overflow flag status reg
 */
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
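/*
 * 0xc80000ff covers the event number in bits [7:0] plus the exclude/include
 * filter bits (31, 30 and 27) defined below; everything else in PMXEVTYPER
 * is treated as reserved by this driver.
 */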

/*
 * Event filters for PMUv2
 */
#define	ARMV7_EXCLUDE_PL1	BIT(31)
#define	ARMV7_EXCLUDE_USER	BIT(30)
#define	ARMV7_INCLUDE_HYP	BIT(27)

/*
 * Secure debug enable reg
 */
#define ARMV7_SDER_SUNIDEN	BIT(1) /* Permit non-invasive debug */
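/*
 * Setting SUNIDEN permits non-invasive debug (which includes the performance
 * counters) in the Secure state; platforms that declare secure PMU register
 * access set this bit during PMU reset (handled later in this file, outside
 * this excerpt).
 */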

static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return test_bit(idx, cpu_pmu->cntr_mask);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(idx);
}

static inline void armv7_pmnc_select_counter(int idx)
{
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (idx));
	isb();
}

static inline u64 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
	}
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(idx)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(idx)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(idx)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(idx)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(idx)));
	isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for_each_set_bit(cnt, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n", cnt, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n", cnt, val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	armv7_pmnc_enable_intens(idx);
	armv7_pmnc_enable_counter(idx);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	armv7_pmnc_disable_counter(idx);
	armv7_pmnc_disable_intens(idx);
}
88743eab878SWill Deacon 
8880788f1e9SMark Rutland static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
88943eab878SWill Deacon {
8906330aae7SWill Deacon 	u32 pmnc;
89143eab878SWill Deacon 	struct perf_sample_data data;
89211679250SMark Rutland 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
89343eab878SWill Deacon 	struct pt_regs *regs;
89443eab878SWill Deacon 	int idx;
89543eab878SWill Deacon 
89643eab878SWill Deacon 	/*
89743eab878SWill Deacon 	 * Get and reset the IRQ flags
89843eab878SWill Deacon 	 */
89943eab878SWill Deacon 	pmnc = armv7_pmnc_getreset_flags();
90043eab878SWill Deacon 
90143eab878SWill Deacon 	/*
90243eab878SWill Deacon 	 * Did an overflow occur?
90343eab878SWill Deacon 	 */
90443eab878SWill Deacon 	if (!armv7_pmnc_has_overflowed(pmnc))
90543eab878SWill Deacon 		return IRQ_NONE;
90643eab878SWill Deacon 
90743eab878SWill Deacon 	/*
90843eab878SWill Deacon 	 * Handle the counter(s) overflow(s)
90943eab878SWill Deacon 	 */
91043eab878SWill Deacon 	regs = get_irq_regs();
91143eab878SWill Deacon 
912bf5ffc8cSRob Herring (Arm) 	for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
91343eab878SWill Deacon 		struct perf_event *event = cpuc->events[idx];
91443eab878SWill Deacon 		struct hw_perf_event *hwc;
91543eab878SWill Deacon 
916f6f5a30cSWill Deacon 		/* Ignore if we don't have an event. */
917f6f5a30cSWill Deacon 		if (!event)
918f6f5a30cSWill Deacon 			continue;
919f6f5a30cSWill Deacon 
92043eab878SWill Deacon 		/*
92143eab878SWill Deacon 		 * We have a single interrupt for all counters. Check that
92243eab878SWill Deacon 		 * each counter has overflowed before we process it.
92343eab878SWill Deacon 		 */
92443eab878SWill Deacon 		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
92543eab878SWill Deacon 			continue;
92643eab878SWill Deacon 
92743eab878SWill Deacon 		hwc = &event->hw;
928ed6f2a52SSudeep KarkadaNagesha 		armpmu_event_update(event);
929fd0d000bSRobert Richter 		perf_sample_data_init(&data, 0, hwc->last_period);
930ed6f2a52SSudeep KarkadaNagesha 		if (!armpmu_event_set_period(event))
93143eab878SWill Deacon 			continue;
93243eab878SWill Deacon 
933*15073765SKan Liang 		perf_event_overflow(event, &data, regs);
93443eab878SWill Deacon 	}
93543eab878SWill Deacon 
93643eab878SWill Deacon 	/*
93743eab878SWill Deacon 	 * Handle the pending perf events.
93843eab878SWill Deacon 	 *
93943eab878SWill Deacon 	 * Note: this call *must* be run with interrupts disabled. For
94043eab878SWill Deacon 	 * platforms that can have the PMU interrupts raised as an NMI, this
94143eab878SWill Deacon 	 * will not work.
94243eab878SWill Deacon 	 */
94343eab878SWill Deacon 	irq_work_run();
94443eab878SWill Deacon 
94543eab878SWill Deacon 	return IRQ_HANDLED;
94643eab878SWill Deacon }
94743eab878SWill Deacon 
948ed6f2a52SSudeep KarkadaNagesha static void armv7pmu_start(struct arm_pmu *cpu_pmu)
94943eab878SWill Deacon {
95043eab878SWill Deacon 	/* Enable all counters */
95143eab878SWill Deacon 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
95243eab878SWill Deacon }
95343eab878SWill Deacon 
954ed6f2a52SSudeep KarkadaNagesha static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
95543eab878SWill Deacon {
95643eab878SWill Deacon 	/* Disable all counters */
95743eab878SWill Deacon 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
95843eab878SWill Deacon }
95943eab878SWill Deacon 
9608be3f9a2SMark Rutland static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
961ed6f2a52SSudeep KarkadaNagesha 				  struct perf_event *event)
96243eab878SWill Deacon {
96343eab878SWill Deacon 	int idx;
964ed6f2a52SSudeep KarkadaNagesha 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
965ed6f2a52SSudeep KarkadaNagesha 	struct hw_perf_event *hwc = &event->hw;
966ed6f2a52SSudeep KarkadaNagesha 	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
96743eab878SWill Deacon 
96843eab878SWill Deacon 	/* Always place a cycle counter into the cycle counter. */
969a505addcSWill Deacon 	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
970c691bb62SWill Deacon 		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
97143eab878SWill Deacon 			return -EAGAIN;
97243eab878SWill Deacon 
973c691bb62SWill Deacon 		return ARMV7_IDX_CYCLE_COUNTER;
974c691bb62SWill Deacon 	}
975c691bb62SWill Deacon 
97643eab878SWill Deacon 	/*
97743eab878SWill Deacon 	 * For anything other than a cycle counter, try and use
97843eab878SWill Deacon 	 * the events counters
97943eab878SWill Deacon 	 */
980bf5ffc8cSRob Herring (Arm) 	for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
98143eab878SWill Deacon 		if (!test_and_set_bit(idx, cpuc->used_mask))
98243eab878SWill Deacon 			return idx;
98343eab878SWill Deacon 	}
98443eab878SWill Deacon 
98543eab878SWill Deacon 	/* The counters are all in use. */
98643eab878SWill Deacon 	return -EAGAIN;
98743eab878SWill Deacon }
98843eab878SWill Deacon 
9897dfc8db1SSuzuki K Poulose static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
9907dfc8db1SSuzuki K Poulose 				     struct perf_event *event)
9917dfc8db1SSuzuki K Poulose {
9927dfc8db1SSuzuki K Poulose 	clear_bit(event->hw.idx, cpuc->used_mask);
9937dfc8db1SSuzuki K Poulose }
9947dfc8db1SSuzuki K Poulose 
995a505addcSWill Deacon /*
996a505addcSWill Deacon  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
997a505addcSWill Deacon  */
998a505addcSWill Deacon static int armv7pmu_set_event_filter(struct hw_perf_event *event,
999a505addcSWill Deacon 				     struct perf_event_attr *attr)
1000a505addcSWill Deacon {
1001a505addcSWill Deacon 	unsigned long config_base = 0;
1002a505addcSWill Deacon 
1003186c91aaSJames Clark 	if (attr->exclude_idle) {
1004186c91aaSJames Clark 		pr_debug("ARM performance counters do not support mode exclusion\n");
1005186c91aaSJames Clark 		return -EOPNOTSUPP;
1006186c91aaSJames Clark 	}
1007a505addcSWill Deacon 	if (attr->exclude_user)
1008a505addcSWill Deacon 		config_base |= ARMV7_EXCLUDE_USER;
1009a505addcSWill Deacon 	if (attr->exclude_kernel)
1010a505addcSWill Deacon 		config_base |= ARMV7_EXCLUDE_PL1;
1011a505addcSWill Deacon 	if (!attr->exclude_hv)
1012a505addcSWill Deacon 		config_base |= ARMV7_INCLUDE_HYP;
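	/*
	 * For example, an attr requesting user-space-only counting
	 * (exclude_kernel = 1, exclude_hv = 1) leaves config_base as
	 * ARMV7_EXCLUDE_PL1, while a kernel-only request (exclude_user = 1,
	 * exclude_hv = 0) yields ARMV7_EXCLUDE_USER | ARMV7_INCLUDE_HYP.
	 */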
1013a505addcSWill Deacon 
1014a505addcSWill Deacon 	/*
1015a505addcSWill Deacon 	 * Install the filter into config_base as this is used to
1016a505addcSWill Deacon 	 * construct the event type.
1017a505addcSWill Deacon 	 */
1018a505addcSWill Deacon 	event->config_base = config_base;
1019a505addcSWill Deacon 
1020a505addcSWill Deacon 	return 0;
102143eab878SWill Deacon }
102243eab878SWill Deacon 
1023574b69cbSWill Deacon static void armv7pmu_reset(void *info)
1024574b69cbSWill Deacon {
1025ed6f2a52SSudeep KarkadaNagesha 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1026bf5ffc8cSRob Herring (Arm) 	u32 idx, val;
10278d1a0ae7SMartin Fuzzey 
10288d1a0ae7SMartin Fuzzey 	if (cpu_pmu->secure_access) {
10298d1a0ae7SMartin Fuzzey 		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
10308d1a0ae7SMartin Fuzzey 		val |= ARMV7_SDER_SUNIDEN;
10318d1a0ae7SMartin Fuzzey 		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
10328d1a0ae7SMartin Fuzzey 	}
1033574b69cbSWill Deacon 
1034574b69cbSWill Deacon 	/* The counter and interrupt enable registers are unknown at reset. */
1035bf5ffc8cSRob Herring (Arm) 	for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
1036ed6f2a52SSudeep KarkadaNagesha 		armv7_pmnc_disable_counter(idx);
1037ed6f2a52SSudeep KarkadaNagesha 		armv7_pmnc_disable_intens(idx);
1038ed6f2a52SSudeep KarkadaNagesha 	}
1039574b69cbSWill Deacon 
1040574b69cbSWill Deacon 	/* Initialize & Reset PMNC: C and P bits */
1041574b69cbSWill Deacon 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1042574b69cbSWill Deacon }
1043574b69cbSWill Deacon 
1044e1f431b5SMark Rutland static int armv7_a8_map_event(struct perf_event *event)
1045e1f431b5SMark Rutland {
10466dbc0029SWill Deacon 	return armpmu_map_event(event, &armv7_a8_perf_map,
1047e1f431b5SMark Rutland 				&armv7_a8_perf_cache_map, 0xFF);
1048e1f431b5SMark Rutland }
1049e1f431b5SMark Rutland 
1050e1f431b5SMark Rutland static int armv7_a9_map_event(struct perf_event *event)
1051e1f431b5SMark Rutland {
10526dbc0029SWill Deacon 	return armpmu_map_event(event, &armv7_a9_perf_map,
1053e1f431b5SMark Rutland 				&armv7_a9_perf_cache_map, 0xFF);
1054e1f431b5SMark Rutland }
1055e1f431b5SMark Rutland 
1056e1f431b5SMark Rutland static int armv7_a5_map_event(struct perf_event *event)
1057e1f431b5SMark Rutland {
10586dbc0029SWill Deacon 	return armpmu_map_event(event, &armv7_a5_perf_map,
1059e1f431b5SMark Rutland 				&armv7_a5_perf_cache_map, 0xFF);
1060e1f431b5SMark Rutland }
1061e1f431b5SMark Rutland 
1062e1f431b5SMark Rutland static int armv7_a15_map_event(struct perf_event *event)
1063e1f431b5SMark Rutland {
10646dbc0029SWill Deacon 	return armpmu_map_event(event, &armv7_a15_perf_map,
1065e1f431b5SMark Rutland 				&armv7_a15_perf_cache_map, 0xFF);
1066e1f431b5SMark Rutland }
1067e1f431b5SMark Rutland 
1068d33c88c6SWill Deacon static int armv7_a7_map_event(struct perf_event *event)
1069d33c88c6SWill Deacon {
10706dbc0029SWill Deacon 	return armpmu_map_event(event, &armv7_a7_perf_map,
1071d33c88c6SWill Deacon 				&armv7_a7_perf_cache_map, 0xFF);
1072d33c88c6SWill Deacon }
1073d33c88c6SWill Deacon 
10748e781f65SAlbin Tonnerre static int armv7_a12_map_event(struct perf_event *event)
10758e781f65SAlbin Tonnerre {
10768e781f65SAlbin Tonnerre 	return armpmu_map_event(event, &armv7_a12_perf_map,
10778e781f65SAlbin Tonnerre 				&armv7_a12_perf_cache_map, 0xFF);
10788e781f65SAlbin Tonnerre }
10798e781f65SAlbin Tonnerre 
10802a3391cdSStephen Boyd static int krait_map_event(struct perf_event *event)
10812a3391cdSStephen Boyd {
10822a3391cdSStephen Boyd 	return armpmu_map_event(event, &krait_perf_map,
10832a3391cdSStephen Boyd 				&krait_perf_cache_map, 0xFFFFF);
10842a3391cdSStephen Boyd }
10852a3391cdSStephen Boyd 
10862a3391cdSStephen Boyd static int krait_map_event_no_branch(struct perf_event *event)
10872a3391cdSStephen Boyd {
10882a3391cdSStephen Boyd 	return armpmu_map_event(event, &krait_perf_map_no_branch,
10892a3391cdSStephen Boyd 				&krait_perf_cache_map, 0xFFFFF);
10902a3391cdSStephen Boyd }
10912a3391cdSStephen Boyd 
1092341e42c4SStephen Boyd static int scorpion_map_event(struct perf_event *event)
1093341e42c4SStephen Boyd {
1094341e42c4SStephen Boyd 	return armpmu_map_event(event, &scorpion_perf_map,
1095341e42c4SStephen Boyd 				&scorpion_perf_cache_map, 0xFFFFF);
1096341e42c4SStephen Boyd }
1097341e42c4SStephen Boyd 
1098513c99ceSSudeep KarkadaNagesha static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1099513c99ceSSudeep KarkadaNagesha {
1100513c99ceSSudeep KarkadaNagesha 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1101513c99ceSSudeep KarkadaNagesha 	cpu_pmu->enable		= armv7pmu_enable_event;
1102513c99ceSSudeep KarkadaNagesha 	cpu_pmu->disable	= armv7pmu_disable_event;
1103513c99ceSSudeep KarkadaNagesha 	cpu_pmu->read_counter	= armv7pmu_read_counter;
1104513c99ceSSudeep KarkadaNagesha 	cpu_pmu->write_counter	= armv7pmu_write_counter;
1105513c99ceSSudeep KarkadaNagesha 	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
11067dfc8db1SSuzuki K Poulose 	cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
1107513c99ceSSudeep KarkadaNagesha 	cpu_pmu->start		= armv7pmu_start;
1108513c99ceSSudeep KarkadaNagesha 	cpu_pmu->stop		= armv7pmu_stop;
1109513c99ceSSudeep KarkadaNagesha 	cpu_pmu->reset		= armv7pmu_reset;
111043eab878SWill Deacon }
111143eab878SWill Deacon 
11120e3038d1SMark Rutland static void armv7_read_num_pmnc_events(void *info)
111343eab878SWill Deacon {
1114bf5ffc8cSRob Herring (Arm) 	int nb_cnt;
1115bf5ffc8cSRob Herring (Arm) 	struct arm_pmu *cpu_pmu = info;
111643eab878SWill Deacon 
111743eab878SWill Deacon 	/* Read the nb of CNTx counters supported from PMNC */
1118bf5ffc8cSRob Herring (Arm) 	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1119bf5ffc8cSRob Herring (Arm) 	bitmap_set(cpu_pmu->cntr_mask, 0, nb_cnt);
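	/*
	 * For example, a core whose PMNC reports N = 4 gets bits 0-3 set
	 * above for its programmable counters; the dedicated cycle counter
	 * bit is added separately below.
	 */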
112043eab878SWill Deacon 
11210e3038d1SMark Rutland 	/* Add the CPU cycles counter */
1122bf5ffc8cSRob Herring (Arm) 	set_bit(ARMV7_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask);
11230e3038d1SMark Rutland }
11240e3038d1SMark Rutland 
11250e3038d1SMark Rutland static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
11260e3038d1SMark Rutland {
11270e3038d1SMark Rutland 	return smp_call_function_any(&arm_pmu->supported_cpus,
11280e3038d1SMark Rutland 				     armv7_read_num_pmnc_events,
1129bf5ffc8cSRob Herring (Arm) 				     arm_pmu, 1);
113043eab878SWill Deacon }
113143eab878SWill Deacon 
1132351a102dSGreg Kroah-Hartman static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
113343eab878SWill Deacon {
1134513c99ceSSudeep KarkadaNagesha 	armv7pmu_init(cpu_pmu);
11353d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_cortex_a8";
1136513c99ceSSudeep KarkadaNagesha 	cpu_pmu->map_event	= armv7_a8_map_event;
11379268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
11389268c5daSMark Rutland 		&armv7_pmuv1_events_attr_group;
11399268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
11409268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
11410e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
114243eab878SWill Deacon }
114343eab878SWill Deacon 
1144351a102dSGreg Kroah-Hartman static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
114543eab878SWill Deacon {
1146513c99ceSSudeep KarkadaNagesha 	armv7pmu_init(cpu_pmu);
11473d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_cortex_a9";
1148513c99ceSSudeep KarkadaNagesha 	cpu_pmu->map_event	= armv7_a9_map_event;
11499268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
11509268c5daSMark Rutland 		&armv7_pmuv1_events_attr_group;
11519268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
11529268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
11530e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
115443eab878SWill Deacon }
11550c205cbeSWill Deacon 
1156351a102dSGreg Kroah-Hartman static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
11570c205cbeSWill Deacon {
1158513c99ceSSudeep KarkadaNagesha 	armv7pmu_init(cpu_pmu);
11593d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_cortex_a5";
1160513c99ceSSudeep KarkadaNagesha 	cpu_pmu->map_event	= armv7_a5_map_event;
11619268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
11629268c5daSMark Rutland 		&armv7_pmuv1_events_attr_group;
11639268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
11649268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
11650e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
11660c205cbeSWill Deacon }
116714abd038SWill Deacon 
1168351a102dSGreg Kroah-Hartman static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
116914abd038SWill Deacon {
1170513c99ceSSudeep KarkadaNagesha 	armv7pmu_init(cpu_pmu);
11713d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_cortex_a15";
1172513c99ceSSudeep KarkadaNagesha 	cpu_pmu->map_event	= armv7_a15_map_event;
1173513c99ceSSudeep KarkadaNagesha 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
11749268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
11759268c5daSMark Rutland 		&armv7_pmuv2_events_attr_group;
11769268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
11779268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
11780e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
117914abd038SWill Deacon }
1180d33c88c6SWill Deacon 
1181351a102dSGreg Kroah-Hartman static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1182d33c88c6SWill Deacon {
1183513c99ceSSudeep KarkadaNagesha 	armv7pmu_init(cpu_pmu);
11843d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_cortex_a7";
1185513c99ceSSudeep KarkadaNagesha 	cpu_pmu->map_event	= armv7_a7_map_event;
1186513c99ceSSudeep KarkadaNagesha 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
11879268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
11889268c5daSMark Rutland 		&armv7_pmuv2_events_attr_group;
11899268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
11909268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
11910e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
1192d33c88c6SWill Deacon }
11932a3391cdSStephen Boyd 
11948e781f65SAlbin Tonnerre static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
11958e781f65SAlbin Tonnerre {
11968e781f65SAlbin Tonnerre 	armv7pmu_init(cpu_pmu);
11973d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_cortex_a12";
11988e781f65SAlbin Tonnerre 	cpu_pmu->map_event	= armv7_a12_map_event;
11998e781f65SAlbin Tonnerre 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
12009268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
12019268c5daSMark Rutland 		&armv7_pmuv2_events_attr_group;
12029268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
12039268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
12040e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
12058e781f65SAlbin Tonnerre }
12068e781f65SAlbin Tonnerre 
120703eff46cSWill Deacon static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
120803eff46cSWill Deacon {
12090e3038d1SMark Rutland 	int ret = armv7_a12_pmu_init(cpu_pmu);
12103d1ff755SMark Rutland 	cpu_pmu->name = "armv7_cortex_a17";
12119268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
12129268c5daSMark Rutland 		&armv7_pmuv2_events_attr_group;
12139268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
12149268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
12150e3038d1SMark Rutland 	return ret;
121603eff46cSWill Deacon }
121703eff46cSWill Deacon 
1218b7aafe99SStephen Boyd /*
1219b7aafe99SStephen Boyd  * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1220b7aafe99SStephen Boyd  *
1221b7aafe99SStephen Boyd  *            31   30     24     16     8      0
1222b7aafe99SStephen Boyd  *            +--------------------------------+
1223b7aafe99SStephen Boyd  *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1224b7aafe99SStephen Boyd  *            +--------------------------------+
1225b7aafe99SStephen Boyd  *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1226b7aafe99SStephen Boyd  *            +--------------------------------+
1227b7aafe99SStephen Boyd  *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1228b7aafe99SStephen Boyd  *            +--------------------------------+
1229b7aafe99SStephen Boyd  *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1230b7aafe99SStephen Boyd  *            +--------------------------------+
1231b7aafe99SStephen Boyd  *              EN | G=3  | G=2  | G=1  | G=0
1232b7aafe99SStephen Boyd  *
1233b7aafe99SStephen Boyd  *  Event Encoding:
1234b7aafe99SStephen Boyd  *
1235b7aafe99SStephen Boyd  *      hwc->config_base = 0xNRCCG
1236b7aafe99SStephen Boyd  *
1237b7aafe99SStephen Boyd  *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1238b7aafe99SStephen Boyd  *      R  = region register
1239b7aafe99SStephen Boyd  *      CC = class of events the group G is choosing from
1240b7aafe99SStephen Boyd  *      G  = group or particular event
1241b7aafe99SStephen Boyd  *
1242b7aafe99SStephen Boyd  *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1243b7aafe99SStephen Boyd  *
1244b7aafe99SStephen Boyd  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1245b7aafe99SStephen Boyd  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1246b7aafe99SStephen Boyd  *  events (interrupts for example). An event code is broken down into
1247b7aafe99SStephen Boyd  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1248b7aafe99SStephen Boyd  *  example).
1249b7aafe99SStephen Boyd  */
1250b7aafe99SStephen Boyd 
1251b7aafe99SStephen Boyd #define KRAIT_EVENT		(1 << 16)
1252b7aafe99SStephen Boyd #define VENUM_EVENT		(2 << 16)
1253b7aafe99SStephen Boyd #define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1254b7aafe99SStephen Boyd #define PMRESRn_EN		BIT(31)
1255b7aafe99SStephen Boyd 
125665bab451SStephen Boyd #define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
125765bab451SStephen Boyd #define EVENT_GROUP(event)	((event) & 0xf)			/* G */
125865bab451SStephen Boyd #define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
125965bab451SStephen Boyd #define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
126065bab451SStephen Boyd #define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
126165bab451SStephen Boyd 
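/*
 * Worked decode of the 0x12021 example above, using the macros directly:
 *
 *   EVENT_REGION(0x12021) = (0x12021 >> 12) & 0xf  = 2     (PMRESR2)
 *   EVENT_CODE(0x12021)   = (0x12021 >> 4)  & 0xff = 0x02  (code 2)
 *   EVENT_GROUP(0x12021)  =  0x12021        & 0xf  = 1     (group 1)
 *   EVENT_CPU(0x12021)    = true  (N = 1, KRAIT_EVENT prefix)
 *   EVENT_VENUM(0x12021)  = false
 *
 * krait_evt_setup() then writes code 2 into byte 1 of PMRESR2
 * (group_shift = 8) and sets PMRESRn_EN.
 */
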
1262b7aafe99SStephen Boyd static u32 krait_read_pmresrn(int n)
1263b7aafe99SStephen Boyd {
1264b7aafe99SStephen Boyd 	u32 val;
1265b7aafe99SStephen Boyd 
1266b7aafe99SStephen Boyd 	switch (n) {
1267b7aafe99SStephen Boyd 	case 0:
1268b7aafe99SStephen Boyd 		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1269b7aafe99SStephen Boyd 		break;
1270b7aafe99SStephen Boyd 	case 1:
1271b7aafe99SStephen Boyd 		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1272b7aafe99SStephen Boyd 		break;
1273b7aafe99SStephen Boyd 	case 2:
1274b7aafe99SStephen Boyd 		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1275b7aafe99SStephen Boyd 		break;
1276b7aafe99SStephen Boyd 	default:
1277b7aafe99SStephen Boyd 		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1278b7aafe99SStephen Boyd 	}
1279b7aafe99SStephen Boyd 
1280b7aafe99SStephen Boyd 	return val;
1281b7aafe99SStephen Boyd }
1282b7aafe99SStephen Boyd 
1283b7aafe99SStephen Boyd static void krait_write_pmresrn(int n, u32 val)
1284b7aafe99SStephen Boyd {
1285b7aafe99SStephen Boyd 	switch (n) {
1286b7aafe99SStephen Boyd 	case 0:
1287b7aafe99SStephen Boyd 		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1288b7aafe99SStephen Boyd 		break;
1289b7aafe99SStephen Boyd 	case 1:
1290b7aafe99SStephen Boyd 		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1291b7aafe99SStephen Boyd 		break;
1292b7aafe99SStephen Boyd 	case 2:
1293b7aafe99SStephen Boyd 		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1294b7aafe99SStephen Boyd 		break;
1295b7aafe99SStephen Boyd 	default:
1296b7aafe99SStephen Boyd 		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1297b7aafe99SStephen Boyd 	}
1298b7aafe99SStephen Boyd }
1299b7aafe99SStephen Boyd 
130065bab451SStephen Boyd static u32 venum_read_pmresr(void)
1301b7aafe99SStephen Boyd {
1302b7aafe99SStephen Boyd 	u32 val;
1303b7aafe99SStephen Boyd 	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1304b7aafe99SStephen Boyd 	return val;
1305b7aafe99SStephen Boyd }
1306b7aafe99SStephen Boyd 
130765bab451SStephen Boyd static void venum_write_pmresr(u32 val)
1308b7aafe99SStephen Boyd {
1309b7aafe99SStephen Boyd 	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1310b7aafe99SStephen Boyd }
1311b7aafe99SStephen Boyd 
131265bab451SStephen Boyd static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1313b7aafe99SStephen Boyd {
1314b7aafe99SStephen Boyd 	u32 venum_new_val;
1315b7aafe99SStephen Boyd 	u32 fp_new_val;
1316b7aafe99SStephen Boyd 
1317b7aafe99SStephen Boyd 	BUG_ON(preemptible());
1318b7aafe99SStephen Boyd 	/* CPACR Enable CP10 and CP11 access */
1319b7aafe99SStephen Boyd 	*venum_orig_val = get_copro_access();
1320b7aafe99SStephen Boyd 	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1321b7aafe99SStephen Boyd 	set_copro_access(venum_new_val);
1322b7aafe99SStephen Boyd 
1323b7aafe99SStephen Boyd 	/* Enable FPEXC */
1324b7aafe99SStephen Boyd 	*fp_orig_val = fmrx(FPEXC);
1325b7aafe99SStephen Boyd 	fp_new_val = *fp_orig_val | FPEXC_EN;
1326b7aafe99SStephen Boyd 	fmxr(FPEXC, fp_new_val);
1327b7aafe99SStephen Boyd }
1328b7aafe99SStephen Boyd 
132965bab451SStephen Boyd static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1330b7aafe99SStephen Boyd {
1331b7aafe99SStephen Boyd 	BUG_ON(preemptible());
1332b7aafe99SStephen Boyd 	/* Restore FPEXC */
1333b7aafe99SStephen Boyd 	fmxr(FPEXC, fp_orig_val);
1334b7aafe99SStephen Boyd 	isb();
1335b7aafe99SStephen Boyd 	/* Restore CPACR */
1336b7aafe99SStephen Boyd 	set_copro_access(venum_orig_val);
1337b7aafe99SStephen Boyd }
1338b7aafe99SStephen Boyd 
1339b7aafe99SStephen Boyd static u32 krait_get_pmresrn_event(unsigned int region)
1340b7aafe99SStephen Boyd {
1341b7aafe99SStephen Boyd 	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1342b7aafe99SStephen Boyd 					     KRAIT_PMRESR1_GROUP0,
1343b7aafe99SStephen Boyd 					     KRAIT_PMRESR2_GROUP0 };
1344b7aafe99SStephen Boyd 	return pmresrn_table[region];
1345b7aafe99SStephen Boyd }
1346b7aafe99SStephen Boyd 
1347b7aafe99SStephen Boyd static void krait_evt_setup(int idx, u32 config_base)
1348b7aafe99SStephen Boyd {
1349b7aafe99SStephen Boyd 	u32 val;
1350b7aafe99SStephen Boyd 	u32 mask;
1351b7aafe99SStephen Boyd 	u32 vval, fval;
135265bab451SStephen Boyd 	unsigned int region = EVENT_REGION(config_base);
135365bab451SStephen Boyd 	unsigned int group = EVENT_GROUP(config_base);
135465bab451SStephen Boyd 	unsigned int code = EVENT_CODE(config_base);
1355b7aafe99SStephen Boyd 	unsigned int group_shift;
135665bab451SStephen Boyd 	bool venum_event = EVENT_VENUM(config_base);
1357b7aafe99SStephen Boyd 
1358b7aafe99SStephen Boyd 	group_shift = group * 8;
1359b7aafe99SStephen Boyd 	mask = 0xff << group_shift;
1360b7aafe99SStephen Boyd 
1361b7aafe99SStephen Boyd 	/* Configure evtsel for the region and group */
1362b7aafe99SStephen Boyd 	if (venum_event)
1363b7aafe99SStephen Boyd 		val = KRAIT_VPMRESR0_GROUP0;
1364b7aafe99SStephen Boyd 	else
1365b7aafe99SStephen Boyd 		val = krait_get_pmresrn_event(region);
1366b7aafe99SStephen Boyd 	val += group;
1367b7aafe99SStephen Boyd 	/* Mix in mode-exclusion bits */
1368b7aafe99SStephen Boyd 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1369b7aafe99SStephen Boyd 	armv7_pmnc_write_evtsel(idx, val);
1370b7aafe99SStephen Boyd 
1371b7aafe99SStephen Boyd 	if (venum_event) {
137265bab451SStephen Boyd 		venum_pre_pmresr(&vval, &fval);
137365bab451SStephen Boyd 		val = venum_read_pmresr();
1374b7aafe99SStephen Boyd 		val &= ~mask;
1375b7aafe99SStephen Boyd 		val |= code << group_shift;
1376b7aafe99SStephen Boyd 		val |= PMRESRn_EN;
137765bab451SStephen Boyd 		venum_write_pmresr(val);
137865bab451SStephen Boyd 		venum_post_pmresr(vval, fval);
1379b7aafe99SStephen Boyd 	} else {
1380b7aafe99SStephen Boyd 		val = krait_read_pmresrn(region);
1381b7aafe99SStephen Boyd 		val &= ~mask;
1382b7aafe99SStephen Boyd 		val |= code << group_shift;
1383b7aafe99SStephen Boyd 		val |= PMRESRn_EN;
1384b7aafe99SStephen Boyd 		krait_write_pmresrn(region, val);
1385b7aafe99SStephen Boyd 	}
1386b7aafe99SStephen Boyd }
1387b7aafe99SStephen Boyd 
138865bab451SStephen Boyd static u32 clear_pmresrn_group(u32 val, int group)
1389b7aafe99SStephen Boyd {
1390b7aafe99SStephen Boyd 	u32 mask;
1391b7aafe99SStephen Boyd 	int group_shift;
1392b7aafe99SStephen Boyd 
1393b7aafe99SStephen Boyd 	group_shift = group * 8;
1394b7aafe99SStephen Boyd 	mask = 0xff << group_shift;
1395b7aafe99SStephen Boyd 	val &= ~mask;
1396b7aafe99SStephen Boyd 
1397b7aafe99SStephen Boyd 	/* Don't clear enable bit if entire region isn't disabled */
1398b7aafe99SStephen Boyd 	if (val & ~PMRESRn_EN)
1399b7aafe99SStephen Boyd 		return val |= PMRESRn_EN;
1400b7aafe99SStephen Boyd 
1401b7aafe99SStephen Boyd 	return 0;
1402b7aafe99SStephen Boyd }
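
/*
 * Example behaviour of clear_pmresrn_group(): clearing group 1 of a
 * register holding 0x80000203 returns 0x80000003 (EN stays set because
 * group 0 is still programmed), while clearing group 0 of 0x80000003
 * returns 0, dropping EN for the whole region.
 */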
1403b7aafe99SStephen Boyd 
1404b7aafe99SStephen Boyd static void krait_clearpmu(u32 config_base)
1405b7aafe99SStephen Boyd {
1406b7aafe99SStephen Boyd 	u32 val;
1407b7aafe99SStephen Boyd 	u32 vval, fval;
140865bab451SStephen Boyd 	unsigned int region = EVENT_REGION(config_base);
140965bab451SStephen Boyd 	unsigned int group = EVENT_GROUP(config_base);
141065bab451SStephen Boyd 	bool venum_event = EVENT_VENUM(config_base);
1411b7aafe99SStephen Boyd 
1412b7aafe99SStephen Boyd 	if (venum_event) {
141365bab451SStephen Boyd 		venum_pre_pmresr(&vval, &fval);
141465bab451SStephen Boyd 		val = venum_read_pmresr();
141565bab451SStephen Boyd 		val = clear_pmresrn_group(val, group);
141665bab451SStephen Boyd 		venum_write_pmresr(val);
141765bab451SStephen Boyd 		venum_post_pmresr(vval, fval);
1418b7aafe99SStephen Boyd 	} else {
1419b7aafe99SStephen Boyd 		val = krait_read_pmresrn(region);
142065bab451SStephen Boyd 		val = clear_pmresrn_group(val, group);
1421b7aafe99SStephen Boyd 		krait_write_pmresrn(region, val);
1422b7aafe99SStephen Boyd 	}
1423b7aafe99SStephen Boyd }
1424b7aafe99SStephen Boyd 
1425b7aafe99SStephen Boyd static void krait_pmu_disable_event(struct perf_event *event)
1426b7aafe99SStephen Boyd {
1427b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1428b7aafe99SStephen Boyd 	int idx = hwc->idx;
1429b7aafe99SStephen Boyd 
1430b7aafe99SStephen Boyd 	/* Disable counter and interrupt */
1431b7aafe99SStephen Boyd 
1432b7aafe99SStephen Boyd 	/* Disable counter */
1433b7aafe99SStephen Boyd 	armv7_pmnc_disable_counter(idx);
1434b7aafe99SStephen Boyd 
1435b7aafe99SStephen Boyd 	/*
1436b7aafe99SStephen Boyd 	 * Clear pmresr code (if destined for PMNx counters)
1437b7aafe99SStephen Boyd 	 */
1438b7aafe99SStephen Boyd 	if (hwc->config_base & KRAIT_EVENT_MASK)
1439b7aafe99SStephen Boyd 		krait_clearpmu(hwc->config_base);
1440b7aafe99SStephen Boyd 
1441b7aafe99SStephen Boyd 	/* Disable interrupt for this counter */
1442b7aafe99SStephen Boyd 	armv7_pmnc_disable_intens(idx);
1443b7aafe99SStephen Boyd }
1444b7aafe99SStephen Boyd 
1445b7aafe99SStephen Boyd static void krait_pmu_enable_event(struct perf_event *event)
1446b7aafe99SStephen Boyd {
1447b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1448b7aafe99SStephen Boyd 	int idx = hwc->idx;
1449b7aafe99SStephen Boyd 
1450b7aafe99SStephen Boyd 	/*
1451b7aafe99SStephen Boyd 	 * Set event (if destined for PMNx counters)
1452b7aafe99SStephen Boyd 	 * We set the event for the cycle counter because we
1453b7aafe99SStephen Boyd 	 * have the ability to perform event filtering.
1454b7aafe99SStephen Boyd 	 */
1455b7aafe99SStephen Boyd 	if (hwc->config_base & KRAIT_EVENT_MASK)
1456b7aafe99SStephen Boyd 		krait_evt_setup(idx, hwc->config_base);
1457b7aafe99SStephen Boyd 	else
1458b7aafe99SStephen Boyd 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1459b7aafe99SStephen Boyd 
1460b7aafe99SStephen Boyd 	armv7_pmnc_enable_intens(idx);
1461b7aafe99SStephen Boyd 	armv7_pmnc_enable_counter(idx);
1462b7aafe99SStephen Boyd }
1463b7aafe99SStephen Boyd 
1464b7aafe99SStephen Boyd static void krait_pmu_reset(void *info)
1465b7aafe99SStephen Boyd {
1466b7aafe99SStephen Boyd 	u32 vval, fval;
146793499918SStephen Boyd 	struct arm_pmu *cpu_pmu = info;
1468bf5ffc8cSRob Herring (Arm) 	u32 idx;
1469b7aafe99SStephen Boyd 
1470b7aafe99SStephen Boyd 	armv7pmu_reset(info);
1471b7aafe99SStephen Boyd 
1472b7aafe99SStephen Boyd 	/* Clear all pmresrs */
1473b7aafe99SStephen Boyd 	krait_write_pmresrn(0, 0);
1474b7aafe99SStephen Boyd 	krait_write_pmresrn(1, 0);
1475b7aafe99SStephen Boyd 	krait_write_pmresrn(2, 0);
1476b7aafe99SStephen Boyd 
147765bab451SStephen Boyd 	venum_pre_pmresr(&vval, &fval);
147865bab451SStephen Boyd 	venum_write_pmresr(0);
147965bab451SStephen Boyd 	venum_post_pmresr(vval, fval);
148093499918SStephen Boyd 
148193499918SStephen Boyd 	/* Reset PMxEVNCTCR to sane default */
1482bf5ffc8cSRob Herring (Arm) 	for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
148393499918SStephen Boyd 		armv7_pmnc_select_counter(idx);
148493499918SStephen Boyd 		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
148593499918SStephen Boyd 	}
1487b7aafe99SStephen Boyd }
1488b7aafe99SStephen Boyd 
1489b7aafe99SStephen Boyd static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1490b7aafe99SStephen Boyd 			      unsigned int group)
1491b7aafe99SStephen Boyd {
1492b7aafe99SStephen Boyd 	int bit;
1493b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1494b7aafe99SStephen Boyd 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1495b7aafe99SStephen Boyd 
1496b7aafe99SStephen Boyd 	if (hwc->config_base & VENUM_EVENT)
1497b7aafe99SStephen Boyd 		bit = KRAIT_VPMRESR0_GROUP0;
1498b7aafe99SStephen Boyd 	else
1499b7aafe99SStephen Boyd 		bit = krait_get_pmresrn_event(region);
1500b7aafe99SStephen Boyd 	bit -= krait_get_pmresrn_event(0);
1501b7aafe99SStephen Boyd 	bit += group;
1502b7aafe99SStephen Boyd 	/*
1503b7aafe99SStephen Boyd 	 * Lower bits are reserved for use by the counters (see
1504b7aafe99SStephen Boyd 	 * armv7pmu_get_event_idx() for more info)
1505b7aafe99SStephen Boyd 	 */
1506bf5ffc8cSRob Herring (Arm) 	bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX);
1507b7aafe99SStephen Boyd 
1508b7aafe99SStephen Boyd 	return bit;
1509b7aafe99SStephen Boyd }
1510b7aafe99SStephen Boyd 
1511b7aafe99SStephen Boyd /*
1512b7aafe99SStephen Boyd  * We check for column exclusion constraints here.
1513b7aafe99SStephen Boyd  * Two events can't use the same group within a pmresr register.
1514b7aafe99SStephen Boyd  */
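/*
 * For example, two events with config_base 0x12021 and 0x12031 both
 * select PMRESR2 group 1 (they differ only in the event code), so the
 * second one to be scheduled gets -EAGAIN below.
 */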
1515b7aafe99SStephen Boyd static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1516b7aafe99SStephen Boyd 				   struct perf_event *event)
1517b7aafe99SStephen Boyd {
1518b7aafe99SStephen Boyd 	int idx;
15196a78371aSRussell King 	int bit = -1;
1520b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
152165bab451SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
152265bab451SStephen Boyd 	unsigned int code = EVENT_CODE(hwc->config_base);
152365bab451SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
152465bab451SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
152565bab451SStephen Boyd 	bool krait_event = EVENT_CPU(hwc->config_base);
1526b7aafe99SStephen Boyd 
152765bab451SStephen Boyd 	if (venum_event || krait_event) {
1528b7aafe99SStephen Boyd 		/* Ignore invalid events */
1529b7aafe99SStephen Boyd 		if (group > 3 || region > 2)
1530b7aafe99SStephen Boyd 			return -EINVAL;
153165bab451SStephen Boyd 		if (venum_event && (code & 0xe0))
1532b7aafe99SStephen Boyd 			return -EINVAL;
1533b7aafe99SStephen Boyd 
1534b7aafe99SStephen Boyd 		bit = krait_event_to_bit(event, region, group);
1535b7aafe99SStephen Boyd 		if (test_and_set_bit(bit, cpuc->used_mask))
1536b7aafe99SStephen Boyd 			return -EAGAIN;
1537b7aafe99SStephen Boyd 	}
1538b7aafe99SStephen Boyd 
1539b7aafe99SStephen Boyd 	idx = armv7pmu_get_event_idx(cpuc, event);
15406a78371aSRussell King 	if (idx < 0 && bit >= 0)
1541b7aafe99SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1542b7aafe99SStephen Boyd 
1543b7aafe99SStephen Boyd 	return idx;
1544b7aafe99SStephen Boyd }
1545b7aafe99SStephen Boyd 
1546b7aafe99SStephen Boyd static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1547b7aafe99SStephen Boyd 				      struct perf_event *event)
1548b7aafe99SStephen Boyd {
1549b7aafe99SStephen Boyd 	int bit;
1550b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
155165bab451SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
155265bab451SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
155365bab451SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
155465bab451SStephen Boyd 	bool krait_event = EVENT_CPU(hwc->config_base);
1555b7aafe99SStephen Boyd 
15567dfc8db1SSuzuki K Poulose 	armv7pmu_clear_event_idx(cpuc, event);
155765bab451SStephen Boyd 	if (venum_event || krait_event) {
1558b7aafe99SStephen Boyd 		bit = krait_event_to_bit(event, region, group);
1559b7aafe99SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1560b7aafe99SStephen Boyd 	}
1561b7aafe99SStephen Boyd }
1562b7aafe99SStephen Boyd 
15632a3391cdSStephen Boyd static int krait_pmu_init(struct arm_pmu *cpu_pmu)
15642a3391cdSStephen Boyd {
15652a3391cdSStephen Boyd 	armv7pmu_init(cpu_pmu);
15663d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_krait";
15672a3391cdSStephen Boyd 	/* Some early versions of Krait don't support PC write events */
15682a3391cdSStephen Boyd 	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
15692a3391cdSStephen Boyd 				  "qcom,no-pc-write"))
15702a3391cdSStephen Boyd 		cpu_pmu->map_event = krait_map_event_no_branch;
15712a3391cdSStephen Boyd 	else
15722a3391cdSStephen Boyd 		cpu_pmu->map_event = krait_map_event;
15732a3391cdSStephen Boyd 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1574b7aafe99SStephen Boyd 	cpu_pmu->reset		= krait_pmu_reset;
1575b7aafe99SStephen Boyd 	cpu_pmu->enable		= krait_pmu_enable_event;
1576b7aafe99SStephen Boyd 	cpu_pmu->disable	= krait_pmu_disable_event;
1577b7aafe99SStephen Boyd 	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1578b7aafe99SStephen Boyd 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
15790e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
15802a3391cdSStephen Boyd }
1581341e42c4SStephen Boyd 
1582341e42c4SStephen Boyd /*
1583341e42c4SStephen Boyd  * Scorpion Local Performance Monitor Register (LPMn)
1584341e42c4SStephen Boyd  *
1585341e42c4SStephen Boyd  *            31   30     24     16     8      0
1586341e42c4SStephen Boyd  *            +--------------------------------+
1587341e42c4SStephen Boyd  *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1588341e42c4SStephen Boyd  *            +--------------------------------+
1589341e42c4SStephen Boyd  *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1590341e42c4SStephen Boyd  *            +--------------------------------+
1591341e42c4SStephen Boyd  *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1592341e42c4SStephen Boyd  *            +--------------------------------+
1593341e42c4SStephen Boyd  *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1594341e42c4SStephen Boyd  *            +--------------------------------+
1595341e42c4SStephen Boyd  *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1596341e42c4SStephen Boyd  *            +--------------------------------+
1597341e42c4SStephen Boyd  *              EN | G=3  | G=2  | G=1  | G=0
1598341e42c4SStephen Boyd  *
1599341e42c4SStephen Boyd  *
1600341e42c4SStephen Boyd  *  Event Encoding:
1601341e42c4SStephen Boyd  *
1602341e42c4SStephen Boyd  *      hwc->config_base = 0xNRCCG
1603341e42c4SStephen Boyd  *
1604341e42c4SStephen Boyd  *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1605341e42c4SStephen Boyd  *      R  = region register
1606341e42c4SStephen Boyd  *      CC = class of events the group G is choosing from
1607341e42c4SStephen Boyd  *      G  = group or particular event
1608341e42c4SStephen Boyd  *
1609341e42c4SStephen Boyd  *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1610341e42c4SStephen Boyd  *
1611341e42c4SStephen Boyd  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1612341e42c4SStephen Boyd  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1613341e42c4SStephen Boyd  *  events (interrupts for example). An event code is broken down into
1614341e42c4SStephen Boyd  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1615341e42c4SStephen Boyd  *  example).
1616341e42c4SStephen Boyd  */
1617341e42c4SStephen Boyd 
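/*
 * The EVENT_* decode shown for the Krait example applies unchanged here;
 * the only differences are that region 3 selects L2LPM and Venum events
 * are programmed through VLPM instead of VPMRESR0.
 */
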
1618341e42c4SStephen Boyd static u32 scorpion_read_pmresrn(int n)
1619341e42c4SStephen Boyd {
1620341e42c4SStephen Boyd 	u32 val;
1621341e42c4SStephen Boyd 
1622341e42c4SStephen Boyd 	switch (n) {
1623341e42c4SStephen Boyd 	case 0:
1624341e42c4SStephen Boyd 		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1625341e42c4SStephen Boyd 		break;
1626341e42c4SStephen Boyd 	case 1:
1627341e42c4SStephen Boyd 		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1628341e42c4SStephen Boyd 		break;
1629341e42c4SStephen Boyd 	case 2:
1630341e42c4SStephen Boyd 		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1631341e42c4SStephen Boyd 		break;
1632341e42c4SStephen Boyd 	case 3:
1633341e42c4SStephen Boyd 		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1634341e42c4SStephen Boyd 		break;
1635341e42c4SStephen Boyd 	default:
1636341e42c4SStephen Boyd 		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1637341e42c4SStephen Boyd 	}
1638341e42c4SStephen Boyd 
1639341e42c4SStephen Boyd 	return val;
1640341e42c4SStephen Boyd }
1641341e42c4SStephen Boyd 
1642341e42c4SStephen Boyd static void scorpion_write_pmresrn(int n, u32 val)
1643341e42c4SStephen Boyd {
1644341e42c4SStephen Boyd 	switch (n) {
1645341e42c4SStephen Boyd 	case 0:
1646341e42c4SStephen Boyd 		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1647341e42c4SStephen Boyd 		break;
1648341e42c4SStephen Boyd 	case 1:
1649341e42c4SStephen Boyd 		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1650341e42c4SStephen Boyd 		break;
1651341e42c4SStephen Boyd 	case 2:
1652341e42c4SStephen Boyd 		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1653341e42c4SStephen Boyd 		break;
1654341e42c4SStephen Boyd 	case 3:
1655341e42c4SStephen Boyd 		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1656341e42c4SStephen Boyd 		break;
1657341e42c4SStephen Boyd 	default:
1658341e42c4SStephen Boyd 		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1659341e42c4SStephen Boyd 	}
1660341e42c4SStephen Boyd }
1661341e42c4SStephen Boyd 
1662341e42c4SStephen Boyd static u32 scorpion_get_pmresrn_event(unsigned int region)
1663341e42c4SStephen Boyd {
1664341e42c4SStephen Boyd 	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1665341e42c4SStephen Boyd 					     SCORPION_LPM1_GROUP0,
1666341e42c4SStephen Boyd 					     SCORPION_LPM2_GROUP0,
1667341e42c4SStephen Boyd 					     SCORPION_L2LPM_GROUP0 };
1668341e42c4SStephen Boyd 	return pmresrn_table[region];
1669341e42c4SStephen Boyd }
1670341e42c4SStephen Boyd 
1671341e42c4SStephen Boyd static void scorpion_evt_setup(int idx, u32 config_base)
1672341e42c4SStephen Boyd {
1673341e42c4SStephen Boyd 	u32 val;
1674341e42c4SStephen Boyd 	u32 mask;
1675341e42c4SStephen Boyd 	u32 vval, fval;
1676341e42c4SStephen Boyd 	unsigned int region = EVENT_REGION(config_base);
1677341e42c4SStephen Boyd 	unsigned int group = EVENT_GROUP(config_base);
1678341e42c4SStephen Boyd 	unsigned int code = EVENT_CODE(config_base);
1679341e42c4SStephen Boyd 	unsigned int group_shift;
1680341e42c4SStephen Boyd 	bool venum_event = EVENT_VENUM(config_base);
1681341e42c4SStephen Boyd 
1682341e42c4SStephen Boyd 	group_shift = group * 8;
1683341e42c4SStephen Boyd 	mask = 0xff << group_shift;
1684341e42c4SStephen Boyd 
1685341e42c4SStephen Boyd 	/* Configure evtsel for the region and group */
1686341e42c4SStephen Boyd 	if (venum_event)
1687341e42c4SStephen Boyd 		val = SCORPION_VLPM_GROUP0;
1688341e42c4SStephen Boyd 	else
1689341e42c4SStephen Boyd 		val = scorpion_get_pmresrn_event(region);
1690341e42c4SStephen Boyd 	val += group;
1691341e42c4SStephen Boyd 	/* Mix in mode-exclusion bits */
1692341e42c4SStephen Boyd 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1693341e42c4SStephen Boyd 	armv7_pmnc_write_evtsel(idx, val);
1694341e42c4SStephen Boyd 
1695341e42c4SStephen Boyd 	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1696341e42c4SStephen Boyd 
1697341e42c4SStephen Boyd 	if (venum_event) {
1698341e42c4SStephen Boyd 		venum_pre_pmresr(&vval, &fval);
1699341e42c4SStephen Boyd 		val = venum_read_pmresr();
1700341e42c4SStephen Boyd 		val &= ~mask;
1701341e42c4SStephen Boyd 		val |= code << group_shift;
1702341e42c4SStephen Boyd 		val |= PMRESRn_EN;
1703341e42c4SStephen Boyd 		venum_write_pmresr(val);
1704341e42c4SStephen Boyd 		venum_post_pmresr(vval, fval);
1705341e42c4SStephen Boyd 	} else {
1706341e42c4SStephen Boyd 		val = scorpion_read_pmresrn(region);
1707341e42c4SStephen Boyd 		val &= ~mask;
1708341e42c4SStephen Boyd 		val |= code << group_shift;
1709341e42c4SStephen Boyd 		val |= PMRESRn_EN;
1710341e42c4SStephen Boyd 		scorpion_write_pmresrn(region, val);
1711341e42c4SStephen Boyd 	}
1712341e42c4SStephen Boyd }
1713341e42c4SStephen Boyd 
1714341e42c4SStephen Boyd static void scorpion_clearpmu(u32 config_base)
1715341e42c4SStephen Boyd {
1716341e42c4SStephen Boyd 	u32 val;
1717341e42c4SStephen Boyd 	u32 vval, fval;
1718341e42c4SStephen Boyd 	unsigned int region = EVENT_REGION(config_base);
1719341e42c4SStephen Boyd 	unsigned int group = EVENT_GROUP(config_base);
1720341e42c4SStephen Boyd 	bool venum_event = EVENT_VENUM(config_base);
1721341e42c4SStephen Boyd 
1722341e42c4SStephen Boyd 	if (venum_event) {
1723341e42c4SStephen Boyd 		venum_pre_pmresr(&vval, &fval);
1724341e42c4SStephen Boyd 		val = venum_read_pmresr();
1725341e42c4SStephen Boyd 		val = clear_pmresrn_group(val, group);
1726341e42c4SStephen Boyd 		venum_write_pmresr(val);
1727341e42c4SStephen Boyd 		venum_post_pmresr(vval, fval);
1728341e42c4SStephen Boyd 	} else {
1729341e42c4SStephen Boyd 		val = scorpion_read_pmresrn(region);
1730341e42c4SStephen Boyd 		val = clear_pmresrn_group(val, group);
1731341e42c4SStephen Boyd 		scorpion_write_pmresrn(region, val);
1732341e42c4SStephen Boyd 	}
1733341e42c4SStephen Boyd }
1734341e42c4SStephen Boyd 
1735341e42c4SStephen Boyd static void scorpion_pmu_disable_event(struct perf_event *event)
1736341e42c4SStephen Boyd {
1737341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1738341e42c4SStephen Boyd 	int idx = hwc->idx;
1739341e42c4SStephen Boyd 
1740341e42c4SStephen Boyd 	/* Disable counter and interrupt */
1741341e42c4SStephen Boyd 
1742341e42c4SStephen Boyd 	/* Disable counter */
1743341e42c4SStephen Boyd 	armv7_pmnc_disable_counter(idx);
1744341e42c4SStephen Boyd 
1745341e42c4SStephen Boyd 	/*
1746341e42c4SStephen Boyd 	 * Clear pmresr code (if destined for PMNx counters)
1747341e42c4SStephen Boyd 	 */
1748341e42c4SStephen Boyd 	if (hwc->config_base & KRAIT_EVENT_MASK)
1749341e42c4SStephen Boyd 		scorpion_clearpmu(hwc->config_base);
1750341e42c4SStephen Boyd 
1751341e42c4SStephen Boyd 	/* Disable interrupt for this counter */
1752341e42c4SStephen Boyd 	armv7_pmnc_disable_intens(idx);
1753341e42c4SStephen Boyd }
1754341e42c4SStephen Boyd 
1755341e42c4SStephen Boyd static void scorpion_pmu_enable_event(struct perf_event *event)
1756341e42c4SStephen Boyd {
1757341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1758341e42c4SStephen Boyd 	int idx = hwc->idx;
1759341e42c4SStephen Boyd 
1760341e42c4SStephen Boyd 	/*
1761341e42c4SStephen Boyd 	 * Set event (if destined for PMNx counters)
1762341e42c4SStephen Boyd 	 * We don't set the event for the cycle counter because we
1763341e42c4SStephen Boyd 	 * don't have the ability to perform event filtering.
1764341e42c4SStephen Boyd 	 */
1765341e42c4SStephen Boyd 	if (hwc->config_base & KRAIT_EVENT_MASK)
1766341e42c4SStephen Boyd 		scorpion_evt_setup(idx, hwc->config_base);
1767341e42c4SStephen Boyd 	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1768341e42c4SStephen Boyd 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1769341e42c4SStephen Boyd 
1770341e42c4SStephen Boyd 	armv7_pmnc_enable_intens(idx);
1771341e42c4SStephen Boyd 	armv7_pmnc_enable_counter(idx);
1772341e42c4SStephen Boyd }
1773341e42c4SStephen Boyd 
1774341e42c4SStephen Boyd static void scorpion_pmu_reset(void *info)
1775341e42c4SStephen Boyd {
1776341e42c4SStephen Boyd 	u32 vval, fval;
1777341e42c4SStephen Boyd 	struct arm_pmu *cpu_pmu = info;
1778bf5ffc8cSRob Herring (Arm) 	u32 idx;
1779341e42c4SStephen Boyd 
1780341e42c4SStephen Boyd 	armv7pmu_reset(info);
1781341e42c4SStephen Boyd 
1782341e42c4SStephen Boyd 	/* Clear all pmresrs */
1783341e42c4SStephen Boyd 	scorpion_write_pmresrn(0, 0);
1784341e42c4SStephen Boyd 	scorpion_write_pmresrn(1, 0);
1785341e42c4SStephen Boyd 	scorpion_write_pmresrn(2, 0);
1786341e42c4SStephen Boyd 	scorpion_write_pmresrn(3, 0);
1787341e42c4SStephen Boyd 
1788341e42c4SStephen Boyd 	venum_pre_pmresr(&vval, &fval);
1789341e42c4SStephen Boyd 	venum_write_pmresr(0);
1790341e42c4SStephen Boyd 	venum_post_pmresr(vval, fval);
1791341e42c4SStephen Boyd 
1792341e42c4SStephen Boyd 	/* Reset PMxEVNCTCR to sane default */
1793bf5ffc8cSRob Herring (Arm) 	for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
1794341e42c4SStephen Boyd 		armv7_pmnc_select_counter(idx);
1795341e42c4SStephen Boyd 		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1796341e42c4SStephen Boyd 	}
1797341e42c4SStephen Boyd }
1798341e42c4SStephen Boyd 
1799341e42c4SStephen Boyd static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1800341e42c4SStephen Boyd 			      unsigned int group)
1801341e42c4SStephen Boyd {
1802341e42c4SStephen Boyd 	int bit;
1803341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1804341e42c4SStephen Boyd 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1805341e42c4SStephen Boyd 
1806341e42c4SStephen Boyd 	if (hwc->config_base & VENUM_EVENT)
1807341e42c4SStephen Boyd 		bit = SCORPION_VLPM_GROUP0;
1808341e42c4SStephen Boyd 	else
1809341e42c4SStephen Boyd 		bit = scorpion_get_pmresrn_event(region);
1810341e42c4SStephen Boyd 	bit -= scorpion_get_pmresrn_event(0);
1811341e42c4SStephen Boyd 	bit += group;
1812341e42c4SStephen Boyd 	/*
1813341e42c4SStephen Boyd 	 * Lower bits are reserved for use by the counters (see
1814341e42c4SStephen Boyd 	 * armv7pmu_get_event_idx() for more info)
1815341e42c4SStephen Boyd 	 */
1816bf5ffc8cSRob Herring (Arm) 	bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX);
1817341e42c4SStephen Boyd 
1818341e42c4SStephen Boyd 	return bit;
1819341e42c4SStephen Boyd }
1820341e42c4SStephen Boyd 
1821341e42c4SStephen Boyd /*
1822341e42c4SStephen Boyd  * We check for column exclusion constraints here.
1823341e42c4SStephen Boyd  * Two events can't use the same group within a pmresr register.
1824341e42c4SStephen Boyd  */
1825341e42c4SStephen Boyd static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1826341e42c4SStephen Boyd 				   struct perf_event *event)
1827341e42c4SStephen Boyd {
1828341e42c4SStephen Boyd 	int idx;
1829341e42c4SStephen Boyd 	int bit = -1;
1830341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1831341e42c4SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
1832341e42c4SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
1833341e42c4SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
1834341e42c4SStephen Boyd 	bool scorpion_event = EVENT_CPU(hwc->config_base);
1835341e42c4SStephen Boyd 
1836341e42c4SStephen Boyd 	if (venum_event || scorpion_event) {
1837341e42c4SStephen Boyd 		/* Ignore invalid events */
1838341e42c4SStephen Boyd 		if (group > 3 || region > 3)
1839341e42c4SStephen Boyd 			return -EINVAL;
1840341e42c4SStephen Boyd 
1841341e42c4SStephen Boyd 		bit = scorpion_event_to_bit(event, region, group);
1842341e42c4SStephen Boyd 		if (test_and_set_bit(bit, cpuc->used_mask))
1843341e42c4SStephen Boyd 			return -EAGAIN;
1844341e42c4SStephen Boyd 	}
1845341e42c4SStephen Boyd 
1846341e42c4SStephen Boyd 	idx = armv7pmu_get_event_idx(cpuc, event);
1847341e42c4SStephen Boyd 	if (idx < 0 && bit >= 0)
1848341e42c4SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1849341e42c4SStephen Boyd 
1850341e42c4SStephen Boyd 	return idx;
1851341e42c4SStephen Boyd }
1852341e42c4SStephen Boyd 
1853341e42c4SStephen Boyd static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1854341e42c4SStephen Boyd 				      struct perf_event *event)
1855341e42c4SStephen Boyd {
1856341e42c4SStephen Boyd 	int bit;
1857341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1858341e42c4SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
1859341e42c4SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
1860341e42c4SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
1861341e42c4SStephen Boyd 	bool scorpion_event = EVENT_CPU(hwc->config_base);
1862341e42c4SStephen Boyd 
18637dfc8db1SSuzuki K Poulose 	armv7pmu_clear_event_idx(cpuc, event);
1864341e42c4SStephen Boyd 	if (venum_event || scorpion_event) {
1865341e42c4SStephen Boyd 		bit = scorpion_event_to_bit(event, region, group);
1866341e42c4SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1867341e42c4SStephen Boyd 	}
1868341e42c4SStephen Boyd }
1869341e42c4SStephen Boyd 
1870341e42c4SStephen Boyd static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1871341e42c4SStephen Boyd {
1872341e42c4SStephen Boyd 	armv7pmu_init(cpu_pmu);
1873341e42c4SStephen Boyd 	cpu_pmu->name		= "armv7_scorpion";
1874341e42c4SStephen Boyd 	cpu_pmu->map_event	= scorpion_map_event;
1875341e42c4SStephen Boyd 	cpu_pmu->reset		= scorpion_pmu_reset;
1876341e42c4SStephen Boyd 	cpu_pmu->enable		= scorpion_pmu_enable_event;
1877341e42c4SStephen Boyd 	cpu_pmu->disable	= scorpion_pmu_disable_event;
1878341e42c4SStephen Boyd 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1879341e42c4SStephen Boyd 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
18800e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
1881341e42c4SStephen Boyd }
1882341e42c4SStephen Boyd 
1883341e42c4SStephen Boyd static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1884341e42c4SStephen Boyd {
1885341e42c4SStephen Boyd 	armv7pmu_init(cpu_pmu);
1886341e42c4SStephen Boyd 	cpu_pmu->name		= "armv7_scorpion_mp";
1887341e42c4SStephen Boyd 	cpu_pmu->map_event	= scorpion_map_event;
1888341e42c4SStephen Boyd 	cpu_pmu->reset		= scorpion_pmu_reset;
1889341e42c4SStephen Boyd 	cpu_pmu->enable		= scorpion_pmu_enable_event;
1890341e42c4SStephen Boyd 	cpu_pmu->disable	= scorpion_pmu_disable_event;
1891341e42c4SStephen Boyd 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1892341e42c4SStephen Boyd 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
18930e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
1894341e42c4SStephen Boyd }
189529ba0f37SMark Rutland 
189629ba0f37SMark Rutland static const struct of_device_id armv7_pmu_of_device_ids[] = {
189729ba0f37SMark Rutland 	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
189829ba0f37SMark Rutland 	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
189929ba0f37SMark Rutland 	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
190029ba0f37SMark Rutland 	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
190129ba0f37SMark Rutland 	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
190229ba0f37SMark Rutland 	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
190329ba0f37SMark Rutland 	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
190429ba0f37SMark Rutland 	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
190529ba0f37SMark Rutland 	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
190629ba0f37SMark Rutland 	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
190729ba0f37SMark Rutland 	{},
190829ba0f37SMark Rutland };
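
/*
 * A minimal sketch of a device-tree node this driver binds against
 * (the interrupt specifier values are placeholders, not taken from a
 * real platform):
 *
 *	pmu {
 *		compatible = "arm,cortex-a9-pmu";
 *		interrupts = <0 120 4>, <0 121 4>;
 *	};
 */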
190929ba0f37SMark Rutland 
191029ba0f37SMark Rutland static int armv7_pmu_device_probe(struct platform_device *pdev)
191143eab878SWill Deacon {
191212f051c9SRob Herring (Arm) 	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids, NULL);
191343eab878SWill Deacon }
191443eab878SWill Deacon 
191529ba0f37SMark Rutland static struct platform_driver armv7_pmu_driver = {
191629ba0f37SMark Rutland 	.driver		= {
191729ba0f37SMark Rutland 		.name	= "armv7-pmu",
191829ba0f37SMark Rutland 		.of_match_table = armv7_pmu_of_device_ids,
191964b2f025SStefan Agner 		.suppress_bind_attrs = true,
192029ba0f37SMark Rutland 	},
192129ba0f37SMark Rutland 	.probe		= armv7_pmu_device_probe,
192229ba0f37SMark Rutland };
19230c205cbeSWill Deacon 
1924b128cb55SGeliang Tang builtin_platform_driver(armv7_pmu_driver);
1925