xref: /kvm-unit-tests/arm/pmu.c (revision d77d128fb09ce1a113feec023dd84d3b1cbe5da6)
/*
 * Test the ARM Performance Monitors Unit (PMU).
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2016, Red Hat Inc, Wei Huang <wei@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License version 2.1 and
 * only version 2.1 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
 * for more details.
 */
#include "libcflat.h"
#include "asm/barrier.h"
#include "asm/sysreg.h"
#include "asm/processor.h"

#define PMU_PMCR_E         (1 << 0)
#define PMU_PMCR_C         (1 << 2)
#define PMU_PMCR_LC        (1 << 6)
#define PMU_PMCR_N_SHIFT   11
#define PMU_PMCR_N_MASK    0x1f
#define PMU_PMCR_ID_SHIFT  16
#define PMU_PMCR_ID_MASK   0xff
#define PMU_PMCR_IMP_SHIFT 24
#define PMU_PMCR_IMP_MASK  0xff

#define PMU_CYCLE_IDX      31

#define NR_SAMPLES 10

static unsigned int pmu_version;
#if defined(__arm__)
#define ID_DFR0_PERFMON_SHIFT 24
#define ID_DFR0_PERFMON_MASK  0xf

#define PMCR         __ACCESS_CP15(c9, 0, c12, 0)
#define ID_DFR0      __ACCESS_CP15(c0, 0, c1, 2)
#define PMSELR       __ACCESS_CP15(c9, 0, c12, 5)
#define PMXEVTYPER   __ACCESS_CP15(c9, 0, c13, 1)
#define PMCNTENSET   __ACCESS_CP15(c9, 0, c12, 1)
#define PMCCNTR32    __ACCESS_CP15(c9, 0, c13, 0)
#define PMCCNTR64    __ACCESS_CP15_64(0, c9)

static inline uint32_t get_id_dfr0(void) { return read_sysreg(ID_DFR0); }
static inline uint32_t get_pmcr(void) { return read_sysreg(PMCR); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, PMCR); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, PMCNTENSET); }

static inline uint8_t get_pmu_version(void)
{
	return (get_id_dfr0() >> ID_DFR0_PERFMON_SHIFT) & ID_DFR0_PERFMON_MASK;
}

/* The 64-bit (MRRC/MCRR) access to PMCCNTR is only defined from PMUv3 on */
static inline uint64_t get_pmccntr(void)
{
	if (pmu_version == 0x3)
		return read_sysreg(PMCCNTR64);
	else
		return read_sysreg(PMCCNTR32);
}

static inline void set_pmccntr(uint64_t value)
{
	if (pmu_version == 0x3)
		write_sysreg(value, PMCCNTR64);
	else
		write_sysreg(value & 0xffffffff, PMCCNTR32);
}

/* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */
static inline void set_pmccfiltr(uint32_t value)
{
	write_sysreg(PMU_CYCLE_IDX, PMSELR);
	write_sysreg(value, PMXEVTYPER);
	isb();
}
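
/*
 * Illustrative helper, not used by this test: the same PMSELR/PMXEVTYPER
 * indirection used for the cycle counter above programs any ARMv7 event
 * counter, selecting counter n before writing its event type.
 */
static inline void set_pmxevtyper(uint32_t n, uint32_t value)
{
	write_sysreg(n, PMSELR);
	write_sysreg(value, PMXEVTYPER);
	isb();
}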

/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed in
 * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop, e.g.
 * loop = 4 counts 10 instructions.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	asm volatile(
	"	mcr	p15, 0, %[pmcr], c9, c12, 0\n"
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	bgt	1b\n"
	"	mcr	p15, 0, %[z], c9, c12, 0\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr), [z] "r" (0)
	: "cc");
}
#elif defined(__aarch64__)
#define ID_AA64DFR0_PERFMON_SHIFT 8
#define ID_AA64DFR0_PERFMON_MASK  0xf

static inline uint32_t get_id_aa64dfr0(void) { return read_sysreg(id_aa64dfr0_el1); }
static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); }
static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); }
static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); }
static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); }
static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); }
static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); }

static inline uint8_t get_pmu_version(void)
{
	uint8_t ver = (get_id_aa64dfr0() >> ID_AA64DFR0_PERFMON_SHIFT) & ID_AA64DFR0_PERFMON_MASK;

	/* ID_AA64DFR0_EL1 reports PMUv3 as 1; normalize to the ARMv7 encoding (3) */
	return ver == 1 ? 3 : ver;
}

/*
 * Extra instructions inserted by the compiler would be difficult to compensate
 * for, so hand assemble everything between, and including, the PMCR accesses
 * to start and stop counting. isb instructions are inserted to make sure
 * pmccntr read after this function returns the exact instructions executed
 * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop.
 */
static inline void precise_instrs_loop(int loop, uint32_t pmcr)
{
	asm volatile(
	"	msr	pmcr_el0, %[pmcr]\n"
	"	isb\n"
	"1:	subs	%[loop], %[loop], #1\n"
	"	b.gt	1b\n"
	"	msr	pmcr_el0, xzr\n"
	"	isb\n"
	: [loop] "+r" (loop)
	: [pmcr] "r" (pmcr)
	: "cc");
}
#endif

/*
 * As a simple sanity check on the PMCR_EL0, ensure the implementer field isn't
 * null. Also print out a couple of other interesting fields for diagnostic
 * purposes. For example, as of fall 2016, QEMU TCG mode doesn't implement
 * event counters and therefore reports zero event counters, but hopefully
 * support for at least the instructions event will be added in the future and
 * the reported number of event counters will become nonzero.
 */
static bool check_pmcr(void)
{
	uint32_t pmcr;

	pmcr = get_pmcr();

	report_info("PMU implementer/ID code/counters: 0x%x(\"%c\")/0x%x/%d",
		    (pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK,
		    ((pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK) ? : ' ',
		    (pmcr >> PMU_PMCR_ID_SHIFT) & PMU_PMCR_ID_MASK,
		    (pmcr >> PMU_PMCR_N_SHIFT) & PMU_PMCR_N_MASK);

	return ((pmcr >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK) != 0;
}
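
/*
 * Worked example (hypothetical register value, for illustration only): a
 * PMCR read of 0x41033000 would decode as implementer 0x41 ('A'), ID code
 * 0x03 and N = 6 event counters:
 *   (0x41033000 >> PMU_PMCR_IMP_SHIFT) & PMU_PMCR_IMP_MASK == 0x41
 *   (0x41033000 >> PMU_PMCR_ID_SHIFT)  & PMU_PMCR_ID_MASK  == 0x03
 *   (0x41033000 >> PMU_PMCR_N_SHIFT)   & PMU_PMCR_N_MASK   == 0x06
 */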

/*
 * Ensure that the cycle counter progresses between back-to-back reads.
 */
static bool check_cycles_increase(void)
{
	bool success = true;

	/* init before event access, this test only cares about cycle count */
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	set_pmcr(get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);

	for (int i = 0; i < NR_SAMPLES; i++) {
		uint64_t a, b;

		a = get_pmccntr();
		b = get_pmccntr();

		if (a >= b) {
			printf("Read %"PRIu64" then %"PRIu64".\n", a, b);
			success = false;
			break;
		}
	}

	set_pmcr(get_pmcr() & ~PMU_PMCR_E);

	return success;
}
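
/*
 * Sketch only, not called by any test here: counting an architectural
 * event would mirror the cycle-counter setup in check_cycles_increase().
 * Event 0x8 (INST_RETIRED) is architecturally defined, but a usable event
 * counter 0 only exists if PMCR.N is nonzero (QEMU TCG reported zero as
 * of 2016, per the comment above check_pmcr()). AArch64 only; ARMv7 would
 * go through PMSELR/PMXEVTYPER instead (see set_pmxevtyper() above).
 */
#if defined(__aarch64__)
static inline void enable_instr_counter(void)
{
	write_sysreg(0x8, pmevtyper0_el0);	/* event 0x8 = INST_RETIRED */
	set_pmcntenset(1 << 0);			/* enable event counter 0 */
	set_pmcr(get_pmcr() | PMU_PMCR_E);
	isb();
}
#endif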

/*
 * Execute a known number of guest instructions. Only even instruction counts
 * greater than or equal to 4 are supported by the in-line assembly code. The
 * control register (PMCR_EL0) is initialized with the provided value (allowing
 * for example for the cycle counter or event counters to be reset). At the end
 * of the exact instruction loop, zero is written to PMCR_EL0 to disable
 * counting, allowing the cycle counter or event counters to be read at the
 * leisure of the calling code.
 */
static void measure_instrs(int num, uint32_t pmcr)
{
	int loop = (num - 2) / 2;

	assert(num >= 4 && ((num - 2) % 2 == 0));
	precise_instrs_loop(loop, pmcr);
}
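
/*
 * Minimal usage sketch (hypothetical helper, mirroring the inner loop of
 * check_cpi() below): reset the cycle counter via PMCR.C, execute an exact
 * number of instructions, then read back the elapsed cycle count.
 */
static inline uint64_t cycles_for_instrs(int num)
{
	set_pmccntr(0);
	measure_instrs(num, get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E);
	return get_pmccntr();
}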

/*
 * Measure cycle counts for various known instruction counts. Ensure that the
 * cycle counter progresses (similar to check_cycles_increase() but with more
 * instructions and using reset and stop controls). If supplied a positive
 * CPI parameter, also strictly check that every measurement matches it.
 * Strict CPI checking is used to test -icount mode.
 */
static bool check_cpi(int cpi)
{
	uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E;

	/* init before event access, this test only cares about cycle count */
	set_pmcntenset(1 << PMU_CYCLE_IDX);
	set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */

	if (cpi > 0)
		printf("Checking for CPI=%d.\n", cpi);
	printf("instrs : cycles0 cycles1 ...\n");

	for (unsigned int i = 4; i < 300; i += 32) {
		uint64_t avg, sum = 0;

		printf("%4u:", i);
		for (int j = 0; j < NR_SAMPLES; j++) {
			uint64_t cycles;

			set_pmccntr(0);
			measure_instrs(i, pmcr);
			cycles = get_pmccntr();
			printf(" %4"PRIu64, cycles);

			if (!cycles) {
				printf("\ncycles not incrementing!\n");
				return false;
			} else if (cpi > 0 && cycles != i * cpi) {
				printf("\nunexpected cycle count received!\n");
				return false;
			} else if ((cycles >> 32) != 0) {
				/*
				 * The cycles taken by the loop above should
				 * fit in 32 bits easily. Check the upper
				 * 32 bits of the cycle counter to make sure
				 * there is no surprise.
				 */
				printf("\ncycle count bigger than 32 bits!\n");
				return false;
			}

			sum += cycles;
		}
		avg = sum / NR_SAMPLES;
		printf(" avg=%-4"PRIu64" %s=%-3"PRIu64"\n", avg,
		       (avg >= i) ? "cpi" : "ipc",
		       (avg >= i) ? avg / i : i / avg);
	}

	return true;
}

/* Return false if no PMU is found (PerfMon field is 0 or 0xf), otherwise true */
bool pmu_probe(void)
{
	pmu_version = get_pmu_version();
	report_info("PMU version: %u", pmu_version);
	return pmu_version != 0 && pmu_version != 0xf;
}

int main(int argc, char *argv[])
{
	int cpi = 0;

	if (argc > 1)
		cpi = atol(argv[1]);

	if (!pmu_probe()) {
		printf("No PMU found, test skipped...\n");
		return report_summary();
	}

	report_prefix_push("pmu");

	report("Control register", check_pmcr());
	report("Monotonically increasing cycle count", check_cycles_increase());
	report("Cycle/instruction ratio", check_cpi(cpi));

	return report_summary();
}