1 /*
2  * Freescale Embedded oprofile support, based on ppc64 oprofile support
3  * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
4  *
5  * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
6  *
7  * Author: Andy Fleming
8  * Maintainer: Kumar Gala <galak@kernel.crashing.org>
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version
13  * 2 of the License, or (at your option) any later version.
14  */
15 
16 #include <linux/oprofile.h>
17 #include <linux/init.h>
18 #include <linux/smp.h>
19 #include <asm/ptrace.h>
20 #include <asm/system.h>
21 #include <asm/processor.h>
22 #include <asm/cputable.h>
23 #include <asm/reg_fsl_emb.h>
24 #include <asm/page.h>
25 #include <asm/pmc.h>
26 #include <asm/oprofile_impl.h>
27 
/* Per-counter restart value written back after each overflow interrupt;
 * sized by OP_MAX_COUNTER, filled in by fsl_emb_reg_setup(). */
static unsigned long reset_value[OP_MAX_COUNTER];

/* Number of counters actually in use; set from fsl_emb_reg_setup()'s
 * num_ctrs argument. */
static int num_counters;
/* Nonzero between fsl_emb_start() and fsl_emb_stop(); gates sample
 * collection in the interrupt handler. */
static int oprofile_running;
32 
get_pmlca(int ctr)33 static inline u32 get_pmlca(int ctr)
34 {
35 	u32 pmlca;
36 
37 	switch (ctr) {
38 		case 0:
39 			pmlca = mfpmr(PMRN_PMLCA0);
40 			break;
41 		case 1:
42 			pmlca = mfpmr(PMRN_PMLCA1);
43 			break;
44 		case 2:
45 			pmlca = mfpmr(PMRN_PMLCA2);
46 			break;
47 		case 3:
48 			pmlca = mfpmr(PMRN_PMLCA3);
49 			break;
50 		default:
51 			panic("Bad ctr number\n");
52 	}
53 
54 	return pmlca;
55 }
56 
/* Write @pmlca into the local control register for counter @ctr (0-3). */
static inline void set_pmlca(int ctr, u32 pmlca)
{
	if (ctr == 0)
		mtpmr(PMRN_PMLCA0, pmlca);
	else if (ctr == 1)
		mtpmr(PMRN_PMLCA1, pmlca);
	else if (ctr == 2)
		mtpmr(PMRN_PMLCA2, pmlca);
	else if (ctr == 3)
		mtpmr(PMRN_PMLCA3, pmlca);
	else
		panic("Bad ctr number\n");
}
76 
ctr_read(unsigned int i)77 static inline unsigned int ctr_read(unsigned int i)
78 {
79 	switch(i) {
80 		case 0:
81 			return mfpmr(PMRN_PMC0);
82 		case 1:
83 			return mfpmr(PMRN_PMC1);
84 		case 2:
85 			return mfpmr(PMRN_PMC2);
86 		case 3:
87 			return mfpmr(PMRN_PMC3);
88 		default:
89 			return 0;
90 	}
91 }
92 
ctr_write(unsigned int i,unsigned int val)93 static inline void ctr_write(unsigned int i, unsigned int val)
94 {
95 	switch(i) {
96 		case 0:
97 			mtpmr(PMRN_PMC0, val);
98 			break;
99 		case 1:
100 			mtpmr(PMRN_PMC1, val);
101 			break;
102 		case 2:
103 			mtpmr(PMRN_PMC2, val);
104 			break;
105 		case 3:
106 			mtpmr(PMRN_PMC3, val);
107 			break;
108 		default:
109 			break;
110 	}
111 }
112 
113 
init_pmc_stop(int ctr)114 static void init_pmc_stop(int ctr)
115 {
116 	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
117 			PMLCA_FCM1 | PMLCA_FCM0);
118 	u32 pmlcb = 0;
119 
120 	switch (ctr) {
121 		case 0:
122 			mtpmr(PMRN_PMLCA0, pmlca);
123 			mtpmr(PMRN_PMLCB0, pmlcb);
124 			break;
125 		case 1:
126 			mtpmr(PMRN_PMLCA1, pmlca);
127 			mtpmr(PMRN_PMLCB1, pmlcb);
128 			break;
129 		case 2:
130 			mtpmr(PMRN_PMLCA2, pmlca);
131 			mtpmr(PMRN_PMLCB2, pmlcb);
132 			break;
133 		case 3:
134 			mtpmr(PMRN_PMLCA3, pmlca);
135 			mtpmr(PMRN_PMLCB3, pmlcb);
136 			break;
137 		default:
138 			panic("Bad ctr number!\n");
139 	}
140 }
141 
set_pmc_event(int ctr,int event)142 static void set_pmc_event(int ctr, int event)
143 {
144 	u32 pmlca;
145 
146 	pmlca = get_pmlca(ctr);
147 
148 	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
149 		((event << PMLCA_EVENT_SHIFT) &
150 		 PMLCA_EVENT_MASK);
151 
152 	set_pmlca(ctr, pmlca);
153 }
154 
set_pmc_user_kernel(int ctr,int user,int kernel)155 static void set_pmc_user_kernel(int ctr, int user, int kernel)
156 {
157 	u32 pmlca;
158 
159 	pmlca = get_pmlca(ctr);
160 
161 	if(user)
162 		pmlca &= ~PMLCA_FCU;
163 	else
164 		pmlca |= PMLCA_FCU;
165 
166 	if(kernel)
167 		pmlca &= ~PMLCA_FCS;
168 	else
169 		pmlca |= PMLCA_FCS;
170 
171 	set_pmlca(ctr, pmlca);
172 }
173 
set_pmc_marked(int ctr,int mark0,int mark1)174 static void set_pmc_marked(int ctr, int mark0, int mark1)
175 {
176 	u32 pmlca = get_pmlca(ctr);
177 
178 	if(mark0)
179 		pmlca &= ~PMLCA_FCM0;
180 	else
181 		pmlca |= PMLCA_FCM0;
182 
183 	if(mark1)
184 		pmlca &= ~PMLCA_FCM1;
185 	else
186 		pmlca |= PMLCA_FCM1;
187 
188 	set_pmlca(ctr, pmlca);
189 }
190 
pmc_start_ctr(int ctr,int enable)191 static void pmc_start_ctr(int ctr, int enable)
192 {
193 	u32 pmlca = get_pmlca(ctr);
194 
195 	pmlca &= ~PMLCA_FC;
196 
197 	if (enable)
198 		pmlca |= PMLCA_CE;
199 	else
200 		pmlca &= ~PMLCA_CE;
201 
202 	set_pmlca(ctr, pmlca);
203 }
204 
pmc_start_ctrs(int enable)205 static void pmc_start_ctrs(int enable)
206 {
207 	u32 pmgc0 = mfpmr(PMRN_PMGC0);
208 
209 	pmgc0 &= ~PMGC0_FAC;
210 	pmgc0 |= PMGC0_FCECE;
211 
212 	if (enable)
213 		pmgc0 |= PMGC0_PMIE;
214 	else
215 		pmgc0 &= ~PMGC0_PMIE;
216 
217 	mtpmr(PMRN_PMGC0, pmgc0);
218 }
219 
pmc_stop_ctrs(void)220 static void pmc_stop_ctrs(void)
221 {
222 	u32 pmgc0 = mfpmr(PMRN_PMGC0);
223 
224 	pmgc0 |= PMGC0_FAC;
225 
226 	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
227 
228 	mtpmr(PMRN_PMGC0, pmgc0);
229 }
230 
fsl_emb_cpu_setup(struct op_counter_config * ctr)231 static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
232 {
233 	int i;
234 
235 	/* freeze all counters */
236 	pmc_stop_ctrs();
237 
238 	for (i = 0;i < num_counters;i++) {
239 		init_pmc_stop(i);
240 
241 		set_pmc_event(i, ctr[i].event);
242 
243 		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
244 	}
245 
246 	return 0;
247 }
248 
fsl_emb_reg_setup(struct op_counter_config * ctr,struct op_system_config * sys,int num_ctrs)249 static int fsl_emb_reg_setup(struct op_counter_config *ctr,
250 			     struct op_system_config *sys,
251 			     int num_ctrs)
252 {
253 	int i;
254 
255 	num_counters = num_ctrs;
256 
257 	/* Our counters count up, and "count" refers to
258 	 * how much before the next interrupt, and we interrupt
259 	 * on overflow.  So we calculate the starting value
260 	 * which will give us "count" until overflow.
261 	 * Then we set the events on the enabled counters */
262 	for (i = 0; i < num_counters; ++i)
263 		reset_value[i] = 0x80000000UL - ctr[i].count;
264 
265 	return 0;
266 }
267 
fsl_emb_start(struct op_counter_config * ctr)268 static int fsl_emb_start(struct op_counter_config *ctr)
269 {
270 	int i;
271 
272 	mtmsr(mfmsr() | MSR_PMM);
273 
274 	for (i = 0; i < num_counters; ++i) {
275 		if (ctr[i].enabled) {
276 			ctr_write(i, reset_value[i]);
277 			/* Set each enabled counter to only
278 			 * count when the Mark bit is *not* set */
279 			set_pmc_marked(i, 1, 0);
280 			pmc_start_ctr(i, 1);
281 		} else {
282 			ctr_write(i, 0);
283 
284 			/* Set the ctr to be stopped */
285 			pmc_start_ctr(i, 0);
286 		}
287 	}
288 
289 	/* Clear the freeze bit, and enable the interrupt.
290 	 * The counters won't actually start until the rfi clears
291 	 * the PMM bit */
292 	pmc_start_ctrs(1);
293 
294 	oprofile_running = 1;
295 
296 	pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
297 			mfpmr(PMRN_PMGC0));
298 
299 	return 0;
300 }
301 
fsl_emb_stop(void)302 static void fsl_emb_stop(void)
303 {
304 	/* freeze counters */
305 	pmc_stop_ctrs();
306 
307 	oprofile_running = 0;
308 
309 	pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
310 			mfpmr(PMRN_PMGC0));
311 
312 	mb();
313 }
314 
315 
/* Performance monitor interrupt handler: record a sample for every
 * counter that overflowed, reload it, and re-arm the counters.
 * NOTE: the statement order here is deliberate — see the comment at the
 * bottom about MSR[PMM] and hypervisors before reordering anything. */
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				    struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	/* Sample the interrupted instruction address. */
	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		/* Counters start below 0x80000000 (see reset_value) and
		 * count up, so a set sign bit — negative when read into
		 * an int — means this counter overflowed. */
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				/* Not sampling this counter: just park it. */
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.  The
	 * counters won't actually start until the rfi clears the PMM
	 * bit.  The PMM bit should not be set until after the interrupt
	 * is cleared to avoid it getting lost in some hypervisor
	 * environments.
	 */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}
349 
/* oprofile model descriptor for Freescale Embedded (e500-family) cores;
 * referenced by the powerpc oprofile core to drive this PMU. */
struct op_powerpc_model op_model_fsl_emb = {
	.reg_setup		= fsl_emb_reg_setup,
	.cpu_setup		= fsl_emb_cpu_setup,
	.start			= fsl_emb_start,
	.stop			= fsl_emb_stop,
	.handle_interrupt	= fsl_emb_handle_interrupt,
};
357