1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for replacing ftrace calls with jumps.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  *
7  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
8  *
9  * Added function graph tracer code, taken from x86 that was written
10  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11  *
12  */
13 
14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
15 
16 #include <linux/spinlock.h>
17 #include <linux/hardirq.h>
18 #include <linux/uaccess.h>
19 #include <linux/module.h>
20 #include <linux/ftrace.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/list.h>
24 
25 #include <asm/cacheflush.h>
26 #include <asm/text-patching.h>
27 #include <asm/ftrace.h>
28 #include <asm/syscall.h>
29 #include <asm/inst.h>
30 
31 /*
32  * We generally only have a single long_branch tramp and at most 2 or 3 plt
33  * tramps generated. But, we don't use the plt tramps currently. We also allot
34  * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
35  * tramps in total. Set aside 8 just to be sure.
36  */
37 #define	NUM_FTRACE_TRAMPS	8
38 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
39 
/*
 * Convert the recorded mcount call-site address into the address that
 * ftrace should operate on. powerpc needs no adjustment, so the address
 * is handed back unchanged.
 */
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
44 
/*
 * Build the branch instruction to be patched into a call-site: a 'b'
 * (or 'bl' when @link is set) from @ip to the function entry of @addr.
 */
static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	/* Resolve the (ABI-dependent) entry point of the target function */
	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);

	return op;
}
57 
/*
 * Replace the instruction at @ip with @new, but only after verifying
 * that the instruction currently there equals @old.
 *
 * Returns 0 on success, -EFAULT if the text cannot be read, -EINVAL if
 * the current instruction does not match @old, or the error returned by
 * patch_instruction().
 */
static inline int
ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	ppc_inst_t replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (!ppc_inst_equal(replaced, old)) {
		pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
		       ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
		return -EINVAL;
	}

	/* replace the text with the new text */
	return patch_instruction((u32 *)ip, new);
}
85 
86 /*
87  * Helper functions that are the same for both PPC64 and PPC32.
88  */
/*
 * Can the call-site at @ip reach the entry point of @addr with a plain
 * 24-bit relative branch, i.e. without going through a trampoline?
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	unsigned long entry = ppc_function_entry((void *)addr);

	return is_offset_in_branch_range(entry - ip);
}
95 
/* Does @op encode a 'bl' (branch with link) instruction? */
static int is_bl_op(ppc_inst_t op)
{
	u32 insn = ppc_inst_val(op);

	/* Mask off the LI displacement field and compare the opcode bits */
	return (insn & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
100 
/* Does @op encode a plain 'b' (branch, no link) instruction? */
static int is_b_op(ppc_inst_t op)
{
	u32 insn = ppc_inst_val(op);

	/* Mask off the LI displacement field and compare the opcode bits */
	return (insn & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
}
105 
/*
 * Decode the absolute target address of the b/bl instruction @op
 * located at @ip, by sign-extending its 26-bit relative displacement.
 */
static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
	int offset;

	offset = PPC_LI(ppc_inst_val(op));
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
117 
118 #ifdef CONFIG_MODULES
/*
 * Find the module whose text contains the patch site @rec->ip.
 * Returns NULL (after logging an error) if no module covers it.
 */
static struct module *ftrace_lookup_module(struct dyn_ftrace *rec)
{
	struct module *mod;

	/* RCU read lock protects the module list during the lookup */
	scoped_guard(rcu)
		mod = __module_text_address(rec->ip);
	if (!mod)
		pr_err("No module loaded at addr=%lx\n", rec->ip);

	return mod;
}
130 
/*
 * Disable the module call-site at @rec->ip. The site must currently be
 * a 'bl' to a module trampoline whose target matches @addr; the
 * surrounding instructions are sanity checked according to the ABI
 * before the branch is replaced with a nop (or a 'b +8' on ELFv1, to
 * skip the TOC-restore load after the call).
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	if (!mod) {
		mod = ftrace_lookup_module(rec);
		if (!mod)
			return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Sanity check the instruction preceding the 'bl' */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
		    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
			pr_err("Unexpected instruction %08lx around bl _mcount\n",
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		/*
		 * Check what is in the next instruction. We can see ld r2,40(r1), but
		 * on first pass after boot we will see mflr r0.
		 */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
			pr_err("Fetching op failed.\n");
			return -EFAULT;
		}

		if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
			pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	}

	/*
	 * When using -mprofile-kernel or PPC32 there is no load to jump over.
	 *
	 * Otherwise our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
		pop = ppc_inst(PPC_RAW_NOP());
	else
		pop = ppc_inst(PPC_RAW_BRANCH(8));	/* b +8 */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
233 #else
/* Without module support there are no module call-sites to patch. */
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
238 #endif /* CONFIG_MODULES */
239 
find_ftrace_tramp(unsigned long ip)240 static unsigned long find_ftrace_tramp(unsigned long ip)
241 {
242 	int i;
243 
244 	/*
245 	 * We have the compiler generated long_branch tramps at the end
246 	 * and we prefer those
247 	 */
248 	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
249 		if (!ftrace_tramps[i])
250 			continue;
251 		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
252 			return ftrace_tramps[i];
253 
254 	return 0;
255 }
256 
add_ftrace_tramp(unsigned long tramp)257 static int add_ftrace_tramp(unsigned long tramp)
258 {
259 	int i;
260 
261 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
262 		if (!ftrace_tramps[i]) {
263 			ftrace_tramps[i] = tramp;
264 			return 0;
265 		}
266 
267 	return -1;
268 }
269 
/*
 * If this is a compiler generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we re-write the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 *
 * Returns 0 on success (or if the trampoline was already known), -1 if
 * the trampoline cannot be used.
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (ftrace_tramps[i] == tramp)
			return 0;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	/* Only trampolines that branch to _mcount qualify */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		ptr = ppc_global_function_entry((void *)ftrace_caller);

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
325 
/*
 * Disable a kernel (non-module) call-site at @rec->ip by replacing its
 * 'bl <tramp>' with a nop. If the branch target is a compiler generated
 * long_branch trampoline, claim it for ftrace first; otherwise make
 * sure some other recorded trampoline is still reachable from here so
 * the site can later be re-enabled.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
364 
/*
 * ftrace hook: disable the call-site at @rec->ip that currently calls
 * @addr, dispatching on whether the target is branch-reachable, in
 * kernel text, or in a module.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_nop_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		return -EINVAL;
	}

	return __ftrace_make_nop(mod, rec, addr);
}
389 
390 #ifdef CONFIG_MODULES
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 *
 * @ip is currently unused; @op1 is only inspected in the two-instruction
 * (non DYNAMIC_FTRACE_WITH_REGS) sequence.
 */
static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
	else
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
}
404 
405 static int
__ftrace_make_call(struct dyn_ftrace * rec,unsigned long addr)406 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
407 {
408 	ppc_inst_t op[2];
409 	void *ip = (void *)rec->ip;
410 	unsigned long entry, ptr, tramp;
411 	struct module *mod = ftrace_lookup_module(rec);
412 
413 	if (!mod)
414 		return -EINVAL;
415 
416 	/* read where this goes */
417 	if (copy_inst_from_kernel_nofault(op, ip))
418 		return -EFAULT;
419 
420 	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
421 	    copy_inst_from_kernel_nofault(op + 1, ip + 4))
422 		return -EFAULT;
423 
424 	if (!expected_nop_sequence(ip, op[0], op[1])) {
425 		pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
426 		       ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
427 		return -EINVAL;
428 	}
429 
430 	/* If we never set up ftrace trampoline(s), then bail */
431 	if (!mod->arch.tramp ||
432 	    (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
433 		pr_err("No ftrace trampoline\n");
434 		return -EINVAL;
435 	}
436 
437 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
438 		tramp = mod->arch.tramp_regs;
439 	else
440 		tramp = mod->arch.tramp;
441 
442 	if (module_trampoline_target(mod, tramp, &ptr)) {
443 		pr_err("Failed to get trampoline target\n");
444 		return -EFAULT;
445 	}
446 
447 	pr_devel("trampoline target %lx", ptr);
448 
449 	entry = ppc_global_function_entry((void *)addr);
450 	/* This should match what was called */
451 	if (ptr != entry) {
452 		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
453 		return -EINVAL;
454 	}
455 
456 	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
457 		pr_err("REL24 out of range!\n");
458 		return -EINVAL;
459 	}
460 
461 	return 0;
462 }
463 #else
/* Without module support there are no module call-sites to enable. */
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
468 #endif /* CONFIG_MODULES */
469 
/*
 * Enable a kernel (non-module) call-site at @rec->ip: check that @addr
 * is one of the known ftrace callers and that the site currently holds
 * a nop, then branch-and-link to a reachable recorded trampoline.
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);

	if (ptr != entry) {
		pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
		return -EINVAL;
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %08lx\n",
		       ip, ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
513 
/*
 * ftrace hook: enable the call-site at @rec->ip so it calls @addr,
 * directly when branch-reachable, otherwise via a trampoline.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_call_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}
538 
539 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
540 #ifdef CONFIG_MODULES
/*
 * Re-point an enabled module call-site at @rec->ip from @old_addr to
 * @addr (e.g. when switching between the plain and regs-saving ftrace
 * callers). The current 'bl' must resolve to @old_addr, either directly
 * or via a module trampoline, before the site is re-patched.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = ftrace_lookup_module(rec);

	if (!mod)
		return -EINVAL;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* Out of range: go through the module trampoline instead */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
630 #else
/* Without module support there are no module call-sites to re-point. */
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	return 0;
}
635 #endif
636 
/*
 * ftrace hook: change the call-site at @rec->ip from calling @old_addr
 * to calling @addr, dispatching on reachability and location.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
}
666 #endif
667 
/*
 * ftrace hook: re-point the 'bl' at ftrace_call (and, with REGS
 * support, at ftrace_regs_call) inside the ftrace caller(s) to @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}
688 
/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 *
 * @command: FTRACE_* command bitmask forwarded unchanged.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
697 
698 #ifdef CONFIG_PPC64
699 #define PACATOC offsetof(struct paca_struct, kernel_toc)
700 
701 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
702 
ftrace_free_init_tramp(void)703 void ftrace_free_init_tramp(void)
704 {
705 	int i;
706 
707 	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
708 		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
709 			ftrace_tramps[i] = 0;
710 			return;
711 		}
712 }
713 
/*
 * Fill the trampolines placed after .text and .init.text with a stub
 * that computes the address of ftrace_[regs_]caller relative to the
 * kernel TOC (read from the paca) and jumps there via CTR, then record
 * both trampolines in the trampoline table.
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		PPC_RAW_LD(_R12, _R13, PACATOC),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
	unsigned long addr;
	long reladdr;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		addr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		addr = ppc_global_function_entry((void *)ftrace_caller);

	reladdr = addr - kernel_toc_addr();

	/* The addis/addi pair can only reach +/- 2GB from the TOC */
	if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* Patch the high and low halves of the offset into the stub */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
750 #endif
751 
752 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
753 
754 extern void ftrace_graph_call(void);
755 extern void ftrace_graph_stub(void);
756 
/*
 * Flip the branch at ftrace_graph_call between ftrace_graph_caller and
 * the fall-through stub, enabling or disabling graph tracing.
 */
static int ftrace_modify_ftrace_graph_caller(bool enable)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	ppc_inst_t old, new;

	/* With DYNAMIC_FTRACE_WITH_ARGS the graph hook is ftrace_graph_func() */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return 0;

	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
	new = ftrace_call_replace(ip, enable ? addr : stub, 0);

	return ftrace_modify_code(ip, old, new);
}
772 
/* ftrace hook: turn on the branch to ftrace_graph_caller */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
777 
/* ftrace hook: turn off the branch to ftrace_graph_caller */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
782 
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 *
 * When graph tracing is dead or paused, @parent is returned unchanged
 * so the traced function returns normally.
 */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp,
			struct ftrace_regs *fregs)
{
	unsigned long return_hooker;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* On success, divert the function's return to return_to_handler */
	if (!function_graph_enter_regs(parent, ip, 0, (unsigned long *)sp, fregs))
		parent = return_hooker;

out:
	return parent;
}
807 
808 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * Graph-tracer entry used with DYNAMIC_FTRACE_WITH_ARGS: redirect the
 * saved link register so the traced function returns via the handler.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip,
						arch_ftrace_regs(fregs)->regs.gpr[1], fregs);
}
815 #else
/* Entry point for the non-ARGS graph-caller path; no regs available */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp, NULL);
}
821 #endif
822 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
823 
824 #ifdef CONFIG_PPC64_ELF_ABI_V1
/*
 * On the ELFv1 ABI text symbols carry a leading dot (".foo"); strip it
 * when the user supplied search pattern has none, so patterns written
 * without the dot still match.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	return (str[0] == '.' && search[0] != '.') ? str + 1 : str;
}
832 #endif /* CONFIG_PPC64_ELF_ABI_V1 */
833