/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * The parameter save area on the stack is used to store arguments being
 * passed to a callee function and is located at a fixed offset from the
 * stack pointer.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif
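
/*
 * Illustrative sketch (not part of the original code): with gpr[1] holding
 * the stack pointer, a debugger or probe could read the n-th word-sized
 * argument that spilled to the parameter save area roughly like this,
 * using a hypothetical helper and the frame layout implied by the offsets
 * above:
 *
 *	unsigned long nth_stack_arg(struct pt_regs *regs, int n)
 *	{
 *		unsigned long sp = regs->gpr[1];
 *
 *		return *(unsigned long *)(sp + PARAMETER_SAVE_AREA_OFFSET +
 *					  n * sizeof(unsigned long));
 *	}
 *
 * (A user-space stack would need copy_from_user() rather than a direct
 * dereference; this only shows what the offset refers to.)
 */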

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
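
/*
 * Example (illustrative, not part of the original code): kprobes-style
 * users can combine the lookup above with a pt_regs pointer to fetch a
 * register by name:
 *
 *	int off = regs_query_register_offset("gpr3");
 *
 *	if (off >= 0)
 *		val = *(unsigned long *)((char *)regs + off);
 */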

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/*
 * Signals sent when the child dies are not yet caught here; that is
 * handled in exit.c and in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif
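
/*
 * On classic (non-BookE) parts MSR_SE enables single-step traps and MSR_BE
 * enables branch-trace traps, which is why those are the only MSR bits gdb
 * may flip.  On CONFIG_PPC_ADV_DEBUG_REGS (BookE-style) parts, stepping is
 * driven through DBCR0 and MSR_DE instead (see user_enable_single_step()
 * below), so no MSR bits are user-changeable here.
 */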

/*
 * Max register writable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif

static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

/*
 * We prevent mucking around with the reserved area of trap
 * which is used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return get_user_msr(task);

	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
		return ((unsigned long *)task->thread.regs)[regno];

	return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}
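
/*
 * Illustrative example (not in the original code): the PTRACE_PEEKUSR and
 * PTRACE_POKEUSR handlers in arch_ptrace() below use these accessors for
 * everything before the floating-point block, roughly:
 *
 *	tmp = ptrace_get_reg(child, PT_R3);		(read GPR 3)
 *	ret = ptrace_put_reg(child, PT_R3, tmp);	(write it back)
 *
 * PT_MSR writes are filtered through set_user_msr() and PT_TRAP writes
 * through set_user_trap(), as implemented above.
 */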

static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);

#else
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpr, 0, -1);
#endif
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
	return 0;
#else
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpr, 0, -1);
#endif
}

#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
 */
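
/*
 * A minimal sketch of that layout as a C struct (for illustration only;
 * the regset code below works directly on thread_struct and does not use
 * this type):
 *
 *	struct ppc_vmx_regset_layout {
 *		vector128	vr[32];		quadwords 0-31
 *		vector128	vscr;		value in the last 32-bit word
 *		vector128	vrsave;		value in the first 32-bit word
 *	};
 */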

static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}

static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}

static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr, 0, 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * Currently, to set and get all the VSX state, you need to make the fp and
 * VMX calls as well.  This only gets/sets the lower 32 128-bit VSX registers.
 */

static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}

static int vsr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	double buf[32];
	int ret, i;

	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}

static int vsr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	double buf[32];
	int ret, i;

	flush_vsx_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	for (i = 0; i < 32 ; i++)
		target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *   u32 evr[32];
 *   u64 acc;
 *   u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
#endif /* CONFIG_SPE */


/*
 * These are our native regset flavors.
 */
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
};

static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};

static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

#ifdef CONFIG_PPC64
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	const unsigned long *regs = &target->thread.regs->gpr[0];
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;
	int i;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned long *regs = &target->thread.regs->gpr[0];
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};

static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif	/* CONFIG_PPC64 */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}


void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.dbcr0 &= ~DBCR0_BT;
		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_enable_block_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.dbcr0 &= ~DBCR0_IC;
		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_SE;
		regs->msr |= MSR_BE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.dbcr0 &= ~DBCR0_IC;
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
					task->thread.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			       unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* For ppc64 we support one DABR and no IABRs at the moment.
	 *  For embedded processors we support one DAC and no IACs at the
	 *  moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 *  It was assumed, on previous implementations, that 3 bits were
	 *  passed together with the data address, fitting the design of the
	 *  DABR register, as follows:
	 *
	 *  bit 0: Read flag
	 *  bit 1: Write flag
	 *  bit 2: Breakpoint translation
	 *
	 *  Thus, we use them the same way here.
	 */
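
	/*
	 * For illustration (not part of the original code), a write
	 * watchpoint at 'addr' set via PTRACE_SET_DEBUGREG would therefore
	 * pass something like:
	 *
	 *	data = (addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE;
	 *
	 * which is also how ppc_set_hwdebug() composes the DABR value below.
	 */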

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & DABR_TRANSLATION))
		return -EIO;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	if (ptrace_get_breakpoints(task) < 0)
		return -ESRCH;

	bp = thread->ptrace_bps[0];
	if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		ptrace_put_breakpoints(task);
		return 0;
	}
	if (bp) {
		attr = bp->attr;
		attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
		arch_bp_generic_fields(data &
					(DABR_DATA_WRITE | DABR_DATA_READ),
							&attr.bp_type);
		ret = modify_user_hw_breakpoint(bp, &attr);
		if (ret) {
			ptrace_put_breakpoints(task);
			return ret;
		}
		thread->ptrace_bps[0] = bp;
		ptrace_put_breakpoints(task);
		thread->dabr = data;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
	arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
								&attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		ptrace_put_breakpoints(task);
		return PTR_ERR(bp);
	}

	ptrace_put_breakpoints(task);

#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* Move contents to the DABR register */
	task->thread.dabr = data;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 *  address, but we will assume only the mode bits will be passed,
	 *  so as not to cause alignment restrictions for DAC-based processors.
	 */

	/* DACs hold the whole address without any mode flags */
	task->thread.dac1 = data & ~0x3UL;

	if (task->thread.dac1 == 0) {
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
					task->thread.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_intruction_bp(struct task_struct *child,
			      struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);

	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC registers */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.iac1 = bp_info->addr;
			child->thread.iac2 = bp_info->addr2;
			child->thread.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.iac3 = bp_info->addr;
			child->thread.iac4 = bp_info->addr2;
			child->thread.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.  If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.iac1 = bp_info->addr;
				child->thread.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.iac2 = bp_info->addr;
			child->thread.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.iac3 = bp_info->addr;
			child->thread.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.iac4 = bp_info->addr;
			child->thread.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	child->thread.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}

static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.iac1 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.iac2 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.iac3 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.iac4 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}

static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	child->thread.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}

static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.dac2 = 0;
			child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.dvc2 = 0;
#endif
		child->thread.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	if (child->thread.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.dac1 = bp_info->addr;
	child->thread.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.dbcr2  |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.dbcr2  |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.dbcr2  |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */

static long ppc_set_hwdebug(struct task_struct *child,
		     struct ppc_hw_breakpoint *bp_info)
{
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dabr;
#endif

	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_intruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if (child->thread.dabr)
		return -ENOSPC;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	dabr = (unsigned long)bp_info->addr & ~7UL;
	dabr |= DABR_TRANSLATION;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		dabr |= DABR_DATA_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		dabr |= DABR_DATA_WRITE;

	child->thread.dabr = dabr;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
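
/*
 * Note on the handles returned above and consumed below: on
 * CONFIG_PPC_ADV_DEBUG_REGS, instruction breakpoints return slots 1-4 and
 * DAC watchpoints return slot + 4 (a DAC range returns 5), which is why
 * ppc_del_hwdebug() treats data <= 4 as an IAC slot and anything larger as
 * a DAC slot.  Without the advanced debug registers there is a single DABR
 * and the only valid handle is 1.
 */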

static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
					child->thread.dbcr1)) {
			child->thread.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	if (data != 1)
		return -EINVAL;
	if (child->thread.dabr == 0)
		return -ENOENT;

	child->thread.dabr = 0;

	return 0;
#endif
}

/*
 * Here are the old "legacy" powerpc-specific getregs/setregs ptrace calls;
 * we mark them as obsolete now, and they will be removed in a future version.
 */
static long arch_ptrace_old(struct task_struct *child, long request,
			    unsigned long addr, unsigned long data)
{
	void __user *datavp = (void __user *) data;

	switch (request) {
	case PPC_PTRACE_GETREGS:	/* Get GPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR, 0, 32 * sizeof(long),
					   datavp);

	case PPC_PTRACE_SETREGS:	/* Set GPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR, 0, 32 * sizeof(long),
					     datavp);

	case PPC_PTRACE_GETFPREGS:	/* Get FPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR, 0, 32 * sizeof(double),
					   datavp);

	case PPC_PTRACE_SETFPREGS:	/* Set FPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR, 0, 32 * sizeof(double),
					     datavp);
	}

	return -EPERM;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			tmp = ptrace_get_reg(child, (int) index);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				tmp = ((unsigned long *)child->thread.fpr)
					[fpidx * TS_FPRWIDTH];
			else
				tmp = child->thread.fpscr.val;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				((unsigned long *)child->thread.fpr)
					[fpidx * TS_FPRWIDTH] = data;
			else
				child->thread.fpscr.val = data;
			ret = 0;
		}
		break;
	}

	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		dbginfo.num_data_bps = 1;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
		dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (!access_ok(VERIFY_WRITE, datavp,
			       sizeof(struct ppc_debug_info)))
			return -EFAULT;
		ret = __copy_to_user(datavp, &dbginfo,
				     sizeof(struct ppc_debug_info)) ?
		      -EFAULT : 0;
		break;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (!access_ok(VERIFY_READ, datavp,
			       sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		ret = __copy_from_user(&bp_info, datavp,
				       sizeof(struct ppc_hw_breakpoint)) ?
		      -EFAULT : 0;
		if (!ret)
			ret = ppc_set_hwdebug(child, &bp_info);
		break;
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, addr, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
		ret = -EINVAL;
		/* We only support one DABR and no IABRs at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.dac1, datalp);
#else
		ret = put_user(child->thread.dabr, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	/* Old reverse args ptrace calls */
	case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
	case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
	case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
	case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
		ret = arch_ptrace_old(child, request, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->gpr[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->gpr[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(AUDIT_ARCH_PPC64,
				    regs->gpr[0],
				    regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		audit_syscall_entry(AUDIT_ARCH_PPC,
				    regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	return ret ?: regs->gpr[0];
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->result);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}