1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Ptrace user space interface.
4 *
5 * Copyright IBM Corp. 1999, 2010
6 * Author(s): Denis Joseph Barrow
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/cpufeature.h>
14 #include <linux/mm.h>
15 #include <linux/smp.h>
16 #include <linux/errno.h>
17 #include <linux/ptrace.h>
18 #include <linux/user.h>
19 #include <linux/security.h>
20 #include <linux/audit.h>
21 #include <linux/signal.h>
22 #include <linux/elf.h>
23 #include <linux/regset.h>
24 #include <linux/seccomp.h>
25 #include <linux/compat.h>
26 #include <trace/syscall.h>
27 #include <asm/guarded_storage.h>
28 #include <asm/access-regs.h>
29 #include <asm/page.h>
30 #include <linux/uaccess.h>
31 #include <asm/unistd.h>
32 #include <asm/runtime_instr.h>
33 #include <asm/facility.h>
34 #include <asm/machine.h>
35 #include <asm/ptrace.h>
36 #include <asm/rwonce.h>
37 #include <asm/fpu.h>
38
39 #include "entry.h"
40
41 #ifdef CONFIG_COMPAT
42 #include "compat_ptrace.h"
43 #endif
44
/*
 * Load the PER (program event recording), transactional execution and
 * guarded storage related control register contents for @task into the
 * current CPU, and maintain the PER bit in the task's PSW accordingly.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;
	union {
		struct ctlreg regs[3];		/* cr9..cr11 as an array for __local_ctl_* */
		struct {
			struct ctlreg control;	/* cr9: PER control / event mask */
			struct ctlreg start;	/* cr10: PER starting address */
			struct ctlreg end;	/* cr11: PER ending address */
		};
	} old, new;

	local_ctl_store(0, &cr0_old.reg);
	local_ctl_store(2, &cr2_old.reg);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (machine_has_tx()) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (cpu_has_gs()) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		local_ctl_load(0, &cr0_new.reg);
	if (cr2_changed)
		local_ctl_load(2, &cr2_new.reg);
	/* Copy user specified PER registers */
	new.control.val = thread->per_user.control;
	new.start.val = thread->per_user.start;
	new.end.val = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control.val |= PER_EVENT_BRANCH;
		else
			new.control.val |= PER_EVENT_IFETCH;
		new.control.val |= PER_CONTROL_SUSPENSION;
		new.control.val |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control.val |= PER_EVENT_IFETCH;
		/* Single stepping monitors the whole address range. */
		new.start.val = 0;
		new.end.val = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control.val & PER_EVENT_MASK)) {
		/* No PER event requested: disable PER and skip cr9-11 load. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__local_ctl_store(9, 11, old.regs);
	/* Only reload cr9-11 if the effective PER set actually changed. */
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__local_ctl_load(9, 11, new.regs);
}
123
/* Request instruction-by-instruction stepping for @task (generic ptrace hook). */
void user_enable_single_step(struct task_struct *task)
{
	/* Block stepping and single stepping are mutually exclusive. */
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
129
/* Cancel any stepping mode for @task (generic ptrace hook). */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
135
/*
 * Request branch stepping for @task: TIF_SINGLE_STEP plus TIF_BLOCK_STEP
 * makes update_cr_regs() select PER_EVENT_BRANCH instead of IFETCH.
 */
void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
141
142 /*
143 * Called by kernel/ptrace.c when detaching..
144 *
145 * Clear all debugging related fields.
146 */
void ptrace_disable(struct task_struct *task)
{
	/* Drop the user specified PER set and any recorded PER event. */
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
	task->thread.per_flags = 0;
}
155
156 #define __ADDR_MASK 7
157
/*
 * Read one word from the pseudo per_info area of struct user.
 * While single stepping is in effect the kernel reports the PER set
 * it installed itself (IFETCH over the whole address range) instead
 * of the user specified one.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	int stepping = test_thread_flag(TIF_SINGLE_STEP);

	if (addr == offsetof(struct per_struct_kernel, cr9))
		/* Control bits of the active per set. */
		return stepping ? PER_EVENT_IFETCH : child->thread.per_user.control;
	if (addr == offsetof(struct per_struct_kernel, cr10))
		/* Start address of the active per set. */
		return stepping ? 0 : child->thread.per_user.start;
	if (addr == offsetof(struct per_struct_kernel, cr11))
		/* End address of the active per set. */
		return stepping ? -1UL : child->thread.per_user.end;
	if (addr == offsetof(struct per_struct_kernel, bits))
		/* Single-step bit. */
		return stepping ? (1UL << (BITS_PER_LONG - 1)) : 0;
	if (addr == offsetof(struct per_struct_kernel, starting_addr))
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	if (addr == offsetof(struct per_struct_kernel, ending_addr))
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	if (addr == offsetof(struct per_struct_kernel, perc_atmid))
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	if (addr == offsetof(struct per_struct_kernel, address))
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	if (addr == offsetof(struct per_struct_kernel, access_id))
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	/* All other offsets inside per_info read as zero. */
	return 0;
}
196
197 /*
198 * Read the word at offset addr from the user area of a process. The
199 * trouble here is that the information is littered over different
200 * locations. The process registers are found on the kernel stack,
201 * the floating point stuff and the trace settings are stored in
202 * the task structure. In addition the different structures in
203 * struct user contain pad bytes that should be read as zeroes.
204 * Lovely...
205 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	addr_t offset, tmp;

	if (addr < offsetof(struct user, regs.acrs)) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == offsetof(struct user, regs.psw.mask)) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - offsetof(struct user, regs.acrs);
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == offsetof(struct user, regs.acrs[15]))
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < offsetof(struct user, regs.fp_regs)) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.ufpu.fpc;
		/* fpc is 32 bit; present it in the upper half of the word. */
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are in the child->thread.ufpu.vxrs array
		 */
		/* each fpr is the high half of a 16 byte vector register */
		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
		tmp = *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= offsetof(struct user, regs.per_info);
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
274
275 static int
peek_user(struct task_struct * child,addr_t addr,addr_t data)276 peek_user(struct task_struct *child, addr_t addr, addr_t data)
277 {
278 addr_t tmp, mask;
279
280 /*
281 * Stupid gdb peeks/pokes the access registers in 64 bit with
282 * an alignment of 4. Programmers from hell...
283 */
284 mask = __ADDR_MASK;
285 if (addr >= offsetof(struct user, regs.acrs) &&
286 addr < offsetof(struct user, regs.orig_gpr2))
287 mask = 3;
288 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
289 return -EIO;
290
291 tmp = __peek_user(child, addr);
292 return put_user(tmp, (addr_t __user *) data);
293 }
294
/*
 * Write one word into the pseudo per_info area of struct user.
 * Only three fields are writable by the debugger:
 *   1) cr9:           new user specified PER event mask
 *   2) starting_addr: new start address for the PER range
 *   3) ending_addr:   new end address for the PER range
 * The user specified set takes effect only while single stepping is
 * not active; writes to all other offsets are silently ignored.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	if (addr == offsetof(struct per_struct_kernel, cr9)) {
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
		return;
	}
	if (addr == offsetof(struct per_struct_kernel, starting_addr)) {
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
		return;
	}
	if (addr == offsetof(struct per_struct_kernel, ending_addr))
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
321
322 /*
323 * Write a word to the user area of a process at location addr. This
324 * operation does have an additional problem compared to peek_user.
325 * Stores to the program status word and on the floating point
326 * control register needs to get checked for validity.
327 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t offset;

	if (addr < offsetof(struct user, regs.acrs)) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == offsetof(struct user, regs.psw.mask)) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}

		if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
		    addr == offsetof(struct user, regs.gprs[2])) {
			/*
			 * Writing gpr2 while inside a syscall also updates
			 * the interruption code so that a rewritten return
			 * value is reported consistently.
			 */
			regs->int_code = 0x20000 | (data & 0xffff);
		}
		/* (was corrupted to "®s->psw" by an HTML entity mangle) */
		*(addr_t *)((addr_t) &regs->psw + addr) = data;
	} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - offsetof(struct user, regs.acrs);
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == offsetof(struct user, regs.acrs[15]))
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < offsetof(struct user, regs.fp_regs)) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		/* the 32 bit fpc value lives in the upper half of the word */
		if ((unsigned int)data != 0)
			return -EINVAL;
		child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are in the child->thread.ufpu.vxrs array
		 */
		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
		*(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = data;
	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= offsetof(struct user, regs.per_info);
		__poke_user_per(child, addr, data);

	}

	return 0;
}
414
/*
 * PTRACE_POKEUSR: write one word into the user area of @child after
 * checking alignment and range.  Returns 0 or a negative error code.
 */
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask = __ADDR_MASK;

	/*
	 * gdb writes the access registers with an alignment of only
	 * 4 bytes even on 64 bit, so relax the check inside the acrs
	 * area. Programmers from hell indeed...
	 */
	if (addr >= offsetof(struct user, regs.acrs) &&
	    addr < offsetof(struct user, regs.orig_gpr2))
		mask = 3;
	if ((addr & mask) != 0 || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
432
/*
 * Architecture specific ptrace requests; everything not handled here
 * is forwarded to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		/* addr points to a ptrace_area describing the transfer. */
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;	/* offset into the USER area */
		data = parea.process_addr;	/* buffer in the tracer */
		copied = 0;
		/* Transfer word by word through peek_user/poke_user. */
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		/* report the address of the last breaking event */
		return put_user(child->thread.last_break, (unsigned long __user *)data);
	case PTRACE_ENABLE_TE:
		if (!machine_has_tx())
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!machine_has_tx())
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		/* random TX aborts make no sense with TX disabled */
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!machine_has_tx() || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		/* data selects the abort mode: off / at tend / anywhere */
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		return ptrace_request(child, request, addr, data);
	}
}
509
510 #ifdef CONFIG_COMPAT
511 /*
512 * Now the fun part starts... a 31 bit program running in the
513 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
514 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
515 * to handle, the difference to the 64 bit versions of the requests
516 * is that the access is done in multiples of 4 byte instead of
517 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
518 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
519 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
520 * is a 31 bit program too, the content of struct user can be
521 * emulated. A 31 bit program peeking into the struct user of
522 * a 64 bit program is a no-no.
523 */
524
525 /*
526 * Same as peek_user_per but for a 31 bit program.
527 */
/*
 * NOTE: in the ternaries below the (__u32) cast binds to the
 * test_thread_flag() result, not to the selected value - a long
 * standing precedence quirk that is harmless because the flag test
 * yields 0 or 1 either way.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	if (addr == offsetof(struct compat_per_struct_kernel, cr9))
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == offsetof(struct compat_per_struct_kernel, cr10))
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == offsetof(struct compat_per_struct_kernel, cr11))
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == offsetof(struct compat_per_struct_kernel, bits))
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid))
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == offsetof(struct compat_per_struct_kernel, address))
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == offsetof(struct compat_per_struct_kernel, access_id))
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
564
565 /*
566 * Same as peek_user but for a 31 bit program.
567 */
__peek_user_compat(struct task_struct * child,addr_t addr)568 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
569 {
570 addr_t offset;
571 __u32 tmp;
572
573 if (addr < offsetof(struct compat_user, regs.acrs)) {
574 struct pt_regs *regs = task_pt_regs(child);
575 /*
576 * psw and gprs are stored on the stack
577 */
578 if (addr == offsetof(struct compat_user, regs.psw.mask)) {
579 /* Fake a 31 bit psw mask. */
580 tmp = (__u32)(regs->psw.mask >> 32);
581 tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
582 tmp |= PSW32_USER_BITS;
583 } else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
584 /* Fake a 31 bit psw address. */
585 tmp = (__u32) regs->psw.addr |
586 (__u32)(regs->psw.mask & PSW_MASK_BA);
587 } else {
588 /* gpr 0-15 */
589 tmp = *(__u32 *)((addr_t) ®s->psw + addr*2 + 4);
590 }
591 } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
592 /*
593 * access registers are stored in the thread structure
594 */
595 offset = addr - offsetof(struct compat_user, regs.acrs);
596 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
597
598 } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
599 /*
600 * orig_gpr2 is stored on the kernel stack
601 */
602 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
603
604 } else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
605 /*
606 * prevent reads of padding hole between
607 * orig_gpr2 and fp_regs on s390.
608 */
609 tmp = 0;
610
611 } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
612 /*
613 * floating point control reg. is in the thread structure
614 */
615 tmp = child->thread.ufpu.fpc;
616
617 } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
618 /*
619 * floating point regs. are in the child->thread.ufpu.vxrs array
620 */
621 offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
622 tmp = *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
623 } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
624 /*
625 * Handle access to the per_info structure.
626 */
627 addr -= offsetof(struct compat_user, regs.per_info);
628 tmp = __peek_user_per_compat(child, addr);
629
630 } else
631 tmp = 0;
632
633 return tmp;
634 }
635
/*
 * Compat PTRACE_PEEKUSR: read one 32 bit word from the 31 bit layout
 * of the user area.  Only valid for a compat (31 bit) tracer; the
 * offset must be 4 byte aligned and stay inside struct compat_user.
 */
static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	/*
	 * Bound the offset by the compat layout: using sizeof(struct user)
	 * here would accept offsets past the end of struct compat_user,
	 * mirroring the check in poke_user_compat().
	 */
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}
647
648 /*
649 * Same as poke_user_per but for a 31 bit program.
650 */
/*
 * Write one 32 bit word into the pseudo per_info area, compat layout.
 * Same contract as __poke_user_per(): only cr9, starting_addr and
 * ending_addr are writable; everything else is silently ignored.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	if (addr == offsetof(struct compat_per_struct_kernel, cr9)) {
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
		return;
	}
	if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) {
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
		return;
	}
	if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
665
666 /*
667 * Same as poke_user but for a 31 bit program.
668 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < offsetof(struct compat_user, regs.acrs)) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == offsetof(struct compat_user, regs.psw.mask)) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == offsetof(struct compat_user, regs.psw.addr)) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
			    addr == offsetof(struct compat_user, regs.gprs[2])) {
				/*
				 * Writing gpr2 while inside a syscall also
				 * updates the interruption code.
				 */
				regs->int_code = 0x20000 | (data & 0xffff);
			}
			/* gpr 0-15 */
			/* (was corrupted to "®s->psw" by an HTML entity mangle;
			 * each compat word is the low half of the 64 bit gpr) */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - offsetof(struct compat_user, regs.acrs);
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < offsetof(struct compat_user, regs.fp_regs)) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		child->thread.ufpu.fpc = data;

	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are in the child->thread.ufpu.vxrs array
		 */
		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
		*(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = tmp;
	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= offsetof(struct compat_user, regs.per_info);
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
752
/*
 * Compat PTRACE_POKEUSR: write one 32 bit word into the 31 bit layout
 * of the user area. Only valid for a compat (31 bit) tracer.
 */
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}
762
/*
 * Architecture specific compat (31 bit tracer) ptrace requests;
 * everything not handled here goes to compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		/* addr points to a compat_ptrace_area describing the transfer. */
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;	/* offset into the USER area */
		data = parea.process_addr;	/* buffer in the tracer */
		copied = 0;
		/* Transfer 4 bytes at a time in the compat case. */
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		/* report the address of the last breaking event */
		return put_user(child->thread.last_break, (unsigned int __user *)data);
	}
	return compat_ptrace_request(child, request, addr, data);
}
810 #endif
811
812 /*
813 * user_regset definitions.
814 */
815
/*
 * Regset get: stream the general register area (psw, gprs, acrs,
 * orig_gpr2) word by word through __peek_user().
 */
static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	unsigned pos;
	/* Make sure thread.acrs holds the current access registers. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
		membuf_store(&to, __peek_user(target, pos));
	return 0;
}
828
/*
 * Regset set: write the general register area word by word through
 * __poke_user(), from either a kernel or a user buffer.
 */
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* Sync thread.acrs before modifying it via __poke_user(). */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	/* Reload the possibly updated access registers into the CPU. */
	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
864
/*
 * Regset get: copy out fpc and the 16 floating point registers.
 */
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	_s390_fp_regs fp_regs;

	/* Flush the live FPU state into thread.ufpu first. */
	if (target == current)
		save_user_fpu_regs();

	fp_regs.fpc = target->thread.ufpu.fpc;
	fpregs_store(&fp_regs, &target->thread.ufpu);

	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}
879
/*
 * Regset set: update fpc and/or the floating point registers.
 * Rejects a set that would write non-zero pad bytes after fpc.
 */
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_user_fpu_regs();
	/* Seed fprs with the current values for a partial update. */
	convert_vx_to_fp(fprs, target->thread.ufpu.vxrs);
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		/* ufpc[1] covers the pad word after fpc; must stay zero. */
		u32 ufpc[2] = { target->thread.ufpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0)
			return -EINVAL;
		target->thread.ufpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;
	/* Fold the (possibly partially) updated fprs back into vxrs. */
	convert_fp_to_vx(target->thread.ufpu.vxrs, fprs);
	return rc;
}
910
/* Regset get: report the address of the last breaking event. */
static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       struct membuf to)
{
	return membuf_store(&to, target->thread.last_break);
}
917
/* Regset set: last_break is read-only; accept and ignore writes. */
static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
925
s390_tdb_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)926 static int s390_tdb_get(struct task_struct *target,
927 const struct user_regset *regset,
928 struct membuf to)
929 {
930 struct pt_regs *regs = task_pt_regs(target);
931 size_t size;
932
933 if (!(regs->int_code & 0x200))
934 return -ENODATA;
935 size = sizeof(target->thread.trap_tdb.data);
936 return membuf_write(&to, target->thread.trap_tdb.data, size);
937 }
938
/*
 * The transaction diagnostic block is hardware-generated and read-only;
 * writes are accepted and silently ignored.
 */
static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
946
s390_vxrs_low_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)947 static int s390_vxrs_low_get(struct task_struct *target,
948 const struct user_regset *regset,
949 struct membuf to)
950 {
951 __u64 vxrs[__NUM_VXRS_LOW];
952 int i;
953
954 if (!cpu_has_vx())
955 return -ENODEV;
956 if (target == current)
957 save_user_fpu_regs();
958 for (i = 0; i < __NUM_VXRS_LOW; i++)
959 vxrs[i] = target->thread.ufpu.vxrs[i].low;
960 return membuf_write(&to, vxrs, sizeof(vxrs));
961 }
962
/* Regset write: low halves of vector registers 0-15. */
static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 low[__NUM_VXRS_LOW];
	int idx, rc;

	if (!cpu_has_vx())
		return -ENODEV;
	if (target == current)
		save_user_fpu_regs();

	/* Seed with the current values so a partial write keeps the rest. */
	for (idx = 0; idx < __NUM_VXRS_LOW; idx++)
		low[idx] = target->thread.ufpu.vxrs[idx].low;

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, low, 0, -1);
	if (rc)
		return rc;
	for (idx = 0; idx < __NUM_VXRS_LOW; idx++)
		target->thread.ufpu.vxrs[idx].low = low[idx];
	return 0;
}
986
s390_vxrs_high_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)987 static int s390_vxrs_high_get(struct task_struct *target,
988 const struct user_regset *regset,
989 struct membuf to)
990 {
991 if (!cpu_has_vx())
992 return -ENODEV;
993 if (target == current)
994 save_user_fpu_regs();
995 return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW,
996 __NUM_VXRS_HIGH * sizeof(__vector128));
997 }
998
/* Regset write: full contents of vector registers 16-31. */
static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	if (!cpu_has_vx())
		return -ENODEV;
	if (target == current)
		save_user_fpu_regs();

	/* Copy straight into the saved upper vector register set. */
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ufpu.vxrs[__NUM_VXRS_LOW],
				  0, -1);
}
1015
s390_system_call_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1016 static int s390_system_call_get(struct task_struct *target,
1017 const struct user_regset *regset,
1018 struct membuf to)
1019 {
1020 return membuf_store(&to, target->thread.system_call);
1021 }
1022
/* Regset write: overwrite the recorded system call number in place. */
static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.system_call,
				  0, sizeof(unsigned int));
}
1032
s390_gs_cb_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1033 static int s390_gs_cb_get(struct task_struct *target,
1034 const struct user_regset *regset,
1035 struct membuf to)
1036 {
1037 struct gs_cb *data = target->thread.gs_cb;
1038
1039 if (!cpu_has_gs())
1040 return -ENODEV;
1041 if (!data)
1042 return -ENODATA;
1043 if (target == current)
1044 save_gs_cb(data);
1045 return membuf_write(&to, data, sizeof(struct gs_cb));
1046 }
1047
/*
 * Regset write for NT_S390_GS_CB: install a new guarded-storage control
 * block for @target and, if the target is the current task, activate it.
 */
static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!cpu_has_gs())
		return -ENODEV;
	/* Pre-allocate with GFP_KERNEL; below we run with preemption off. */
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	/*
	 * Seed the local copy with the task's current GS state so a
	 * partial regset write leaves the remaining fields unchanged.
	 */
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;	/* fresh block: smallest GS characteristic — confirm vs PoP */
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	/* Publish atomically w.r.t. a concurrent context switch of target. */
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		/* Enable guarded storage and load the new control block. */
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}
1086
s390_gs_bc_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1087 static int s390_gs_bc_get(struct task_struct *target,
1088 const struct user_regset *regset,
1089 struct membuf to)
1090 {
1091 struct gs_cb *data = target->thread.gs_bc_cb;
1092
1093 if (!cpu_has_gs())
1094 return -ENODEV;
1095 if (!data)
1096 return -ENODATA;
1097 return membuf_write(&to, data, sizeof(struct gs_cb));
1098 }
1099
/* Regset write: the guarded-storage broadcast control block. */
static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *bc_cb;

	if (!cpu_has_gs())
		return -ENODEV;
	/* Allocate the broadcast control block on first use. */
	bc_cb = target->thread.gs_bc_cb;
	if (!bc_cb) {
		bc_cb = kzalloc(sizeof(*bc_cb), GFP_KERNEL);
		if (!bc_cb)
			return -ENOMEM;
		target->thread.gs_bc_cb = bc_cb;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  bc_cb, 0, sizeof(struct gs_cb));
}
1118
is_ri_cb_valid(struct runtime_instr_cb * cb)1119 static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
1120 {
1121 return (cb->rca & 0x1f) == 0 &&
1122 (cb->roa & 0xfff) == 0 &&
1123 (cb->rla & 0xfff) == 0xfff &&
1124 cb->s == 1 &&
1125 cb->k == 1 &&
1126 cb->h == 0 &&
1127 cb->reserved1 == 0 &&
1128 cb->ps == 1 &&
1129 cb->qs == 0 &&
1130 cb->pc == 1 &&
1131 cb->qc == 0 &&
1132 cb->reserved2 == 0 &&
1133 cb->reserved3 == 0 &&
1134 cb->reserved4 == 0 &&
1135 cb->reserved5 == 0 &&
1136 cb->reserved6 == 0 &&
1137 cb->reserved7 == 0 &&
1138 cb->reserved8 == 0 &&
1139 cb->rla >= cb->roa &&
1140 cb->rca >= cb->roa &&
1141 cb->rca <= cb->rla+1 &&
1142 cb->m < 3;
1143 }
1144
s390_runtime_instr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1145 static int s390_runtime_instr_get(struct task_struct *target,
1146 const struct user_regset *regset,
1147 struct membuf to)
1148 {
1149 struct runtime_instr_cb *data = target->thread.ri_cb;
1150
1151 if (!test_facility(64))
1152 return -ENODEV;
1153 if (!data)
1154 return -ENODATA;
1155
1156 return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
1157 }
1158
/*
 * Regset write for NT_S390_RI_CB: validate and install a runtime
 * instrumentation control block for @target, activating it immediately
 * when the target is the current task.
 */
static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	/* Pre-allocate with GFP_KERNEL; below we run with preemption off. */
	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	/* Seed with the current state so a partial write keeps the rest. */
	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	/* Reject malformed control blocks before they reach the hardware. */
	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}
	/*
	 * Override access key in any case, since user space should
	 * not be able to set it, nor should it care about it.
	 */
	ri_cb.key = PAGE_DEFAULT_KEY >> 4;
	/* Publish atomically w.r.t. a concurrent context switch of target. */
	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}
1209
/* Regsets exposed for 64-bit tasks via PTRACE_GETREGSET and core dumps. */
static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		/* Transaction diagnostic block: fixed 256-byte blob. */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};
1292
/* Native 64-bit regset view returned by task_user_regset_view(). */
static const struct user_regset_view user_s390_view = {
	.name = "s390x",
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
1299
1300 #ifdef CONFIG_COMPAT
s390_compat_regs_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1301 static int s390_compat_regs_get(struct task_struct *target,
1302 const struct user_regset *regset,
1303 struct membuf to)
1304 {
1305 unsigned n;
1306
1307 if (target == current)
1308 save_access_regs(target->thread.acrs);
1309
1310 for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
1311 membuf_store(&to, __peek_user_compat(target, n));
1312 return 0;
1313 }
1314
/*
 * Regset write for the 31-bit compat NT_PRSTATUS view: poke the payload
 * into the task one 32-bit word at a time via __poke_user_compat().
 */
static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	/* Make sure the in-memory copy of the access registers is current. */
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		/* Kernel buffer: poke each word directly. */
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		/* User buffer: fetch each word before poking it. */
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	/* Make modified access registers visible to the running task. */
	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
1350
s390_compat_regs_high_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1351 static int s390_compat_regs_high_get(struct task_struct *target,
1352 const struct user_regset *regset,
1353 struct membuf to)
1354 {
1355 compat_ulong_t *gprs_high;
1356 int i;
1357
1358 gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
1359 for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
1360 membuf_store(&to, *gprs_high);
1361 return 0;
1362 }
1363
/*
 * Regset write: upper 32-bit halves of the 64-bit general registers.
 *
 * Bug fix: the copy loops did "*gprs_high += 2;", which added 2 to the
 * value just stored and never advanced the pointer, so every incoming
 * word overwrote (and corrupted) the same register slot. The pointer
 * itself must advance by two compat words to reach the next 64-bit
 * gpr's high half — mirroring s390_compat_regs_high_get().
 */
static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	/* Start at the high half selected by the regset position. */
	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		/* Kernel buffer: store each word directly. */
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		/* User buffer: fetch each word, then store it. */
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}
1396
s390_compat_last_break_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1397 static int s390_compat_last_break_get(struct task_struct *target,
1398 const struct user_regset *regset,
1399 struct membuf to)
1400 {
1401 compat_ulong_t last_break = target->thread.last_break;
1402
1403 return membuf_store(&to, (unsigned long)last_break);
1404 }
1405
/*
 * The last-break address is read-only state; writes are accepted
 * and silently ignored so regset round-trips do not fail.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
1413
/* Regsets exposed for 31-bit compat tasks; shares most handlers above. */
static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		/* Transaction diagnostic block: fixed 256-byte blob. */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		/* Upper gpr halves are only meaningful for compat tasks. */
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};
1504
/* 31-bit compat regset view returned by task_user_regset_view(). */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
1511 #endif
1512
/*
 * Return the regset view matching the tracee's ABI: the 31-bit compat
 * view for TIF_31BIT tasks, the native 64-bit view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
1521
/* Names accepted/returned by regs_query_register_offset()/_name(). */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
1526
regs_get_register(struct pt_regs * regs,unsigned int offset)1527 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1528 {
1529 if (offset >= NUM_GPRS)
1530 return 0;
1531 return regs->gprs[offset];
1532 }
1533
regs_query_register_offset(const char * name)1534 int regs_query_register_offset(const char *name)
1535 {
1536 unsigned long offset;
1537
1538 if (!name || *name != 'r')
1539 return -EINVAL;
1540 if (kstrtoul(name + 1, 10, &offset))
1541 return -EINVAL;
1542 if (offset >= NUM_GPRS)
1543 return -EINVAL;
1544 return offset;
1545 }
1546
regs_query_register_name(unsigned int offset)1547 const char *regs_query_register_name(unsigned int offset)
1548 {
1549 if (offset >= NUM_GPRS)
1550 return NULL;
1551 return gpr_names[offset];
1552 }
1553
regs_within_kernel_stack(struct pt_regs * regs,unsigned long addr)1554 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1555 {
1556 unsigned long ksp = kernel_stack_pointer(regs);
1557
1558 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
1559 }
1560
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:pt_regs which contains kernel stack pointer.
 * @n:stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	/*
	 * Bug fix: the previous code did READ_ONCE_NOCHECK(addr), which
	 * reads the local variable and returns the stack *address* rather
	 * than the stack *entry* the kernel-doc promises. Dereference the
	 * slot instead; READ_ONCE_NOCHECK keeps the access free of KASAN
	 * instrumentation, matching the x86/arm64 implementations.
	 */
	return READ_ONCE_NOCHECK(*(unsigned long *)addr);
}
1579