/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

unsigned long exception_ip(struct pt_regs *regs)
{
	return exception_epc(regs);
}
EXPORT_SYMBOL(exception_ip);

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}

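/*
 * Illustrative sketch only, not an additional ABI definition: the
 * 38 * 8 byte image that PTRACE_GETREGS/PTRACE_SETREGS exchange with a
 * debugger, matching the access_ok() size checked above.  The struct
 * and field names below are hypothetical; user space normally uses
 * struct pt_regs / struct user_pt_regs from the UAPI headers.
 *
 *	struct mips_gregs_image {		// hypothetical name
 *		__u64 regs[32];			// GPRs, sign-extended
 *		__u64 lo, hi;
 *		__u64 cp0_epc;
 *		__u64 cp0_badvaddr;		// ignored by SETREGS
 *		__u64 cp0_status;		// ignored by SETREGS
 *		__u64 cp0_cause;		// ignored by SETREGS
 *	};
 */
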
/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written. */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}

int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] &
				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

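/*
 * Illustrative user-space sketch (not kernel code): the watch registers
 * above are reached with PTRACE_GET_WATCH_REGS/PTRACE_SET_WATCH_REGS,
 * passing the struct pt_watch_regs pointer in the "addr" argument of
 * ptrace() (see arch_ptrace() below).  "pid" is assumed to be a stopped
 * tracee; error handling is omitted.
 *
 *	struct pt_watch_regs wr;
 *
 *	ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
 *	// inspect or modify wr.mips32 / wr.mips64 according to wr.style
 *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 */
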
/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};

	mips_dump_regs32(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};

	mips_dump_regs64(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_64BIT */


#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Poke at FCSR according to its mask.  Set the Cause bits even
 * if a corresponding Enable bit is set.  This will be noticed at
 * the time the thread is switched to and SIGFPE thrown accordingly.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
	u32 fcr31;
	u32 mask;

	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written. */

	return 0;
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 * correspond 1:1 to buffer slots.  Only general registers are copied.
 */
static void fpr_get_fpa(struct task_struct *target,
			struct membuf *to)
{
	membuf_write(to, &target->thread.fpu,
		     NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 * general register slots are copied to buffer slots.  Only general
 * registers are copied.
 */
static void fpr_get_msa(struct task_struct *target, struct membuf *to)
{
	unsigned int i;

	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++)
		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR and FIR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		fpr_get_fpa(target, &to);
	else
		fpr_get_msa(target, &to);

	membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
	membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
	return 0;
}

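/*
 * For reference only, derived from fpr_get()/fpr_set() here rather than
 * from a separate ABI definition: the NT_PRFPREG image is NUM_FPU_REGS
 * 64-bit FP register slots followed by the 32-bit FCSR and the 32-bit
 * FIR (boot_cpu_data.fpu_id), i.e. sizeof(elf_fpregset_t) bytes in
 * total.  On writes the FIR slot is accepted but discarded, since that
 * register is read-only.
 */
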
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
 * context's general register slots.  Only general registers are copied.
 */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 * bits only of FP context's general register slots.  Only general
 * registers are copied.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR register separately.  Ignore the incoming FIR register
 * contents though, as the register is read-only.
 *
 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 * which is supposed to have been guaranteed by the kernel before
 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 * so that we can safely avoid preinitializing temporaries for
 * partial register writes.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	if (count > 0) {
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  fir_pos, fir_pos + sizeof(u32));
		return 0;
	}

	return err;
}

/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
static int fp_mode_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}

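/*
 * Informational note: the single int exposed through NT_MIPS_FP_MODE is
 * the same value user space sees via prctl(PR_GET_FP_MODE), i.e. a mask
 * of PR_FP_MODE_FR and PR_FP_MODE_FRE, so a debugger can save and
 * restore a tracee's FP mode alongside its FP registers.
 */
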
/*
 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 *
 * We optimize for the case where `count % sizeof(int) == 0', which
 * is supposed to have been guaranteed by the kernel before calling
 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 * that we can safely avoid preinitializing temporaries for partial
 * mode writes.
 */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	BUG_ON(count % sizeof(int));

	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	if (count > 0)
		err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

struct msa_control_regs {
	unsigned int fir;
	unsigned int fcsr;
	unsigned int msair;
	unsigned int msacsr;
};

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to,
			  unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}

static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to,
			      sizeof(target->thread.fpu.fpr[0]));
	}

	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}

static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));
	if (!err) {
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_MSA */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

/*
 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 */
static int dsp32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u32 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u32 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = (s32)dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = (s32)dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

/*
 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 */
static int dsp64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u64 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u64 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_64BIT */

/*
 * Determine whether the DSP context is present.
 */
static int dsp_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
}

enum mips_regset {
	REGSET_GPR,
	REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) {				\
	.name = #reg,						\
	.offset = offsetof(struct pt_regs, r)			\
}

#define REG_OFFSET_END {					\
	.name = NULL,						\
	.offset = 0						\
}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name.  If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;

	return -EINVAL;
}

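/*
 * Illustrative sketch, not used in this file: combined with the
 * regs_get_register() helper from the MIPS <asm/ptrace.h>, the table
 * above lets callers such as kprobe event argument fetching resolve a
 * register by name, e.g.:
 *
 *	int off = regs_query_register_offset("r4");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */
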
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.regset_get = gpr32_get,
		.set = gpr32_set,
	},
	[REGSET_DSP] = {
		.core_note_type = NT_MIPS_DSP,
		.n = NUM_DSP_REGS + 1,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = dsp32_get,
		.set = dsp32_set,
		.active = dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = fpr_get,
		.set = fpr_set,
	},
	[REGSET_FP_MODE] = {
		.core_note_type = NT_MIPS_FP_MODE,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = fp_mode_get,
		.set = fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type = NT_MIPS_MSA,
		.n = NUM_FPU_REGS + 1,
		.size = 16,
		.align = 16,
		.regset_get = msa_get,
		.set = msa_set,
	},
#endif
};

static const struct user_regset_view user_mips_view = {
	.name = "mips",
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips_regsets,
	.n = ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned long),
		.align = sizeof(unsigned long),
		.regset_get = gpr64_get,
		.set = gpr64_set,
	},
	[REGSET_DSP] = {
		.core_note_type = NT_MIPS_DSP,
		.n = NUM_DSP_REGS + 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = dsp64_get,
		.set = dsp64_set,
		.active = dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FP_MODE] = {
		.core_note_type = NT_MIPS_FP_MODE,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = fp_mode_get,
		.set = fp_mode_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = fpr_get,
		.set = fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type = NT_MIPS_MSA,
		.n = NUM_FPU_REGS + 1,
		.size = 16,
		.align = 16,
		.regset_get = msa_get,
		.set = msa_set,
	},
#endif
};

static const struct user_regset_view user_mips64_view = {
	.name = "mips64",
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips64_regsets,
	.n = ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

static const struct user_regset_view user_mipsn32_view = {
	.name = "mipsn32",
	.e_flags = EF_MIPS_ABI2,
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = mips64_regsets,
	.n = ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}

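/*
 * Illustrative user-space sketch (not kernel code): the regset views
 * above back PTRACE_GETREGSET/PTRACE_SETREGSET and the ELF core-dump
 * notes.  A debugger reads the general registers of a stopped tracee
 * "pid" through an iovec, e.g. (assuming elf_gregset_t from
 * <sys/procfs.h>):
 *
 *	elf_gregset_t gregs;
 *	struct iovec iov = {
 *		.iov_base = &gregs,
 *		.iov_len  = sizeof(gregs),
 *	};
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */
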
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs;

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		}
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
#endif
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			/* System call number may have been changed */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
#endif
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;
#endif
	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out:
	return ret;
}

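/*
 * Illustrative user-space sketch (not kernel code): the PTRACE_PEEKUSR
 * offsets handled above (0..31 for the GPRs, plus PC, CAUSE, BADVADDR,
 * MMHI, MMLO, FPR_BASE, ...) are assumed to come from the MIPS UAPI
 * ptrace header.  Reading the EPC of a stopped tracee "pid":
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSER, pid, (void *)PC, NULL);
 *	if (pc == -1 && errno != 0)
 *		perror("PTRACE_PEEKUSER");
 */
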
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
	user_exit();

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (ptrace_report_syscall_entry(regs))
			return -1;
	}

	if (secure_computing())
		return -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(current_thread_info()->syscall,
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (current_thread_info()->syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return current_thread_info()->syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	user_enter();
}