// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/* Ensure that any subsequent uaccess observes the updated value */
		isb();
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after that function is called.
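 * (The restored POR_EL0 value may reinstate restrictions under which a
 * subsequent uaccess could fault.)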
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	fpsimd_sync_from_effective_state(current);

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int read_fpsimd_context(struct user_fpsimd_state *fpsimd,
			       struct user_ctxs *user)
{
	int err;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd->vregs));
	__get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	clear_thread_flag(TIF_SVE);
	current->thread.svcr &= ~SVCR_SM_MASK;
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	fpsimd_update_current_state(&fpsimd);
	return 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		current->thread.uw.fpmr = fpmr;

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

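/*
 * Restore SVE (or streaming SVE) state from the signal frame; when the SVE
 * record carries no register payload, fall back to the FPSIMD record.
 */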
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;
	bool sm;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	sm = flags & SVE_SIG_FLAG_SM;
	if (sm) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it can
		 * have an SVE-formatted context with a zero VL and no
		 * payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	/*
	 * Non-streaming SVE state may be preserved without an SVE payload, in
	 * which case the SVE context only has a header with VL==0, and all
	 * state can be restored from the FPSIMD context.
	 *
	 * Streaming SVE state is always preserved with an SVE payload. For
	 * consistency and robustness, reject restoring streaming SVE state
	 * without an SVE payload.
	 */
	if (!sm && user->sve_size == sizeof(*user->sve))
		return restore_fpsimd_context(user);

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
			       SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	/* Merge the FPSIMD registers into the SVE state */
	fpsimd_update_current_state(&fpsimd);

	return 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */
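
/*
 * SME adds TPIDR2, ZA and (with SME2) ZT0 state, each preserved in its own
 * record below.
 */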

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	u64 tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
	int err = 0;

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
			       ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
			       ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame;
	 * include it in the GCSPR_EL0 we report so that stack
	 * switching via sigreturn keeps working. We do not allow
	 * enabling GCS via sigreturn, so the token is only relevant
	 * for threads that already have GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling, disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here; we will
	 * validate it later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */
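
/*
 * Walk the records in uc_mcontext.__reserved[] (and any extra_context area),
 * noting where each known record lives and rejecting malformed frames.
 */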

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

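			/*
			 * extra_datap/extra_size describe a second area of
			 * records placed outside __reserved[]; its placement
			 * is validated below before parsing continues there.
			 */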
			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	fpsimd_save_and_flush_current_state();

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}

#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	u64 gcspr_el0, cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
	 * We don't enforce that this is in a GCS page, if it is not
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
			     sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (cap != GCS_SIGNAL_CAP(gcspr_el0))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

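/*
 * Populate the frame laid out by setup_sigframe_layout(): the core register
 * state plus one record for each extension that was given an offset.
 */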
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_ARM64_GCS

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	u64 gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp,
		     (unsigned long __user *)(gcspr_el0 - 16), &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
		     (unsigned long __user *)(gcspr_el0 - 8), &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 16;
	write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;
	int err;

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	err = gcs_signal_entry(sigtramp, ksig);
	if (err)
		return err;

	/*
	 * We must not fail from this point onwards. We are going to update
	 * registers, including SP, in order to invoke the signal handler. If
	 * we failed and attempted to deliver a nested SIGSEGV to a handler
	 * after that point, the subsequent sigreturn would end up restoring
	 * the (partial) state for the original signal handler.
	 */

	regs->regs[0] = usig;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		regs->regs[1] = (unsigned long)&user->sigframe->info;
		regs->regs[2] = (unsigned long)&user->sigframe->uc;
	}
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->regs[30] = (unsigned long)sigtramp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		task_smstop_sm(current);
		current->thread.svcr &= ~SVCR_ZA_MASK;
		write_sysreg_s(0, SYS_TPIDR2_EL0);
	}

	return 0;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_save_and_flush_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	if (err == 0)
		err = setup_return(regs, ksig, &user, usig);

	/*
	 * We must not fail if setup_return() succeeded - see comment at the
	 * beginning of setup_return().
	 */

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);