/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}
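/*
 * Host <-> target signal number conversion.  Out-of-range values are
 * passed through unchanged; everything else is mapped via the tables
 * filled in by signal_init() below.
 */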
int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for(i = 1;i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
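/*
 * Illustrative sketch of the calling convention (a usage note, not part
 * of the emulation itself): code that must not race with signal delivery
 * first calls
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *
 * i.e. it bails out and lets the syscall be restarted if a signal was
 * already pending, then updates guest-visible signal state, relying on
 * process_pending_signals() to unblock host signals again afterwards.
 */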
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_X86_64)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}
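/*
 * To make the packing scheme above concrete: host_to_target_siginfo_noswap()
 * leaves si_code as deposit32(si_code, 16, 16, si_type), i.e. the kernel's
 * si_code in bits 0..15 and our QEMU_SI_* classification in bits 16..31.
 * tswap_siginfo() then recovers the classification with extract32() and
 * writes only sextract32(si_code, 0, 16), the sign-extended kernel value,
 * out to the guest.
 */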
/* XXX: we support only POSIX RT signals. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}

#if !((defined(TARGET_ARM) && !defined(TARGET_AARCH64)) || \
      defined(TARGET_X86_64) || defined(TARGET_UNICORE32))

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
static void force_sigsegv(int oldsig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling force_sig().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
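/*
 * A note on the trick used in force_sig() above: because the process
 * really does die from the re-raised host signal instead of calling
 * exit(), a waiting parent observes WIFSIGNALED(status) with
 * WTERMSIG(status) equal to the fatal signal, just as it would for a
 * natively executed program.
 */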
/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those to it. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered.  We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop.  Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on.  Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
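/*
 * For reference (host-dependent, so treat the numbers as typical rather
 * than universal): on most Linux hosts the kernel sigset occupies
 * _NSIG / 8 = 8 bytes, while glibc's sigset_t is 1024 bits (128 bytes),
 * which is why SIGSET_T_SIZE is used above instead of a sizeof().
 */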
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
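/*
 * Summary of the host-side handler policy applied in do_sigaction()
 * below: a target handler of TARGET_SIG_IGN maps to SIG_IGN on the host,
 * TARGET_SIG_DFL maps to the host default only for signals that are not
 * default-fatal (otherwise our handler stays installed so the death can
 * be emulated), and any real target handler keeps host_signal_handler in
 * place.  The host dispositions of SIGSEGV and SIGBUS are never touched,
 * since the emulator itself relies on them.
 */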
/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

#if defined(TARGET_I386) && TARGET_ABI_BITS == 32

/* from the Linux kernel */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    abi_ulong element[4];
};

struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t status;
    uint16_t magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];  /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC 0x0000

struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct sigframe
{
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}

/*
 * Determine which stack to use..
 */

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {

        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
    }
    return (esp - frame_size) & -8ul;
}

/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
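/*
 * Byte-level view of the sigreturn trampolines: the non-rt stub written
 * above is "popl %eax; movl $TARGET_NR_sigreturn,%eax; int $0x80", which
 * lands in memory (little-endian) as 0x58, 0xb8 <imm32>, 0xcd 0x80.  The
 * rt variant written in setup_rt_frame() below omits the popl and loads
 * TARGET_NR_rt_sigreturn instead.
 */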
/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}

static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);
    env->eip = tswapl(sc->eip);

    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    // regs->orig_eax = -1; /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    return err;
badframe:
    return 1;
}

long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}

long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}

#elif defined(TARGET_AARCH64)

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};
/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}

static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}

static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
{
    abi_ulong sp;

    sp = env->xregs[31];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;

    return sp;
}
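/*
 * Note on the return trampoline written by target_setup_frame() below
 * when the guest supplies no sa_restorer: 0xd2801168 encodes
 * "movz x8, #139" (139 being __NR_rt_sigreturn on AArch64) and
 * 0xd4000001 encodes "svc #0".
 */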
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}

#elif defined(TARGET_ARM)

struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN       (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN    (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

static const abi_ulong retcodes[4] = {
    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
    SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
};
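/*
 * Decoding the constants above: 0xef000000 is an ARM "svc" instruction
 * with the syscall number carried in its 24-bit immediate (the OABI
 * convention), while the Thumb variants pack "mov r7, #nr"
 * (0x2700 | nr, which requires the syscall number to fit in 8 bits)
 * followed by "svc #0" (0xdf00) into a single 32-bit word.
 */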
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}

static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
{
    unsigned long sp = regs->regs[13];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    /*
     * ATPCS B01 mandates 8-byte alignment
     */
    return (sp - framesize) & ~7;
}

static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}

static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe+1);
}
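/*
 * The two helpers above each emit one (magic, size) tagged record into
 * the ucontext's regspace, mirroring the kernel's coprocessor signal
 * frame layout; setup_sigframe_v2() below chains whichever records apply
 * to this CPU and terminates the list with a zero magic word.
 */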
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}

/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_frame(int usig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *regs)
{
    if (get_osversion() >= 0x020612) {
        setup_frame_v2(usig, ka, set, regs);
    } else {
        setup_frame_v1(usig, ka, set, regs);
    }
}
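/*
 * 0x020612 encodes kernel version 2.6.18: guests reporting that version
 * or newer via get_osversion() get the v2, ucontext-based frame layout
 * with the coprocessor regspace; older guests get the legacy v1 layout.
 * The same dispatch is used for the rt frames below.
 */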
     */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_rt_frame(int usig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUARMState *env)
{
    if (get_osversion() >= 0x020612) {
        setup_rt_frame_v2(usig, ka, info, set, env);
    } else {
        setup_rt_frame_v1(usig, ka, info, set, env);
    }
}

static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}

static long do_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v1 *frame = NULL;
    target_sigset_t set;
    sigset_t host_set;
    int i;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here. If it's
     * not, then the user is trying to mess with us.
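     * (get_sigframe() rounded the frame address down to an 8-byte
     * boundary when the frame was pushed, so the alignment check below
     * should only ever trip if the guest has tampered with sp.)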
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    __get_user(set.sig[0], &frame->sc.oldmask);
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->sc)) {
        goto badframe;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}

static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong *)(vfpframe + 1);
}

static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
                                             abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;

    __get_user(magic, &iwmmxtframe->magic);
    __get_user(sz, &iwmmxtframe->size);
    if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
        return 0;
    }
    for (i = 0; i < 16; i++) {
        __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong *)(iwmmxtframe + 1);
}

static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext)) {
        return 1;
    }

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if
(!regspace) { 2072 return 1; 2073 } 2074 } 2075 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2076 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2077 if (!regspace) { 2078 return 1; 2079 } 2080 } 2081 2082 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2083 return 1; 2084 2085 #if 0 2086 /* Send SIGTRAP if we're single-stepping */ 2087 if (ptrace_cancel_bpt(current)) 2088 send_sig(SIGTRAP, current, 1); 2089 #endif 2090 2091 return 0; 2092 } 2093 2094 static long do_sigreturn_v2(CPUARMState *env) 2095 { 2096 abi_ulong frame_addr; 2097 struct sigframe_v2 *frame = NULL; 2098 2099 /* 2100 * Since we stacked the signal on a 64-bit boundary, 2101 * then 'sp' should be word aligned here. If it's 2102 * not, then the user is trying to mess with us. 2103 */ 2104 frame_addr = env->regs[13]; 2105 trace_user_do_sigreturn(env, frame_addr); 2106 if (frame_addr & 7) { 2107 goto badframe; 2108 } 2109 2110 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2111 goto badframe; 2112 } 2113 2114 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2115 goto badframe; 2116 } 2117 2118 unlock_user_struct(frame, frame_addr, 0); 2119 return -TARGET_QEMU_ESIGRETURN; 2120 2121 badframe: 2122 unlock_user_struct(frame, frame_addr, 0); 2123 force_sig(TARGET_SIGSEGV /* , current */); 2124 return 0; 2125 } 2126 2127 long do_sigreturn(CPUARMState *env) 2128 { 2129 if (get_osversion() >= 0x020612) { 2130 return do_sigreturn_v2(env); 2131 } else { 2132 return do_sigreturn_v1(env); 2133 } 2134 } 2135 2136 static long do_rt_sigreturn_v1(CPUARMState *env) 2137 { 2138 abi_ulong frame_addr; 2139 struct rt_sigframe_v1 *frame = NULL; 2140 sigset_t host_set; 2141 2142 /* 2143 * Since we stacked the signal on a 64-bit boundary, 2144 * then 'sp' should be word aligned here. If it's 2145 * not, then the user is trying to mess with us. 2146 */ 2147 frame_addr = env->regs[13]; 2148 trace_user_do_rt_sigreturn(env, frame_addr); 2149 if (frame_addr & 7) { 2150 goto badframe; 2151 } 2152 2153 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2154 goto badframe; 2155 } 2156 2157 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2158 set_sigmask(&host_set); 2159 2160 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2161 goto badframe; 2162 } 2163 2164 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2165 goto badframe; 2166 2167 #if 0 2168 /* Send SIGTRAP if we're single-stepping */ 2169 if (ptrace_cancel_bpt(current)) 2170 send_sig(SIGTRAP, current, 1); 2171 #endif 2172 unlock_user_struct(frame, frame_addr, 0); 2173 return -TARGET_QEMU_ESIGRETURN; 2174 2175 badframe: 2176 unlock_user_struct(frame, frame_addr, 0); 2177 force_sig(TARGET_SIGSEGV /* , current */); 2178 return 0; 2179 } 2180 2181 static long do_rt_sigreturn_v2(CPUARMState *env) 2182 { 2183 abi_ulong frame_addr; 2184 struct rt_sigframe_v2 *frame = NULL; 2185 2186 /* 2187 * Since we stacked the signal on a 64-bit boundary, 2188 * then 'sp' should be word aligned here. If it's 2189 * not, then the user is trying to mess with us. 
2190 */ 2191 frame_addr = env->regs[13]; 2192 trace_user_do_rt_sigreturn(env, frame_addr); 2193 if (frame_addr & 7) { 2194 goto badframe; 2195 } 2196 2197 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2198 goto badframe; 2199 } 2200 2201 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2202 goto badframe; 2203 } 2204 2205 unlock_user_struct(frame, frame_addr, 0); 2206 return -TARGET_QEMU_ESIGRETURN; 2207 2208 badframe: 2209 unlock_user_struct(frame, frame_addr, 0); 2210 force_sig(TARGET_SIGSEGV /* , current */); 2211 return 0; 2212 } 2213 2214 long do_rt_sigreturn(CPUARMState *env) 2215 { 2216 if (get_osversion() >= 0x020612) { 2217 return do_rt_sigreturn_v2(env); 2218 } else { 2219 return do_rt_sigreturn_v1(env); 2220 } 2221 } 2222 2223 #elif defined(TARGET_SPARC) 2224 2225 #define __SUNOS_MAXWIN 31 2226 2227 /* This is what SunOS does, so shall I. */ 2228 struct target_sigcontext { 2229 abi_ulong sigc_onstack; /* state to restore */ 2230 2231 abi_ulong sigc_mask; /* sigmask to restore */ 2232 abi_ulong sigc_sp; /* stack pointer */ 2233 abi_ulong sigc_pc; /* program counter */ 2234 abi_ulong sigc_npc; /* next program counter */ 2235 abi_ulong sigc_psr; /* for condition codes etc */ 2236 abi_ulong sigc_g1; /* User uses these two registers */ 2237 abi_ulong sigc_o0; /* within the trampoline code. */ 2238 2239 /* Now comes information regarding the users window set 2240 * at the time of the signal. 2241 */ 2242 abi_ulong sigc_oswins; /* outstanding windows */ 2243 2244 /* stack ptrs for each regwin buf */ 2245 char *sigc_spbuf[__SUNOS_MAXWIN]; 2246 2247 /* Windows to restore after signal */ 2248 struct { 2249 abi_ulong locals[8]; 2250 abi_ulong ins[8]; 2251 } sigc_wbuf[__SUNOS_MAXWIN]; 2252 }; 2253 /* A Sparc stack frame */ 2254 struct sparc_stackf { 2255 abi_ulong locals[8]; 2256 abi_ulong ins[8]; 2257 /* It's simpler to treat fp and callers_pc as elements of ins[] 2258 * since we never need to access them ourselves. 
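     * (In the SPARC convention %i6 is the frame pointer and %i7 the
     * return address, so they sit in ins[6] and ins[7] of a saved
     * window; the sparc64 get/set-context code further down reads and
     * writes exactly those two slots.)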
2259 */ 2260 char *structptr; 2261 abi_ulong xargs[6]; 2262 abi_ulong xxargs[1]; 2263 }; 2264 2265 typedef struct { 2266 struct { 2267 abi_ulong psr; 2268 abi_ulong pc; 2269 abi_ulong npc; 2270 abi_ulong y; 2271 abi_ulong u_regs[16]; /* globals and ins */ 2272 } si_regs; 2273 int si_mask; 2274 } __siginfo_t; 2275 2276 typedef struct { 2277 abi_ulong si_float_regs[32]; 2278 unsigned long si_fsr; 2279 unsigned long si_fpqdepth; 2280 struct { 2281 unsigned long *insn_addr; 2282 unsigned long insn; 2283 } si_fpqueue [16]; 2284 } qemu_siginfo_fpu_t; 2285 2286 2287 struct target_signal_frame { 2288 struct sparc_stackf ss; 2289 __siginfo_t info; 2290 abi_ulong fpu_save; 2291 abi_ulong insns[2] __attribute__ ((aligned (8))); 2292 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2293 abi_ulong extra_size; /* Should be 0 */ 2294 qemu_siginfo_fpu_t fpu_state; 2295 }; 2296 struct target_rt_signal_frame { 2297 struct sparc_stackf ss; 2298 siginfo_t info; 2299 abi_ulong regs[20]; 2300 sigset_t mask; 2301 abi_ulong fpu_save; 2302 unsigned int insns[2]; 2303 stack_t stack; 2304 unsigned int extra_size; /* Should be 0 */ 2305 qemu_siginfo_fpu_t fpu_state; 2306 }; 2307 2308 #define UREG_O0 16 2309 #define UREG_O6 22 2310 #define UREG_I0 0 2311 #define UREG_I1 1 2312 #define UREG_I2 2 2313 #define UREG_I3 3 2314 #define UREG_I4 4 2315 #define UREG_I5 5 2316 #define UREG_I6 6 2317 #define UREG_I7 7 2318 #define UREG_L0 8 2319 #define UREG_FP UREG_I6 2320 #define UREG_SP UREG_O6 2321 2322 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2323 CPUSPARCState *env, 2324 unsigned long framesize) 2325 { 2326 abi_ulong sp; 2327 2328 sp = env->regwptr[UREG_FP]; 2329 2330 /* This is the X/Open sanctioned signal stack switching. */ 2331 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2332 if (!on_sig_stack(sp) 2333 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2334 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2335 } 2336 } 2337 return sp - framesize; 2338 } 2339 2340 static int 2341 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2342 { 2343 int err = 0, i; 2344 2345 __put_user(env->psr, &si->si_regs.psr); 2346 __put_user(env->pc, &si->si_regs.pc); 2347 __put_user(env->npc, &si->si_regs.npc); 2348 __put_user(env->y, &si->si_regs.y); 2349 for (i=0; i < 8; i++) { 2350 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2351 } 2352 for (i=0; i < 8; i++) { 2353 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2354 } 2355 __put_user(mask, &si->si_mask); 2356 return err; 2357 } 2358 2359 #if 0 2360 static int 2361 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2362 CPUSPARCState *env, unsigned long mask) 2363 { 2364 int err = 0; 2365 2366 __put_user(mask, &sc->sigc_mask); 2367 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2368 __put_user(env->pc, &sc->sigc_pc); 2369 __put_user(env->npc, &sc->sigc_npc); 2370 __put_user(env->psr, &sc->sigc_psr); 2371 __put_user(env->gregs[1], &sc->sigc_g1); 2372 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2373 2374 return err; 2375 } 2376 #endif 2377 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2378 2379 static void setup_frame(int sig, struct target_sigaction *ka, 2380 target_sigset_t *set, CPUSPARCState *env) 2381 { 2382 abi_ulong sf_addr; 2383 struct target_signal_frame *sf; 2384 int sigframe_size, err, i; 2385 2386 /* 1. 
Make sure everything is clean */ 2387 //synchronize_user_stack(); 2388 2389 sigframe_size = NF_ALIGNEDSZ; 2390 sf_addr = get_sigframe(ka, env, sigframe_size); 2391 trace_user_setup_frame(env, sf_addr); 2392 2393 sf = lock_user(VERIFY_WRITE, sf_addr, 2394 sizeof(struct target_signal_frame), 0); 2395 if (!sf) { 2396 goto sigsegv; 2397 } 2398 #if 0 2399 if (invalid_frame_pointer(sf, sigframe_size)) 2400 goto sigill_and_return; 2401 #endif 2402 /* 2. Save the current process state */ 2403 err = setup___siginfo(&sf->info, env, set->sig[0]); 2404 __put_user(0, &sf->extra_size); 2405 2406 //save_fpu_state(regs, &sf->fpu_state); 2407 //__put_user(&sf->fpu_state, &sf->fpu_save); 2408 2409 __put_user(set->sig[0], &sf->info.si_mask); 2410 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2411 __put_user(set->sig[i + 1], &sf->extramask[i]); 2412 } 2413 2414 for (i = 0; i < 8; i++) { 2415 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2416 } 2417 for (i = 0; i < 8; i++) { 2418 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2419 } 2420 if (err) 2421 goto sigsegv; 2422 2423 /* 3. signal handler back-trampoline and parameters */ 2424 env->regwptr[UREG_FP] = sf_addr; 2425 env->regwptr[UREG_I0] = sig; 2426 env->regwptr[UREG_I1] = sf_addr + 2427 offsetof(struct target_signal_frame, info); 2428 env->regwptr[UREG_I2] = sf_addr + 2429 offsetof(struct target_signal_frame, info); 2430 2431 /* 4. signal handler */ 2432 env->pc = ka->_sa_handler; 2433 env->npc = (env->pc + 4); 2434 /* 5. return to kernel instructions */ 2435 if (ka->sa_restorer) { 2436 env->regwptr[UREG_I7] = ka->sa_restorer; 2437 } else { 2438 uint32_t val32; 2439 2440 env->regwptr[UREG_I7] = sf_addr + 2441 offsetof(struct target_signal_frame, insns) - 2 * 4; 2442 2443 /* mov __NR_sigreturn, %g1 */ 2444 val32 = 0x821020d8; 2445 __put_user(val32, &sf->insns[0]); 2446 2447 /* t 0x10 */ 2448 val32 = 0x91d02010; 2449 __put_user(val32, &sf->insns[1]); 2450 if (err) 2451 goto sigsegv; 2452 2453 /* Flush instruction space. */ 2454 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2455 // tb_flush(env); 2456 } 2457 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2458 return; 2459 #if 0 2460 sigill_and_return: 2461 force_sig(TARGET_SIGILL); 2462 #endif 2463 sigsegv: 2464 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2465 force_sigsegv(sig); 2466 } 2467 2468 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2469 target_siginfo_t *info, 2470 target_sigset_t *set, CPUSPARCState *env) 2471 { 2472 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2473 } 2474 2475 long do_sigreturn(CPUSPARCState *env) 2476 { 2477 abi_ulong sf_addr; 2478 struct target_signal_frame *sf; 2479 uint32_t up_psr, pc, npc; 2480 target_sigset_t set; 2481 sigset_t host_set; 2482 int err=0, i; 2483 2484 sf_addr = env->regwptr[UREG_FP]; 2485 trace_user_do_sigreturn(env, sf_addr); 2486 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2487 goto segv_and_exit; 2488 } 2489 2490 /* 1. Make sure we are not getting garbage from the user */ 2491 2492 if (sf_addr & 3) 2493 goto segv_and_exit; 2494 2495 __get_user(pc, &sf->info.si_regs.pc); 2496 __get_user(npc, &sf->info.si_regs.npc); 2497 2498 if ((pc | npc) & 3) { 2499 goto segv_and_exit; 2500 } 2501 2502 /* 2. Restore the state */ 2503 __get_user(up_psr, &sf->info.si_regs.psr); 2504 2505 /* User can only change condition codes and FPU enabling in %psr. 
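     * PSR_EF stays commented out because FPU state is not saved or
     * restored here yet (see the FIXME below), so only the integer
     * condition codes are taken from the user's frame.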
*/ 2506 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2507 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2508 2509 env->pc = pc; 2510 env->npc = npc; 2511 __get_user(env->y, &sf->info.si_regs.y); 2512 for (i=0; i < 8; i++) { 2513 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2514 } 2515 for (i=0; i < 8; i++) { 2516 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2517 } 2518 2519 /* FIXME: implement FPU save/restore: 2520 * __get_user(fpu_save, &sf->fpu_save); 2521 * if (fpu_save) 2522 * err |= restore_fpu_state(env, fpu_save); 2523 */ 2524 2525 /* This is pretty much atomic, no amount locking would prevent 2526 * the races which exist anyways. 2527 */ 2528 __get_user(set.sig[0], &sf->info.si_mask); 2529 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2530 __get_user(set.sig[i], &sf->extramask[i - 1]); 2531 } 2532 2533 target_to_host_sigset_internal(&host_set, &set); 2534 set_sigmask(&host_set); 2535 2536 if (err) { 2537 goto segv_and_exit; 2538 } 2539 unlock_user_struct(sf, sf_addr, 0); 2540 return -TARGET_QEMU_ESIGRETURN; 2541 2542 segv_and_exit: 2543 unlock_user_struct(sf, sf_addr, 0); 2544 force_sig(TARGET_SIGSEGV); 2545 } 2546 2547 long do_rt_sigreturn(CPUSPARCState *env) 2548 { 2549 trace_user_do_rt_sigreturn(env, 0); 2550 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2551 return -TARGET_ENOSYS; 2552 } 2553 2554 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2555 #define MC_TSTATE 0 2556 #define MC_PC 1 2557 #define MC_NPC 2 2558 #define MC_Y 3 2559 #define MC_G1 4 2560 #define MC_G2 5 2561 #define MC_G3 6 2562 #define MC_G4 7 2563 #define MC_G5 8 2564 #define MC_G6 9 2565 #define MC_G7 10 2566 #define MC_O0 11 2567 #define MC_O1 12 2568 #define MC_O2 13 2569 #define MC_O3 14 2570 #define MC_O4 15 2571 #define MC_O5 16 2572 #define MC_O6 17 2573 #define MC_O7 18 2574 #define MC_NGREG 19 2575 2576 typedef abi_ulong target_mc_greg_t; 2577 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2578 2579 struct target_mc_fq { 2580 abi_ulong *mcfq_addr; 2581 uint32_t mcfq_insn; 2582 }; 2583 2584 struct target_mc_fpu { 2585 union { 2586 uint32_t sregs[32]; 2587 uint64_t dregs[32]; 2588 //uint128_t qregs[16]; 2589 } mcfpu_fregs; 2590 abi_ulong mcfpu_fsr; 2591 abi_ulong mcfpu_fprs; 2592 abi_ulong mcfpu_gsr; 2593 struct target_mc_fq *mcfpu_fq; 2594 unsigned char mcfpu_qcnt; 2595 unsigned char mcfpu_qentsz; 2596 unsigned char mcfpu_enab; 2597 }; 2598 typedef struct target_mc_fpu target_mc_fpu_t; 2599 2600 typedef struct { 2601 target_mc_gregset_t mc_gregs; 2602 target_mc_greg_t mc_fp; 2603 target_mc_greg_t mc_i7; 2604 target_mc_fpu_t mc_fpregs; 2605 } target_mcontext_t; 2606 2607 struct target_ucontext { 2608 struct target_ucontext *tuc_link; 2609 abi_ulong tuc_flags; 2610 target_sigset_t tuc_sigmask; 2611 target_mcontext_t tuc_mcontext; 2612 }; 2613 2614 /* A V9 register window */ 2615 struct target_reg_window { 2616 abi_ulong locals[8]; 2617 abi_ulong ins[8]; 2618 }; 2619 2620 #define TARGET_STACK_BIAS 2047 2621 2622 /* {set, get}context() needed for 64-bit SparcLinux userland. 
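 * sparc64_set_context() reloads CPU state from the target_ucontext whose
 * address arrives in the first argument register (%o0); if the second
 * argument is non-zero the saved signal mask is reinstated as well.
 * sparc64_get_context() writes the current state into the same structure
 * and relies on do_sigprocmask() being infallible when it is only reading
 * the mask, since there is no way to report a failure from here.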
*/ 2623 void sparc64_set_context(CPUSPARCState *env) 2624 { 2625 abi_ulong ucp_addr; 2626 struct target_ucontext *ucp; 2627 target_mc_gregset_t *grp; 2628 abi_ulong pc, npc, tstate; 2629 abi_ulong fp, i7, w_addr; 2630 unsigned int i; 2631 2632 ucp_addr = env->regwptr[UREG_I0]; 2633 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2634 goto do_sigsegv; 2635 } 2636 grp = &ucp->tuc_mcontext.mc_gregs; 2637 __get_user(pc, &((*grp)[MC_PC])); 2638 __get_user(npc, &((*grp)[MC_NPC])); 2639 if ((pc | npc) & 3) { 2640 goto do_sigsegv; 2641 } 2642 if (env->regwptr[UREG_I1]) { 2643 target_sigset_t target_set; 2644 sigset_t set; 2645 2646 if (TARGET_NSIG_WORDS == 1) { 2647 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2648 } else { 2649 abi_ulong *src, *dst; 2650 src = ucp->tuc_sigmask.sig; 2651 dst = target_set.sig; 2652 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2653 __get_user(*dst, src); 2654 } 2655 } 2656 target_to_host_sigset_internal(&set, &target_set); 2657 set_sigmask(&set); 2658 } 2659 env->pc = pc; 2660 env->npc = npc; 2661 __get_user(env->y, &((*grp)[MC_Y])); 2662 __get_user(tstate, &((*grp)[MC_TSTATE])); 2663 env->asi = (tstate >> 24) & 0xff; 2664 cpu_put_ccr(env, tstate >> 32); 2665 cpu_put_cwp64(env, tstate & 0x1f); 2666 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2667 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2668 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2669 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2670 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2671 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2672 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2673 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2674 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2675 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2676 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2677 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2678 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2679 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2680 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2681 2682 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2683 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2684 2685 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2686 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2687 abi_ulong) != 0) { 2688 goto do_sigsegv; 2689 } 2690 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2691 abi_ulong) != 0) { 2692 goto do_sigsegv; 2693 } 2694 /* FIXME this does not match how the kernel handles the FPU in 2695 * its sparc64_set_context implementation. 
In particular the FPU 2696 * is only restored if fenab is non-zero in: 2697 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2698 */ 2699 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2700 { 2701 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2702 for (i = 0; i < 64; i++, src++) { 2703 if (i & 1) { 2704 __get_user(env->fpr[i/2].l.lower, src); 2705 } else { 2706 __get_user(env->fpr[i/2].l.upper, src); 2707 } 2708 } 2709 } 2710 __get_user(env->fsr, 2711 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2712 __get_user(env->gsr, 2713 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2714 unlock_user_struct(ucp, ucp_addr, 0); 2715 return; 2716 do_sigsegv: 2717 unlock_user_struct(ucp, ucp_addr, 0); 2718 force_sig(TARGET_SIGSEGV); 2719 } 2720 2721 void sparc64_get_context(CPUSPARCState *env) 2722 { 2723 abi_ulong ucp_addr; 2724 struct target_ucontext *ucp; 2725 target_mc_gregset_t *grp; 2726 target_mcontext_t *mcp; 2727 abi_ulong fp, i7, w_addr; 2728 int err; 2729 unsigned int i; 2730 target_sigset_t target_set; 2731 sigset_t set; 2732 2733 ucp_addr = env->regwptr[UREG_I0]; 2734 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2735 goto do_sigsegv; 2736 } 2737 2738 mcp = &ucp->tuc_mcontext; 2739 grp = &mcp->mc_gregs; 2740 2741 /* Skip over the trap instruction, first. */ 2742 env->pc = env->npc; 2743 env->npc += 4; 2744 2745 /* If we're only reading the signal mask then do_sigprocmask() 2746 * is guaranteed not to fail, which is important because we don't 2747 * have any way to signal a failure or restart this operation since 2748 * this is not a normal syscall. 2749 */ 2750 err = do_sigprocmask(0, NULL, &set); 2751 assert(err == 0); 2752 host_to_target_sigset_internal(&target_set, &set); 2753 if (TARGET_NSIG_WORDS == 1) { 2754 __put_user(target_set.sig[0], 2755 (abi_ulong *)&ucp->tuc_sigmask); 2756 } else { 2757 abi_ulong *src, *dst; 2758 src = target_set.sig; 2759 dst = ucp->tuc_sigmask.sig; 2760 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2761 __put_user(*src, dst); 2762 } 2763 if (err) 2764 goto do_sigsegv; 2765 } 2766 2767 /* XXX: tstate must be saved properly */ 2768 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2769 __put_user(env->pc, &((*grp)[MC_PC])); 2770 __put_user(env->npc, &((*grp)[MC_NPC])); 2771 __put_user(env->y, &((*grp)[MC_Y])); 2772 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2773 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2774 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2775 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2776 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2777 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2778 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2779 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2780 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2781 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2782 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2783 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2784 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2785 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2786 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2787 2788 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2789 fp = i7 = 0; 2790 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2791 abi_ulong) != 0) { 2792 goto do_sigsegv; 2793 } 2794 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2795 abi_ulong) != 0) { 2796 goto do_sigsegv; 2797 } 2798 __put_user(fp, &(mcp->mc_fp)); 2799 __put_user(i7, 
&(mcp->mc_i7)); 2800 2801 { 2802 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2803 for (i = 0; i < 64; i++, dst++) { 2804 if (i & 1) { 2805 __put_user(env->fpr[i/2].l.lower, dst); 2806 } else { 2807 __put_user(env->fpr[i/2].l.upper, dst); 2808 } 2809 } 2810 } 2811 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2812 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2813 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2814 2815 if (err) 2816 goto do_sigsegv; 2817 unlock_user_struct(ucp, ucp_addr, 1); 2818 return; 2819 do_sigsegv: 2820 unlock_user_struct(ucp, ucp_addr, 1); 2821 force_sig(TARGET_SIGSEGV); 2822 } 2823 #endif 2824 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2825 2826 # if defined(TARGET_ABI_MIPSO32) 2827 struct target_sigcontext { 2828 uint32_t sc_regmask; /* Unused */ 2829 uint32_t sc_status; 2830 uint64_t sc_pc; 2831 uint64_t sc_regs[32]; 2832 uint64_t sc_fpregs[32]; 2833 uint32_t sc_ownedfp; /* Unused */ 2834 uint32_t sc_fpc_csr; 2835 uint32_t sc_fpc_eir; /* Unused */ 2836 uint32_t sc_used_math; 2837 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2838 uint32_t pad0; 2839 uint64_t sc_mdhi; 2840 uint64_t sc_mdlo; 2841 target_ulong sc_hi1; /* Was sc_cause */ 2842 target_ulong sc_lo1; /* Was sc_badvaddr */ 2843 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2844 target_ulong sc_lo2; 2845 target_ulong sc_hi3; 2846 target_ulong sc_lo3; 2847 }; 2848 # else /* N32 || N64 */ 2849 struct target_sigcontext { 2850 uint64_t sc_regs[32]; 2851 uint64_t sc_fpregs[32]; 2852 uint64_t sc_mdhi; 2853 uint64_t sc_hi1; 2854 uint64_t sc_hi2; 2855 uint64_t sc_hi3; 2856 uint64_t sc_mdlo; 2857 uint64_t sc_lo1; 2858 uint64_t sc_lo2; 2859 uint64_t sc_lo3; 2860 uint64_t sc_pc; 2861 uint32_t sc_fpc_csr; 2862 uint32_t sc_used_math; 2863 uint32_t sc_dsp; 2864 uint32_t sc_reserved; 2865 }; 2866 # endif /* O32 */ 2867 2868 struct sigframe { 2869 uint32_t sf_ass[4]; /* argument save space for o32 */ 2870 uint32_t sf_code[2]; /* signal trampoline */ 2871 struct target_sigcontext sf_sc; 2872 target_sigset_t sf_mask; 2873 }; 2874 2875 struct target_ucontext { 2876 target_ulong tuc_flags; 2877 target_ulong tuc_link; 2878 target_stack_t tuc_stack; 2879 target_ulong pad0; 2880 struct target_sigcontext tuc_mcontext; 2881 target_sigset_t tuc_sigmask; 2882 }; 2883 2884 struct target_rt_sigframe { 2885 uint32_t rs_ass[4]; /* argument save space for o32 */ 2886 uint32_t rs_code[2]; /* signal trampoline */ 2887 struct target_siginfo rs_info; 2888 struct target_ucontext rs_uc; 2889 }; 2890 2891 /* Install trampoline to jump back from signal handler */ 2892 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2893 { 2894 int err = 0; 2895 2896 /* 2897 * Set up the return code ... 2898 * 2899 * li v0, __NR__foo_sigreturn 2900 * syscall 2901 */ 2902 2903 __put_user(0x24020000 + syscall, tramp + 0); 2904 __put_user(0x0000000c , tramp + 1); 2905 return err; 2906 } 2907 2908 static inline void setup_sigcontext(CPUMIPSState *regs, 2909 struct target_sigcontext *sc) 2910 { 2911 int i; 2912 2913 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2914 regs->hflags &= ~MIPS_HFLAG_BMASK; 2915 2916 __put_user(0, &sc->sc_regs[0]); 2917 for (i = 1; i < 32; ++i) { 2918 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2919 } 2920 2921 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2922 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2923 2924 /* Rather than checking for dsp existence, always copy. The storage 2925 would just be garbage otherwise. 
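       (cpu_rddsp()/cpu_wrdsp() with the 0x3ff mask move the whole
       DSPControl word, so sc_dsp is always filled in even when the DSP
       ASE is absent.)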
*/ 2926 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2927 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2928 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2929 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2930 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2931 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2932 { 2933 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2934 __put_user(dsp, &sc->sc_dsp); 2935 } 2936 2937 __put_user(1, &sc->sc_used_math); 2938 2939 for (i = 0; i < 32; ++i) { 2940 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2941 } 2942 } 2943 2944 static inline void 2945 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2946 { 2947 int i; 2948 2949 __get_user(regs->CP0_EPC, &sc->sc_pc); 2950 2951 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2952 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2953 2954 for (i = 1; i < 32; ++i) { 2955 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2956 } 2957 2958 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2959 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2960 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2961 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2962 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2963 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2964 { 2965 uint32_t dsp; 2966 __get_user(dsp, &sc->sc_dsp); 2967 cpu_wrdsp(dsp, 0x3ff, regs); 2968 } 2969 2970 for (i = 0; i < 32; ++i) { 2971 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2972 } 2973 } 2974 2975 /* 2976 * Determine which stack to use.. 2977 */ 2978 static inline abi_ulong 2979 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 2980 { 2981 unsigned long sp; 2982 2983 /* Default to using normal stack */ 2984 sp = regs->active_tc.gpr[29]; 2985 2986 /* 2987 * FPU emulator may have its own trampoline active just 2988 * above the user stack, 16-bytes before the next lowest 2989 * 16 byte boundary. Try to avoid trashing it. 2990 */ 2991 sp -= 32; 2992 2993 /* This is the X/Open sanctioned signal stack switching. */ 2994 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 2995 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2996 } 2997 2998 return (sp - frame_size) & ~7; 2999 } 3000 3001 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 3002 { 3003 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 3004 env->hflags &= ~MIPS_HFLAG_M16; 3005 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 3006 env->active_tc.PC &= ~(target_ulong) 1; 3007 } 3008 } 3009 3010 # if defined(TARGET_ABI_MIPSO32) 3011 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 3012 static void setup_frame(int sig, struct target_sigaction * ka, 3013 target_sigset_t *set, CPUMIPSState *regs) 3014 { 3015 struct sigframe *frame; 3016 abi_ulong frame_addr; 3017 int i; 3018 3019 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 3020 trace_user_setup_frame(regs, frame_addr); 3021 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3022 goto give_sigsegv; 3023 } 3024 3025 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3026 3027 setup_sigcontext(regs, &frame->sf_sc); 3028 3029 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3030 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3031 } 3032 3033 /* 3034 * Arguments to signal handler: 3035 * 3036 * a0 = signal number 3037 * a1 = 0 (should be cause) 3038 * a2 = pointer to struct sigcontext 3039 * 3040 * $25 and PC point to the signal handler, $29 points to the 3041 * struct sigframe. 
3042 */ 3043 regs->active_tc.gpr[ 4] = sig; 3044 regs->active_tc.gpr[ 5] = 0; 3045 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3046 regs->active_tc.gpr[29] = frame_addr; 3047 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3048 /* The original kernel code sets CP0_EPC to the handler 3049 * since it returns to userland using eret 3050 * we cannot do this here, and we must set PC directly */ 3051 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3052 mips_set_hflags_isa_mode_from_pc(regs); 3053 unlock_user_struct(frame, frame_addr, 1); 3054 return; 3055 3056 give_sigsegv: 3057 force_sigsegv(sig); 3058 } 3059 3060 long do_sigreturn(CPUMIPSState *regs) 3061 { 3062 struct sigframe *frame; 3063 abi_ulong frame_addr; 3064 sigset_t blocked; 3065 target_sigset_t target_set; 3066 int i; 3067 3068 frame_addr = regs->active_tc.gpr[29]; 3069 trace_user_do_sigreturn(regs, frame_addr); 3070 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3071 goto badframe; 3072 3073 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3074 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3075 } 3076 3077 target_to_host_sigset_internal(&blocked, &target_set); 3078 set_sigmask(&blocked); 3079 3080 restore_sigcontext(regs, &frame->sf_sc); 3081 3082 #if 0 3083 /* 3084 * Don't let your children do this ... 3085 */ 3086 __asm__ __volatile__( 3087 "move\t$29, %0\n\t" 3088 "j\tsyscall_exit" 3089 :/* no outputs */ 3090 :"r" (®s)); 3091 /* Unreached */ 3092 #endif 3093 3094 regs->active_tc.PC = regs->CP0_EPC; 3095 mips_set_hflags_isa_mode_from_pc(regs); 3096 /* I am not sure this is right, but it seems to work 3097 * maybe a problem with nested signals ? */ 3098 regs->CP0_EPC = 0; 3099 return -TARGET_QEMU_ESIGRETURN; 3100 3101 badframe: 3102 force_sig(TARGET_SIGSEGV/*, current*/); 3103 return 0; 3104 } 3105 # endif /* O32 */ 3106 3107 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3108 target_siginfo_t *info, 3109 target_sigset_t *set, CPUMIPSState *env) 3110 { 3111 struct target_rt_sigframe *frame; 3112 abi_ulong frame_addr; 3113 int i; 3114 3115 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3116 trace_user_setup_rt_frame(env, frame_addr); 3117 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3118 goto give_sigsegv; 3119 } 3120 3121 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3122 3123 tswap_siginfo(&frame->rs_info, info); 3124 3125 __put_user(0, &frame->rs_uc.tuc_flags); 3126 __put_user(0, &frame->rs_uc.tuc_link); 3127 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3128 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3129 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3130 &frame->rs_uc.tuc_stack.ss_flags); 3131 3132 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3133 3134 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3135 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3136 } 3137 3138 /* 3139 * Arguments to signal handler: 3140 * 3141 * a0 = signal number 3142 * a1 = pointer to siginfo_t 3143 * a2 = pointer to struct ucontext 3144 * 3145 * $25 and PC point to the signal handler, $29 points to the 3146 * struct sigframe. 
3147 */ 3148 env->active_tc.gpr[ 4] = sig; 3149 env->active_tc.gpr[ 5] = frame_addr 3150 + offsetof(struct target_rt_sigframe, rs_info); 3151 env->active_tc.gpr[ 6] = frame_addr 3152 + offsetof(struct target_rt_sigframe, rs_uc); 3153 env->active_tc.gpr[29] = frame_addr; 3154 env->active_tc.gpr[31] = frame_addr 3155 + offsetof(struct target_rt_sigframe, rs_code); 3156 /* The original kernel code sets CP0_EPC to the handler 3157 * since it returns to userland using eret 3158 * we cannot do this here, and we must set PC directly */ 3159 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3160 mips_set_hflags_isa_mode_from_pc(env); 3161 unlock_user_struct(frame, frame_addr, 1); 3162 return; 3163 3164 give_sigsegv: 3165 unlock_user_struct(frame, frame_addr, 1); 3166 force_sigsegv(sig); 3167 } 3168 3169 long do_rt_sigreturn(CPUMIPSState *env) 3170 { 3171 struct target_rt_sigframe *frame; 3172 abi_ulong frame_addr; 3173 sigset_t blocked; 3174 3175 frame_addr = env->active_tc.gpr[29]; 3176 trace_user_do_rt_sigreturn(env, frame_addr); 3177 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3178 goto badframe; 3179 } 3180 3181 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3182 set_sigmask(&blocked); 3183 3184 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3185 3186 if (do_sigaltstack(frame_addr + 3187 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3188 0, get_sp_from_cpustate(env)) == -EFAULT) 3189 goto badframe; 3190 3191 env->active_tc.PC = env->CP0_EPC; 3192 mips_set_hflags_isa_mode_from_pc(env); 3193 /* I am not sure this is right, but it seems to work 3194 * maybe a problem with nested signals ? */ 3195 env->CP0_EPC = 0; 3196 return -TARGET_QEMU_ESIGRETURN; 3197 3198 badframe: 3199 force_sig(TARGET_SIGSEGV/*, current*/); 3200 return 0; 3201 } 3202 3203 #elif defined(TARGET_SH4) 3204 3205 /* 3206 * code and data structures from linux kernel: 3207 * include/asm-sh/sigcontext.h 3208 * arch/sh/kernel/signal.c 3209 */ 3210 3211 struct target_sigcontext { 3212 target_ulong oldmask; 3213 3214 /* CPU registers */ 3215 target_ulong sc_gregs[16]; 3216 target_ulong sc_pc; 3217 target_ulong sc_pr; 3218 target_ulong sc_sr; 3219 target_ulong sc_gbr; 3220 target_ulong sc_mach; 3221 target_ulong sc_macl; 3222 3223 /* FPU registers */ 3224 target_ulong sc_fpregs[16]; 3225 target_ulong sc_xfpregs[16]; 3226 unsigned int sc_fpscr; 3227 unsigned int sc_fpul; 3228 unsigned int sc_ownedfp; 3229 }; 3230 3231 struct target_sigframe 3232 { 3233 struct target_sigcontext sc; 3234 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3235 uint16_t retcode[3]; 3236 }; 3237 3238 3239 struct target_ucontext { 3240 target_ulong tuc_flags; 3241 struct target_ucontext *tuc_link; 3242 target_stack_t tuc_stack; 3243 struct target_sigcontext tuc_mcontext; 3244 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3245 }; 3246 3247 struct target_rt_sigframe 3248 { 3249 struct target_siginfo info; 3250 struct target_ucontext uc; 3251 uint16_t retcode[3]; 3252 }; 3253 3254 3255 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3256 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3257 3258 static abi_ulong get_sigframe(struct target_sigaction *ka, 3259 unsigned long sp, size_t frame_size) 3260 { 3261 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3262 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3263 } 3264 3265 return (sp - frame_size) & -8ul; 3266 } 3267 3268 static void setup_sigcontext(struct 
target_sigcontext *sc, 3269 CPUSH4State *regs, unsigned long mask) 3270 { 3271 int i; 3272 3273 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3274 COPY(gregs[0]); COPY(gregs[1]); 3275 COPY(gregs[2]); COPY(gregs[3]); 3276 COPY(gregs[4]); COPY(gregs[5]); 3277 COPY(gregs[6]); COPY(gregs[7]); 3278 COPY(gregs[8]); COPY(gregs[9]); 3279 COPY(gregs[10]); COPY(gregs[11]); 3280 COPY(gregs[12]); COPY(gregs[13]); 3281 COPY(gregs[14]); COPY(gregs[15]); 3282 COPY(gbr); COPY(mach); 3283 COPY(macl); COPY(pr); 3284 COPY(sr); COPY(pc); 3285 #undef COPY 3286 3287 for (i=0; i<16; i++) { 3288 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3289 } 3290 __put_user(regs->fpscr, &sc->sc_fpscr); 3291 __put_user(regs->fpul, &sc->sc_fpul); 3292 3293 /* non-iBCS2 extensions.. */ 3294 __put_user(mask, &sc->oldmask); 3295 } 3296 3297 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3298 { 3299 int i; 3300 3301 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3302 COPY(gregs[0]); COPY(gregs[1]); 3303 COPY(gregs[2]); COPY(gregs[3]); 3304 COPY(gregs[4]); COPY(gregs[5]); 3305 COPY(gregs[6]); COPY(gregs[7]); 3306 COPY(gregs[8]); COPY(gregs[9]); 3307 COPY(gregs[10]); COPY(gregs[11]); 3308 COPY(gregs[12]); COPY(gregs[13]); 3309 COPY(gregs[14]); COPY(gregs[15]); 3310 COPY(gbr); COPY(mach); 3311 COPY(macl); COPY(pr); 3312 COPY(sr); COPY(pc); 3313 #undef COPY 3314 3315 for (i=0; i<16; i++) { 3316 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3317 } 3318 __get_user(regs->fpscr, &sc->sc_fpscr); 3319 __get_user(regs->fpul, &sc->sc_fpul); 3320 3321 regs->tra = -1; /* disable syscall checks */ 3322 } 3323 3324 static void setup_frame(int sig, struct target_sigaction *ka, 3325 target_sigset_t *set, CPUSH4State *regs) 3326 { 3327 struct target_sigframe *frame; 3328 abi_ulong frame_addr; 3329 int i; 3330 3331 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3332 trace_user_setup_frame(regs, frame_addr); 3333 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3334 goto give_sigsegv; 3335 } 3336 3337 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3338 3339 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3340 __put_user(set->sig[i + 1], &frame->extramask[i]); 3341 } 3342 3343 /* Set up to return from userspace. If provided, use a stub 3344 already in userspace. 
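       Otherwise a two-instruction trampoline is generated on the stack:
       a PC-relative mov.w loads the syscall number parked in retcode[2]
       into r3, and 0xc310 ("trapa #0x10") enters the kernel, so the
       handler's return becomes a sigreturn system call.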
*/ 3345 if (ka->sa_flags & TARGET_SA_RESTORER) { 3346 regs->pr = (unsigned long) ka->sa_restorer; 3347 } else { 3348 /* Generate return code (system call to sigreturn) */ 3349 abi_ulong retcode_addr = frame_addr + 3350 offsetof(struct target_sigframe, retcode); 3351 __put_user(MOVW(2), &frame->retcode[0]); 3352 __put_user(TRAP_NOARG, &frame->retcode[1]); 3353 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3354 regs->pr = (unsigned long) retcode_addr; 3355 } 3356 3357 /* Set up registers for signal handler */ 3358 regs->gregs[15] = frame_addr; 3359 regs->gregs[4] = sig; /* Arg for signal handler */ 3360 regs->gregs[5] = 0; 3361 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3362 regs->pc = (unsigned long) ka->_sa_handler; 3363 3364 unlock_user_struct(frame, frame_addr, 1); 3365 return; 3366 3367 give_sigsegv: 3368 unlock_user_struct(frame, frame_addr, 1); 3369 force_sigsegv(sig); 3370 } 3371 3372 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3373 target_siginfo_t *info, 3374 target_sigset_t *set, CPUSH4State *regs) 3375 { 3376 struct target_rt_sigframe *frame; 3377 abi_ulong frame_addr; 3378 int i; 3379 3380 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3381 trace_user_setup_rt_frame(regs, frame_addr); 3382 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3383 goto give_sigsegv; 3384 } 3385 3386 tswap_siginfo(&frame->info, info); 3387 3388 /* Create the ucontext. */ 3389 __put_user(0, &frame->uc.tuc_flags); 3390 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3391 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3392 &frame->uc.tuc_stack.ss_sp); 3393 __put_user(sas_ss_flags(regs->gregs[15]), 3394 &frame->uc.tuc_stack.ss_flags); 3395 __put_user(target_sigaltstack_used.ss_size, 3396 &frame->uc.tuc_stack.ss_size); 3397 setup_sigcontext(&frame->uc.tuc_mcontext, 3398 regs, set->sig[0]); 3399 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3400 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3401 } 3402 3403 /* Set up to return from userspace. If provided, use a stub 3404 already in userspace. 
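       This mirrors setup_frame() above, except that the generated
       trampoline invokes TARGET_NR_rt_sigreturn and the handler receives
       the siginfo and ucontext pointers in r5 and r6.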
*/ 3405 if (ka->sa_flags & TARGET_SA_RESTORER) { 3406 regs->pr = (unsigned long) ka->sa_restorer; 3407 } else { 3408 /* Generate return code (system call to sigreturn) */ 3409 abi_ulong retcode_addr = frame_addr + 3410 offsetof(struct target_rt_sigframe, retcode); 3411 __put_user(MOVW(2), &frame->retcode[0]); 3412 __put_user(TRAP_NOARG, &frame->retcode[1]); 3413 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3414 regs->pr = (unsigned long) retcode_addr; 3415 } 3416 3417 /* Set up registers for signal handler */ 3418 regs->gregs[15] = frame_addr; 3419 regs->gregs[4] = sig; /* Arg for signal handler */ 3420 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3421 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3422 regs->pc = (unsigned long) ka->_sa_handler; 3423 3424 unlock_user_struct(frame, frame_addr, 1); 3425 return; 3426 3427 give_sigsegv: 3428 unlock_user_struct(frame, frame_addr, 1); 3429 force_sigsegv(sig); 3430 } 3431 3432 long do_sigreturn(CPUSH4State *regs) 3433 { 3434 struct target_sigframe *frame; 3435 abi_ulong frame_addr; 3436 sigset_t blocked; 3437 target_sigset_t target_set; 3438 int i; 3439 int err = 0; 3440 3441 frame_addr = regs->gregs[15]; 3442 trace_user_do_sigreturn(regs, frame_addr); 3443 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3444 goto badframe; 3445 } 3446 3447 __get_user(target_set.sig[0], &frame->sc.oldmask); 3448 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3449 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3450 } 3451 3452 if (err) 3453 goto badframe; 3454 3455 target_to_host_sigset_internal(&blocked, &target_set); 3456 set_sigmask(&blocked); 3457 3458 restore_sigcontext(regs, &frame->sc); 3459 3460 unlock_user_struct(frame, frame_addr, 0); 3461 return -TARGET_QEMU_ESIGRETURN; 3462 3463 badframe: 3464 unlock_user_struct(frame, frame_addr, 0); 3465 force_sig(TARGET_SIGSEGV); 3466 return 0; 3467 } 3468 3469 long do_rt_sigreturn(CPUSH4State *regs) 3470 { 3471 struct target_rt_sigframe *frame; 3472 abi_ulong frame_addr; 3473 sigset_t blocked; 3474 3475 frame_addr = regs->gregs[15]; 3476 trace_user_do_rt_sigreturn(regs, frame_addr); 3477 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3478 goto badframe; 3479 } 3480 3481 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3482 set_sigmask(&blocked); 3483 3484 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3485 3486 if (do_sigaltstack(frame_addr + 3487 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3488 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3489 goto badframe; 3490 } 3491 3492 unlock_user_struct(frame, frame_addr, 0); 3493 return -TARGET_QEMU_ESIGRETURN; 3494 3495 badframe: 3496 unlock_user_struct(frame, frame_addr, 0); 3497 force_sig(TARGET_SIGSEGV); 3498 return 0; 3499 } 3500 #elif defined(TARGET_MICROBLAZE) 3501 3502 struct target_sigcontext { 3503 struct target_pt_regs regs; /* needs to be first */ 3504 uint32_t oldmask; 3505 }; 3506 3507 struct target_stack_t { 3508 abi_ulong ss_sp; 3509 int ss_flags; 3510 unsigned int ss_size; 3511 }; 3512 3513 struct target_ucontext { 3514 abi_ulong tuc_flags; 3515 abi_ulong tuc_link; 3516 struct target_stack_t tuc_stack; 3517 struct target_sigcontext tuc_mcontext; 3518 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3519 }; 3520 3521 /* Signal frames. 
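 * The plain frame holds the ucontext, the extra words of the signal mask
 * and a two-instruction trampoline that re-enters the kernel through
 * sigreturn; the rt_signal_frame layout is declared but unused, since
 * setup_rt_frame() below is still a stub.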
*/ 3522 struct target_signal_frame { 3523 struct target_ucontext uc; 3524 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3525 uint32_t tramp[2]; 3526 }; 3527 3528 struct rt_signal_frame { 3529 siginfo_t info; 3530 struct ucontext uc; 3531 uint32_t tramp[2]; 3532 }; 3533 3534 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3535 { 3536 __put_user(env->regs[0], &sc->regs.r0); 3537 __put_user(env->regs[1], &sc->regs.r1); 3538 __put_user(env->regs[2], &sc->regs.r2); 3539 __put_user(env->regs[3], &sc->regs.r3); 3540 __put_user(env->regs[4], &sc->regs.r4); 3541 __put_user(env->regs[5], &sc->regs.r5); 3542 __put_user(env->regs[6], &sc->regs.r6); 3543 __put_user(env->regs[7], &sc->regs.r7); 3544 __put_user(env->regs[8], &sc->regs.r8); 3545 __put_user(env->regs[9], &sc->regs.r9); 3546 __put_user(env->regs[10], &sc->regs.r10); 3547 __put_user(env->regs[11], &sc->regs.r11); 3548 __put_user(env->regs[12], &sc->regs.r12); 3549 __put_user(env->regs[13], &sc->regs.r13); 3550 __put_user(env->regs[14], &sc->regs.r14); 3551 __put_user(env->regs[15], &sc->regs.r15); 3552 __put_user(env->regs[16], &sc->regs.r16); 3553 __put_user(env->regs[17], &sc->regs.r17); 3554 __put_user(env->regs[18], &sc->regs.r18); 3555 __put_user(env->regs[19], &sc->regs.r19); 3556 __put_user(env->regs[20], &sc->regs.r20); 3557 __put_user(env->regs[21], &sc->regs.r21); 3558 __put_user(env->regs[22], &sc->regs.r22); 3559 __put_user(env->regs[23], &sc->regs.r23); 3560 __put_user(env->regs[24], &sc->regs.r24); 3561 __put_user(env->regs[25], &sc->regs.r25); 3562 __put_user(env->regs[26], &sc->regs.r26); 3563 __put_user(env->regs[27], &sc->regs.r27); 3564 __put_user(env->regs[28], &sc->regs.r28); 3565 __put_user(env->regs[29], &sc->regs.r29); 3566 __put_user(env->regs[30], &sc->regs.r30); 3567 __put_user(env->regs[31], &sc->regs.r31); 3568 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3569 } 3570 3571 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3572 { 3573 __get_user(env->regs[0], &sc->regs.r0); 3574 __get_user(env->regs[1], &sc->regs.r1); 3575 __get_user(env->regs[2], &sc->regs.r2); 3576 __get_user(env->regs[3], &sc->regs.r3); 3577 __get_user(env->regs[4], &sc->regs.r4); 3578 __get_user(env->regs[5], &sc->regs.r5); 3579 __get_user(env->regs[6], &sc->regs.r6); 3580 __get_user(env->regs[7], &sc->regs.r7); 3581 __get_user(env->regs[8], &sc->regs.r8); 3582 __get_user(env->regs[9], &sc->regs.r9); 3583 __get_user(env->regs[10], &sc->regs.r10); 3584 __get_user(env->regs[11], &sc->regs.r11); 3585 __get_user(env->regs[12], &sc->regs.r12); 3586 __get_user(env->regs[13], &sc->regs.r13); 3587 __get_user(env->regs[14], &sc->regs.r14); 3588 __get_user(env->regs[15], &sc->regs.r15); 3589 __get_user(env->regs[16], &sc->regs.r16); 3590 __get_user(env->regs[17], &sc->regs.r17); 3591 __get_user(env->regs[18], &sc->regs.r18); 3592 __get_user(env->regs[19], &sc->regs.r19); 3593 __get_user(env->regs[20], &sc->regs.r20); 3594 __get_user(env->regs[21], &sc->regs.r21); 3595 __get_user(env->regs[22], &sc->regs.r22); 3596 __get_user(env->regs[23], &sc->regs.r23); 3597 __get_user(env->regs[24], &sc->regs.r24); 3598 __get_user(env->regs[25], &sc->regs.r25); 3599 __get_user(env->regs[26], &sc->regs.r26); 3600 __get_user(env->regs[27], &sc->regs.r27); 3601 __get_user(env->regs[28], &sc->regs.r28); 3602 __get_user(env->regs[29], &sc->regs.r29); 3603 __get_user(env->regs[30], &sc->regs.r30); 3604 __get_user(env->regs[31], &sc->regs.r31); 3605 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3606 } 3607 3608 static 
abi_ulong get_sigframe(struct target_sigaction *ka, 3609 CPUMBState *env, int frame_size) 3610 { 3611 abi_ulong sp = env->regs[1]; 3612 3613 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3614 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3615 } 3616 3617 return ((sp - frame_size) & -8UL); 3618 } 3619 3620 static void setup_frame(int sig, struct target_sigaction *ka, 3621 target_sigset_t *set, CPUMBState *env) 3622 { 3623 struct target_signal_frame *frame; 3624 abi_ulong frame_addr; 3625 int i; 3626 3627 frame_addr = get_sigframe(ka, env, sizeof *frame); 3628 trace_user_setup_frame(env, frame_addr); 3629 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3630 goto badframe; 3631 3632 /* Save the mask. */ 3633 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3634 3635 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3636 __put_user(set->sig[i], &frame->extramask[i - 1]); 3637 } 3638 3639 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3640 3641 /* Set up to return from userspace. If provided, use a stub 3642 already in userspace. */ 3643 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3644 if (ka->sa_flags & TARGET_SA_RESTORER) { 3645 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3646 } else { 3647 uint32_t t; 3648 /* Note, these encodings are _big endian_! */ 3649 /* addi r12, r0, __NR_sigreturn */ 3650 t = 0x31800000UL | TARGET_NR_sigreturn; 3651 __put_user(t, frame->tramp + 0); 3652 /* brki r14, 0x8 */ 3653 t = 0xb9cc0008UL; 3654 __put_user(t, frame->tramp + 1); 3655 3656 /* Return from sighandler will jump to the tramp. 3657 Negative 8 offset because return is rtsd r15, 8 */ 3658 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3659 - 8; 3660 } 3661 3662 /* Set up registers for signal handler */ 3663 env->regs[1] = frame_addr; 3664 /* Signal handler args: */ 3665 env->regs[5] = sig; /* Arg 0: signum */ 3666 env->regs[6] = 0; 3667 /* arg 1: sigcontext */ 3668 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3669 3670 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3671 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3672 3673 unlock_user_struct(frame, frame_addr, 1); 3674 return; 3675 badframe: 3676 force_sigsegv(sig); 3677 } 3678 3679 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3680 target_siginfo_t *info, 3681 target_sigset_t *set, CPUMBState *env) 3682 { 3683 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3684 } 3685 3686 long do_sigreturn(CPUMBState *env) 3687 { 3688 struct target_signal_frame *frame; 3689 abi_ulong frame_addr; 3690 target_sigset_t target_set; 3691 sigset_t set; 3692 int i; 3693 3694 frame_addr = env->regs[R_SP]; 3695 trace_user_do_sigreturn(env, frame_addr); 3696 /* Make sure the guest isn't playing games. */ 3697 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3698 goto badframe; 3699 3700 /* Restore blocked signals */ 3701 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3702 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3703 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3704 } 3705 target_to_host_sigset_internal(&set, &target_set); 3706 set_sigmask(&set); 3707 3708 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3709 /* We got here through a sigreturn syscall, our path back is via an 3710 rtb insn so setup r14 for that. 
*/ 3711 env->regs[14] = env->sregs[SR_PC]; 3712 3713 unlock_user_struct(frame, frame_addr, 0); 3714 return -TARGET_QEMU_ESIGRETURN; 3715 badframe: 3716 force_sig(TARGET_SIGSEGV); 3717 } 3718 3719 long do_rt_sigreturn(CPUMBState *env) 3720 { 3721 trace_user_do_rt_sigreturn(env, 0); 3722 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3723 return -TARGET_ENOSYS; 3724 } 3725 3726 #elif defined(TARGET_CRIS) 3727 3728 struct target_sigcontext { 3729 struct target_pt_regs regs; /* needs to be first */ 3730 uint32_t oldmask; 3731 uint32_t usp; /* usp before stacking this gunk on it */ 3732 }; 3733 3734 /* Signal frames. */ 3735 struct target_signal_frame { 3736 struct target_sigcontext sc; 3737 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3738 uint16_t retcode[4]; /* Trampoline code. */ 3739 }; 3740 3741 struct rt_signal_frame { 3742 siginfo_t *pinfo; 3743 void *puc; 3744 siginfo_t info; 3745 struct ucontext uc; 3746 uint16_t retcode[4]; /* Trampoline code. */ 3747 }; 3748 3749 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3750 { 3751 __put_user(env->regs[0], &sc->regs.r0); 3752 __put_user(env->regs[1], &sc->regs.r1); 3753 __put_user(env->regs[2], &sc->regs.r2); 3754 __put_user(env->regs[3], &sc->regs.r3); 3755 __put_user(env->regs[4], &sc->regs.r4); 3756 __put_user(env->regs[5], &sc->regs.r5); 3757 __put_user(env->regs[6], &sc->regs.r6); 3758 __put_user(env->regs[7], &sc->regs.r7); 3759 __put_user(env->regs[8], &sc->regs.r8); 3760 __put_user(env->regs[9], &sc->regs.r9); 3761 __put_user(env->regs[10], &sc->regs.r10); 3762 __put_user(env->regs[11], &sc->regs.r11); 3763 __put_user(env->regs[12], &sc->regs.r12); 3764 __put_user(env->regs[13], &sc->regs.r13); 3765 __put_user(env->regs[14], &sc->usp); 3766 __put_user(env->regs[15], &sc->regs.acr); 3767 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3768 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3769 __put_user(env->pc, &sc->regs.erp); 3770 } 3771 3772 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3773 { 3774 __get_user(env->regs[0], &sc->regs.r0); 3775 __get_user(env->regs[1], &sc->regs.r1); 3776 __get_user(env->regs[2], &sc->regs.r2); 3777 __get_user(env->regs[3], &sc->regs.r3); 3778 __get_user(env->regs[4], &sc->regs.r4); 3779 __get_user(env->regs[5], &sc->regs.r5); 3780 __get_user(env->regs[6], &sc->regs.r6); 3781 __get_user(env->regs[7], &sc->regs.r7); 3782 __get_user(env->regs[8], &sc->regs.r8); 3783 __get_user(env->regs[9], &sc->regs.r9); 3784 __get_user(env->regs[10], &sc->regs.r10); 3785 __get_user(env->regs[11], &sc->regs.r11); 3786 __get_user(env->regs[12], &sc->regs.r12); 3787 __get_user(env->regs[13], &sc->regs.r13); 3788 __get_user(env->regs[14], &sc->usp); 3789 __get_user(env->regs[15], &sc->regs.acr); 3790 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3791 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3792 __get_user(env->pc, &sc->regs.erp); 3793 } 3794 3795 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3796 { 3797 abi_ulong sp; 3798 /* Align the stack downwards to 4. 
*/ 3799 sp = (env->regs[R_SP] & ~3); 3800 return sp - framesize; 3801 } 3802 3803 static void setup_frame(int sig, struct target_sigaction *ka, 3804 target_sigset_t *set, CPUCRISState *env) 3805 { 3806 struct target_signal_frame *frame; 3807 abi_ulong frame_addr; 3808 int i; 3809 3810 frame_addr = get_sigframe(env, sizeof *frame); 3811 trace_user_setup_frame(env, frame_addr); 3812 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3813 goto badframe; 3814 3815 /* 3816 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3817 * use this trampoline anymore but it sets it up for GDB. 3818 * In QEMU, using the trampoline simplifies things a bit so we use it. 3819 * 3820 * This is movu.w __NR_sigreturn, r9; break 13; 3821 */ 3822 __put_user(0x9c5f, frame->retcode+0); 3823 __put_user(TARGET_NR_sigreturn, 3824 frame->retcode + 1); 3825 __put_user(0xe93d, frame->retcode + 2); 3826 3827 /* Save the mask. */ 3828 __put_user(set->sig[0], &frame->sc.oldmask); 3829 3830 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3831 __put_user(set->sig[i], &frame->extramask[i - 1]); 3832 } 3833 3834 setup_sigcontext(&frame->sc, env); 3835 3836 /* Move the stack and setup the arguments for the handler. */ 3837 env->regs[R_SP] = frame_addr; 3838 env->regs[10] = sig; 3839 env->pc = (unsigned long) ka->_sa_handler; 3840 /* Link SRP so the guest returns through the trampoline. */ 3841 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3842 3843 unlock_user_struct(frame, frame_addr, 1); 3844 return; 3845 badframe: 3846 force_sigsegv(sig); 3847 } 3848 3849 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3850 target_siginfo_t *info, 3851 target_sigset_t *set, CPUCRISState *env) 3852 { 3853 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3854 } 3855 3856 long do_sigreturn(CPUCRISState *env) 3857 { 3858 struct target_signal_frame *frame; 3859 abi_ulong frame_addr; 3860 target_sigset_t target_set; 3861 sigset_t set; 3862 int i; 3863 3864 frame_addr = env->regs[R_SP]; 3865 trace_user_do_sigreturn(env, frame_addr); 3866 /* Make sure the guest isn't playing games. 
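   lock_user_struct() checks that the whole frame lies in accessible
   guest memory before we trust anything read from it.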
*/ 3867 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3868 goto badframe; 3869 } 3870 3871 /* Restore blocked signals */ 3872 __get_user(target_set.sig[0], &frame->sc.oldmask); 3873 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3874 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3875 } 3876 target_to_host_sigset_internal(&set, &target_set); 3877 set_sigmask(&set); 3878 3879 restore_sigcontext(&frame->sc, env); 3880 unlock_user_struct(frame, frame_addr, 0); 3881 return -TARGET_QEMU_ESIGRETURN; 3882 badframe: 3883 force_sig(TARGET_SIGSEGV); 3884 } 3885 3886 long do_rt_sigreturn(CPUCRISState *env) 3887 { 3888 trace_user_do_rt_sigreturn(env, 0); 3889 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3890 return -TARGET_ENOSYS; 3891 } 3892 3893 #elif defined(TARGET_OPENRISC) 3894 3895 struct target_sigcontext { 3896 struct target_pt_regs regs; 3897 abi_ulong oldmask; 3898 abi_ulong usp; 3899 }; 3900 3901 struct target_ucontext { 3902 abi_ulong tuc_flags; 3903 abi_ulong tuc_link; 3904 target_stack_t tuc_stack; 3905 struct target_sigcontext tuc_mcontext; 3906 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3907 }; 3908 3909 struct target_rt_sigframe { 3910 abi_ulong pinfo; 3911 uint64_t puc; 3912 struct target_siginfo info; 3913 struct target_sigcontext sc; 3914 struct target_ucontext uc; 3915 unsigned char retcode[16]; /* trampoline code */ 3916 }; 3917 3918 /* This is the asm-generic/ucontext.h version */ 3919 #if 0 3920 static int restore_sigcontext(CPUOpenRISCState *regs, 3921 struct target_sigcontext *sc) 3922 { 3923 unsigned int err = 0; 3924 unsigned long old_usp; 3925 3926 /* Alwys make any pending restarted system call return -EINTR */ 3927 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3928 3929 /* restore the regs from &sc->regs (same as sc, since regs is first) 3930 * (sc is already checked for VERIFY_READ since the sigframe was 3931 * checked in sys_sigreturn previously) 3932 */ 3933 3934 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3935 goto badframe; 3936 } 3937 3938 /* make sure the U-flag is set so user-mode cannot fool us */ 3939 3940 regs->sr &= ~SR_SM; 3941 3942 /* restore the old USP as it was before we stacked the sc etc. 3943 * (we cannot just pop the sigcontext since we aligned the sp and 3944 * stuff after pushing it) 3945 */ 3946 3947 __get_user(old_usp, &sc->usp); 3948 phx_signal("old_usp 0x%lx", old_usp); 3949 3950 __PHX__ REALLY /* ??? */ 3951 wrusp(old_usp); 3952 regs->gpr[1] = old_usp; 3953 3954 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3955 * after this completes, but we don't use that mechanism. maybe we can 3956 * use it now ? 3957 */ 3958 3959 return err; 3960 3961 badframe: 3962 return 1; 3963 } 3964 #endif 3965 3966 /* Set up a signal frame. */ 3967 3968 static void setup_sigcontext(struct target_sigcontext *sc, 3969 CPUOpenRISCState *regs, 3970 unsigned long mask) 3971 { 3972 unsigned long usp = regs->gpr[1]; 3973 3974 /* copy the regs. they are first in sc so we can use sc directly */ 3975 3976 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 3977 3978 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 3979 the signal handler. The frametype will be restored to its previous 3980 value in restore_sigcontext. 
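   (This text appears to have been copied from the CRIS code; there is
   no frametype to set here, which is why the assignment below stays
   commented out.)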
*/ 3981 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 3982 3983 /* then some other stuff */ 3984 __put_user(mask, &sc->oldmask); 3985 __put_user(usp, &sc->usp); 3986 } 3987 3988 static inline unsigned long align_sigframe(unsigned long sp) 3989 { 3990 return sp & ~3UL; 3991 } 3992 3993 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 3994 CPUOpenRISCState *regs, 3995 size_t frame_size) 3996 { 3997 unsigned long sp = regs->gpr[1]; 3998 int onsigstack = on_sig_stack(sp); 3999 4000 /* redzone */ 4001 /* This is the X/Open sanctioned signal stack switching. */ 4002 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 4003 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4004 } 4005 4006 sp = align_sigframe(sp - frame_size); 4007 4008 /* 4009 * If we are on the alternate signal stack and would overflow it, don't. 4010 * Return an always-bogus address instead so we will die with SIGSEGV. 4011 */ 4012 4013 if (onsigstack && !likely(on_sig_stack(sp))) { 4014 return -1L; 4015 } 4016 4017 return sp; 4018 } 4019 4020 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4021 target_siginfo_t *info, 4022 target_sigset_t *set, CPUOpenRISCState *env) 4023 { 4024 int err = 0; 4025 abi_ulong frame_addr; 4026 unsigned long return_ip; 4027 struct target_rt_sigframe *frame; 4028 abi_ulong info_addr, uc_addr; 4029 4030 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4031 trace_user_setup_rt_frame(env, frame_addr); 4032 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4033 goto give_sigsegv; 4034 } 4035 4036 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4037 __put_user(info_addr, &frame->pinfo); 4038 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4039 __put_user(uc_addr, &frame->puc); 4040 4041 if (ka->sa_flags & SA_SIGINFO) { 4042 tswap_siginfo(&frame->info, info); 4043 } 4044 4045 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 4046 __put_user(0, &frame->uc.tuc_flags); 4047 __put_user(0, &frame->uc.tuc_link); 4048 __put_user(target_sigaltstack_used.ss_sp, 4049 &frame->uc.tuc_stack.ss_sp); 4050 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 4051 __put_user(target_sigaltstack_used.ss_size, 4052 &frame->uc.tuc_stack.ss_size); 4053 setup_sigcontext(&frame->sc, env, set->sig[0]); 4054 4055 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4056 4057 /* trampoline - the desired return ip is the retcode itself */ 4058 return_ip = (unsigned long)&frame->retcode; 4059 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4060 __put_user(0xa960, (short *)(frame->retcode + 0)); 4061 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4062 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4063 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4064 4065 if (err) { 4066 goto give_sigsegv; 4067 } 4068 4069 /* TODO what is the current->exec_domain stuff and invmap ? 
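   exec_domain signal remapping was removed from mainline Linux, so
   there is probably nothing left to emulate here.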
*/ 4070 4071 /* Set up registers for signal handler */ 4072 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4073 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4074 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4075 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4076 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4077 4078 /* actually move the usp to reflect the stacked frame */ 4079 env->gpr[1] = (unsigned long)frame; 4080 4081 return; 4082 4083 give_sigsegv: 4084 unlock_user_struct(frame, frame_addr, 1); 4085 force_sigsegv(sig); 4086 } 4087 4088 long do_sigreturn(CPUOpenRISCState *env) 4089 { 4090 trace_user_do_sigreturn(env, 0); 4091 fprintf(stderr, "do_sigreturn: not implemented\n"); 4092 return -TARGET_ENOSYS; 4093 } 4094 4095 long do_rt_sigreturn(CPUOpenRISCState *env) 4096 { 4097 trace_user_do_rt_sigreturn(env, 0); 4098 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4099 return -TARGET_ENOSYS; 4100 } 4101 /* TARGET_OPENRISC */ 4102 4103 #elif defined(TARGET_S390X) 4104 4105 #define __NUM_GPRS 16 4106 #define __NUM_FPRS 16 4107 #define __NUM_ACRS 16 4108 4109 #define S390_SYSCALL_SIZE 2 4110 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4111 4112 #define _SIGCONTEXT_NSIG 64 4113 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4114 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4115 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4116 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4117 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4118 4119 typedef struct { 4120 target_psw_t psw; 4121 target_ulong gprs[__NUM_GPRS]; 4122 unsigned int acrs[__NUM_ACRS]; 4123 } target_s390_regs_common; 4124 4125 typedef struct { 4126 unsigned int fpc; 4127 double fprs[__NUM_FPRS]; 4128 } target_s390_fp_regs; 4129 4130 typedef struct { 4131 target_s390_regs_common regs; 4132 target_s390_fp_regs fpregs; 4133 } target_sigregs; 4134 4135 struct target_sigcontext { 4136 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4137 target_sigregs *sregs; 4138 }; 4139 4140 typedef struct { 4141 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4142 struct target_sigcontext sc; 4143 target_sigregs sregs; 4144 int signo; 4145 uint8_t retcode[S390_SYSCALL_SIZE]; 4146 } sigframe; 4147 4148 struct target_ucontext { 4149 target_ulong tuc_flags; 4150 struct target_ucontext *tuc_link; 4151 target_stack_t tuc_stack; 4152 target_sigregs tuc_mcontext; 4153 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4154 }; 4155 4156 typedef struct { 4157 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4158 uint8_t retcode[S390_SYSCALL_SIZE]; 4159 struct target_siginfo info; 4160 struct target_ucontext uc; 4161 } rt_sigframe; 4162 4163 static inline abi_ulong 4164 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4165 { 4166 abi_ulong sp; 4167 4168 /* Default to using normal stack */ 4169 sp = env->regs[15]; 4170 4171 /* This is the X/Open sanctioned signal stack switching. */ 4172 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4173 if (!sas_ss_flags(sp)) { 4174 sp = target_sigaltstack_used.ss_sp + 4175 target_sigaltstack_used.ss_size; 4176 } 4177 } 4178 4179 /* This is the legacy signal stack switching. 
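   The kernel guards this with !user_mode(regs); user-only emulation
   always runs guest code in user mode, so the test is hard-coded to 0
   below.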
*/ 4180 else if (/* FIXME !user_mode(regs) */ 0 && 4181 !(ka->sa_flags & TARGET_SA_RESTORER) && 4182 ka->sa_restorer) { 4183 sp = (abi_ulong) ka->sa_restorer; 4184 } 4185 4186 return (sp - frame_size) & -8ul; 4187 } 4188 4189 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4190 { 4191 int i; 4192 //save_access_regs(current->thread.acrs); FIXME 4193 4194 /* Copy a 'clean' PSW mask to the user to avoid leaking 4195 information about whether PER is currently on. */ 4196 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4197 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4198 for (i = 0; i < 16; i++) { 4199 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4200 } 4201 for (i = 0; i < 16; i++) { 4202 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4203 } 4204 /* 4205 * We have to store the fp registers to current->thread.fp_regs 4206 * to merge them with the emulated registers. 4207 */ 4208 //save_fp_regs(¤t->thread.fp_regs); FIXME 4209 for (i = 0; i < 16; i++) { 4210 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4211 } 4212 } 4213 4214 static void setup_frame(int sig, struct target_sigaction *ka, 4215 target_sigset_t *set, CPUS390XState *env) 4216 { 4217 sigframe *frame; 4218 abi_ulong frame_addr; 4219 4220 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4221 trace_user_setup_frame(env, frame_addr); 4222 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4223 goto give_sigsegv; 4224 } 4225 4226 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4227 4228 save_sigregs(env, &frame->sregs); 4229 4230 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4231 (abi_ulong *)&frame->sc.sregs); 4232 4233 /* Set up to return from userspace. If provided, use a stub 4234 already in userspace. */ 4235 if (ka->sa_flags & TARGET_SA_RESTORER) { 4236 env->regs[14] = (unsigned long) 4237 ka->sa_restorer | PSW_ADDR_AMODE; 4238 } else { 4239 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4240 | PSW_ADDR_AMODE; 4241 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4242 (uint16_t *)(frame->retcode)); 4243 } 4244 4245 /* Set up backchain. */ 4246 __put_user(env->regs[15], (abi_ulong *) frame); 4247 4248 /* Set up registers for signal handler */ 4249 env->regs[15] = frame_addr; 4250 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4251 4252 env->regs[2] = sig; //map_signal(sig); 4253 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4254 4255 /* We forgot to include these in the sigcontext. 4256 To avoid breaking binary compatibility, they are passed as args. */ 4257 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4258 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4259 4260 /* Place signal number on stack to allow backtrace from handler. */ 4261 __put_user(env->regs[2], &frame->signo); 4262 unlock_user_struct(frame, frame_addr, 1); 4263 return; 4264 4265 give_sigsegv: 4266 force_sigsegv(sig); 4267 } 4268 4269 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4270 target_siginfo_t *info, 4271 target_sigset_t *set, CPUS390XState *env) 4272 { 4273 int i; 4274 rt_sigframe *frame; 4275 abi_ulong frame_addr; 4276 4277 frame_addr = get_sigframe(ka, env, sizeof *frame); 4278 trace_user_setup_rt_frame(env, frame_addr); 4279 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4280 goto give_sigsegv; 4281 } 4282 4283 tswap_siginfo(&frame->info, info); 4284 4285 /* Create the ucontext. 
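   Everything rt_sigreturn needs later (stack description, saved
   registers and signal mask) is stored here.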
*/ 4286 __put_user(0, &frame->uc.tuc_flags); 4287 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4288 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4289 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4290 &frame->uc.tuc_stack.ss_flags); 4291 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4292 save_sigregs(env, &frame->uc.tuc_mcontext); 4293 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4294 __put_user((abi_ulong)set->sig[i], 4295 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4296 } 4297 4298 /* Set up to return from userspace. If provided, use a stub 4299 already in userspace. */ 4300 if (ka->sa_flags & TARGET_SA_RESTORER) { 4301 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4302 } else { 4303 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4304 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4305 (uint16_t *)(frame->retcode)); 4306 } 4307 4308 /* Set up backchain. */ 4309 __put_user(env->regs[15], (abi_ulong *) frame); 4310 4311 /* Set up registers for signal handler */ 4312 env->regs[15] = frame_addr; 4313 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4314 4315 env->regs[2] = sig; //map_signal(sig); 4316 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4317 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4318 return; 4319 4320 give_sigsegv: 4321 force_sigsegv(sig); 4322 } 4323 4324 static int 4325 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4326 { 4327 int err = 0; 4328 int i; 4329 4330 for (i = 0; i < 16; i++) { 4331 __get_user(env->regs[i], &sc->regs.gprs[i]); 4332 } 4333 4334 __get_user(env->psw.mask, &sc->regs.psw.mask); 4335 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4336 (unsigned long long)env->psw.addr); 4337 __get_user(env->psw.addr, &sc->regs.psw.addr); 4338 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4339 4340 for (i = 0; i < 16; i++) { 4341 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4342 } 4343 for (i = 0; i < 16; i++) { 4344 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4345 } 4346 4347 return err; 4348 } 4349 4350 long do_sigreturn(CPUS390XState *env) 4351 { 4352 sigframe *frame; 4353 abi_ulong frame_addr = env->regs[15]; 4354 target_sigset_t target_set; 4355 sigset_t set; 4356 4357 trace_user_do_sigreturn(env, frame_addr); 4358 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4359 goto badframe; 4360 } 4361 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4362 4363 target_to_host_sigset_internal(&set, &target_set); 4364 set_sigmask(&set); /* ~_BLOCKABLE? */ 4365 4366 if (restore_sigregs(env, &frame->sregs)) { 4367 goto badframe; 4368 } 4369 4370 unlock_user_struct(frame, frame_addr, 0); 4371 return -TARGET_QEMU_ESIGRETURN; 4372 4373 badframe: 4374 force_sig(TARGET_SIGSEGV); 4375 return 0; 4376 } 4377 4378 long do_rt_sigreturn(CPUS390XState *env) 4379 { 4380 rt_sigframe *frame; 4381 abi_ulong frame_addr = env->regs[15]; 4382 sigset_t set; 4383 4384 trace_user_do_rt_sigreturn(env, frame_addr); 4385 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4386 goto badframe; 4387 } 4388 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4389 4390 set_sigmask(&set); /* ~_BLOCKABLE? 
*/ 4391 4392 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4393 goto badframe; 4394 } 4395 4396 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4397 get_sp_from_cpustate(env)) == -EFAULT) { 4398 goto badframe; 4399 } 4400 unlock_user_struct(frame, frame_addr, 0); 4401 return -TARGET_QEMU_ESIGRETURN; 4402 4403 badframe: 4404 unlock_user_struct(frame, frame_addr, 0); 4405 force_sig(TARGET_SIGSEGV); 4406 return 0; 4407 } 4408 4409 #elif defined(TARGET_PPC) 4410 4411 /* Size of dummy stack frame allocated when calling signal handler. 4412 See arch/powerpc/include/asm/ptrace.h. */ 4413 #if defined(TARGET_PPC64) 4414 #define SIGNAL_FRAMESIZE 128 4415 #else 4416 #define SIGNAL_FRAMESIZE 64 4417 #endif 4418 4419 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4420 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4421 struct target_mcontext { 4422 target_ulong mc_gregs[48]; 4423 /* Includes fpscr. */ 4424 uint64_t mc_fregs[33]; 4425 target_ulong mc_pad[2]; 4426 /* We need to handle Altivec and SPE at the same time, which no 4427 kernel needs to do. Fortunately, the kernel defines this bit to 4428 be Altivec-register-large all the time, rather than trying to 4429 twiddle it based on the specific platform. */ 4430 union { 4431 /* SPE vector registers. One extra for SPEFSCR. */ 4432 uint32_t spe[33]; 4433 /* Altivec vector registers. The packing of VSCR and VRSAVE 4434 varies depending on whether we're PPC64 or not: PPC64 splits 4435 them apart; PPC32 stuffs them together. */ 4436 #if defined(TARGET_PPC64) 4437 #define QEMU_NVRREG 34 4438 #else 4439 #define QEMU_NVRREG 33 4440 #endif 4441 ppc_avr_t altivec[QEMU_NVRREG]; 4442 #undef QEMU_NVRREG 4443 } mc_vregs __attribute__((__aligned__(16))); 4444 }; 4445 4446 /* See arch/powerpc/include/asm/sigcontext.h. */ 4447 struct target_sigcontext { 4448 target_ulong _unused[4]; 4449 int32_t signal; 4450 #if defined(TARGET_PPC64) 4451 int32_t pad0; 4452 #endif 4453 target_ulong handler; 4454 target_ulong oldmask; 4455 target_ulong regs; /* struct pt_regs __user * */ 4456 #if defined(TARGET_PPC64) 4457 struct target_mcontext mcontext; 4458 #endif 4459 }; 4460 4461 /* Indices for target_mcontext.mc_gregs, below. 4462 See arch/powerpc/include/asm/ptrace.h for details. */ 4463 enum { 4464 TARGET_PT_R0 = 0, 4465 TARGET_PT_R1 = 1, 4466 TARGET_PT_R2 = 2, 4467 TARGET_PT_R3 = 3, 4468 TARGET_PT_R4 = 4, 4469 TARGET_PT_R5 = 5, 4470 TARGET_PT_R6 = 6, 4471 TARGET_PT_R7 = 7, 4472 TARGET_PT_R8 = 8, 4473 TARGET_PT_R9 = 9, 4474 TARGET_PT_R10 = 10, 4475 TARGET_PT_R11 = 11, 4476 TARGET_PT_R12 = 12, 4477 TARGET_PT_R13 = 13, 4478 TARGET_PT_R14 = 14, 4479 TARGET_PT_R15 = 15, 4480 TARGET_PT_R16 = 16, 4481 TARGET_PT_R17 = 17, 4482 TARGET_PT_R18 = 18, 4483 TARGET_PT_R19 = 19, 4484 TARGET_PT_R20 = 20, 4485 TARGET_PT_R21 = 21, 4486 TARGET_PT_R22 = 22, 4487 TARGET_PT_R23 = 23, 4488 TARGET_PT_R24 = 24, 4489 TARGET_PT_R25 = 25, 4490 TARGET_PT_R26 = 26, 4491 TARGET_PT_R27 = 27, 4492 TARGET_PT_R28 = 28, 4493 TARGET_PT_R29 = 29, 4494 TARGET_PT_R30 = 30, 4495 TARGET_PT_R31 = 31, 4496 TARGET_PT_NIP = 32, 4497 TARGET_PT_MSR = 33, 4498 TARGET_PT_ORIG_R3 = 34, 4499 TARGET_PT_CTR = 35, 4500 TARGET_PT_LNK = 36, 4501 TARGET_PT_XER = 37, 4502 TARGET_PT_CCR = 38, 4503 /* Yes, there are two registers with #39. One is 64-bit only. 
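   TARGET_PT_SOFTE is the 64-bit one; TARGET_PT_MQ only exists on
   32-bit kernels.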
*/ 4504 TARGET_PT_MQ = 39, 4505 TARGET_PT_SOFTE = 39, 4506 TARGET_PT_TRAP = 40, 4507 TARGET_PT_DAR = 41, 4508 TARGET_PT_DSISR = 42, 4509 TARGET_PT_RESULT = 43, 4510 TARGET_PT_REGS_COUNT = 44 4511 }; 4512 4513 4514 struct target_ucontext { 4515 target_ulong tuc_flags; 4516 target_ulong tuc_link; /* struct ucontext __user * */ 4517 struct target_sigaltstack tuc_stack; 4518 #if !defined(TARGET_PPC64) 4519 int32_t tuc_pad[7]; 4520 target_ulong tuc_regs; /* struct mcontext __user * 4521 points to uc_mcontext field */ 4522 #endif 4523 target_sigset_t tuc_sigmask; 4524 #if defined(TARGET_PPC64) 4525 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4526 struct target_sigcontext tuc_sigcontext; 4527 #else 4528 int32_t tuc_maskext[30]; 4529 int32_t tuc_pad2[3]; 4530 struct target_mcontext tuc_mcontext; 4531 #endif 4532 }; 4533 4534 /* See arch/powerpc/kernel/signal_32.c. */ 4535 struct target_sigframe { 4536 struct target_sigcontext sctx; 4537 struct target_mcontext mctx; 4538 int32_t abigap[56]; 4539 }; 4540 4541 #if defined(TARGET_PPC64) 4542 4543 #define TARGET_TRAMP_SIZE 6 4544 4545 struct target_rt_sigframe { 4546 /* sys_rt_sigreturn requires the ucontext be the first field */ 4547 struct target_ucontext uc; 4548 target_ulong _unused[2]; 4549 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4550 target_ulong pinfo; /* struct siginfo __user * */ 4551 target_ulong puc; /* void __user * */ 4552 struct target_siginfo info; 4553 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4554 char abigap[288]; 4555 } __attribute__((aligned(16))); 4556 4557 #else 4558 4559 struct target_rt_sigframe { 4560 struct target_siginfo info; 4561 struct target_ucontext uc; 4562 int32_t abigap[56]; 4563 }; 4564 4565 #endif 4566 4567 #if defined(TARGET_PPC64) 4568 4569 struct target_func_ptr { 4570 target_ulong entry; 4571 target_ulong toc; 4572 }; 4573 4574 #endif 4575 4576 /* We use the mc_pad field for the signal return trampoline. */ 4577 #define tramp mc_pad 4578 4579 /* See arch/powerpc/kernel/signal.c. */ 4580 static target_ulong get_sigframe(struct target_sigaction *ka, 4581 CPUPPCState *env, 4582 int frame_size) 4583 { 4584 target_ulong oldsp; 4585 4586 oldsp = env->gpr[1]; 4587 4588 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4589 (sas_ss_flags(oldsp) == 0)) { 4590 oldsp = (target_sigaltstack_used.ss_sp 4591 + target_sigaltstack_used.ss_size); 4592 } 4593 4594 return (oldsp - frame_size) & ~0xFUL; 4595 } 4596 4597 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4598 { 4599 target_ulong msr = env->msr; 4600 int i; 4601 target_ulong ccr = 0; 4602 4603 /* In general, the kernel attempts to be intelligent about what it 4604 needs to save for Altivec/FP/SPE registers. We don't care that 4605 much, so we just go ahead and save everything. */ 4606 4607 /* Save general registers. */ 4608 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4609 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4610 } 4611 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4612 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4613 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4614 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4615 4616 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4617 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4618 } 4619 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4620 4621 /* Save Altivec registers if necessary. 
*/ 4622 if (env->insns_flags & PPC_ALTIVEC) { 4623 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4624 ppc_avr_t *avr = &env->avr[i]; 4625 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4626 4627 __put_user(avr->u64[0], &vreg->u64[0]); 4628 __put_user(avr->u64[1], &vreg->u64[1]); 4629 } 4630 /* Set MSR_VR in the saved MSR value to indicate that 4631 frame->mc_vregs contains valid data. */ 4632 msr |= MSR_VR; 4633 __put_user((uint32_t)env->spr[SPR_VRSAVE], 4634 &frame->mc_vregs.altivec[32].u32[3]); 4635 } 4636 4637 /* Save floating point registers. */ 4638 if (env->insns_flags & PPC_FLOAT) { 4639 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4640 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4641 } 4642 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4643 } 4644 4645 /* Save SPE registers. The kernel only saves the high half. */ 4646 if (env->insns_flags & PPC_SPE) { 4647 #if defined(TARGET_PPC64) 4648 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4649 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4650 } 4651 #else 4652 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4653 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4654 } 4655 #endif 4656 /* Set MSR_SPE in the saved MSR value to indicate that 4657 frame->mc_vregs contains valid data. */ 4658 msr |= MSR_SPE; 4659 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4660 } 4661 4662 /* Store MSR. */ 4663 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4664 } 4665 4666 static void encode_trampoline(int sigret, uint32_t *tramp) 4667 { 4668 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4669 if (sigret) { 4670 __put_user(0x38000000 | sigret, &tramp[0]); 4671 __put_user(0x44000002, &tramp[1]); 4672 } 4673 } 4674 4675 static void restore_user_regs(CPUPPCState *env, 4676 struct target_mcontext *frame, int sig) 4677 { 4678 target_ulong save_r2 = 0; 4679 target_ulong msr; 4680 target_ulong ccr; 4681 4682 int i; 4683 4684 if (!sig) { 4685 save_r2 = env->gpr[2]; 4686 } 4687 4688 /* Restore general registers. */ 4689 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4690 __get_user(env->gpr[i], &frame->mc_gregs[i]); 4691 } 4692 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4693 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4694 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4695 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4696 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4697 4698 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4699 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4700 } 4701 4702 if (!sig) { 4703 env->gpr[2] = save_r2; 4704 } 4705 /* Restore MSR. */ 4706 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4707 4708 /* If doing signal return, restore the previous little-endian mode. */ 4709 if (sig) 4710 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 4711 4712 /* Restore Altivec registers if necessary. */ 4713 if (env->insns_flags & PPC_ALTIVEC) { 4714 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4715 ppc_avr_t *avr = &env->avr[i]; 4716 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4717 4718 __get_user(avr->u64[0], &vreg->u64[0]); 4719 __get_user(avr->u64[1], &vreg->u64[1]); 4720 } 4721 /* Set MSR_VEC in the saved MSR value to indicate that 4722 frame->mc_vregs contains valid data. */ 4723 __get_user(env->spr[SPR_VRSAVE], 4724 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3])); 4725 } 4726 4727 /* Restore floating point registers. 
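   The fpscr travels as the final (33rd) entry of mc_fregs, matching
   what save_user_regs() stored above.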
*/ 4728 if (env->insns_flags & PPC_FLOAT) { 4729 uint64_t fpscr; 4730 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4731 __get_user(env->fpr[i], &frame->mc_fregs[i]); 4732 } 4733 __get_user(fpscr, &frame->mc_fregs[32]); 4734 env->fpscr = (uint32_t) fpscr; 4735 } 4736 4737 /* Save SPE registers. The kernel only saves the high half. */ 4738 if (env->insns_flags & PPC_SPE) { 4739 #if defined(TARGET_PPC64) 4740 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4741 uint32_t hi; 4742 4743 __get_user(hi, &frame->mc_vregs.spe[i]); 4744 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4745 } 4746 #else 4747 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4748 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4749 } 4750 #endif 4751 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4752 } 4753 } 4754 4755 static void setup_frame(int sig, struct target_sigaction *ka, 4756 target_sigset_t *set, CPUPPCState *env) 4757 { 4758 struct target_sigframe *frame; 4759 struct target_sigcontext *sc; 4760 target_ulong frame_addr, newsp; 4761 int err = 0; 4762 #if defined(TARGET_PPC64) 4763 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4764 #endif 4765 4766 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4767 trace_user_setup_frame(env, frame_addr); 4768 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4769 goto sigsegv; 4770 sc = &frame->sctx; 4771 4772 __put_user(ka->_sa_handler, &sc->handler); 4773 __put_user(set->sig[0], &sc->oldmask); 4774 #if TARGET_ABI_BITS == 64 4775 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4776 #else 4777 __put_user(set->sig[1], &sc->_unused[3]); 4778 #endif 4779 __put_user(h2g(&frame->mctx), &sc->regs); 4780 __put_user(sig, &sc->signal); 4781 4782 /* Save user regs. */ 4783 save_user_regs(env, &frame->mctx); 4784 4785 /* Construct the trampoline code on the stack. */ 4786 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 4787 4788 /* The kernel checks for the presence of a VDSO here. We don't 4789 emulate a vdso, so use a sigreturn system call. */ 4790 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4791 4792 /* Turn off all fp exceptions. */ 4793 env->fpscr = 0; 4794 4795 /* Create a stack frame for the caller of the handler. */ 4796 newsp = frame_addr - SIGNAL_FRAMESIZE; 4797 err |= put_user(env->gpr[1], newsp, target_ulong); 4798 4799 if (err) 4800 goto sigsegv; 4801 4802 /* Set up registers for signal handler. */ 4803 env->gpr[1] = newsp; 4804 env->gpr[3] = sig; 4805 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4806 4807 #if defined(TARGET_PPC64) 4808 if (get_ppc64_abi(image) < 2) { 4809 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4810 struct target_func_ptr *handler = 4811 (struct target_func_ptr *)g2h(ka->_sa_handler); 4812 env->nip = tswapl(handler->entry); 4813 env->gpr[2] = tswapl(handler->toc); 4814 } else { 4815 /* ELFv2 PPC64 function pointers are entry points, but R12 4816 * must also be set */ 4817 env->nip = tswapl((target_ulong) ka->_sa_handler); 4818 env->gpr[12] = env->nip; 4819 } 4820 #else 4821 env->nip = (target_ulong) ka->_sa_handler; 4822 #endif 4823 4824 /* Signal handlers are entered in big-endian mode. 
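   Clear MSR_LE here; restore_user_regs() brings the original
   endianness back when the handler returns via sigreturn.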
*/ 4825 env->msr &= ~(1ull << MSR_LE); 4826 4827 unlock_user_struct(frame, frame_addr, 1); 4828 return; 4829 4830 sigsegv: 4831 unlock_user_struct(frame, frame_addr, 1); 4832 force_sigsegv(sig); 4833 } 4834 4835 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4836 target_siginfo_t *info, 4837 target_sigset_t *set, CPUPPCState *env) 4838 { 4839 struct target_rt_sigframe *rt_sf; 4840 uint32_t *trampptr = 0; 4841 struct target_mcontext *mctx = 0; 4842 target_ulong rt_sf_addr, newsp = 0; 4843 int i, err = 0; 4844 #if defined(TARGET_PPC64) 4845 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4846 #endif 4847 4848 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4849 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4850 goto sigsegv; 4851 4852 tswap_siginfo(&rt_sf->info, info); 4853 4854 __put_user(0, &rt_sf->uc.tuc_flags); 4855 __put_user(0, &rt_sf->uc.tuc_link); 4856 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4857 &rt_sf->uc.tuc_stack.ss_sp); 4858 __put_user(sas_ss_flags(env->gpr[1]), 4859 &rt_sf->uc.tuc_stack.ss_flags); 4860 __put_user(target_sigaltstack_used.ss_size, 4861 &rt_sf->uc.tuc_stack.ss_size); 4862 #if !defined(TARGET_PPC64) 4863 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4864 &rt_sf->uc.tuc_regs); 4865 #endif 4866 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4867 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4868 } 4869 4870 #if defined(TARGET_PPC64) 4871 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 4872 trampptr = &rt_sf->trampoline[0]; 4873 #else 4874 mctx = &rt_sf->uc.tuc_mcontext; 4875 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 4876 #endif 4877 4878 save_user_regs(env, mctx); 4879 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 4880 4881 /* The kernel checks for the presence of a VDSO here. We don't 4882 emulate a vdso, so use a sigreturn system call. */ 4883 env->lr = (target_ulong) h2g(trampptr); 4884 4885 /* Turn off all fp exceptions. */ 4886 env->fpscr = 0; 4887 4888 /* Create a stack frame for the caller of the handler. */ 4889 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4890 err |= put_user(env->gpr[1], newsp, target_ulong); 4891 4892 if (err) 4893 goto sigsegv; 4894 4895 /* Set up registers for signal handler. */ 4896 env->gpr[1] = newsp; 4897 env->gpr[3] = (target_ulong) sig; 4898 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4899 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4900 env->gpr[6] = (target_ulong) h2g(rt_sf); 4901 4902 #if defined(TARGET_PPC64) 4903 if (get_ppc64_abi(image) < 2) { 4904 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4905 struct target_func_ptr *handler = 4906 (struct target_func_ptr *)g2h(ka->_sa_handler); 4907 env->nip = tswapl(handler->entry); 4908 env->gpr[2] = tswapl(handler->toc); 4909 } else { 4910 /* ELFv2 PPC64 function pointers are entry points, but R12 4911 * must also be set */ 4912 env->nip = tswapl((target_ulong) ka->_sa_handler); 4913 env->gpr[12] = env->nip; 4914 } 4915 #else 4916 env->nip = (target_ulong) ka->_sa_handler; 4917 #endif 4918 4919 /* Signal handlers are entered in big-endian mode. 
*/ 4920 env->msr &= ~(1ull << MSR_LE); 4921 4922 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4923 return; 4924 4925 sigsegv: 4926 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4927 force_sigsegv(sig); 4928 4929 } 4930 4931 long do_sigreturn(CPUPPCState *env) 4932 { 4933 struct target_sigcontext *sc = NULL; 4934 struct target_mcontext *sr = NULL; 4935 target_ulong sr_addr = 0, sc_addr; 4936 sigset_t blocked; 4937 target_sigset_t set; 4938 4939 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4940 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4941 goto sigsegv; 4942 4943 #if defined(TARGET_PPC64) 4944 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 4945 #else 4946 __get_user(set.sig[0], &sc->oldmask); 4947 __get_user(set.sig[1], &sc->_unused[3]); 4948 #endif 4949 target_to_host_sigset_internal(&blocked, &set); 4950 set_sigmask(&blocked); 4951 4952 __get_user(sr_addr, &sc->regs); 4953 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4954 goto sigsegv; 4955 restore_user_regs(env, sr, 1); 4956 4957 unlock_user_struct(sr, sr_addr, 1); 4958 unlock_user_struct(sc, sc_addr, 1); 4959 return -TARGET_QEMU_ESIGRETURN; 4960 4961 sigsegv: 4962 unlock_user_struct(sr, sr_addr, 1); 4963 unlock_user_struct(sc, sc_addr, 1); 4964 force_sig(TARGET_SIGSEGV); 4965 return 0; 4966 } 4967 4968 /* See arch/powerpc/kernel/signal_32.c. */ 4969 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 4970 { 4971 struct target_mcontext *mcp; 4972 target_ulong mcp_addr; 4973 sigset_t blocked; 4974 target_sigset_t set; 4975 4976 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 4977 sizeof (set))) 4978 return 1; 4979 4980 #if defined(TARGET_PPC64) 4981 mcp_addr = h2g(ucp) + 4982 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 4983 #else 4984 __get_user(mcp_addr, &ucp->tuc_regs); 4985 #endif 4986 4987 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 4988 return 1; 4989 4990 target_to_host_sigset_internal(&blocked, &set); 4991 set_sigmask(&blocked); 4992 restore_user_regs(env, mcp, sig); 4993 4994 unlock_user_struct(mcp, mcp_addr, 1); 4995 return 0; 4996 } 4997 4998 long do_rt_sigreturn(CPUPPCState *env) 4999 { 5000 struct target_rt_sigframe *rt_sf = NULL; 5001 target_ulong rt_sf_addr; 5002 5003 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5004 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5005 goto sigsegv; 5006 5007 if (do_setcontext(&rt_sf->uc, env, 1)) 5008 goto sigsegv; 5009 5010 do_sigaltstack(rt_sf_addr 5011 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5012 0, env->gpr[1]); 5013 5014 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5015 return -TARGET_QEMU_ESIGRETURN; 5016 5017 sigsegv: 5018 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5019 force_sig(TARGET_SIGSEGV); 5020 return 0; 5021 } 5022 5023 #elif defined(TARGET_M68K) 5024 5025 struct target_sigcontext { 5026 abi_ulong sc_mask; 5027 abi_ulong sc_usp; 5028 abi_ulong sc_d0; 5029 abi_ulong sc_d1; 5030 abi_ulong sc_a0; 5031 abi_ulong sc_a1; 5032 unsigned short sc_sr; 5033 abi_ulong sc_pc; 5034 }; 5035 5036 struct target_sigframe 5037 { 5038 abi_ulong pretcode; 5039 int sig; 5040 int code; 5041 abi_ulong psc; 5042 char retcode[8]; 5043 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5044 struct target_sigcontext sc; 5045 }; 5046 5047 typedef int target_greg_t; 5048 #define TARGET_NGREG 18 5049 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5050 5051 typedef struct target_fpregset { 5052 int f_fpcntl[3]; 5053 int f_fpregs[8*3]; 5054 } target_fpregset_t; 5055 5056 
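/* Layout of the m68k mcontext: gregs holds d0-d7 in slots 0-7, a0-a7 in
   slots 8-15, then pc and sr, exactly as filled in by
   target_rt_setup_ucontext() below. */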
struct target_mcontext { 5057 int version; 5058 target_gregset_t gregs; 5059 target_fpregset_t fpregs; 5060 }; 5061 5062 #define TARGET_MCONTEXT_VERSION 2 5063 5064 struct target_ucontext { 5065 abi_ulong tuc_flags; 5066 abi_ulong tuc_link; 5067 target_stack_t tuc_stack; 5068 struct target_mcontext tuc_mcontext; 5069 abi_long tuc_filler[80]; 5070 target_sigset_t tuc_sigmask; 5071 }; 5072 5073 struct target_rt_sigframe 5074 { 5075 abi_ulong pretcode; 5076 int sig; 5077 abi_ulong pinfo; 5078 abi_ulong puc; 5079 char retcode[8]; 5080 struct target_siginfo info; 5081 struct target_ucontext uc; 5082 }; 5083 5084 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5085 abi_ulong mask) 5086 { 5087 __put_user(mask, &sc->sc_mask); 5088 __put_user(env->aregs[7], &sc->sc_usp); 5089 __put_user(env->dregs[0], &sc->sc_d0); 5090 __put_user(env->dregs[1], &sc->sc_d1); 5091 __put_user(env->aregs[0], &sc->sc_a0); 5092 __put_user(env->aregs[1], &sc->sc_a1); 5093 __put_user(env->sr, &sc->sc_sr); 5094 __put_user(env->pc, &sc->sc_pc); 5095 } 5096 5097 static void 5098 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5099 { 5100 int temp; 5101 5102 __get_user(env->aregs[7], &sc->sc_usp); 5103 __get_user(env->dregs[0], &sc->sc_d0); 5104 __get_user(env->dregs[1], &sc->sc_d1); 5105 __get_user(env->aregs[0], &sc->sc_a0); 5106 __get_user(env->aregs[1], &sc->sc_a1); 5107 __get_user(env->pc, &sc->sc_pc); 5108 __get_user(temp, &sc->sc_sr); 5109 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5110 } 5111 5112 /* 5113 * Determine which stack to use.. 5114 */ 5115 static inline abi_ulong 5116 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5117 size_t frame_size) 5118 { 5119 unsigned long sp; 5120 5121 sp = regs->aregs[7]; 5122 5123 /* This is the X/Open sanctioned signal stack switching. */ 5124 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5125 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5126 } 5127 5128 return ((sp - frame_size) & -8UL); 5129 } 5130 5131 static void setup_frame(int sig, struct target_sigaction *ka, 5132 target_sigset_t *set, CPUM68KState *env) 5133 { 5134 struct target_sigframe *frame; 5135 abi_ulong frame_addr; 5136 abi_ulong retcode_addr; 5137 abi_ulong sc_addr; 5138 int i; 5139 5140 frame_addr = get_sigframe(ka, env, sizeof *frame); 5141 trace_user_setup_frame(env, frame_addr); 5142 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5143 goto give_sigsegv; 5144 } 5145 5146 __put_user(sig, &frame->sig); 5147 5148 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5149 __put_user(sc_addr, &frame->psc); 5150 5151 setup_sigcontext(&frame->sc, env, set->sig[0]); 5152 5153 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5154 __put_user(set->sig[i], &frame->extramask[i - 1]); 5155 } 5156 5157 /* Set up to return from userspace. 
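   pretcode points at the retcode buffer in this frame, which issues
   the sigreturn syscall once the handler returns.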
*/ 5158 5159 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5160 __put_user(retcode_addr, &frame->pretcode); 5161 5162 /* moveq #,d0; trap #0 */ 5163 5164 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5165 (uint32_t *)(frame->retcode)); 5166 5167 /* Set up to return from userspace */ 5168 5169 env->aregs[7] = frame_addr; 5170 env->pc = ka->_sa_handler; 5171 5172 unlock_user_struct(frame, frame_addr, 1); 5173 return; 5174 5175 give_sigsegv: 5176 force_sigsegv(sig); 5177 } 5178 5179 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5180 CPUM68KState *env) 5181 { 5182 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5183 5184 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5185 __put_user(env->dregs[0], &gregs[0]); 5186 __put_user(env->dregs[1], &gregs[1]); 5187 __put_user(env->dregs[2], &gregs[2]); 5188 __put_user(env->dregs[3], &gregs[3]); 5189 __put_user(env->dregs[4], &gregs[4]); 5190 __put_user(env->dregs[5], &gregs[5]); 5191 __put_user(env->dregs[6], &gregs[6]); 5192 __put_user(env->dregs[7], &gregs[7]); 5193 __put_user(env->aregs[0], &gregs[8]); 5194 __put_user(env->aregs[1], &gregs[9]); 5195 __put_user(env->aregs[2], &gregs[10]); 5196 __put_user(env->aregs[3], &gregs[11]); 5197 __put_user(env->aregs[4], &gregs[12]); 5198 __put_user(env->aregs[5], &gregs[13]); 5199 __put_user(env->aregs[6], &gregs[14]); 5200 __put_user(env->aregs[7], &gregs[15]); 5201 __put_user(env->pc, &gregs[16]); 5202 __put_user(env->sr, &gregs[17]); 5203 5204 return 0; 5205 } 5206 5207 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5208 struct target_ucontext *uc) 5209 { 5210 int temp; 5211 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5212 5213 __get_user(temp, &uc->tuc_mcontext.version); 5214 if (temp != TARGET_MCONTEXT_VERSION) 5215 goto badframe; 5216 5217 /* restore passed registers */ 5218 __get_user(env->dregs[0], &gregs[0]); 5219 __get_user(env->dregs[1], &gregs[1]); 5220 __get_user(env->dregs[2], &gregs[2]); 5221 __get_user(env->dregs[3], &gregs[3]); 5222 __get_user(env->dregs[4], &gregs[4]); 5223 __get_user(env->dregs[5], &gregs[5]); 5224 __get_user(env->dregs[6], &gregs[6]); 5225 __get_user(env->dregs[7], &gregs[7]); 5226 __get_user(env->aregs[0], &gregs[8]); 5227 __get_user(env->aregs[1], &gregs[9]); 5228 __get_user(env->aregs[2], &gregs[10]); 5229 __get_user(env->aregs[3], &gregs[11]); 5230 __get_user(env->aregs[4], &gregs[12]); 5231 __get_user(env->aregs[5], &gregs[13]); 5232 __get_user(env->aregs[6], &gregs[14]); 5233 __get_user(env->aregs[7], &gregs[15]); 5234 __get_user(env->pc, &gregs[16]); 5235 __get_user(temp, &gregs[17]); 5236 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5237 5238 return 0; 5239 5240 badframe: 5241 return 1; 5242 } 5243 5244 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5245 target_siginfo_t *info, 5246 target_sigset_t *set, CPUM68KState *env) 5247 { 5248 struct target_rt_sigframe *frame; 5249 abi_ulong frame_addr; 5250 abi_ulong retcode_addr; 5251 abi_ulong info_addr; 5252 abi_ulong uc_addr; 5253 int err = 0; 5254 int i; 5255 5256 frame_addr = get_sigframe(ka, env, sizeof *frame); 5257 trace_user_setup_rt_frame(env, frame_addr); 5258 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5259 goto give_sigsegv; 5260 } 5261 5262 __put_user(sig, &frame->sig); 5263 5264 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5265 __put_user(info_addr, &frame->pinfo); 5266 5267 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5268 
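    /* Together with the signal number above, pinfo and puc sit where the
       m68k ABI expects the handler's stack arguments (signo, siginfo *,
       ucontext *). */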
__put_user(uc_addr, &frame->puc); 5269 5270 tswap_siginfo(&frame->info, info); 5271 5272 /* Create the ucontext */ 5273 5274 __put_user(0, &frame->uc.tuc_flags); 5275 __put_user(0, &frame->uc.tuc_link); 5276 __put_user(target_sigaltstack_used.ss_sp, 5277 &frame->uc.tuc_stack.ss_sp); 5278 __put_user(sas_ss_flags(env->aregs[7]), 5279 &frame->uc.tuc_stack.ss_flags); 5280 __put_user(target_sigaltstack_used.ss_size, 5281 &frame->uc.tuc_stack.ss_size); 5282 err |= target_rt_setup_ucontext(&frame->uc, env); 5283 5284 if (err) 5285 goto give_sigsegv; 5286 5287 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5288 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5289 } 5290 5291 /* Set up to return from userspace. */ 5292 5293 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5294 __put_user(retcode_addr, &frame->pretcode); 5295 5296 /* moveq #,d0; notb d0; trap #0 */ 5297 5298 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 5299 (uint32_t *)(frame->retcode + 0)); 5300 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); 5301 5302 if (err) 5303 goto give_sigsegv; 5304 5305 /* Set up to return from userspace */ 5306 5307 env->aregs[7] = frame_addr; 5308 env->pc = ka->_sa_handler; 5309 5310 unlock_user_struct(frame, frame_addr, 1); 5311 return; 5312 5313 give_sigsegv: 5314 unlock_user_struct(frame, frame_addr, 1); 5315 force_sigsegv(sig); 5316 } 5317 5318 long do_sigreturn(CPUM68KState *env) 5319 { 5320 struct target_sigframe *frame; 5321 abi_ulong frame_addr = env->aregs[7] - 4; 5322 target_sigset_t target_set; 5323 sigset_t set; 5324 int i; 5325 5326 trace_user_do_sigreturn(env, frame_addr); 5327 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5328 goto badframe; 5329 5330 /* set blocked signals */ 5331 5332 __get_user(target_set.sig[0], &frame->sc.sc_mask); 5333 5334 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5335 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 5336 } 5337 5338 target_to_host_sigset_internal(&set, &target_set); 5339 set_sigmask(&set); 5340 5341 /* restore registers */ 5342 5343 restore_sigcontext(env, &frame->sc); 5344 5345 unlock_user_struct(frame, frame_addr, 0); 5346 return -TARGET_QEMU_ESIGRETURN; 5347 5348 badframe: 5349 force_sig(TARGET_SIGSEGV); 5350 return 0; 5351 } 5352 5353 long do_rt_sigreturn(CPUM68KState *env) 5354 { 5355 struct target_rt_sigframe *frame; 5356 abi_ulong frame_addr = env->aregs[7] - 4; 5357 target_sigset_t target_set; 5358 sigset_t set; 5359 5360 trace_user_do_rt_sigreturn(env, frame_addr); 5361 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5362 goto badframe; 5363 5364 target_to_host_sigset_internal(&set, &target_set); 5365 set_sigmask(&set); 5366 5367 /* restore registers */ 5368 5369 if (target_rt_restore_ucontext(env, &frame->uc)) 5370 goto badframe; 5371 5372 if (do_sigaltstack(frame_addr + 5373 offsetof(struct target_rt_sigframe, uc.tuc_stack), 5374 0, get_sp_from_cpustate(env)) == -EFAULT) 5375 goto badframe; 5376 5377 unlock_user_struct(frame, frame_addr, 0); 5378 return -TARGET_QEMU_ESIGRETURN; 5379 5380 badframe: 5381 unlock_user_struct(frame, frame_addr, 0); 5382 force_sig(TARGET_SIGSEGV); 5383 return 0; 5384 } 5385 5386 #elif defined(TARGET_ALPHA) 5387 5388 struct target_sigcontext { 5389 abi_long sc_onstack; 5390 abi_long sc_mask; 5391 abi_long sc_pc; 5392 abi_long sc_ps; 5393 abi_long sc_regs[32]; 5394 abi_long sc_ownedfp; 5395 abi_long sc_fpregs[32]; 5396 abi_ulong sc_fpcr; 5397 abi_ulong sc_fp_control; 5398 abi_ulong sc_reserved1; 5399 abi_ulong sc_reserved2; 5400 
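    /* setup_sigcontext() below fills the three trap arguments only with
       placeholder zeroes (see its FIXMEs) and leaves the rest of this
       tail of the structure unset. */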
abi_ulong sc_ssize; 5401 abi_ulong sc_sbase; 5402 abi_ulong sc_traparg_a0; 5403 abi_ulong sc_traparg_a1; 5404 abi_ulong sc_traparg_a2; 5405 abi_ulong sc_fp_trap_pc; 5406 abi_ulong sc_fp_trigger_sum; 5407 abi_ulong sc_fp_trigger_inst; 5408 }; 5409 5410 struct target_ucontext { 5411 abi_ulong tuc_flags; 5412 abi_ulong tuc_link; 5413 abi_ulong tuc_osf_sigmask; 5414 target_stack_t tuc_stack; 5415 struct target_sigcontext tuc_mcontext; 5416 target_sigset_t tuc_sigmask; 5417 }; 5418 5419 struct target_sigframe { 5420 struct target_sigcontext sc; 5421 unsigned int retcode[3]; 5422 }; 5423 5424 struct target_rt_sigframe { 5425 target_siginfo_t info; 5426 struct target_ucontext uc; 5427 unsigned int retcode[3]; 5428 }; 5429 5430 #define INSN_MOV_R30_R16 0x47fe0410 5431 #define INSN_LDI_R0 0x201f0000 5432 #define INSN_CALLSYS 0x00000083 5433 5434 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5435 abi_ulong frame_addr, target_sigset_t *set) 5436 { 5437 int i; 5438 5439 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5440 __put_user(set->sig[0], &sc->sc_mask); 5441 __put_user(env->pc, &sc->sc_pc); 5442 __put_user(8, &sc->sc_ps); 5443 5444 for (i = 0; i < 31; ++i) { 5445 __put_user(env->ir[i], &sc->sc_regs[i]); 5446 } 5447 __put_user(0, &sc->sc_regs[31]); 5448 5449 for (i = 0; i < 31; ++i) { 5450 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5451 } 5452 __put_user(0, &sc->sc_fpregs[31]); 5453 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5454 5455 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5456 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5457 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5458 } 5459 5460 static void restore_sigcontext(CPUAlphaState *env, 5461 struct target_sigcontext *sc) 5462 { 5463 uint64_t fpcr; 5464 int i; 5465 5466 __get_user(env->pc, &sc->sc_pc); 5467 5468 for (i = 0; i < 31; ++i) { 5469 __get_user(env->ir[i], &sc->sc_regs[i]); 5470 } 5471 for (i = 0; i < 31; ++i) { 5472 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5473 } 5474 5475 __get_user(fpcr, &sc->sc_fpcr); 5476 cpu_alpha_store_fpcr(env, fpcr); 5477 } 5478 5479 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5480 CPUAlphaState *env, 5481 unsigned long framesize) 5482 { 5483 abi_ulong sp = env->ir[IR_SP]; 5484 5485 /* This is the X/Open sanctioned signal stack switching. 
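   sas_ss_flags(sp) is zero only when an alternate stack is registered
   and sp is not already inside it, so nested handlers keep running on
   the same stack.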
*/ 5486 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5487 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5488 } 5489 return (sp - framesize) & -32; 5490 } 5491 5492 static void setup_frame(int sig, struct target_sigaction *ka, 5493 target_sigset_t *set, CPUAlphaState *env) 5494 { 5495 abi_ulong frame_addr, r26; 5496 struct target_sigframe *frame; 5497 int err = 0; 5498 5499 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5500 trace_user_setup_frame(env, frame_addr); 5501 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5502 goto give_sigsegv; 5503 } 5504 5505 setup_sigcontext(&frame->sc, env, frame_addr, set); 5506 5507 if (ka->sa_restorer) { 5508 r26 = ka->sa_restorer; 5509 } else { 5510 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5511 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5512 &frame->retcode[1]); 5513 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5514 /* imb() */ 5515 r26 = frame_addr; 5516 } 5517 5518 unlock_user_struct(frame, frame_addr, 1); 5519 5520 if (err) { 5521 give_sigsegv: 5522 force_sigsegv(sig); 5523 return; 5524 } 5525 5526 env->ir[IR_RA] = r26; 5527 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5528 env->ir[IR_A0] = sig; 5529 env->ir[IR_A1] = 0; 5530 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5531 env->ir[IR_SP] = frame_addr; 5532 } 5533 5534 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5535 target_siginfo_t *info, 5536 target_sigset_t *set, CPUAlphaState *env) 5537 { 5538 abi_ulong frame_addr, r26; 5539 struct target_rt_sigframe *frame; 5540 int i, err = 0; 5541 5542 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5543 trace_user_setup_rt_frame(env, frame_addr); 5544 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5545 goto give_sigsegv; 5546 } 5547 5548 tswap_siginfo(&frame->info, info); 5549 5550 __put_user(0, &frame->uc.tuc_flags); 5551 __put_user(0, &frame->uc.tuc_link); 5552 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5553 __put_user(target_sigaltstack_used.ss_sp, 5554 &frame->uc.tuc_stack.ss_sp); 5555 __put_user(sas_ss_flags(env->ir[IR_SP]), 5556 &frame->uc.tuc_stack.ss_flags); 5557 __put_user(target_sigaltstack_used.ss_size, 5558 &frame->uc.tuc_stack.ss_size); 5559 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5560 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5561 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5562 } 5563 5564 if (ka->sa_restorer) { 5565 r26 = ka->sa_restorer; 5566 } else { 5567 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5568 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5569 &frame->retcode[1]); 5570 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5571 /* imb(); */ 5572 r26 = frame_addr; 5573 } 5574 5575 if (err) { 5576 give_sigsegv: 5577 force_sigsegv(sig); 5578 return; 5579 } 5580 5581 env->ir[IR_RA] = r26; 5582 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5583 env->ir[IR_A0] = sig; 5584 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5585 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5586 env->ir[IR_SP] = frame_addr; 5587 } 5588 5589 long do_sigreturn(CPUAlphaState *env) 5590 { 5591 struct target_sigcontext *sc; 5592 abi_ulong sc_addr = env->ir[IR_A0]; 5593 target_sigset_t target_set; 5594 sigset_t set; 5595 5596 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5597 goto badframe; 5598 } 5599 5600 target_sigemptyset(&target_set); 5601 __get_user(target_set.sig[0], &sc->sc_mask); 5602 5603 
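    /* Only the first word of the mask is stored in sc_mask; the remaining
       words were cleared by target_sigemptyset() above. */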
target_to_host_sigset_internal(&set, &target_set); 5604 set_sigmask(&set); 5605 5606 restore_sigcontext(env, sc); 5607 unlock_user_struct(sc, sc_addr, 0); 5608 return -TARGET_QEMU_ESIGRETURN; 5609 5610 badframe: 5611 force_sig(TARGET_SIGSEGV); 5612 } 5613 5614 long do_rt_sigreturn(CPUAlphaState *env) 5615 { 5616 abi_ulong frame_addr = env->ir[IR_A0]; 5617 struct target_rt_sigframe *frame; 5618 sigset_t set; 5619 5620 trace_user_do_rt_sigreturn(env, frame_addr); 5621 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5622 goto badframe; 5623 } 5624 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5625 set_sigmask(&set); 5626 5627 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5628 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5629 uc.tuc_stack), 5630 0, env->ir[IR_SP]) == -EFAULT) { 5631 goto badframe; 5632 } 5633 5634 unlock_user_struct(frame, frame_addr, 0); 5635 return -TARGET_QEMU_ESIGRETURN; 5636 5637 5638 badframe: 5639 unlock_user_struct(frame, frame_addr, 0); 5640 force_sig(TARGET_SIGSEGV); 5641 } 5642 5643 #elif defined(TARGET_TILEGX) 5644 5645 struct target_sigcontext { 5646 union { 5647 /* General-purpose registers. */ 5648 abi_ulong gregs[56]; 5649 struct { 5650 abi_ulong __gregs[53]; 5651 abi_ulong tp; /* Aliases gregs[TREG_TP]. */ 5652 abi_ulong sp; /* Aliases gregs[TREG_SP]. */ 5653 abi_ulong lr; /* Aliases gregs[TREG_LR]. */ 5654 }; 5655 }; 5656 abi_ulong pc; /* Program counter. */ 5657 abi_ulong ics; /* In Interrupt Critical Section? */ 5658 abi_ulong faultnum; /* Fault number. */ 5659 abi_ulong pad[5]; 5660 }; 5661 5662 struct target_ucontext { 5663 abi_ulong tuc_flags; 5664 abi_ulong tuc_link; 5665 target_stack_t tuc_stack; 5666 struct target_sigcontext tuc_mcontext; 5667 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 5668 }; 5669 5670 struct target_rt_sigframe { 5671 unsigned char save_area[16]; /* caller save area */ 5672 struct target_siginfo info; 5673 struct target_ucontext uc; 5674 abi_ulong retcode[2]; 5675 }; 5676 5677 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */ 5678 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */ 5679 5680 5681 static void setup_sigcontext(struct target_sigcontext *sc, 5682 CPUArchState *env, int signo) 5683 { 5684 int i; 5685 5686 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5687 __put_user(env->regs[i], &sc->gregs[i]); 5688 } 5689 5690 __put_user(env->pc, &sc->pc); 5691 __put_user(0, &sc->ics); 5692 __put_user(signo, &sc->faultnum); 5693 } 5694 5695 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc) 5696 { 5697 int i; 5698 5699 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5700 __get_user(env->regs[i], &sc->gregs[i]); 5701 } 5702 5703 __get_user(env->pc, &sc->pc); 5704 } 5705 5706 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, 5707 size_t frame_size) 5708 { 5709 unsigned long sp = env->regs[TILEGX_R_SP]; 5710 5711 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) { 5712 return -1UL; 5713 } 5714 5715 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 5716 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5717 } 5718 5719 sp -= frame_size; 5720 sp &= -16UL; 5721 return sp; 5722 } 5723 5724 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5725 target_siginfo_t *info, 5726 target_sigset_t *set, CPUArchState *env) 5727 { 5728 abi_ulong frame_addr; 5729 struct target_rt_sigframe *frame; 5730 unsigned long restorer; 5731 5732 frame_addr = get_sigframe(ka, 
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Always write at least the signal number for the stack backtracer. */
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        /* At sigreturn time, restore the callee-save registers too. */
        tswap_siginfo(&frame->info, info);
        /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
    } else {
        __put_user(info->si_signo, &frame->info.si_signo);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        restorer = (unsigned long) ka->sa_restorer;
    } else {
        __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
        __put_user(INSN_SWINT1, &frame->retcode[1]);
        restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    }
    env->pc = (unsigned long) ka->_sa_handler;
    env->regs[TILEGX_R_SP] = frame_addr;
    env->regs[TILEGX_R_LR] = restorer;
    env->regs[0] = (unsigned long) sig;
    env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
    env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_rt_sigreturn(CPUTLGState *env)
{
    abi_ulong frame_addr = env->regs[TILEGX_R_SP];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->regs[TILEGX_R_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
}

#else

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}

long do_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

long do_rt_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

#endif

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
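    /* Guest-format copy of the signal mask that was in effect before this
     * handler runs; setup_frame()/setup_rt_frame() store it in the signal
     * frame so that sigreturn can restore it.
     */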
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            force_sig(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
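             * Accordingly, if the guest currently has the signal blocked or
             * ignored, the code below un-blocks it and resets its handler to
             * the default so that the forced signal really is delivered.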
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}