1 /* 2 * Emulation of Linux signals 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/bitops.h" 21 #include <sys/ucontext.h> 22 #include <sys/resource.h> 23 24 #include "qemu.h" 25 #include "qemu-common.h" 26 #include "target_signal.h" 27 #include "trace.h" 28 29 static struct target_sigaltstack target_sigaltstack_used = { 30 .ss_sp = 0, 31 .ss_size = 0, 32 .ss_flags = TARGET_SS_DISABLE, 33 }; 34 35 static struct target_sigaction sigact_table[TARGET_NSIG]; 36 37 static void host_signal_handler(int host_signum, siginfo_t *info, 38 void *puc); 39 40 static uint8_t host_to_target_signal_table[_NSIG] = { 41 [SIGHUP] = TARGET_SIGHUP, 42 [SIGINT] = TARGET_SIGINT, 43 [SIGQUIT] = TARGET_SIGQUIT, 44 [SIGILL] = TARGET_SIGILL, 45 [SIGTRAP] = TARGET_SIGTRAP, 46 [SIGABRT] = TARGET_SIGABRT, 47 /* [SIGIOT] = TARGET_SIGIOT,*/ 48 [SIGBUS] = TARGET_SIGBUS, 49 [SIGFPE] = TARGET_SIGFPE, 50 [SIGKILL] = TARGET_SIGKILL, 51 [SIGUSR1] = TARGET_SIGUSR1, 52 [SIGSEGV] = TARGET_SIGSEGV, 53 [SIGUSR2] = TARGET_SIGUSR2, 54 [SIGPIPE] = TARGET_SIGPIPE, 55 [SIGALRM] = TARGET_SIGALRM, 56 [SIGTERM] = TARGET_SIGTERM, 57 #ifdef SIGSTKFLT 58 [SIGSTKFLT] = TARGET_SIGSTKFLT, 59 #endif 60 [SIGCHLD] = TARGET_SIGCHLD, 61 [SIGCONT] = TARGET_SIGCONT, 62 [SIGSTOP] = TARGET_SIGSTOP, 63 [SIGTSTP] = TARGET_SIGTSTP, 64 [SIGTTIN] = TARGET_SIGTTIN, 65 [SIGTTOU] = TARGET_SIGTTOU, 66 [SIGURG] = TARGET_SIGURG, 67 [SIGXCPU] = TARGET_SIGXCPU, 68 [SIGXFSZ] = TARGET_SIGXFSZ, 69 [SIGVTALRM] = TARGET_SIGVTALRM, 70 [SIGPROF] = TARGET_SIGPROF, 71 [SIGWINCH] = TARGET_SIGWINCH, 72 [SIGIO] = TARGET_SIGIO, 73 [SIGPWR] = TARGET_SIGPWR, 74 [SIGSYS] = TARGET_SIGSYS, 75 /* next signals stay the same */ 76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with 77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/ 78 To fix this properly we need to do manual signal delivery multiplexed 79 over a single host signal. */ 80 [__SIGRTMIN] = __SIGRTMAX, 81 [__SIGRTMAX] = __SIGRTMIN, 82 }; 83 static uint8_t target_to_host_signal_table[_NSIG]; 84 85 static inline int on_sig_stack(unsigned long sp) 86 { 87 return (sp - target_sigaltstack_used.ss_sp 88 < target_sigaltstack_used.ss_size); 89 } 90 91 static inline int sas_ss_flags(unsigned long sp) 92 { 93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE 94 : on_sig_stack(sp) ? SS_ONSTACK : 0); 95 } 96 97 int host_to_target_signal(int sig) 98 { 99 if (sig < 0 || sig >= _NSIG) 100 return sig; 101 return host_to_target_signal_table[sig]; 102 } 103 104 int target_to_host_signal(int sig) 105 { 106 if (sig < 0 || sig >= _NSIG) 107 return sig; 108 return target_to_host_signal_table[sig]; 109 } 110 111 static inline void target_sigemptyset(target_sigset_t *set) 112 { 113 memset(set, 0, sizeof(*set)); 114 } 115 116 static inline void target_sigaddset(target_sigset_t *set, int signum) 117 { 118 signum--; 119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 120 set->sig[signum / TARGET_NSIG_BPW] |= mask; 121 } 122 123 static inline int target_sigismember(const target_sigset_t *set, int signum) 124 { 125 signum--; 126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0); 128 } 129 130 static void host_to_target_sigset_internal(target_sigset_t *d, 131 const sigset_t *s) 132 { 133 int i; 134 target_sigemptyset(d); 135 for (i = 1; i <= TARGET_NSIG; i++) { 136 if (sigismember(s, i)) { 137 target_sigaddset(d, host_to_target_signal(i)); 138 } 139 } 140 } 141 142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) 143 { 144 target_sigset_t d1; 145 int i; 146 147 host_to_target_sigset_internal(&d1, s); 148 for(i = 0;i < TARGET_NSIG_WORDS; i++) 149 d->sig[i] = tswapal(d1.sig[i]); 150 } 151 152 static void target_to_host_sigset_internal(sigset_t *d, 153 const target_sigset_t *s) 154 { 155 int i; 156 sigemptyset(d); 157 for (i = 1; i <= TARGET_NSIG; i++) { 158 if (target_sigismember(s, i)) { 159 sigaddset(d, target_to_host_signal(i)); 160 } 161 } 162 } 163 164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) 165 { 166 target_sigset_t s1; 167 int i; 168 169 for(i = 0;i < TARGET_NSIG_WORDS; i++) 170 s1.sig[i] = tswapal(s->sig[i]); 171 target_to_host_sigset_internal(d, &s1); 172 } 173 174 void host_to_target_old_sigset(abi_ulong *old_sigset, 175 const sigset_t *sigset) 176 { 177 target_sigset_t d; 178 host_to_target_sigset(&d, sigset); 179 *old_sigset = d.sig[0]; 180 } 181 182 void target_to_host_old_sigset(sigset_t *sigset, 183 const abi_ulong *old_sigset) 184 { 185 target_sigset_t d; 186 int i; 187 188 d.sig[0] = *old_sigset; 189 for(i = 1;i < TARGET_NSIG_WORDS; i++) 190 d.sig[i] = 0; 191 target_to_host_sigset(sigset, &d); 192 } 193 194 int block_signals(void) 195 { 196 TaskState *ts = (TaskState *)thread_cpu->opaque; 197 sigset_t set; 198 199 /* It's OK to block everything including SIGSEGV, because we won't 200 * run any further guest code before unblocking signals in 201 * process_pending_signals(). 202 */ 203 sigfillset(&set); 204 sigprocmask(SIG_SETMASK, &set, 0); 205 206 return atomic_xchg(&ts->signal_pending, 1); 207 } 208 209 /* Wrapper for sigprocmask function 210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset 211 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if 212 * a signal was already pending and the syscall must be restarted, or 213 * 0 on success. 214 * If set is NULL, this is guaranteed not to fail. 215 */ 216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) 217 { 218 TaskState *ts = (TaskState *)thread_cpu->opaque; 219 220 if (oldset) { 221 *oldset = ts->signal_mask; 222 } 223 224 if (set) { 225 int i; 226 227 if (block_signals()) { 228 return -TARGET_ERESTARTSYS; 229 } 230 231 switch (how) { 232 case SIG_BLOCK: 233 sigorset(&ts->signal_mask, &ts->signal_mask, set); 234 break; 235 case SIG_UNBLOCK: 236 for (i = 1; i <= NSIG; ++i) { 237 if (sigismember(set, i)) { 238 sigdelset(&ts->signal_mask, i); 239 } 240 } 241 break; 242 case SIG_SETMASK: 243 ts->signal_mask = *set; 244 break; 245 default: 246 g_assert_not_reached(); 247 } 248 249 /* Silently ignore attempts to change blocking status of KILL or STOP */ 250 sigdelset(&ts->signal_mask, SIGKILL); 251 sigdelset(&ts->signal_mask, SIGSTOP); 252 } 253 return 0; 254 } 255 256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \ 257 !defined(TARGET_X86_64) 258 /* Just set the guest's signal mask to the specified value; the 259 * caller is assumed to have called block_signals() already. 260 */ 261 static void set_sigmask(const sigset_t *set) 262 { 263 TaskState *ts = (TaskState *)thread_cpu->opaque; 264 265 ts->signal_mask = *set; 266 } 267 #endif 268 269 /* siginfo conversion */ 270 271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, 272 const siginfo_t *info) 273 { 274 int sig = host_to_target_signal(info->si_signo); 275 int si_code = info->si_code; 276 int si_type; 277 tinfo->si_signo = sig; 278 tinfo->si_errno = 0; 279 tinfo->si_code = info->si_code; 280 281 /* This memset serves two purposes: 282 * (1) ensure we don't leak random junk to the guest later 283 * (2) placate false positives from gcc about fields 284 * being used uninitialized if it chooses to inline both this 285 * function and tswap_siginfo() into host_to_target_siginfo(). 286 */ 287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad)); 288 289 /* This is awkward, because we have to use a combination of 290 * the si_code and si_signo to figure out which of the union's 291 * members are valid. (Within the host kernel it is always possible 292 * to tell, but the kernel carefully avoids giving userspace the 293 * high 16 bits of si_code, so we don't have the information to 294 * do this the easy way...) We therefore make our best guess, 295 * bearing in mind that a guest can spoof most of the si_codes 296 * via rt_sigqueueinfo() if it likes. 297 * 298 * Once we have made our guess, we record it in the top 16 bits of 299 * the si_code, so that tswap_siginfo() later can use it. 300 * tswap_siginfo() will strip these top bits out before writing 301 * si_code to the guest (sign-extending the lower bits). 302 */ 303 304 switch (si_code) { 305 case SI_USER: 306 case SI_TKILL: 307 case SI_KERNEL: 308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel. 309 * These are the only unspoofable si_code values. 310 */ 311 tinfo->_sifields._kill._pid = info->si_pid; 312 tinfo->_sifields._kill._uid = info->si_uid; 313 si_type = QEMU_SI_KILL; 314 break; 315 default: 316 /* Everything else is spoofable. Make best guess based on signal */ 317 switch (sig) { 318 case TARGET_SIGCHLD: 319 tinfo->_sifields._sigchld._pid = info->si_pid; 320 tinfo->_sifields._sigchld._uid = info->si_uid; 321 tinfo->_sifields._sigchld._status 322 = host_to_target_waitstatus(info->si_status); 323 tinfo->_sifields._sigchld._utime = info->si_utime; 324 tinfo->_sifields._sigchld._stime = info->si_stime; 325 si_type = QEMU_SI_CHLD; 326 break; 327 case TARGET_SIGIO: 328 tinfo->_sifields._sigpoll._band = info->si_band; 329 tinfo->_sifields._sigpoll._fd = info->si_fd; 330 si_type = QEMU_SI_POLL; 331 break; 332 default: 333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */ 334 tinfo->_sifields._rt._pid = info->si_pid; 335 tinfo->_sifields._rt._uid = info->si_uid; 336 /* XXX: potential problem if 64 bit */ 337 tinfo->_sifields._rt._sigval.sival_ptr 338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr; 339 si_type = QEMU_SI_RT; 340 break; 341 } 342 break; 343 } 344 345 tinfo->si_code = deposit32(si_code, 16, 16, si_type); 346 } 347 348 static void tswap_siginfo(target_siginfo_t *tinfo, 349 const target_siginfo_t *info) 350 { 351 int si_type = extract32(info->si_code, 16, 16); 352 int si_code = sextract32(info->si_code, 0, 16); 353 354 __put_user(info->si_signo, &tinfo->si_signo); 355 __put_user(info->si_errno, &tinfo->si_errno); 356 __put_user(si_code, &tinfo->si_code); 357 358 /* We can use our internal marker of which fields in the structure 359 * are valid, rather than duplicating the guesswork of 360 * host_to_target_siginfo_noswap() here. 361 */ 362 switch (si_type) { 363 case QEMU_SI_KILL: 364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid); 365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid); 366 break; 367 case QEMU_SI_TIMER: 368 __put_user(info->_sifields._timer._timer1, 369 &tinfo->_sifields._timer._timer1); 370 __put_user(info->_sifields._timer._timer2, 371 &tinfo->_sifields._timer._timer2); 372 break; 373 case QEMU_SI_POLL: 374 __put_user(info->_sifields._sigpoll._band, 375 &tinfo->_sifields._sigpoll._band); 376 __put_user(info->_sifields._sigpoll._fd, 377 &tinfo->_sifields._sigpoll._fd); 378 break; 379 case QEMU_SI_FAULT: 380 __put_user(info->_sifields._sigfault._addr, 381 &tinfo->_sifields._sigfault._addr); 382 break; 383 case QEMU_SI_CHLD: 384 __put_user(info->_sifields._sigchld._pid, 385 &tinfo->_sifields._sigchld._pid); 386 __put_user(info->_sifields._sigchld._uid, 387 &tinfo->_sifields._sigchld._uid); 388 __put_user(info->_sifields._sigchld._status, 389 &tinfo->_sifields._sigchld._status); 390 __put_user(info->_sifields._sigchld._utime, 391 &tinfo->_sifields._sigchld._utime); 392 __put_user(info->_sifields._sigchld._stime, 393 &tinfo->_sifields._sigchld._stime); 394 break; 395 case QEMU_SI_RT: 396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid); 397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid); 398 __put_user(info->_sifields._rt._sigval.sival_ptr, 399 &tinfo->_sifields._rt._sigval.sival_ptr); 400 break; 401 default: 402 g_assert_not_reached(); 403 } 404 } 405 406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) 407 { 408 target_siginfo_t tgt_tmp; 409 host_to_target_siginfo_noswap(&tgt_tmp, info); 410 tswap_siginfo(tinfo, &tgt_tmp); 411 } 412 413 /* XXX: we support only POSIX RT signals are used. */ 414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */ 415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo) 416 { 417 /* This conversion is used only for the rt_sigqueueinfo syscall, 418 * and so we know that the _rt fields are the valid ones. 419 */ 420 abi_ulong sival_ptr; 421 422 __get_user(info->si_signo, &tinfo->si_signo); 423 __get_user(info->si_errno, &tinfo->si_errno); 424 __get_user(info->si_code, &tinfo->si_code); 425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid); 426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid); 427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr); 428 info->si_value.sival_ptr = (void *)(long)sival_ptr; 429 } 430 431 static int fatal_signal (int sig) 432 { 433 switch (sig) { 434 case TARGET_SIGCHLD: 435 case TARGET_SIGURG: 436 case TARGET_SIGWINCH: 437 /* Ignored by default. */ 438 return 0; 439 case TARGET_SIGCONT: 440 case TARGET_SIGSTOP: 441 case TARGET_SIGTSTP: 442 case TARGET_SIGTTIN: 443 case TARGET_SIGTTOU: 444 /* Job control signals. */ 445 return 0; 446 default: 447 return 1; 448 } 449 } 450 451 /* returns 1 if given signal should dump core if not handled */ 452 static int core_dump_signal(int sig) 453 { 454 switch (sig) { 455 case TARGET_SIGABRT: 456 case TARGET_SIGFPE: 457 case TARGET_SIGILL: 458 case TARGET_SIGQUIT: 459 case TARGET_SIGSEGV: 460 case TARGET_SIGTRAP: 461 case TARGET_SIGBUS: 462 return (1); 463 default: 464 return (0); 465 } 466 } 467 468 void signal_init(void) 469 { 470 TaskState *ts = (TaskState *)thread_cpu->opaque; 471 struct sigaction act; 472 struct sigaction oact; 473 int i, j; 474 int host_sig; 475 476 /* generate signal conversion tables */ 477 for(i = 1; i < _NSIG; i++) { 478 if (host_to_target_signal_table[i] == 0) 479 host_to_target_signal_table[i] = i; 480 } 481 for(i = 1; i < _NSIG; i++) { 482 j = host_to_target_signal_table[i]; 483 target_to_host_signal_table[j] = i; 484 } 485 486 /* Set the signal mask from the host mask. */ 487 sigprocmask(0, 0, &ts->signal_mask); 488 489 /* set all host signal handlers. ALL signals are blocked during 490 the handlers to serialize them. */ 491 memset(sigact_table, 0, sizeof(sigact_table)); 492 493 sigfillset(&act.sa_mask); 494 act.sa_flags = SA_SIGINFO; 495 act.sa_sigaction = host_signal_handler; 496 for(i = 1; i <= TARGET_NSIG; i++) { 497 host_sig = target_to_host_signal(i); 498 sigaction(host_sig, NULL, &oact); 499 if (oact.sa_sigaction == (void *)SIG_IGN) { 500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; 501 } else if (oact.sa_sigaction == (void *)SIG_DFL) { 502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL; 503 } 504 /* If there's already a handler installed then something has 505 gone horribly wrong, so don't even try to handle that case. */ 506 /* Install some handlers for our own use. We need at least 507 SIGSEGV and SIGBUS, to detect exceptions. We can not just 508 trap all signals because it affects syscall interrupt 509 behavior. But do trap all default-fatal signals. */ 510 if (fatal_signal (i)) 511 sigaction(host_sig, &act, NULL); 512 } 513 } 514 515 #if !(defined(TARGET_X86_64) || defined(TARGET_UNICORE32)) 516 517 /* Force a SIGSEGV if we couldn't write to memory trying to set 518 * up the signal frame. oldsig is the signal we were trying to handle 519 * at the point of failure. 520 */ 521 static void force_sigsegv(int oldsig) 522 { 523 CPUState *cpu = thread_cpu; 524 CPUArchState *env = cpu->env_ptr; 525 target_siginfo_t info; 526 527 if (oldsig == SIGSEGV) { 528 /* Make sure we don't try to deliver the signal again; this will 529 * end up with handle_pending_signal() calling force_sig(). 530 */ 531 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL; 532 } 533 info.si_signo = TARGET_SIGSEGV; 534 info.si_errno = 0; 535 info.si_code = TARGET_SI_KERNEL; 536 info._sifields._kill._pid = 0; 537 info._sifields._kill._uid = 0; 538 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); 539 } 540 #endif 541 542 /* abort execution with signal */ 543 static void QEMU_NORETURN force_sig(int target_sig) 544 { 545 CPUState *cpu = thread_cpu; 546 CPUArchState *env = cpu->env_ptr; 547 TaskState *ts = (TaskState *)cpu->opaque; 548 int host_sig, core_dumped = 0; 549 struct sigaction act; 550 551 host_sig = target_to_host_signal(target_sig); 552 trace_user_force_sig(env, target_sig, host_sig); 553 gdb_signalled(env, target_sig); 554 555 /* dump core if supported by target binary format */ 556 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) { 557 stop_all_tasks(); 558 core_dumped = 559 ((*ts->bprm->core_dump)(target_sig, env) == 0); 560 } 561 if (core_dumped) { 562 /* we already dumped the core of target process, we don't want 563 * a coredump of qemu itself */ 564 struct rlimit nodump; 565 getrlimit(RLIMIT_CORE, &nodump); 566 nodump.rlim_cur=0; 567 setrlimit(RLIMIT_CORE, &nodump); 568 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n", 569 target_sig, strsignal(host_sig), "core dumped" ); 570 } 571 572 /* The proper exit code for dying from an uncaught signal is 573 * -<signal>. The kernel doesn't allow exit() or _exit() to pass 574 * a negative value. To get the proper exit code we need to 575 * actually die from an uncaught signal. Here the default signal 576 * handler is installed, we send ourself a signal and we wait for 577 * it to arrive. */ 578 sigfillset(&act.sa_mask); 579 act.sa_handler = SIG_DFL; 580 act.sa_flags = 0; 581 sigaction(host_sig, &act, NULL); 582 583 /* For some reason raise(host_sig) doesn't send the signal when 584 * statically linked on x86-64. */ 585 kill(getpid(), host_sig); 586 587 /* Make sure the signal isn't masked (just reuse the mask inside 588 of act) */ 589 sigdelset(&act.sa_mask, host_sig); 590 sigsuspend(&act.sa_mask); 591 592 /* unreachable */ 593 abort(); 594 } 595 596 /* queue a signal so that it will be send to the virtual CPU as soon 597 as possible */ 598 int queue_signal(CPUArchState *env, int sig, int si_type, 599 target_siginfo_t *info) 600 { 601 CPUState *cpu = ENV_GET_CPU(env); 602 TaskState *ts = cpu->opaque; 603 604 trace_user_queue_signal(env, sig); 605 606 info->si_code = deposit32(info->si_code, 16, 16, si_type); 607 608 ts->sync_signal.info = *info; 609 ts->sync_signal.pending = sig; 610 /* signal that a new signal is pending */ 611 atomic_set(&ts->signal_pending, 1); 612 return 1; /* indicates that the signal was queued */ 613 } 614 615 #ifndef HAVE_SAFE_SYSCALL 616 static inline void rewind_if_in_safe_syscall(void *puc) 617 { 618 /* Default version: never rewind */ 619 } 620 #endif 621 622 static void host_signal_handler(int host_signum, siginfo_t *info, 623 void *puc) 624 { 625 CPUArchState *env = thread_cpu->env_ptr; 626 CPUState *cpu = ENV_GET_CPU(env); 627 TaskState *ts = cpu->opaque; 628 629 int sig; 630 target_siginfo_t tinfo; 631 ucontext_t *uc = puc; 632 struct emulated_sigtable *k; 633 634 /* the CPU emulator uses some host signals to detect exceptions, 635 we forward to it some signals */ 636 if ((host_signum == SIGSEGV || host_signum == SIGBUS) 637 && info->si_code > 0) { 638 if (cpu_signal_handler(host_signum, info, puc)) 639 return; 640 } 641 642 /* get target signal number */ 643 sig = host_to_target_signal(host_signum); 644 if (sig < 1 || sig > TARGET_NSIG) 645 return; 646 trace_user_host_signal(env, host_signum, sig); 647 648 rewind_if_in_safe_syscall(puc); 649 650 host_to_target_siginfo_noswap(&tinfo, info); 651 k = &ts->sigtab[sig - 1]; 652 k->info = tinfo; 653 k->pending = sig; 654 ts->signal_pending = 1; 655 656 /* Block host signals until target signal handler entered. We 657 * can't block SIGSEGV or SIGBUS while we're executing guest 658 * code in case the guest code provokes one in the window between 659 * now and it getting out to the main loop. Signals will be 660 * unblocked again in process_pending_signals(). 661 * 662 * WARNING: we cannot use sigfillset() here because the uc_sigmask 663 * field is a kernel sigset_t, which is much smaller than the 664 * libc sigset_t which sigfillset() operates on. Using sigfillset() 665 * would write 0xff bytes off the end of the structure and trash 666 * data on the struct. 667 * We can't use sizeof(uc->uc_sigmask) either, because the libc 668 * headers define the struct field with the wrong (too large) type. 669 */ 670 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE); 671 sigdelset(&uc->uc_sigmask, SIGSEGV); 672 sigdelset(&uc->uc_sigmask, SIGBUS); 673 674 /* interrupt the virtual CPU as soon as possible */ 675 cpu_exit(thread_cpu); 676 } 677 678 /* do_sigaltstack() returns target values and errnos. */ 679 /* compare linux/kernel/signal.c:do_sigaltstack() */ 680 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) 681 { 682 int ret; 683 struct target_sigaltstack oss; 684 685 /* XXX: test errors */ 686 if(uoss_addr) 687 { 688 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp); 689 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size); 690 __put_user(sas_ss_flags(sp), &oss.ss_flags); 691 } 692 693 if(uss_addr) 694 { 695 struct target_sigaltstack *uss; 696 struct target_sigaltstack ss; 697 size_t minstacksize = TARGET_MINSIGSTKSZ; 698 699 #if defined(TARGET_PPC64) 700 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */ 701 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 702 if (get_ppc64_abi(image) > 1) { 703 minstacksize = 4096; 704 } 705 #endif 706 707 ret = -TARGET_EFAULT; 708 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) { 709 goto out; 710 } 711 __get_user(ss.ss_sp, &uss->ss_sp); 712 __get_user(ss.ss_size, &uss->ss_size); 713 __get_user(ss.ss_flags, &uss->ss_flags); 714 unlock_user_struct(uss, uss_addr, 0); 715 716 ret = -TARGET_EPERM; 717 if (on_sig_stack(sp)) 718 goto out; 719 720 ret = -TARGET_EINVAL; 721 if (ss.ss_flags != TARGET_SS_DISABLE 722 && ss.ss_flags != TARGET_SS_ONSTACK 723 && ss.ss_flags != 0) 724 goto out; 725 726 if (ss.ss_flags == TARGET_SS_DISABLE) { 727 ss.ss_size = 0; 728 ss.ss_sp = 0; 729 } else { 730 ret = -TARGET_ENOMEM; 731 if (ss.ss_size < minstacksize) { 732 goto out; 733 } 734 } 735 736 target_sigaltstack_used.ss_sp = ss.ss_sp; 737 target_sigaltstack_used.ss_size = ss.ss_size; 738 } 739 740 if (uoss_addr) { 741 ret = -TARGET_EFAULT; 742 if (copy_to_user(uoss_addr, &oss, sizeof(oss))) 743 goto out; 744 } 745 746 ret = 0; 747 out: 748 return ret; 749 } 750 751 /* do_sigaction() return target values and host errnos */ 752 int do_sigaction(int sig, const struct target_sigaction *act, 753 struct target_sigaction *oact) 754 { 755 struct target_sigaction *k; 756 struct sigaction act1; 757 int host_sig; 758 int ret = 0; 759 760 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) { 761 return -TARGET_EINVAL; 762 } 763 764 if (block_signals()) { 765 return -TARGET_ERESTARTSYS; 766 } 767 768 k = &sigact_table[sig - 1]; 769 if (oact) { 770 __put_user(k->_sa_handler, &oact->_sa_handler); 771 __put_user(k->sa_flags, &oact->sa_flags); 772 #if !defined(TARGET_MIPS) 773 __put_user(k->sa_restorer, &oact->sa_restorer); 774 #endif 775 /* Not swapped. */ 776 oact->sa_mask = k->sa_mask; 777 } 778 if (act) { 779 /* FIXME: This is not threadsafe. */ 780 __get_user(k->_sa_handler, &act->_sa_handler); 781 __get_user(k->sa_flags, &act->sa_flags); 782 #if !defined(TARGET_MIPS) 783 __get_user(k->sa_restorer, &act->sa_restorer); 784 #endif 785 /* To be swapped in target_to_host_sigset. */ 786 k->sa_mask = act->sa_mask; 787 788 /* we update the host linux signal state */ 789 host_sig = target_to_host_signal(sig); 790 if (host_sig != SIGSEGV && host_sig != SIGBUS) { 791 sigfillset(&act1.sa_mask); 792 act1.sa_flags = SA_SIGINFO; 793 if (k->sa_flags & TARGET_SA_RESTART) 794 act1.sa_flags |= SA_RESTART; 795 /* NOTE: it is important to update the host kernel signal 796 ignore state to avoid getting unexpected interrupted 797 syscalls */ 798 if (k->_sa_handler == TARGET_SIG_IGN) { 799 act1.sa_sigaction = (void *)SIG_IGN; 800 } else if (k->_sa_handler == TARGET_SIG_DFL) { 801 if (fatal_signal (sig)) 802 act1.sa_sigaction = host_signal_handler; 803 else 804 act1.sa_sigaction = (void *)SIG_DFL; 805 } else { 806 act1.sa_sigaction = host_signal_handler; 807 } 808 ret = sigaction(host_sig, &act1, NULL); 809 } 810 } 811 return ret; 812 } 813 814 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32 815 816 /* from the Linux kernel */ 817 818 struct target_fpreg { 819 uint16_t significand[4]; 820 uint16_t exponent; 821 }; 822 823 struct target_fpxreg { 824 uint16_t significand[4]; 825 uint16_t exponent; 826 uint16_t padding[3]; 827 }; 828 829 struct target_xmmreg { 830 abi_ulong element[4]; 831 }; 832 833 struct target_fpstate { 834 /* Regular FPU environment */ 835 abi_ulong cw; 836 abi_ulong sw; 837 abi_ulong tag; 838 abi_ulong ipoff; 839 abi_ulong cssel; 840 abi_ulong dataoff; 841 abi_ulong datasel; 842 struct target_fpreg _st[8]; 843 uint16_t status; 844 uint16_t magic; /* 0xffff = regular FPU data only */ 845 846 /* FXSR FPU environment */ 847 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */ 848 abi_ulong mxcsr; 849 abi_ulong reserved; 850 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 851 struct target_xmmreg _xmm[8]; 852 abi_ulong padding[56]; 853 }; 854 855 #define X86_FXSR_MAGIC 0x0000 856 857 struct target_sigcontext { 858 uint16_t gs, __gsh; 859 uint16_t fs, __fsh; 860 uint16_t es, __esh; 861 uint16_t ds, __dsh; 862 abi_ulong edi; 863 abi_ulong esi; 864 abi_ulong ebp; 865 abi_ulong esp; 866 abi_ulong ebx; 867 abi_ulong edx; 868 abi_ulong ecx; 869 abi_ulong eax; 870 abi_ulong trapno; 871 abi_ulong err; 872 abi_ulong eip; 873 uint16_t cs, __csh; 874 abi_ulong eflags; 875 abi_ulong esp_at_signal; 876 uint16_t ss, __ssh; 877 abi_ulong fpstate; /* pointer */ 878 abi_ulong oldmask; 879 abi_ulong cr2; 880 }; 881 882 struct target_ucontext { 883 abi_ulong tuc_flags; 884 abi_ulong tuc_link; 885 target_stack_t tuc_stack; 886 struct target_sigcontext tuc_mcontext; 887 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 888 }; 889 890 struct sigframe 891 { 892 abi_ulong pretcode; 893 int sig; 894 struct target_sigcontext sc; 895 struct target_fpstate fpstate; 896 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 897 char retcode[8]; 898 }; 899 900 struct rt_sigframe 901 { 902 abi_ulong pretcode; 903 int sig; 904 abi_ulong pinfo; 905 abi_ulong puc; 906 struct target_siginfo info; 907 struct target_ucontext uc; 908 struct target_fpstate fpstate; 909 char retcode[8]; 910 }; 911 912 /* 913 * Set up a signal frame. 914 */ 915 916 /* XXX: save x87 state */ 917 static void setup_sigcontext(struct target_sigcontext *sc, 918 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, 919 abi_ulong fpstate_addr) 920 { 921 CPUState *cs = CPU(x86_env_get_cpu(env)); 922 uint16_t magic; 923 924 /* already locked in setup_frame() */ 925 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 926 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 927 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 928 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 929 __put_user(env->regs[R_EDI], &sc->edi); 930 __put_user(env->regs[R_ESI], &sc->esi); 931 __put_user(env->regs[R_EBP], &sc->ebp); 932 __put_user(env->regs[R_ESP], &sc->esp); 933 __put_user(env->regs[R_EBX], &sc->ebx); 934 __put_user(env->regs[R_EDX], &sc->edx); 935 __put_user(env->regs[R_ECX], &sc->ecx); 936 __put_user(env->regs[R_EAX], &sc->eax); 937 __put_user(cs->exception_index, &sc->trapno); 938 __put_user(env->error_code, &sc->err); 939 __put_user(env->eip, &sc->eip); 940 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 941 __put_user(env->eflags, &sc->eflags); 942 __put_user(env->regs[R_ESP], &sc->esp_at_signal); 943 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 944 945 cpu_x86_fsave(env, fpstate_addr, 1); 946 fpstate->status = fpstate->sw; 947 magic = 0xffff; 948 __put_user(magic, &fpstate->magic); 949 __put_user(fpstate_addr, &sc->fpstate); 950 951 /* non-iBCS2 extensions.. */ 952 __put_user(mask, &sc->oldmask); 953 __put_user(env->cr[2], &sc->cr2); 954 } 955 956 /* 957 * Determine which stack to use.. 958 */ 959 960 static inline abi_ulong 961 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) 962 { 963 unsigned long esp; 964 965 /* Default to using normal stack */ 966 esp = env->regs[R_ESP]; 967 /* This is the X/Open sanctioned signal stack switching. */ 968 if (ka->sa_flags & TARGET_SA_ONSTACK) { 969 if (sas_ss_flags(esp) == 0) { 970 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 971 } 972 } else { 973 974 /* This is the legacy signal stack switching. */ 975 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 976 !(ka->sa_flags & TARGET_SA_RESTORER) && 977 ka->sa_restorer) { 978 esp = (unsigned long) ka->sa_restorer; 979 } 980 } 981 return (esp - frame_size) & -8ul; 982 } 983 984 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 985 static void setup_frame(int sig, struct target_sigaction *ka, 986 target_sigset_t *set, CPUX86State *env) 987 { 988 abi_ulong frame_addr; 989 struct sigframe *frame; 990 int i; 991 992 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 993 trace_user_setup_frame(env, frame_addr); 994 995 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 996 goto give_sigsegv; 997 998 __put_user(sig, &frame->sig); 999 1000 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 1001 frame_addr + offsetof(struct sigframe, fpstate)); 1002 1003 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1004 __put_user(set->sig[i], &frame->extramask[i - 1]); 1005 } 1006 1007 /* Set up to return from userspace. If provided, use a stub 1008 already in userspace. */ 1009 if (ka->sa_flags & TARGET_SA_RESTORER) { 1010 __put_user(ka->sa_restorer, &frame->pretcode); 1011 } else { 1012 uint16_t val16; 1013 abi_ulong retcode_addr; 1014 retcode_addr = frame_addr + offsetof(struct sigframe, retcode); 1015 __put_user(retcode_addr, &frame->pretcode); 1016 /* This is popl %eax ; movl $,%eax ; int $0x80 */ 1017 val16 = 0xb858; 1018 __put_user(val16, (uint16_t *)(frame->retcode+0)); 1019 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); 1020 val16 = 0x80cd; 1021 __put_user(val16, (uint16_t *)(frame->retcode+6)); 1022 } 1023 1024 1025 /* Set up registers for signal handler */ 1026 env->regs[R_ESP] = frame_addr; 1027 env->eip = ka->_sa_handler; 1028 1029 cpu_x86_load_seg(env, R_DS, __USER_DS); 1030 cpu_x86_load_seg(env, R_ES, __USER_DS); 1031 cpu_x86_load_seg(env, R_SS, __USER_DS); 1032 cpu_x86_load_seg(env, R_CS, __USER_CS); 1033 env->eflags &= ~TF_MASK; 1034 1035 unlock_user_struct(frame, frame_addr, 1); 1036 1037 return; 1038 1039 give_sigsegv: 1040 force_sigsegv(sig); 1041 } 1042 1043 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */ 1044 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1045 target_siginfo_t *info, 1046 target_sigset_t *set, CPUX86State *env) 1047 { 1048 abi_ulong frame_addr, addr; 1049 struct rt_sigframe *frame; 1050 int i; 1051 1052 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1053 trace_user_setup_rt_frame(env, frame_addr); 1054 1055 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1056 goto give_sigsegv; 1057 1058 __put_user(sig, &frame->sig); 1059 addr = frame_addr + offsetof(struct rt_sigframe, info); 1060 __put_user(addr, &frame->pinfo); 1061 addr = frame_addr + offsetof(struct rt_sigframe, uc); 1062 __put_user(addr, &frame->puc); 1063 tswap_siginfo(&frame->info, info); 1064 1065 /* Create the ucontext. */ 1066 __put_user(0, &frame->uc.tuc_flags); 1067 __put_user(0, &frame->uc.tuc_link); 1068 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 1069 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 1070 &frame->uc.tuc_stack.ss_flags); 1071 __put_user(target_sigaltstack_used.ss_size, 1072 &frame->uc.tuc_stack.ss_size); 1073 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env, 1074 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate)); 1075 1076 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1077 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1078 } 1079 1080 /* Set up to return from userspace. If provided, use a stub 1081 already in userspace. */ 1082 if (ka->sa_flags & TARGET_SA_RESTORER) { 1083 __put_user(ka->sa_restorer, &frame->pretcode); 1084 } else { 1085 uint16_t val16; 1086 addr = frame_addr + offsetof(struct rt_sigframe, retcode); 1087 __put_user(addr, &frame->pretcode); 1088 /* This is movl $,%eax ; int $0x80 */ 1089 __put_user(0xb8, (char *)(frame->retcode+0)); 1090 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); 1091 val16 = 0x80cd; 1092 __put_user(val16, (uint16_t *)(frame->retcode+5)); 1093 } 1094 1095 /* Set up registers for signal handler */ 1096 env->regs[R_ESP] = frame_addr; 1097 env->eip = ka->_sa_handler; 1098 1099 cpu_x86_load_seg(env, R_DS, __USER_DS); 1100 cpu_x86_load_seg(env, R_ES, __USER_DS); 1101 cpu_x86_load_seg(env, R_SS, __USER_DS); 1102 cpu_x86_load_seg(env, R_CS, __USER_CS); 1103 env->eflags &= ~TF_MASK; 1104 1105 unlock_user_struct(frame, frame_addr, 1); 1106 1107 return; 1108 1109 give_sigsegv: 1110 force_sigsegv(sig); 1111 } 1112 1113 static int 1114 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) 1115 { 1116 unsigned int err = 0; 1117 abi_ulong fpstate_addr; 1118 unsigned int tmpflags; 1119 1120 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); 1121 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); 1122 cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); 1123 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds)); 1124 1125 env->regs[R_EDI] = tswapl(sc->edi); 1126 env->regs[R_ESI] = tswapl(sc->esi); 1127 env->regs[R_EBP] = tswapl(sc->ebp); 1128 env->regs[R_ESP] = tswapl(sc->esp); 1129 env->regs[R_EBX] = tswapl(sc->ebx); 1130 env->regs[R_EDX] = tswapl(sc->edx); 1131 env->regs[R_ECX] = tswapl(sc->ecx); 1132 env->regs[R_EAX] = tswapl(sc->eax); 1133 env->eip = tswapl(sc->eip); 1134 1135 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); 1136 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); 1137 1138 tmpflags = tswapl(sc->eflags); 1139 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 1140 // regs->orig_eax = -1; /* disable syscall checks */ 1141 1142 fpstate_addr = tswapl(sc->fpstate); 1143 if (fpstate_addr != 0) { 1144 if (!access_ok(VERIFY_READ, fpstate_addr, 1145 sizeof(struct target_fpstate))) 1146 goto badframe; 1147 cpu_x86_frstor(env, fpstate_addr, 1); 1148 } 1149 1150 return err; 1151 badframe: 1152 return 1; 1153 } 1154 1155 long do_sigreturn(CPUX86State *env) 1156 { 1157 struct sigframe *frame; 1158 abi_ulong frame_addr = env->regs[R_ESP] - 8; 1159 target_sigset_t target_set; 1160 sigset_t set; 1161 int i; 1162 1163 trace_user_do_sigreturn(env, frame_addr); 1164 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1165 goto badframe; 1166 /* set blocked signals */ 1167 __get_user(target_set.sig[0], &frame->sc.oldmask); 1168 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1169 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 1170 } 1171 1172 target_to_host_sigset_internal(&set, &target_set); 1173 set_sigmask(&set); 1174 1175 /* restore registers */ 1176 if (restore_sigcontext(env, &frame->sc)) 1177 goto badframe; 1178 unlock_user_struct(frame, frame_addr, 0); 1179 return -TARGET_QEMU_ESIGRETURN; 1180 1181 badframe: 1182 unlock_user_struct(frame, frame_addr, 0); 1183 force_sig(TARGET_SIGSEGV); 1184 return 0; 1185 } 1186 1187 long do_rt_sigreturn(CPUX86State *env) 1188 { 1189 abi_ulong frame_addr; 1190 struct rt_sigframe *frame; 1191 sigset_t set; 1192 1193 frame_addr = env->regs[R_ESP] - 4; 1194 trace_user_do_rt_sigreturn(env, frame_addr); 1195 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1196 goto badframe; 1197 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 1198 set_sigmask(&set); 1199 1200 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 1201 goto badframe; 1202 } 1203 1204 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, 1205 get_sp_from_cpustate(env)) == -EFAULT) { 1206 goto badframe; 1207 } 1208 1209 unlock_user_struct(frame, frame_addr, 0); 1210 return -TARGET_QEMU_ESIGRETURN; 1211 1212 badframe: 1213 unlock_user_struct(frame, frame_addr, 0); 1214 force_sig(TARGET_SIGSEGV); 1215 return 0; 1216 } 1217 1218 #elif defined(TARGET_AARCH64) 1219 1220 struct target_sigcontext { 1221 uint64_t fault_address; 1222 /* AArch64 registers */ 1223 uint64_t regs[31]; 1224 uint64_t sp; 1225 uint64_t pc; 1226 uint64_t pstate; 1227 /* 4K reserved for FP/SIMD state and future expansion */ 1228 char __reserved[4096] __attribute__((__aligned__(16))); 1229 }; 1230 1231 struct target_ucontext { 1232 abi_ulong tuc_flags; 1233 abi_ulong tuc_link; 1234 target_stack_t tuc_stack; 1235 target_sigset_t tuc_sigmask; 1236 /* glibc uses a 1024-bit sigset_t */ 1237 char __unused[1024 / 8 - sizeof(target_sigset_t)]; 1238 /* last for future expansion */ 1239 struct target_sigcontext tuc_mcontext; 1240 }; 1241 1242 /* 1243 * Header to be used at the beginning of structures extending the user 1244 * context. Such structures must be placed after the rt_sigframe on the stack 1245 * and be 16-byte aligned. The last structure must be a dummy one with the 1246 * magic and size set to 0. 1247 */ 1248 struct target_aarch64_ctx { 1249 uint32_t magic; 1250 uint32_t size; 1251 }; 1252 1253 #define TARGET_FPSIMD_MAGIC 0x46508001 1254 1255 struct target_fpsimd_context { 1256 struct target_aarch64_ctx head; 1257 uint32_t fpsr; 1258 uint32_t fpcr; 1259 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */ 1260 }; 1261 1262 /* 1263 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to 1264 * user space as it will change with the addition of new context. User space 1265 * should check the magic/size information. 1266 */ 1267 struct target_aux_context { 1268 struct target_fpsimd_context fpsimd; 1269 /* additional context to be added before "end" */ 1270 struct target_aarch64_ctx end; 1271 }; 1272 1273 struct target_rt_sigframe { 1274 struct target_siginfo info; 1275 struct target_ucontext uc; 1276 uint64_t fp; 1277 uint64_t lr; 1278 uint32_t tramp[2]; 1279 }; 1280 1281 static int target_setup_sigframe(struct target_rt_sigframe *sf, 1282 CPUARMState *env, target_sigset_t *set) 1283 { 1284 int i; 1285 struct target_aux_context *aux = 1286 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1287 1288 /* set up the stack frame for unwinding */ 1289 __put_user(env->xregs[29], &sf->fp); 1290 __put_user(env->xregs[30], &sf->lr); 1291 1292 for (i = 0; i < 31; i++) { 1293 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1294 } 1295 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1296 __put_user(env->pc, &sf->uc.tuc_mcontext.pc); 1297 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate); 1298 1299 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address); 1300 1301 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 1302 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]); 1303 } 1304 1305 for (i = 0; i < 32; i++) { 1306 #ifdef TARGET_WORDS_BIGENDIAN 1307 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1308 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1309 #else 1310 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1311 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1312 #endif 1313 } 1314 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr); 1315 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr); 1316 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic); 1317 __put_user(sizeof(struct target_fpsimd_context), 1318 &aux->fpsimd.head.size); 1319 1320 /* set the "end" magic */ 1321 __put_user(0, &aux->end.magic); 1322 __put_user(0, &aux->end.size); 1323 1324 return 0; 1325 } 1326 1327 static int target_restore_sigframe(CPUARMState *env, 1328 struct target_rt_sigframe *sf) 1329 { 1330 sigset_t set; 1331 int i; 1332 struct target_aux_context *aux = 1333 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1334 uint32_t magic, size, fpsr, fpcr; 1335 uint64_t pstate; 1336 1337 target_to_host_sigset(&set, &sf->uc.tuc_sigmask); 1338 set_sigmask(&set); 1339 1340 for (i = 0; i < 31; i++) { 1341 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1342 } 1343 1344 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1345 __get_user(env->pc, &sf->uc.tuc_mcontext.pc); 1346 __get_user(pstate, &sf->uc.tuc_mcontext.pstate); 1347 pstate_write(env, pstate); 1348 1349 __get_user(magic, &aux->fpsimd.head.magic); 1350 __get_user(size, &aux->fpsimd.head.size); 1351 1352 if (magic != TARGET_FPSIMD_MAGIC 1353 || size != sizeof(struct target_fpsimd_context)) { 1354 return 1; 1355 } 1356 1357 for (i = 0; i < 32; i++) { 1358 #ifdef TARGET_WORDS_BIGENDIAN 1359 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1360 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1361 #else 1362 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1363 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1364 #endif 1365 } 1366 __get_user(fpsr, &aux->fpsimd.fpsr); 1367 vfp_set_fpsr(env, fpsr); 1368 __get_user(fpcr, &aux->fpsimd.fpcr); 1369 vfp_set_fpcr(env, fpcr); 1370 1371 return 0; 1372 } 1373 1374 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env) 1375 { 1376 abi_ulong sp; 1377 1378 sp = env->xregs[31]; 1379 1380 /* 1381 * This is the X/Open sanctioned signal stack switching. 1382 */ 1383 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1384 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1385 } 1386 1387 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15; 1388 1389 return sp; 1390 } 1391 1392 static void target_setup_frame(int usig, struct target_sigaction *ka, 1393 target_siginfo_t *info, target_sigset_t *set, 1394 CPUARMState *env) 1395 { 1396 struct target_rt_sigframe *frame; 1397 abi_ulong frame_addr, return_addr; 1398 1399 frame_addr = get_sigframe(ka, env); 1400 trace_user_setup_frame(env, frame_addr); 1401 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1402 goto give_sigsegv; 1403 } 1404 1405 __put_user(0, &frame->uc.tuc_flags); 1406 __put_user(0, &frame->uc.tuc_link); 1407 1408 __put_user(target_sigaltstack_used.ss_sp, 1409 &frame->uc.tuc_stack.ss_sp); 1410 __put_user(sas_ss_flags(env->xregs[31]), 1411 &frame->uc.tuc_stack.ss_flags); 1412 __put_user(target_sigaltstack_used.ss_size, 1413 &frame->uc.tuc_stack.ss_size); 1414 target_setup_sigframe(frame, env, set); 1415 if (ka->sa_flags & TARGET_SA_RESTORER) { 1416 return_addr = ka->sa_restorer; 1417 } else { 1418 /* mov x8,#__NR_rt_sigreturn; svc #0 */ 1419 __put_user(0xd2801168, &frame->tramp[0]); 1420 __put_user(0xd4000001, &frame->tramp[1]); 1421 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp); 1422 } 1423 env->xregs[0] = usig; 1424 env->xregs[31] = frame_addr; 1425 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp); 1426 env->pc = ka->_sa_handler; 1427 env->xregs[30] = return_addr; 1428 if (info) { 1429 tswap_siginfo(&frame->info, info); 1430 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); 1431 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 1432 } 1433 1434 unlock_user_struct(frame, frame_addr, 1); 1435 return; 1436 1437 give_sigsegv: 1438 unlock_user_struct(frame, frame_addr, 1); 1439 force_sigsegv(usig); 1440 } 1441 1442 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1443 target_siginfo_t *info, target_sigset_t *set, 1444 CPUARMState *env) 1445 { 1446 target_setup_frame(sig, ka, info, set, env); 1447 } 1448 1449 static void setup_frame(int sig, struct target_sigaction *ka, 1450 target_sigset_t *set, CPUARMState *env) 1451 { 1452 target_setup_frame(sig, ka, 0, set, env); 1453 } 1454 1455 long do_rt_sigreturn(CPUARMState *env) 1456 { 1457 struct target_rt_sigframe *frame = NULL; 1458 abi_ulong frame_addr = env->xregs[31]; 1459 1460 trace_user_do_rt_sigreturn(env, frame_addr); 1461 if (frame_addr & 15) { 1462 goto badframe; 1463 } 1464 1465 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1466 goto badframe; 1467 } 1468 1469 if (target_restore_sigframe(env, frame)) { 1470 goto badframe; 1471 } 1472 1473 if (do_sigaltstack(frame_addr + 1474 offsetof(struct target_rt_sigframe, uc.tuc_stack), 1475 0, get_sp_from_cpustate(env)) == -EFAULT) { 1476 goto badframe; 1477 } 1478 1479 unlock_user_struct(frame, frame_addr, 0); 1480 return -TARGET_QEMU_ESIGRETURN; 1481 1482 badframe: 1483 unlock_user_struct(frame, frame_addr, 0); 1484 force_sig(TARGET_SIGSEGV); 1485 return 0; 1486 } 1487 1488 long do_sigreturn(CPUARMState *env) 1489 { 1490 return do_rt_sigreturn(env); 1491 } 1492 1493 #elif defined(TARGET_ARM) 1494 1495 struct target_sigcontext { 1496 abi_ulong trap_no; 1497 abi_ulong error_code; 1498 abi_ulong oldmask; 1499 abi_ulong arm_r0; 1500 abi_ulong arm_r1; 1501 abi_ulong arm_r2; 1502 abi_ulong arm_r3; 1503 abi_ulong arm_r4; 1504 abi_ulong arm_r5; 1505 abi_ulong arm_r6; 1506 abi_ulong arm_r7; 1507 abi_ulong arm_r8; 1508 abi_ulong arm_r9; 1509 abi_ulong arm_r10; 1510 abi_ulong arm_fp; 1511 abi_ulong arm_ip; 1512 abi_ulong arm_sp; 1513 abi_ulong arm_lr; 1514 abi_ulong arm_pc; 1515 abi_ulong arm_cpsr; 1516 abi_ulong fault_address; 1517 }; 1518 1519 struct target_ucontext_v1 { 1520 abi_ulong tuc_flags; 1521 abi_ulong tuc_link; 1522 target_stack_t tuc_stack; 1523 struct target_sigcontext tuc_mcontext; 1524 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1525 }; 1526 1527 struct target_ucontext_v2 { 1528 abi_ulong tuc_flags; 1529 abi_ulong tuc_link; 1530 target_stack_t tuc_stack; 1531 struct target_sigcontext tuc_mcontext; 1532 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1533 char __unused[128 - sizeof(target_sigset_t)]; 1534 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8))); 1535 }; 1536 1537 struct target_user_vfp { 1538 uint64_t fpregs[32]; 1539 abi_ulong fpscr; 1540 }; 1541 1542 struct target_user_vfp_exc { 1543 abi_ulong fpexc; 1544 abi_ulong fpinst; 1545 abi_ulong fpinst2; 1546 }; 1547 1548 struct target_vfp_sigframe { 1549 abi_ulong magic; 1550 abi_ulong size; 1551 struct target_user_vfp ufp; 1552 struct target_user_vfp_exc ufp_exc; 1553 } __attribute__((__aligned__(8))); 1554 1555 struct target_iwmmxt_sigframe { 1556 abi_ulong magic; 1557 abi_ulong size; 1558 uint64_t regs[16]; 1559 /* Note that not all the coprocessor control registers are stored here */ 1560 uint32_t wcssf; 1561 uint32_t wcasf; 1562 uint32_t wcgr0; 1563 uint32_t wcgr1; 1564 uint32_t wcgr2; 1565 uint32_t wcgr3; 1566 } __attribute__((__aligned__(8))); 1567 1568 #define TARGET_VFP_MAGIC 0x56465001 1569 #define TARGET_IWMMXT_MAGIC 0x12ef842a 1570 1571 struct sigframe_v1 1572 { 1573 struct target_sigcontext sc; 1574 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 1575 abi_ulong retcode; 1576 }; 1577 1578 struct sigframe_v2 1579 { 1580 struct target_ucontext_v2 uc; 1581 abi_ulong retcode; 1582 }; 1583 1584 struct rt_sigframe_v1 1585 { 1586 abi_ulong pinfo; 1587 abi_ulong puc; 1588 struct target_siginfo info; 1589 struct target_ucontext_v1 uc; 1590 abi_ulong retcode; 1591 }; 1592 1593 struct rt_sigframe_v2 1594 { 1595 struct target_siginfo info; 1596 struct target_ucontext_v2 uc; 1597 abi_ulong retcode; 1598 }; 1599 1600 #define TARGET_CONFIG_CPU_32 1 1601 1602 /* 1603 * For ARM syscalls, we encode the syscall number into the instruction. 1604 */ 1605 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) 1606 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) 1607 1608 /* 1609 * For Thumb syscalls, we pass the syscall number via r7. We therefore 1610 * need two 16-bit instructions. 1611 */ 1612 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 1613 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 1614 1615 static const abi_ulong retcodes[4] = { 1616 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 1617 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 1618 }; 1619 1620 1621 static inline int valid_user_regs(CPUARMState *regs) 1622 { 1623 return 1; 1624 } 1625 1626 static void 1627 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1628 CPUARMState *env, abi_ulong mask) 1629 { 1630 __put_user(env->regs[0], &sc->arm_r0); 1631 __put_user(env->regs[1], &sc->arm_r1); 1632 __put_user(env->regs[2], &sc->arm_r2); 1633 __put_user(env->regs[3], &sc->arm_r3); 1634 __put_user(env->regs[4], &sc->arm_r4); 1635 __put_user(env->regs[5], &sc->arm_r5); 1636 __put_user(env->regs[6], &sc->arm_r6); 1637 __put_user(env->regs[7], &sc->arm_r7); 1638 __put_user(env->regs[8], &sc->arm_r8); 1639 __put_user(env->regs[9], &sc->arm_r9); 1640 __put_user(env->regs[10], &sc->arm_r10); 1641 __put_user(env->regs[11], &sc->arm_fp); 1642 __put_user(env->regs[12], &sc->arm_ip); 1643 __put_user(env->regs[13], &sc->arm_sp); 1644 __put_user(env->regs[14], &sc->arm_lr); 1645 __put_user(env->regs[15], &sc->arm_pc); 1646 #ifdef TARGET_CONFIG_CPU_32 1647 __put_user(cpsr_read(env), &sc->arm_cpsr); 1648 #endif 1649 1650 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no); 1651 __put_user(/* current->thread.error_code */ 0, &sc->error_code); 1652 __put_user(/* current->thread.address */ 0, &sc->fault_address); 1653 __put_user(mask, &sc->oldmask); 1654 } 1655 1656 static inline abi_ulong 1657 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) 1658 { 1659 unsigned long sp = regs->regs[13]; 1660 1661 /* 1662 * This is the X/Open sanctioned signal stack switching. 1663 */ 1664 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1665 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1666 } 1667 /* 1668 * ATPCS B01 mandates 8-byte alignment 1669 */ 1670 return (sp - framesize) & ~7; 1671 } 1672 1673 static void 1674 setup_return(CPUARMState *env, struct target_sigaction *ka, 1675 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) 1676 { 1677 abi_ulong handler = ka->_sa_handler; 1678 abi_ulong retcode; 1679 int thumb = handler & 1; 1680 uint32_t cpsr = cpsr_read(env); 1681 1682 cpsr &= ~CPSR_IT; 1683 if (thumb) { 1684 cpsr |= CPSR_T; 1685 } else { 1686 cpsr &= ~CPSR_T; 1687 } 1688 1689 if (ka->sa_flags & TARGET_SA_RESTORER) { 1690 retcode = ka->sa_restorer; 1691 } else { 1692 unsigned int idx = thumb; 1693 1694 if (ka->sa_flags & TARGET_SA_SIGINFO) { 1695 idx += 2; 1696 } 1697 1698 __put_user(retcodes[idx], rc); 1699 1700 retcode = rc_addr + thumb; 1701 } 1702 1703 env->regs[0] = usig; 1704 env->regs[13] = frame_addr; 1705 env->regs[14] = retcode; 1706 env->regs[15] = handler & (thumb ? ~1 : ~3); 1707 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr); 1708 } 1709 1710 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) 1711 { 1712 int i; 1713 struct target_vfp_sigframe *vfpframe; 1714 vfpframe = (struct target_vfp_sigframe *)regspace; 1715 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic); 1716 __put_user(sizeof(*vfpframe), &vfpframe->size); 1717 for (i = 0; i < 32; i++) { 1718 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 1719 } 1720 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr); 1721 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc); 1722 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 1723 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 1724 return (abi_ulong*)(vfpframe+1); 1725 } 1726 1727 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, 1728 CPUARMState *env) 1729 { 1730 int i; 1731 struct target_iwmmxt_sigframe *iwmmxtframe; 1732 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 1733 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic); 1734 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size); 1735 for (i = 0; i < 16; i++) { 1736 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 1737 } 1738 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 1739 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); 1740 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 1741 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 1742 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 1743 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 1744 return (abi_ulong*)(iwmmxtframe+1); 1745 } 1746 1747 static void setup_sigframe_v2(struct target_ucontext_v2 *uc, 1748 target_sigset_t *set, CPUARMState *env) 1749 { 1750 struct target_sigaltstack stack; 1751 int i; 1752 abi_ulong *regspace; 1753 1754 /* Clear all the bits of the ucontext we don't use. */ 1755 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext)); 1756 1757 memset(&stack, 0, sizeof(stack)); 1758 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1759 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1760 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1761 memcpy(&uc->tuc_stack, &stack, sizeof(stack)); 1762 1763 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]); 1764 /* Save coprocessor signal frame. */ 1765 regspace = uc->tuc_regspace; 1766 if (arm_feature(env, ARM_FEATURE_VFP)) { 1767 regspace = setup_sigframe_v2_vfp(regspace, env); 1768 } 1769 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 1770 regspace = setup_sigframe_v2_iwmmxt(regspace, env); 1771 } 1772 1773 /* Write terminating magic word */ 1774 __put_user(0, regspace); 1775 1776 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1777 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]); 1778 } 1779 } 1780 1781 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 1782 static void setup_frame_v1(int usig, struct target_sigaction *ka, 1783 target_sigset_t *set, CPUARMState *regs) 1784 { 1785 struct sigframe_v1 *frame; 1786 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1787 int i; 1788 1789 trace_user_setup_frame(regs, frame_addr); 1790 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1791 goto sigsegv; 1792 } 1793 1794 setup_sigcontext(&frame->sc, regs, set->sig[0]); 1795 1796 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1797 __put_user(set->sig[i], &frame->extramask[i - 1]); 1798 } 1799 1800 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1801 frame_addr + offsetof(struct sigframe_v1, retcode)); 1802 1803 unlock_user_struct(frame, frame_addr, 1); 1804 return; 1805 sigsegv: 1806 force_sigsegv(usig); 1807 } 1808 1809 static void setup_frame_v2(int usig, struct target_sigaction *ka, 1810 target_sigset_t *set, CPUARMState *regs) 1811 { 1812 struct sigframe_v2 *frame; 1813 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1814 1815 trace_user_setup_frame(regs, frame_addr); 1816 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1817 goto sigsegv; 1818 } 1819 1820 setup_sigframe_v2(&frame->uc, set, regs); 1821 1822 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1823 frame_addr + offsetof(struct sigframe_v2, retcode)); 1824 1825 unlock_user_struct(frame, frame_addr, 1); 1826 return; 1827 sigsegv: 1828 force_sigsegv(usig); 1829 } 1830 1831 static void setup_frame(int usig, struct target_sigaction *ka, 1832 target_sigset_t *set, CPUARMState *regs) 1833 { 1834 if (get_osversion() >= 0x020612) { 1835 setup_frame_v2(usig, ka, set, regs); 1836 } else { 1837 setup_frame_v1(usig, ka, set, regs); 1838 } 1839 } 1840 1841 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 1842 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, 1843 target_siginfo_t *info, 1844 target_sigset_t *set, CPUARMState *env) 1845 { 1846 struct rt_sigframe_v1 *frame; 1847 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1848 struct target_sigaltstack stack; 1849 int i; 1850 abi_ulong info_addr, uc_addr; 1851 1852 trace_user_setup_rt_frame(env, frame_addr); 1853 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1854 goto sigsegv; 1855 } 1856 1857 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); 1858 __put_user(info_addr, &frame->pinfo); 1859 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); 1860 __put_user(uc_addr, &frame->puc); 1861 tswap_siginfo(&frame->info, info); 1862 1863 /* Clear all the bits of the ucontext we don't use. */ 1864 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 1865 1866 memset(&stack, 0, sizeof(stack)); 1867 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1868 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1869 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1870 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1871 1872 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 1873 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1874 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1875 } 1876 1877 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1878 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 1879 1880 env->regs[1] = info_addr; 1881 env->regs[2] = uc_addr; 1882 1883 unlock_user_struct(frame, frame_addr, 1); 1884 return; 1885 sigsegv: 1886 force_sigsegv(usig); 1887 } 1888 1889 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 1890 target_siginfo_t *info, 1891 target_sigset_t *set, CPUARMState *env) 1892 { 1893 struct rt_sigframe_v2 *frame; 1894 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1895 abi_ulong info_addr, uc_addr; 1896 1897 trace_user_setup_rt_frame(env, frame_addr); 1898 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1899 goto sigsegv; 1900 } 1901 1902 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 1903 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 1904 tswap_siginfo(&frame->info, info); 1905 1906 setup_sigframe_v2(&frame->uc, set, env); 1907 1908 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1909 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 1910 1911 env->regs[1] = info_addr; 1912 env->regs[2] = uc_addr; 1913 1914 unlock_user_struct(frame, frame_addr, 1); 1915 return; 1916 sigsegv: 1917 force_sigsegv(usig); 1918 } 1919 1920 static void setup_rt_frame(int usig, struct target_sigaction *ka, 1921 target_siginfo_t *info, 1922 target_sigset_t *set, CPUARMState *env) 1923 { 1924 if (get_osversion() >= 0x020612) { 1925 setup_rt_frame_v2(usig, ka, info, set, env); 1926 } else { 1927 setup_rt_frame_v1(usig, ka, info, set, env); 1928 } 1929 } 1930 1931 static int 1932 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 1933 { 1934 int err = 0; 1935 uint32_t cpsr; 1936 1937 __get_user(env->regs[0], &sc->arm_r0); 1938 __get_user(env->regs[1], &sc->arm_r1); 1939 __get_user(env->regs[2], &sc->arm_r2); 1940 __get_user(env->regs[3], &sc->arm_r3); 1941 __get_user(env->regs[4], &sc->arm_r4); 1942 __get_user(env->regs[5], &sc->arm_r5); 1943 __get_user(env->regs[6], &sc->arm_r6); 1944 __get_user(env->regs[7], &sc->arm_r7); 1945 __get_user(env->regs[8], &sc->arm_r8); 1946 __get_user(env->regs[9], &sc->arm_r9); 1947 __get_user(env->regs[10], &sc->arm_r10); 1948 __get_user(env->regs[11], &sc->arm_fp); 1949 __get_user(env->regs[12], &sc->arm_ip); 1950 __get_user(env->regs[13], &sc->arm_sp); 1951 __get_user(env->regs[14], &sc->arm_lr); 1952 __get_user(env->regs[15], &sc->arm_pc); 1953 #ifdef TARGET_CONFIG_CPU_32 1954 __get_user(cpsr, &sc->arm_cpsr); 1955 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 1956 #endif 1957 1958 err |= !valid_user_regs(env); 1959 1960 return err; 1961 } 1962 1963 static long do_sigreturn_v1(CPUARMState *env) 1964 { 1965 abi_ulong frame_addr; 1966 struct sigframe_v1 *frame = NULL; 1967 target_sigset_t set; 1968 sigset_t host_set; 1969 int i; 1970 1971 /* 1972 * Since we stacked the signal on a 64-bit boundary, 1973 * then 'sp' should be word aligned here. If it's 1974 * not, then the user is trying to mess with us. 1975 */ 1976 frame_addr = env->regs[13]; 1977 trace_user_do_sigreturn(env, frame_addr); 1978 if (frame_addr & 7) { 1979 goto badframe; 1980 } 1981 1982 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1983 goto badframe; 1984 } 1985 1986 __get_user(set.sig[0], &frame->sc.oldmask); 1987 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1988 __get_user(set.sig[i], &frame->extramask[i - 1]); 1989 } 1990 1991 target_to_host_sigset_internal(&host_set, &set); 1992 set_sigmask(&host_set); 1993 1994 if (restore_sigcontext(env, &frame->sc)) { 1995 goto badframe; 1996 } 1997 1998 #if 0 1999 /* Send SIGTRAP if we're single-stepping */ 2000 if (ptrace_cancel_bpt(current)) 2001 send_sig(SIGTRAP, current, 1); 2002 #endif 2003 unlock_user_struct(frame, frame_addr, 0); 2004 return -TARGET_QEMU_ESIGRETURN; 2005 2006 badframe: 2007 force_sig(TARGET_SIGSEGV /* , current */); 2008 return 0; 2009 } 2010 2011 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) 2012 { 2013 int i; 2014 abi_ulong magic, sz; 2015 uint32_t fpscr, fpexc; 2016 struct target_vfp_sigframe *vfpframe; 2017 vfpframe = (struct target_vfp_sigframe *)regspace; 2018 2019 __get_user(magic, &vfpframe->magic); 2020 __get_user(sz, &vfpframe->size); 2021 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) { 2022 return 0; 2023 } 2024 for (i = 0; i < 32; i++) { 2025 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 2026 } 2027 __get_user(fpscr, &vfpframe->ufp.fpscr); 2028 vfp_set_fpscr(env, fpscr); 2029 __get_user(fpexc, &vfpframe->ufp_exc.fpexc); 2030 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid 2031 * and the exception flag is cleared 2032 */ 2033 fpexc |= (1 << 30); 2034 fpexc &= ~((1 << 31) | (1 << 28)); 2035 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc; 2036 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2037 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2038 return (abi_ulong*)(vfpframe + 1); 2039 } 2040 2041 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, 2042 abi_ulong *regspace) 2043 { 2044 int i; 2045 abi_ulong magic, sz; 2046 struct target_iwmmxt_sigframe *iwmmxtframe; 2047 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2048 2049 __get_user(magic, &iwmmxtframe->magic); 2050 __get_user(sz, &iwmmxtframe->size); 2051 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { 2052 return 0; 2053 } 2054 for (i = 0; i < 16; i++) { 2055 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2056 } 2057 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2058 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); 2059 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2060 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2061 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2062 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2063 return (abi_ulong*)(iwmmxtframe + 1); 2064 } 2065 2066 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr, 2067 struct target_ucontext_v2 *uc) 2068 { 2069 sigset_t host_set; 2070 abi_ulong *regspace; 2071 2072 target_to_host_sigset(&host_set, &uc->tuc_sigmask); 2073 set_sigmask(&host_set); 2074 2075 if (restore_sigcontext(env, &uc->tuc_mcontext)) 2076 return 1; 2077 2078 /* Restore coprocessor signal frame */ 2079 regspace = uc->tuc_regspace; 2080 if (arm_feature(env, ARM_FEATURE_VFP)) { 2081 regspace = restore_sigframe_v2_vfp(env, regspace); 2082 if (!regspace) { 2083 return 1; 2084 } 2085 } 2086 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2087 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2088 if (!regspace) { 2089 return 1; 2090 } 2091 } 2092 2093 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2094 return 1; 2095 2096 #if 0 2097 /* Send SIGTRAP if we're single-stepping */ 2098 if (ptrace_cancel_bpt(current)) 2099 send_sig(SIGTRAP, current, 1); 2100 #endif 2101 2102 return 0; 2103 } 2104 2105 static long do_sigreturn_v2(CPUARMState *env) 2106 { 2107 abi_ulong frame_addr; 2108 struct sigframe_v2 *frame = NULL; 2109 2110 /* 2111 * Since we stacked the signal on a 64-bit boundary, 2112 * then 'sp' should be word aligned here. If it's 2113 * not, then the user is trying to mess with us. 2114 */ 2115 frame_addr = env->regs[13]; 2116 trace_user_do_sigreturn(env, frame_addr); 2117 if (frame_addr & 7) { 2118 goto badframe; 2119 } 2120 2121 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2122 goto badframe; 2123 } 2124 2125 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2126 goto badframe; 2127 } 2128 2129 unlock_user_struct(frame, frame_addr, 0); 2130 return -TARGET_QEMU_ESIGRETURN; 2131 2132 badframe: 2133 unlock_user_struct(frame, frame_addr, 0); 2134 force_sig(TARGET_SIGSEGV /* , current */); 2135 return 0; 2136 } 2137 2138 long do_sigreturn(CPUARMState *env) 2139 { 2140 if (get_osversion() >= 0x020612) { 2141 return do_sigreturn_v2(env); 2142 } else { 2143 return do_sigreturn_v1(env); 2144 } 2145 } 2146 2147 static long do_rt_sigreturn_v1(CPUARMState *env) 2148 { 2149 abi_ulong frame_addr; 2150 struct rt_sigframe_v1 *frame = NULL; 2151 sigset_t host_set; 2152 2153 /* 2154 * Since we stacked the signal on a 64-bit boundary, 2155 * then 'sp' should be word aligned here. If it's 2156 * not, then the user is trying to mess with us. 2157 */ 2158 frame_addr = env->regs[13]; 2159 trace_user_do_rt_sigreturn(env, frame_addr); 2160 if (frame_addr & 7) { 2161 goto badframe; 2162 } 2163 2164 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2165 goto badframe; 2166 } 2167 2168 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2169 set_sigmask(&host_set); 2170 2171 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2172 goto badframe; 2173 } 2174 2175 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2176 goto badframe; 2177 2178 #if 0 2179 /* Send SIGTRAP if we're single-stepping */ 2180 if (ptrace_cancel_bpt(current)) 2181 send_sig(SIGTRAP, current, 1); 2182 #endif 2183 unlock_user_struct(frame, frame_addr, 0); 2184 return -TARGET_QEMU_ESIGRETURN; 2185 2186 badframe: 2187 unlock_user_struct(frame, frame_addr, 0); 2188 force_sig(TARGET_SIGSEGV /* , current */); 2189 return 0; 2190 } 2191 2192 static long do_rt_sigreturn_v2(CPUARMState *env) 2193 { 2194 abi_ulong frame_addr; 2195 struct rt_sigframe_v2 *frame = NULL; 2196 2197 /* 2198 * Since we stacked the signal on a 64-bit boundary, 2199 * then 'sp' should be word aligned here. If it's 2200 * not, then the user is trying to mess with us. 2201 */ 2202 frame_addr = env->regs[13]; 2203 trace_user_do_rt_sigreturn(env, frame_addr); 2204 if (frame_addr & 7) { 2205 goto badframe; 2206 } 2207 2208 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2209 goto badframe; 2210 } 2211 2212 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2213 goto badframe; 2214 } 2215 2216 unlock_user_struct(frame, frame_addr, 0); 2217 return -TARGET_QEMU_ESIGRETURN; 2218 2219 badframe: 2220 unlock_user_struct(frame, frame_addr, 0); 2221 force_sig(TARGET_SIGSEGV /* , current */); 2222 return 0; 2223 } 2224 2225 long do_rt_sigreturn(CPUARMState *env) 2226 { 2227 if (get_osversion() >= 0x020612) { 2228 return do_rt_sigreturn_v2(env); 2229 } else { 2230 return do_rt_sigreturn_v1(env); 2231 } 2232 } 2233 2234 #elif defined(TARGET_SPARC) 2235 2236 #define __SUNOS_MAXWIN 31 2237 2238 /* This is what SunOS does, so shall I. */ 2239 struct target_sigcontext { 2240 abi_ulong sigc_onstack; /* state to restore */ 2241 2242 abi_ulong sigc_mask; /* sigmask to restore */ 2243 abi_ulong sigc_sp; /* stack pointer */ 2244 abi_ulong sigc_pc; /* program counter */ 2245 abi_ulong sigc_npc; /* next program counter */ 2246 abi_ulong sigc_psr; /* for condition codes etc */ 2247 abi_ulong sigc_g1; /* User uses these two registers */ 2248 abi_ulong sigc_o0; /* within the trampoline code. */ 2249 2250 /* Now comes information regarding the users window set 2251 * at the time of the signal. 2252 */ 2253 abi_ulong sigc_oswins; /* outstanding windows */ 2254 2255 /* stack ptrs for each regwin buf */ 2256 char *sigc_spbuf[__SUNOS_MAXWIN]; 2257 2258 /* Windows to restore after signal */ 2259 struct { 2260 abi_ulong locals[8]; 2261 abi_ulong ins[8]; 2262 } sigc_wbuf[__SUNOS_MAXWIN]; 2263 }; 2264 /* A Sparc stack frame */ 2265 struct sparc_stackf { 2266 abi_ulong locals[8]; 2267 abi_ulong ins[8]; 2268 /* It's simpler to treat fp and callers_pc as elements of ins[] 2269 * since we never need to access them ourselves. 2270 */ 2271 char *structptr; 2272 abi_ulong xargs[6]; 2273 abi_ulong xxargs[1]; 2274 }; 2275 2276 typedef struct { 2277 struct { 2278 abi_ulong psr; 2279 abi_ulong pc; 2280 abi_ulong npc; 2281 abi_ulong y; 2282 abi_ulong u_regs[16]; /* globals and ins */ 2283 } si_regs; 2284 int si_mask; 2285 } __siginfo_t; 2286 2287 typedef struct { 2288 abi_ulong si_float_regs[32]; 2289 unsigned long si_fsr; 2290 unsigned long si_fpqdepth; 2291 struct { 2292 unsigned long *insn_addr; 2293 unsigned long insn; 2294 } si_fpqueue [16]; 2295 } qemu_siginfo_fpu_t; 2296 2297 2298 struct target_signal_frame { 2299 struct sparc_stackf ss; 2300 __siginfo_t info; 2301 abi_ulong fpu_save; 2302 abi_ulong insns[2] __attribute__ ((aligned (8))); 2303 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2304 abi_ulong extra_size; /* Should be 0 */ 2305 qemu_siginfo_fpu_t fpu_state; 2306 }; 2307 struct target_rt_signal_frame { 2308 struct sparc_stackf ss; 2309 siginfo_t info; 2310 abi_ulong regs[20]; 2311 sigset_t mask; 2312 abi_ulong fpu_save; 2313 unsigned int insns[2]; 2314 stack_t stack; 2315 unsigned int extra_size; /* Should be 0 */ 2316 qemu_siginfo_fpu_t fpu_state; 2317 }; 2318 2319 #define UREG_O0 16 2320 #define UREG_O6 22 2321 #define UREG_I0 0 2322 #define UREG_I1 1 2323 #define UREG_I2 2 2324 #define UREG_I3 3 2325 #define UREG_I4 4 2326 #define UREG_I5 5 2327 #define UREG_I6 6 2328 #define UREG_I7 7 2329 #define UREG_L0 8 2330 #define UREG_FP UREG_I6 2331 #define UREG_SP UREG_O6 2332 2333 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2334 CPUSPARCState *env, 2335 unsigned long framesize) 2336 { 2337 abi_ulong sp; 2338 2339 sp = env->regwptr[UREG_FP]; 2340 2341 /* This is the X/Open sanctioned signal stack switching. */ 2342 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2343 if (!on_sig_stack(sp) 2344 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2345 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2346 } 2347 } 2348 return sp - framesize; 2349 } 2350 2351 static int 2352 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2353 { 2354 int err = 0, i; 2355 2356 __put_user(env->psr, &si->si_regs.psr); 2357 __put_user(env->pc, &si->si_regs.pc); 2358 __put_user(env->npc, &si->si_regs.npc); 2359 __put_user(env->y, &si->si_regs.y); 2360 for (i=0; i < 8; i++) { 2361 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2362 } 2363 for (i=0; i < 8; i++) { 2364 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2365 } 2366 __put_user(mask, &si->si_mask); 2367 return err; 2368 } 2369 2370 #if 0 2371 static int 2372 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2373 CPUSPARCState *env, unsigned long mask) 2374 { 2375 int err = 0; 2376 2377 __put_user(mask, &sc->sigc_mask); 2378 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2379 __put_user(env->pc, &sc->sigc_pc); 2380 __put_user(env->npc, &sc->sigc_npc); 2381 __put_user(env->psr, &sc->sigc_psr); 2382 __put_user(env->gregs[1], &sc->sigc_g1); 2383 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2384 2385 return err; 2386 } 2387 #endif 2388 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2389 2390 static void setup_frame(int sig, struct target_sigaction *ka, 2391 target_sigset_t *set, CPUSPARCState *env) 2392 { 2393 abi_ulong sf_addr; 2394 struct target_signal_frame *sf; 2395 int sigframe_size, err, i; 2396 2397 /* 1. Make sure everything is clean */ 2398 //synchronize_user_stack(); 2399 2400 sigframe_size = NF_ALIGNEDSZ; 2401 sf_addr = get_sigframe(ka, env, sigframe_size); 2402 trace_user_setup_frame(env, sf_addr); 2403 2404 sf = lock_user(VERIFY_WRITE, sf_addr, 2405 sizeof(struct target_signal_frame), 0); 2406 if (!sf) { 2407 goto sigsegv; 2408 } 2409 #if 0 2410 if (invalid_frame_pointer(sf, sigframe_size)) 2411 goto sigill_and_return; 2412 #endif 2413 /* 2. Save the current process state */ 2414 err = setup___siginfo(&sf->info, env, set->sig[0]); 2415 __put_user(0, &sf->extra_size); 2416 2417 //save_fpu_state(regs, &sf->fpu_state); 2418 //__put_user(&sf->fpu_state, &sf->fpu_save); 2419 2420 __put_user(set->sig[0], &sf->info.si_mask); 2421 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2422 __put_user(set->sig[i + 1], &sf->extramask[i]); 2423 } 2424 2425 for (i = 0; i < 8; i++) { 2426 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2427 } 2428 for (i = 0; i < 8; i++) { 2429 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2430 } 2431 if (err) 2432 goto sigsegv; 2433 2434 /* 3. signal handler back-trampoline and parameters */ 2435 env->regwptr[UREG_FP] = sf_addr; 2436 env->regwptr[UREG_I0] = sig; 2437 env->regwptr[UREG_I1] = sf_addr + 2438 offsetof(struct target_signal_frame, info); 2439 env->regwptr[UREG_I2] = sf_addr + 2440 offsetof(struct target_signal_frame, info); 2441 2442 /* 4. signal handler */ 2443 env->pc = ka->_sa_handler; 2444 env->npc = (env->pc + 4); 2445 /* 5. return to kernel instructions */ 2446 if (ka->sa_restorer) { 2447 env->regwptr[UREG_I7] = ka->sa_restorer; 2448 } else { 2449 uint32_t val32; 2450 2451 env->regwptr[UREG_I7] = sf_addr + 2452 offsetof(struct target_signal_frame, insns) - 2 * 4; 2453 2454 /* mov __NR_sigreturn, %g1 */ 2455 val32 = 0x821020d8; 2456 __put_user(val32, &sf->insns[0]); 2457 2458 /* t 0x10 */ 2459 val32 = 0x91d02010; 2460 __put_user(val32, &sf->insns[1]); 2461 if (err) 2462 goto sigsegv; 2463 2464 /* Flush instruction space. */ 2465 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2466 // tb_flush(env); 2467 } 2468 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2469 return; 2470 #if 0 2471 sigill_and_return: 2472 force_sig(TARGET_SIGILL); 2473 #endif 2474 sigsegv: 2475 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2476 force_sigsegv(sig); 2477 } 2478 2479 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2480 target_siginfo_t *info, 2481 target_sigset_t *set, CPUSPARCState *env) 2482 { 2483 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2484 } 2485 2486 long do_sigreturn(CPUSPARCState *env) 2487 { 2488 abi_ulong sf_addr; 2489 struct target_signal_frame *sf; 2490 uint32_t up_psr, pc, npc; 2491 target_sigset_t set; 2492 sigset_t host_set; 2493 int err=0, i; 2494 2495 sf_addr = env->regwptr[UREG_FP]; 2496 trace_user_do_sigreturn(env, sf_addr); 2497 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2498 goto segv_and_exit; 2499 } 2500 2501 /* 1. Make sure we are not getting garbage from the user */ 2502 2503 if (sf_addr & 3) 2504 goto segv_and_exit; 2505 2506 __get_user(pc, &sf->info.si_regs.pc); 2507 __get_user(npc, &sf->info.si_regs.npc); 2508 2509 if ((pc | npc) & 3) { 2510 goto segv_and_exit; 2511 } 2512 2513 /* 2. Restore the state */ 2514 __get_user(up_psr, &sf->info.si_regs.psr); 2515 2516 /* User can only change condition codes and FPU enabling in %psr. */ 2517 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2518 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2519 2520 env->pc = pc; 2521 env->npc = npc; 2522 __get_user(env->y, &sf->info.si_regs.y); 2523 for (i=0; i < 8; i++) { 2524 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2525 } 2526 for (i=0; i < 8; i++) { 2527 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2528 } 2529 2530 /* FIXME: implement FPU save/restore: 2531 * __get_user(fpu_save, &sf->fpu_save); 2532 * if (fpu_save) 2533 * err |= restore_fpu_state(env, fpu_save); 2534 */ 2535 2536 /* This is pretty much atomic, no amount locking would prevent 2537 * the races which exist anyways. 2538 */ 2539 __get_user(set.sig[0], &sf->info.si_mask); 2540 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2541 __get_user(set.sig[i], &sf->extramask[i - 1]); 2542 } 2543 2544 target_to_host_sigset_internal(&host_set, &set); 2545 set_sigmask(&host_set); 2546 2547 if (err) { 2548 goto segv_and_exit; 2549 } 2550 unlock_user_struct(sf, sf_addr, 0); 2551 return -TARGET_QEMU_ESIGRETURN; 2552 2553 segv_and_exit: 2554 unlock_user_struct(sf, sf_addr, 0); 2555 force_sig(TARGET_SIGSEGV); 2556 } 2557 2558 long do_rt_sigreturn(CPUSPARCState *env) 2559 { 2560 trace_user_do_rt_sigreturn(env, 0); 2561 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2562 return -TARGET_ENOSYS; 2563 } 2564 2565 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2566 #define MC_TSTATE 0 2567 #define MC_PC 1 2568 #define MC_NPC 2 2569 #define MC_Y 3 2570 #define MC_G1 4 2571 #define MC_G2 5 2572 #define MC_G3 6 2573 #define MC_G4 7 2574 #define MC_G5 8 2575 #define MC_G6 9 2576 #define MC_G7 10 2577 #define MC_O0 11 2578 #define MC_O1 12 2579 #define MC_O2 13 2580 #define MC_O3 14 2581 #define MC_O4 15 2582 #define MC_O5 16 2583 #define MC_O6 17 2584 #define MC_O7 18 2585 #define MC_NGREG 19 2586 2587 typedef abi_ulong target_mc_greg_t; 2588 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2589 2590 struct target_mc_fq { 2591 abi_ulong *mcfq_addr; 2592 uint32_t mcfq_insn; 2593 }; 2594 2595 struct target_mc_fpu { 2596 union { 2597 uint32_t sregs[32]; 2598 uint64_t dregs[32]; 2599 //uint128_t qregs[16]; 2600 } mcfpu_fregs; 2601 abi_ulong mcfpu_fsr; 2602 abi_ulong mcfpu_fprs; 2603 abi_ulong mcfpu_gsr; 2604 struct target_mc_fq *mcfpu_fq; 2605 unsigned char mcfpu_qcnt; 2606 unsigned char mcfpu_qentsz; 2607 unsigned char mcfpu_enab; 2608 }; 2609 typedef struct target_mc_fpu target_mc_fpu_t; 2610 2611 typedef struct { 2612 target_mc_gregset_t mc_gregs; 2613 target_mc_greg_t mc_fp; 2614 target_mc_greg_t mc_i7; 2615 target_mc_fpu_t mc_fpregs; 2616 } target_mcontext_t; 2617 2618 struct target_ucontext { 2619 struct target_ucontext *tuc_link; 2620 abi_ulong tuc_flags; 2621 target_sigset_t tuc_sigmask; 2622 target_mcontext_t tuc_mcontext; 2623 }; 2624 2625 /* A V9 register window */ 2626 struct target_reg_window { 2627 abi_ulong locals[8]; 2628 abi_ulong ins[8]; 2629 }; 2630 2631 #define TARGET_STACK_BIAS 2047 2632 2633 /* {set, get}context() needed for 64-bit SparcLinux userland. */ 2634 void sparc64_set_context(CPUSPARCState *env) 2635 { 2636 abi_ulong ucp_addr; 2637 struct target_ucontext *ucp; 2638 target_mc_gregset_t *grp; 2639 abi_ulong pc, npc, tstate; 2640 abi_ulong fp, i7, w_addr; 2641 unsigned int i; 2642 2643 ucp_addr = env->regwptr[UREG_I0]; 2644 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2645 goto do_sigsegv; 2646 } 2647 grp = &ucp->tuc_mcontext.mc_gregs; 2648 __get_user(pc, &((*grp)[MC_PC])); 2649 __get_user(npc, &((*grp)[MC_NPC])); 2650 if ((pc | npc) & 3) { 2651 goto do_sigsegv; 2652 } 2653 if (env->regwptr[UREG_I1]) { 2654 target_sigset_t target_set; 2655 sigset_t set; 2656 2657 if (TARGET_NSIG_WORDS == 1) { 2658 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2659 } else { 2660 abi_ulong *src, *dst; 2661 src = ucp->tuc_sigmask.sig; 2662 dst = target_set.sig; 2663 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2664 __get_user(*dst, src); 2665 } 2666 } 2667 target_to_host_sigset_internal(&set, &target_set); 2668 set_sigmask(&set); 2669 } 2670 env->pc = pc; 2671 env->npc = npc; 2672 __get_user(env->y, &((*grp)[MC_Y])); 2673 __get_user(tstate, &((*grp)[MC_TSTATE])); 2674 env->asi = (tstate >> 24) & 0xff; 2675 cpu_put_ccr(env, tstate >> 32); 2676 cpu_put_cwp64(env, tstate & 0x1f); 2677 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2678 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2679 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2680 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2681 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2682 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2683 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2684 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2685 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2686 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2687 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2688 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2689 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2690 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2691 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2692 2693 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2694 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2695 2696 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2697 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2698 abi_ulong) != 0) { 2699 goto do_sigsegv; 2700 } 2701 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2702 abi_ulong) != 0) { 2703 goto do_sigsegv; 2704 } 2705 /* FIXME this does not match how the kernel handles the FPU in 2706 * its sparc64_set_context implementation. In particular the FPU 2707 * is only restored if fenab is non-zero in: 2708 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2709 */ 2710 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2711 { 2712 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2713 for (i = 0; i < 64; i++, src++) { 2714 if (i & 1) { 2715 __get_user(env->fpr[i/2].l.lower, src); 2716 } else { 2717 __get_user(env->fpr[i/2].l.upper, src); 2718 } 2719 } 2720 } 2721 __get_user(env->fsr, 2722 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2723 __get_user(env->gsr, 2724 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2725 unlock_user_struct(ucp, ucp_addr, 0); 2726 return; 2727 do_sigsegv: 2728 unlock_user_struct(ucp, ucp_addr, 0); 2729 force_sig(TARGET_SIGSEGV); 2730 } 2731 2732 void sparc64_get_context(CPUSPARCState *env) 2733 { 2734 abi_ulong ucp_addr; 2735 struct target_ucontext *ucp; 2736 target_mc_gregset_t *grp; 2737 target_mcontext_t *mcp; 2738 abi_ulong fp, i7, w_addr; 2739 int err; 2740 unsigned int i; 2741 target_sigset_t target_set; 2742 sigset_t set; 2743 2744 ucp_addr = env->regwptr[UREG_I0]; 2745 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2746 goto do_sigsegv; 2747 } 2748 2749 mcp = &ucp->tuc_mcontext; 2750 grp = &mcp->mc_gregs; 2751 2752 /* Skip over the trap instruction, first. */ 2753 env->pc = env->npc; 2754 env->npc += 4; 2755 2756 /* If we're only reading the signal mask then do_sigprocmask() 2757 * is guaranteed not to fail, which is important because we don't 2758 * have any way to signal a failure or restart this operation since 2759 * this is not a normal syscall. 2760 */ 2761 err = do_sigprocmask(0, NULL, &set); 2762 assert(err == 0); 2763 host_to_target_sigset_internal(&target_set, &set); 2764 if (TARGET_NSIG_WORDS == 1) { 2765 __put_user(target_set.sig[0], 2766 (abi_ulong *)&ucp->tuc_sigmask); 2767 } else { 2768 abi_ulong *src, *dst; 2769 src = target_set.sig; 2770 dst = ucp->tuc_sigmask.sig; 2771 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2772 __put_user(*src, dst); 2773 } 2774 if (err) 2775 goto do_sigsegv; 2776 } 2777 2778 /* XXX: tstate must be saved properly */ 2779 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2780 __put_user(env->pc, &((*grp)[MC_PC])); 2781 __put_user(env->npc, &((*grp)[MC_NPC])); 2782 __put_user(env->y, &((*grp)[MC_Y])); 2783 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2784 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2785 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2786 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2787 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2788 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2789 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2790 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2791 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2792 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2793 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2794 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2795 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2796 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2797 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2798 2799 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2800 fp = i7 = 0; 2801 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2802 abi_ulong) != 0) { 2803 goto do_sigsegv; 2804 } 2805 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2806 abi_ulong) != 0) { 2807 goto do_sigsegv; 2808 } 2809 __put_user(fp, &(mcp->mc_fp)); 2810 __put_user(i7, &(mcp->mc_i7)); 2811 2812 { 2813 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2814 for (i = 0; i < 64; i++, dst++) { 2815 if (i & 1) { 2816 __put_user(env->fpr[i/2].l.lower, dst); 2817 } else { 2818 __put_user(env->fpr[i/2].l.upper, dst); 2819 } 2820 } 2821 } 2822 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2823 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2824 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2825 2826 if (err) 2827 goto do_sigsegv; 2828 unlock_user_struct(ucp, ucp_addr, 1); 2829 return; 2830 do_sigsegv: 2831 unlock_user_struct(ucp, ucp_addr, 1); 2832 force_sig(TARGET_SIGSEGV); 2833 } 2834 #endif 2835 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2836 2837 # if defined(TARGET_ABI_MIPSO32) 2838 struct target_sigcontext { 2839 uint32_t sc_regmask; /* Unused */ 2840 uint32_t sc_status; 2841 uint64_t sc_pc; 2842 uint64_t sc_regs[32]; 2843 uint64_t sc_fpregs[32]; 2844 uint32_t sc_ownedfp; /* Unused */ 2845 uint32_t sc_fpc_csr; 2846 uint32_t sc_fpc_eir; /* Unused */ 2847 uint32_t sc_used_math; 2848 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2849 uint32_t pad0; 2850 uint64_t sc_mdhi; 2851 uint64_t sc_mdlo; 2852 target_ulong sc_hi1; /* Was sc_cause */ 2853 target_ulong sc_lo1; /* Was sc_badvaddr */ 2854 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2855 target_ulong sc_lo2; 2856 target_ulong sc_hi3; 2857 target_ulong sc_lo3; 2858 }; 2859 # else /* N32 || N64 */ 2860 struct target_sigcontext { 2861 uint64_t sc_regs[32]; 2862 uint64_t sc_fpregs[32]; 2863 uint64_t sc_mdhi; 2864 uint64_t sc_hi1; 2865 uint64_t sc_hi2; 2866 uint64_t sc_hi3; 2867 uint64_t sc_mdlo; 2868 uint64_t sc_lo1; 2869 uint64_t sc_lo2; 2870 uint64_t sc_lo3; 2871 uint64_t sc_pc; 2872 uint32_t sc_fpc_csr; 2873 uint32_t sc_used_math; 2874 uint32_t sc_dsp; 2875 uint32_t sc_reserved; 2876 }; 2877 # endif /* O32 */ 2878 2879 struct sigframe { 2880 uint32_t sf_ass[4]; /* argument save space for o32 */ 2881 uint32_t sf_code[2]; /* signal trampoline */ 2882 struct target_sigcontext sf_sc; 2883 target_sigset_t sf_mask; 2884 }; 2885 2886 struct target_ucontext { 2887 target_ulong tuc_flags; 2888 target_ulong tuc_link; 2889 target_stack_t tuc_stack; 2890 target_ulong pad0; 2891 struct target_sigcontext tuc_mcontext; 2892 target_sigset_t tuc_sigmask; 2893 }; 2894 2895 struct target_rt_sigframe { 2896 uint32_t rs_ass[4]; /* argument save space for o32 */ 2897 uint32_t rs_code[2]; /* signal trampoline */ 2898 struct target_siginfo rs_info; 2899 struct target_ucontext rs_uc; 2900 }; 2901 2902 /* Install trampoline to jump back from signal handler */ 2903 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2904 { 2905 int err = 0; 2906 2907 /* 2908 * Set up the return code ... 2909 * 2910 * li v0, __NR__foo_sigreturn 2911 * syscall 2912 */ 2913 2914 __put_user(0x24020000 + syscall, tramp + 0); 2915 __put_user(0x0000000c , tramp + 1); 2916 return err; 2917 } 2918 2919 static inline void setup_sigcontext(CPUMIPSState *regs, 2920 struct target_sigcontext *sc) 2921 { 2922 int i; 2923 2924 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2925 regs->hflags &= ~MIPS_HFLAG_BMASK; 2926 2927 __put_user(0, &sc->sc_regs[0]); 2928 for (i = 1; i < 32; ++i) { 2929 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2930 } 2931 2932 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2933 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2934 2935 /* Rather than checking for dsp existence, always copy. The storage 2936 would just be garbage otherwise. */ 2937 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2938 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2939 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2940 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2941 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2942 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2943 { 2944 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2945 __put_user(dsp, &sc->sc_dsp); 2946 } 2947 2948 __put_user(1, &sc->sc_used_math); 2949 2950 for (i = 0; i < 32; ++i) { 2951 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2952 } 2953 } 2954 2955 static inline void 2956 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2957 { 2958 int i; 2959 2960 __get_user(regs->CP0_EPC, &sc->sc_pc); 2961 2962 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2963 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2964 2965 for (i = 1; i < 32; ++i) { 2966 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2967 } 2968 2969 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2970 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2971 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2972 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2973 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2974 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2975 { 2976 uint32_t dsp; 2977 __get_user(dsp, &sc->sc_dsp); 2978 cpu_wrdsp(dsp, 0x3ff, regs); 2979 } 2980 2981 for (i = 0; i < 32; ++i) { 2982 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2983 } 2984 } 2985 2986 /* 2987 * Determine which stack to use.. 2988 */ 2989 static inline abi_ulong 2990 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 2991 { 2992 unsigned long sp; 2993 2994 /* Default to using normal stack */ 2995 sp = regs->active_tc.gpr[29]; 2996 2997 /* 2998 * FPU emulator may have its own trampoline active just 2999 * above the user stack, 16-bytes before the next lowest 3000 * 16 byte boundary. Try to avoid trashing it. 3001 */ 3002 sp -= 32; 3003 3004 /* This is the X/Open sanctioned signal stack switching. */ 3005 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 3006 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3007 } 3008 3009 return (sp - frame_size) & ~7; 3010 } 3011 3012 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 3013 { 3014 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 3015 env->hflags &= ~MIPS_HFLAG_M16; 3016 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 3017 env->active_tc.PC &= ~(target_ulong) 1; 3018 } 3019 } 3020 3021 # if defined(TARGET_ABI_MIPSO32) 3022 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 3023 static void setup_frame(int sig, struct target_sigaction * ka, 3024 target_sigset_t *set, CPUMIPSState *regs) 3025 { 3026 struct sigframe *frame; 3027 abi_ulong frame_addr; 3028 int i; 3029 3030 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 3031 trace_user_setup_frame(regs, frame_addr); 3032 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3033 goto give_sigsegv; 3034 } 3035 3036 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3037 3038 setup_sigcontext(regs, &frame->sf_sc); 3039 3040 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3041 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3042 } 3043 3044 /* 3045 * Arguments to signal handler: 3046 * 3047 * a0 = signal number 3048 * a1 = 0 (should be cause) 3049 * a2 = pointer to struct sigcontext 3050 * 3051 * $25 and PC point to the signal handler, $29 points to the 3052 * struct sigframe. 3053 */ 3054 regs->active_tc.gpr[ 4] = sig; 3055 regs->active_tc.gpr[ 5] = 0; 3056 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3057 regs->active_tc.gpr[29] = frame_addr; 3058 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3059 /* The original kernel code sets CP0_EPC to the handler 3060 * since it returns to userland using eret 3061 * we cannot do this here, and we must set PC directly */ 3062 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3063 mips_set_hflags_isa_mode_from_pc(regs); 3064 unlock_user_struct(frame, frame_addr, 1); 3065 return; 3066 3067 give_sigsegv: 3068 force_sigsegv(sig); 3069 } 3070 3071 long do_sigreturn(CPUMIPSState *regs) 3072 { 3073 struct sigframe *frame; 3074 abi_ulong frame_addr; 3075 sigset_t blocked; 3076 target_sigset_t target_set; 3077 int i; 3078 3079 frame_addr = regs->active_tc.gpr[29]; 3080 trace_user_do_sigreturn(regs, frame_addr); 3081 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3082 goto badframe; 3083 3084 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3085 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3086 } 3087 3088 target_to_host_sigset_internal(&blocked, &target_set); 3089 set_sigmask(&blocked); 3090 3091 restore_sigcontext(regs, &frame->sf_sc); 3092 3093 #if 0 3094 /* 3095 * Don't let your children do this ... 3096 */ 3097 __asm__ __volatile__( 3098 "move\t$29, %0\n\t" 3099 "j\tsyscall_exit" 3100 :/* no outputs */ 3101 :"r" (®s)); 3102 /* Unreached */ 3103 #endif 3104 3105 regs->active_tc.PC = regs->CP0_EPC; 3106 mips_set_hflags_isa_mode_from_pc(regs); 3107 /* I am not sure this is right, but it seems to work 3108 * maybe a problem with nested signals ? */ 3109 regs->CP0_EPC = 0; 3110 return -TARGET_QEMU_ESIGRETURN; 3111 3112 badframe: 3113 force_sig(TARGET_SIGSEGV/*, current*/); 3114 return 0; 3115 } 3116 # endif /* O32 */ 3117 3118 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3119 target_siginfo_t *info, 3120 target_sigset_t *set, CPUMIPSState *env) 3121 { 3122 struct target_rt_sigframe *frame; 3123 abi_ulong frame_addr; 3124 int i; 3125 3126 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3127 trace_user_setup_rt_frame(env, frame_addr); 3128 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3129 goto give_sigsegv; 3130 } 3131 3132 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3133 3134 tswap_siginfo(&frame->rs_info, info); 3135 3136 __put_user(0, &frame->rs_uc.tuc_flags); 3137 __put_user(0, &frame->rs_uc.tuc_link); 3138 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3139 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3140 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3141 &frame->rs_uc.tuc_stack.ss_flags); 3142 3143 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3144 3145 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3146 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3147 } 3148 3149 /* 3150 * Arguments to signal handler: 3151 * 3152 * a0 = signal number 3153 * a1 = pointer to siginfo_t 3154 * a2 = pointer to struct ucontext 3155 * 3156 * $25 and PC point to the signal handler, $29 points to the 3157 * struct sigframe. 3158 */ 3159 env->active_tc.gpr[ 4] = sig; 3160 env->active_tc.gpr[ 5] = frame_addr 3161 + offsetof(struct target_rt_sigframe, rs_info); 3162 env->active_tc.gpr[ 6] = frame_addr 3163 + offsetof(struct target_rt_sigframe, rs_uc); 3164 env->active_tc.gpr[29] = frame_addr; 3165 env->active_tc.gpr[31] = frame_addr 3166 + offsetof(struct target_rt_sigframe, rs_code); 3167 /* The original kernel code sets CP0_EPC to the handler 3168 * since it returns to userland using eret 3169 * we cannot do this here, and we must set PC directly */ 3170 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3171 mips_set_hflags_isa_mode_from_pc(env); 3172 unlock_user_struct(frame, frame_addr, 1); 3173 return; 3174 3175 give_sigsegv: 3176 unlock_user_struct(frame, frame_addr, 1); 3177 force_sigsegv(sig); 3178 } 3179 3180 long do_rt_sigreturn(CPUMIPSState *env) 3181 { 3182 struct target_rt_sigframe *frame; 3183 abi_ulong frame_addr; 3184 sigset_t blocked; 3185 3186 frame_addr = env->active_tc.gpr[29]; 3187 trace_user_do_rt_sigreturn(env, frame_addr); 3188 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3189 goto badframe; 3190 } 3191 3192 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3193 set_sigmask(&blocked); 3194 3195 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3196 3197 if (do_sigaltstack(frame_addr + 3198 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3199 0, get_sp_from_cpustate(env)) == -EFAULT) 3200 goto badframe; 3201 3202 env->active_tc.PC = env->CP0_EPC; 3203 mips_set_hflags_isa_mode_from_pc(env); 3204 /* I am not sure this is right, but it seems to work 3205 * maybe a problem with nested signals ? */ 3206 env->CP0_EPC = 0; 3207 return -TARGET_QEMU_ESIGRETURN; 3208 3209 badframe: 3210 force_sig(TARGET_SIGSEGV/*, current*/); 3211 return 0; 3212 } 3213 3214 #elif defined(TARGET_SH4) 3215 3216 /* 3217 * code and data structures from linux kernel: 3218 * include/asm-sh/sigcontext.h 3219 * arch/sh/kernel/signal.c 3220 */ 3221 3222 struct target_sigcontext { 3223 target_ulong oldmask; 3224 3225 /* CPU registers */ 3226 target_ulong sc_gregs[16]; 3227 target_ulong sc_pc; 3228 target_ulong sc_pr; 3229 target_ulong sc_sr; 3230 target_ulong sc_gbr; 3231 target_ulong sc_mach; 3232 target_ulong sc_macl; 3233 3234 /* FPU registers */ 3235 target_ulong sc_fpregs[16]; 3236 target_ulong sc_xfpregs[16]; 3237 unsigned int sc_fpscr; 3238 unsigned int sc_fpul; 3239 unsigned int sc_ownedfp; 3240 }; 3241 3242 struct target_sigframe 3243 { 3244 struct target_sigcontext sc; 3245 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3246 uint16_t retcode[3]; 3247 }; 3248 3249 3250 struct target_ucontext { 3251 target_ulong tuc_flags; 3252 struct target_ucontext *tuc_link; 3253 target_stack_t tuc_stack; 3254 struct target_sigcontext tuc_mcontext; 3255 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3256 }; 3257 3258 struct target_rt_sigframe 3259 { 3260 struct target_siginfo info; 3261 struct target_ucontext uc; 3262 uint16_t retcode[3]; 3263 }; 3264 3265 3266 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3267 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3268 3269 static abi_ulong get_sigframe(struct target_sigaction *ka, 3270 unsigned long sp, size_t frame_size) 3271 { 3272 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3273 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3274 } 3275 3276 return (sp - frame_size) & -8ul; 3277 } 3278 3279 static void setup_sigcontext(struct target_sigcontext *sc, 3280 CPUSH4State *regs, unsigned long mask) 3281 { 3282 int i; 3283 3284 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3285 COPY(gregs[0]); COPY(gregs[1]); 3286 COPY(gregs[2]); COPY(gregs[3]); 3287 COPY(gregs[4]); COPY(gregs[5]); 3288 COPY(gregs[6]); COPY(gregs[7]); 3289 COPY(gregs[8]); COPY(gregs[9]); 3290 COPY(gregs[10]); COPY(gregs[11]); 3291 COPY(gregs[12]); COPY(gregs[13]); 3292 COPY(gregs[14]); COPY(gregs[15]); 3293 COPY(gbr); COPY(mach); 3294 COPY(macl); COPY(pr); 3295 COPY(sr); COPY(pc); 3296 #undef COPY 3297 3298 for (i=0; i<16; i++) { 3299 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3300 } 3301 __put_user(regs->fpscr, &sc->sc_fpscr); 3302 __put_user(regs->fpul, &sc->sc_fpul); 3303 3304 /* non-iBCS2 extensions.. */ 3305 __put_user(mask, &sc->oldmask); 3306 } 3307 3308 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3309 { 3310 int i; 3311 3312 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3313 COPY(gregs[0]); COPY(gregs[1]); 3314 COPY(gregs[2]); COPY(gregs[3]); 3315 COPY(gregs[4]); COPY(gregs[5]); 3316 COPY(gregs[6]); COPY(gregs[7]); 3317 COPY(gregs[8]); COPY(gregs[9]); 3318 COPY(gregs[10]); COPY(gregs[11]); 3319 COPY(gregs[12]); COPY(gregs[13]); 3320 COPY(gregs[14]); COPY(gregs[15]); 3321 COPY(gbr); COPY(mach); 3322 COPY(macl); COPY(pr); 3323 COPY(sr); COPY(pc); 3324 #undef COPY 3325 3326 for (i=0; i<16; i++) { 3327 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3328 } 3329 __get_user(regs->fpscr, &sc->sc_fpscr); 3330 __get_user(regs->fpul, &sc->sc_fpul); 3331 3332 regs->tra = -1; /* disable syscall checks */ 3333 } 3334 3335 static void setup_frame(int sig, struct target_sigaction *ka, 3336 target_sigset_t *set, CPUSH4State *regs) 3337 { 3338 struct target_sigframe *frame; 3339 abi_ulong frame_addr; 3340 int i; 3341 3342 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3343 trace_user_setup_frame(regs, frame_addr); 3344 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3345 goto give_sigsegv; 3346 } 3347 3348 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3349 3350 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3351 __put_user(set->sig[i + 1], &frame->extramask[i]); 3352 } 3353 3354 /* Set up to return from userspace. If provided, use a stub 3355 already in userspace. */ 3356 if (ka->sa_flags & TARGET_SA_RESTORER) { 3357 regs->pr = (unsigned long) ka->sa_restorer; 3358 } else { 3359 /* Generate return code (system call to sigreturn) */ 3360 abi_ulong retcode_addr = frame_addr + 3361 offsetof(struct target_sigframe, retcode); 3362 __put_user(MOVW(2), &frame->retcode[0]); 3363 __put_user(TRAP_NOARG, &frame->retcode[1]); 3364 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3365 regs->pr = (unsigned long) retcode_addr; 3366 } 3367 3368 /* Set up registers for signal handler */ 3369 regs->gregs[15] = frame_addr; 3370 regs->gregs[4] = sig; /* Arg for signal handler */ 3371 regs->gregs[5] = 0; 3372 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3373 regs->pc = (unsigned long) ka->_sa_handler; 3374 3375 unlock_user_struct(frame, frame_addr, 1); 3376 return; 3377 3378 give_sigsegv: 3379 unlock_user_struct(frame, frame_addr, 1); 3380 force_sigsegv(sig); 3381 } 3382 3383 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3384 target_siginfo_t *info, 3385 target_sigset_t *set, CPUSH4State *regs) 3386 { 3387 struct target_rt_sigframe *frame; 3388 abi_ulong frame_addr; 3389 int i; 3390 3391 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3392 trace_user_setup_rt_frame(regs, frame_addr); 3393 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3394 goto give_sigsegv; 3395 } 3396 3397 tswap_siginfo(&frame->info, info); 3398 3399 /* Create the ucontext. */ 3400 __put_user(0, &frame->uc.tuc_flags); 3401 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3402 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3403 &frame->uc.tuc_stack.ss_sp); 3404 __put_user(sas_ss_flags(regs->gregs[15]), 3405 &frame->uc.tuc_stack.ss_flags); 3406 __put_user(target_sigaltstack_used.ss_size, 3407 &frame->uc.tuc_stack.ss_size); 3408 setup_sigcontext(&frame->uc.tuc_mcontext, 3409 regs, set->sig[0]); 3410 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3411 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3412 } 3413 3414 /* Set up to return from userspace. If provided, use a stub 3415 already in userspace. */ 3416 if (ka->sa_flags & TARGET_SA_RESTORER) { 3417 regs->pr = (unsigned long) ka->sa_restorer; 3418 } else { 3419 /* Generate return code (system call to sigreturn) */ 3420 abi_ulong retcode_addr = frame_addr + 3421 offsetof(struct target_rt_sigframe, retcode); 3422 __put_user(MOVW(2), &frame->retcode[0]); 3423 __put_user(TRAP_NOARG, &frame->retcode[1]); 3424 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3425 regs->pr = (unsigned long) retcode_addr; 3426 } 3427 3428 /* Set up registers for signal handler */ 3429 regs->gregs[15] = frame_addr; 3430 regs->gregs[4] = sig; /* Arg for signal handler */ 3431 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3432 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3433 regs->pc = (unsigned long) ka->_sa_handler; 3434 3435 unlock_user_struct(frame, frame_addr, 1); 3436 return; 3437 3438 give_sigsegv: 3439 unlock_user_struct(frame, frame_addr, 1); 3440 force_sigsegv(sig); 3441 } 3442 3443 long do_sigreturn(CPUSH4State *regs) 3444 { 3445 struct target_sigframe *frame; 3446 abi_ulong frame_addr; 3447 sigset_t blocked; 3448 target_sigset_t target_set; 3449 int i; 3450 int err = 0; 3451 3452 frame_addr = regs->gregs[15]; 3453 trace_user_do_sigreturn(regs, frame_addr); 3454 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3455 goto badframe; 3456 } 3457 3458 __get_user(target_set.sig[0], &frame->sc.oldmask); 3459 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3460 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3461 } 3462 3463 if (err) 3464 goto badframe; 3465 3466 target_to_host_sigset_internal(&blocked, &target_set); 3467 set_sigmask(&blocked); 3468 3469 restore_sigcontext(regs, &frame->sc); 3470 3471 unlock_user_struct(frame, frame_addr, 0); 3472 return -TARGET_QEMU_ESIGRETURN; 3473 3474 badframe: 3475 unlock_user_struct(frame, frame_addr, 0); 3476 force_sig(TARGET_SIGSEGV); 3477 return 0; 3478 } 3479 3480 long do_rt_sigreturn(CPUSH4State *regs) 3481 { 3482 struct target_rt_sigframe *frame; 3483 abi_ulong frame_addr; 3484 sigset_t blocked; 3485 3486 frame_addr = regs->gregs[15]; 3487 trace_user_do_rt_sigreturn(regs, frame_addr); 3488 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3489 goto badframe; 3490 } 3491 3492 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3493 set_sigmask(&blocked); 3494 3495 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3496 3497 if (do_sigaltstack(frame_addr + 3498 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3499 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3500 goto badframe; 3501 } 3502 3503 unlock_user_struct(frame, frame_addr, 0); 3504 return -TARGET_QEMU_ESIGRETURN; 3505 3506 badframe: 3507 unlock_user_struct(frame, frame_addr, 0); 3508 force_sig(TARGET_SIGSEGV); 3509 return 0; 3510 } 3511 #elif defined(TARGET_MICROBLAZE) 3512 3513 struct target_sigcontext { 3514 struct target_pt_regs regs; /* needs to be first */ 3515 uint32_t oldmask; 3516 }; 3517 3518 struct target_stack_t { 3519 abi_ulong ss_sp; 3520 int ss_flags; 3521 unsigned int ss_size; 3522 }; 3523 3524 struct target_ucontext { 3525 abi_ulong tuc_flags; 3526 abi_ulong tuc_link; 3527 struct target_stack_t tuc_stack; 3528 struct target_sigcontext tuc_mcontext; 3529 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3530 }; 3531 3532 /* Signal frames. */ 3533 struct target_signal_frame { 3534 struct target_ucontext uc; 3535 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3536 uint32_t tramp[2]; 3537 }; 3538 3539 struct rt_signal_frame { 3540 siginfo_t info; 3541 struct ucontext uc; 3542 uint32_t tramp[2]; 3543 }; 3544 3545 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3546 { 3547 __put_user(env->regs[0], &sc->regs.r0); 3548 __put_user(env->regs[1], &sc->regs.r1); 3549 __put_user(env->regs[2], &sc->regs.r2); 3550 __put_user(env->regs[3], &sc->regs.r3); 3551 __put_user(env->regs[4], &sc->regs.r4); 3552 __put_user(env->regs[5], &sc->regs.r5); 3553 __put_user(env->regs[6], &sc->regs.r6); 3554 __put_user(env->regs[7], &sc->regs.r7); 3555 __put_user(env->regs[8], &sc->regs.r8); 3556 __put_user(env->regs[9], &sc->regs.r9); 3557 __put_user(env->regs[10], &sc->regs.r10); 3558 __put_user(env->regs[11], &sc->regs.r11); 3559 __put_user(env->regs[12], &sc->regs.r12); 3560 __put_user(env->regs[13], &sc->regs.r13); 3561 __put_user(env->regs[14], &sc->regs.r14); 3562 __put_user(env->regs[15], &sc->regs.r15); 3563 __put_user(env->regs[16], &sc->regs.r16); 3564 __put_user(env->regs[17], &sc->regs.r17); 3565 __put_user(env->regs[18], &sc->regs.r18); 3566 __put_user(env->regs[19], &sc->regs.r19); 3567 __put_user(env->regs[20], &sc->regs.r20); 3568 __put_user(env->regs[21], &sc->regs.r21); 3569 __put_user(env->regs[22], &sc->regs.r22); 3570 __put_user(env->regs[23], &sc->regs.r23); 3571 __put_user(env->regs[24], &sc->regs.r24); 3572 __put_user(env->regs[25], &sc->regs.r25); 3573 __put_user(env->regs[26], &sc->regs.r26); 3574 __put_user(env->regs[27], &sc->regs.r27); 3575 __put_user(env->regs[28], &sc->regs.r28); 3576 __put_user(env->regs[29], &sc->regs.r29); 3577 __put_user(env->regs[30], &sc->regs.r30); 3578 __put_user(env->regs[31], &sc->regs.r31); 3579 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3580 } 3581 3582 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3583 { 3584 __get_user(env->regs[0], &sc->regs.r0); 3585 __get_user(env->regs[1], &sc->regs.r1); 3586 __get_user(env->regs[2], &sc->regs.r2); 3587 __get_user(env->regs[3], &sc->regs.r3); 3588 __get_user(env->regs[4], &sc->regs.r4); 3589 __get_user(env->regs[5], &sc->regs.r5); 3590 __get_user(env->regs[6], &sc->regs.r6); 3591 __get_user(env->regs[7], &sc->regs.r7); 3592 __get_user(env->regs[8], &sc->regs.r8); 3593 __get_user(env->regs[9], &sc->regs.r9); 3594 __get_user(env->regs[10], &sc->regs.r10); 3595 __get_user(env->regs[11], &sc->regs.r11); 3596 __get_user(env->regs[12], &sc->regs.r12); 3597 __get_user(env->regs[13], &sc->regs.r13); 3598 __get_user(env->regs[14], &sc->regs.r14); 3599 __get_user(env->regs[15], &sc->regs.r15); 3600 __get_user(env->regs[16], &sc->regs.r16); 3601 __get_user(env->regs[17], &sc->regs.r17); 3602 __get_user(env->regs[18], &sc->regs.r18); 3603 __get_user(env->regs[19], &sc->regs.r19); 3604 __get_user(env->regs[20], &sc->regs.r20); 3605 __get_user(env->regs[21], &sc->regs.r21); 3606 __get_user(env->regs[22], &sc->regs.r22); 3607 __get_user(env->regs[23], &sc->regs.r23); 3608 __get_user(env->regs[24], &sc->regs.r24); 3609 __get_user(env->regs[25], &sc->regs.r25); 3610 __get_user(env->regs[26], &sc->regs.r26); 3611 __get_user(env->regs[27], &sc->regs.r27); 3612 __get_user(env->regs[28], &sc->regs.r28); 3613 __get_user(env->regs[29], &sc->regs.r29); 3614 __get_user(env->regs[30], &sc->regs.r30); 3615 __get_user(env->regs[31], &sc->regs.r31); 3616 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3617 } 3618 3619 static abi_ulong get_sigframe(struct target_sigaction *ka, 3620 CPUMBState *env, int frame_size) 3621 { 3622 abi_ulong sp = env->regs[1]; 3623 3624 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3625 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3626 } 3627 3628 return ((sp - frame_size) & -8UL); 3629 } 3630 3631 static void setup_frame(int sig, struct target_sigaction *ka, 3632 target_sigset_t *set, CPUMBState *env) 3633 { 3634 struct target_signal_frame *frame; 3635 abi_ulong frame_addr; 3636 int i; 3637 3638 frame_addr = get_sigframe(ka, env, sizeof *frame); 3639 trace_user_setup_frame(env, frame_addr); 3640 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3641 goto badframe; 3642 3643 /* Save the mask. */ 3644 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3645 3646 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3647 __put_user(set->sig[i], &frame->extramask[i - 1]); 3648 } 3649 3650 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3651 3652 /* Set up to return from userspace. If provided, use a stub 3653 already in userspace. */ 3654 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3655 if (ka->sa_flags & TARGET_SA_RESTORER) { 3656 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3657 } else { 3658 uint32_t t; 3659 /* Note, these encodings are _big endian_! */ 3660 /* addi r12, r0, __NR_sigreturn */ 3661 t = 0x31800000UL | TARGET_NR_sigreturn; 3662 __put_user(t, frame->tramp + 0); 3663 /* brki r14, 0x8 */ 3664 t = 0xb9cc0008UL; 3665 __put_user(t, frame->tramp + 1); 3666 3667 /* Return from sighandler will jump to the tramp. 3668 Negative 8 offset because return is rtsd r15, 8 */ 3669 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3670 - 8; 3671 } 3672 3673 /* Set up registers for signal handler */ 3674 env->regs[1] = frame_addr; 3675 /* Signal handler args: */ 3676 env->regs[5] = sig; /* Arg 0: signum */ 3677 env->regs[6] = 0; 3678 /* arg 1: sigcontext */ 3679 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3680 3681 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3682 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3683 3684 unlock_user_struct(frame, frame_addr, 1); 3685 return; 3686 badframe: 3687 force_sigsegv(sig); 3688 } 3689 3690 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3691 target_siginfo_t *info, 3692 target_sigset_t *set, CPUMBState *env) 3693 { 3694 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3695 } 3696 3697 long do_sigreturn(CPUMBState *env) 3698 { 3699 struct target_signal_frame *frame; 3700 abi_ulong frame_addr; 3701 target_sigset_t target_set; 3702 sigset_t set; 3703 int i; 3704 3705 frame_addr = env->regs[R_SP]; 3706 trace_user_do_sigreturn(env, frame_addr); 3707 /* Make sure the guest isn't playing games. */ 3708 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3709 goto badframe; 3710 3711 /* Restore blocked signals */ 3712 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3713 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3714 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3715 } 3716 target_to_host_sigset_internal(&set, &target_set); 3717 set_sigmask(&set); 3718 3719 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3720 /* We got here through a sigreturn syscall, our path back is via an 3721 rtb insn so setup r14 for that. */ 3722 env->regs[14] = env->sregs[SR_PC]; 3723 3724 unlock_user_struct(frame, frame_addr, 0); 3725 return -TARGET_QEMU_ESIGRETURN; 3726 badframe: 3727 force_sig(TARGET_SIGSEGV); 3728 } 3729 3730 long do_rt_sigreturn(CPUMBState *env) 3731 { 3732 trace_user_do_rt_sigreturn(env, 0); 3733 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3734 return -TARGET_ENOSYS; 3735 } 3736 3737 #elif defined(TARGET_CRIS) 3738 3739 struct target_sigcontext { 3740 struct target_pt_regs regs; /* needs to be first */ 3741 uint32_t oldmask; 3742 uint32_t usp; /* usp before stacking this gunk on it */ 3743 }; 3744 3745 /* Signal frames. */ 3746 struct target_signal_frame { 3747 struct target_sigcontext sc; 3748 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3749 uint16_t retcode[4]; /* Trampoline code. */ 3750 }; 3751 3752 struct rt_signal_frame { 3753 siginfo_t *pinfo; 3754 void *puc; 3755 siginfo_t info; 3756 struct ucontext uc; 3757 uint16_t retcode[4]; /* Trampoline code. */ 3758 }; 3759 3760 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3761 { 3762 __put_user(env->regs[0], &sc->regs.r0); 3763 __put_user(env->regs[1], &sc->regs.r1); 3764 __put_user(env->regs[2], &sc->regs.r2); 3765 __put_user(env->regs[3], &sc->regs.r3); 3766 __put_user(env->regs[4], &sc->regs.r4); 3767 __put_user(env->regs[5], &sc->regs.r5); 3768 __put_user(env->regs[6], &sc->regs.r6); 3769 __put_user(env->regs[7], &sc->regs.r7); 3770 __put_user(env->regs[8], &sc->regs.r8); 3771 __put_user(env->regs[9], &sc->regs.r9); 3772 __put_user(env->regs[10], &sc->regs.r10); 3773 __put_user(env->regs[11], &sc->regs.r11); 3774 __put_user(env->regs[12], &sc->regs.r12); 3775 __put_user(env->regs[13], &sc->regs.r13); 3776 __put_user(env->regs[14], &sc->usp); 3777 __put_user(env->regs[15], &sc->regs.acr); 3778 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3779 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3780 __put_user(env->pc, &sc->regs.erp); 3781 } 3782 3783 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3784 { 3785 __get_user(env->regs[0], &sc->regs.r0); 3786 __get_user(env->regs[1], &sc->regs.r1); 3787 __get_user(env->regs[2], &sc->regs.r2); 3788 __get_user(env->regs[3], &sc->regs.r3); 3789 __get_user(env->regs[4], &sc->regs.r4); 3790 __get_user(env->regs[5], &sc->regs.r5); 3791 __get_user(env->regs[6], &sc->regs.r6); 3792 __get_user(env->regs[7], &sc->regs.r7); 3793 __get_user(env->regs[8], &sc->regs.r8); 3794 __get_user(env->regs[9], &sc->regs.r9); 3795 __get_user(env->regs[10], &sc->regs.r10); 3796 __get_user(env->regs[11], &sc->regs.r11); 3797 __get_user(env->regs[12], &sc->regs.r12); 3798 __get_user(env->regs[13], &sc->regs.r13); 3799 __get_user(env->regs[14], &sc->usp); 3800 __get_user(env->regs[15], &sc->regs.acr); 3801 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3802 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3803 __get_user(env->pc, &sc->regs.erp); 3804 } 3805 3806 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3807 { 3808 abi_ulong sp; 3809 /* Align the stack downwards to 4. */ 3810 sp = (env->regs[R_SP] & ~3); 3811 return sp - framesize; 3812 } 3813 3814 static void setup_frame(int sig, struct target_sigaction *ka, 3815 target_sigset_t *set, CPUCRISState *env) 3816 { 3817 struct target_signal_frame *frame; 3818 abi_ulong frame_addr; 3819 int i; 3820 3821 frame_addr = get_sigframe(env, sizeof *frame); 3822 trace_user_setup_frame(env, frame_addr); 3823 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3824 goto badframe; 3825 3826 /* 3827 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3828 * use this trampoline anymore but it sets it up for GDB. 3829 * In QEMU, using the trampoline simplifies things a bit so we use it. 3830 * 3831 * This is movu.w __NR_sigreturn, r9; break 13; 3832 */ 3833 __put_user(0x9c5f, frame->retcode+0); 3834 __put_user(TARGET_NR_sigreturn, 3835 frame->retcode + 1); 3836 __put_user(0xe93d, frame->retcode + 2); 3837 3838 /* Save the mask. */ 3839 __put_user(set->sig[0], &frame->sc.oldmask); 3840 3841 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3842 __put_user(set->sig[i], &frame->extramask[i - 1]); 3843 } 3844 3845 setup_sigcontext(&frame->sc, env); 3846 3847 /* Move the stack and setup the arguments for the handler. */ 3848 env->regs[R_SP] = frame_addr; 3849 env->regs[10] = sig; 3850 env->pc = (unsigned long) ka->_sa_handler; 3851 /* Link SRP so the guest returns through the trampoline. */ 3852 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3853 3854 unlock_user_struct(frame, frame_addr, 1); 3855 return; 3856 badframe: 3857 force_sigsegv(sig); 3858 } 3859 3860 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3861 target_siginfo_t *info, 3862 target_sigset_t *set, CPUCRISState *env) 3863 { 3864 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3865 } 3866 3867 long do_sigreturn(CPUCRISState *env) 3868 { 3869 struct target_signal_frame *frame; 3870 abi_ulong frame_addr; 3871 target_sigset_t target_set; 3872 sigset_t set; 3873 int i; 3874 3875 frame_addr = env->regs[R_SP]; 3876 trace_user_do_sigreturn(env, frame_addr); 3877 /* Make sure the guest isn't playing games. */ 3878 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3879 goto badframe; 3880 } 3881 3882 /* Restore blocked signals */ 3883 __get_user(target_set.sig[0], &frame->sc.oldmask); 3884 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3885 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3886 } 3887 target_to_host_sigset_internal(&set, &target_set); 3888 set_sigmask(&set); 3889 3890 restore_sigcontext(&frame->sc, env); 3891 unlock_user_struct(frame, frame_addr, 0); 3892 return -TARGET_QEMU_ESIGRETURN; 3893 badframe: 3894 force_sig(TARGET_SIGSEGV); 3895 } 3896 3897 long do_rt_sigreturn(CPUCRISState *env) 3898 { 3899 trace_user_do_rt_sigreturn(env, 0); 3900 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3901 return -TARGET_ENOSYS; 3902 } 3903 3904 #elif defined(TARGET_OPENRISC) 3905 3906 struct target_sigcontext { 3907 struct target_pt_regs regs; 3908 abi_ulong oldmask; 3909 abi_ulong usp; 3910 }; 3911 3912 struct target_ucontext { 3913 abi_ulong tuc_flags; 3914 abi_ulong tuc_link; 3915 target_stack_t tuc_stack; 3916 struct target_sigcontext tuc_mcontext; 3917 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3918 }; 3919 3920 struct target_rt_sigframe { 3921 abi_ulong pinfo; 3922 uint64_t puc; 3923 struct target_siginfo info; 3924 struct target_sigcontext sc; 3925 struct target_ucontext uc; 3926 unsigned char retcode[16]; /* trampoline code */ 3927 }; 3928 3929 /* This is the asm-generic/ucontext.h version */ 3930 #if 0 3931 static int restore_sigcontext(CPUOpenRISCState *regs, 3932 struct target_sigcontext *sc) 3933 { 3934 unsigned int err = 0; 3935 unsigned long old_usp; 3936 3937 /* Alwys make any pending restarted system call return -EINTR */ 3938 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3939 3940 /* restore the regs from &sc->regs (same as sc, since regs is first) 3941 * (sc is already checked for VERIFY_READ since the sigframe was 3942 * checked in sys_sigreturn previously) 3943 */ 3944 3945 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3946 goto badframe; 3947 } 3948 3949 /* make sure the U-flag is set so user-mode cannot fool us */ 3950 3951 regs->sr &= ~SR_SM; 3952 3953 /* restore the old USP as it was before we stacked the sc etc. 3954 * (we cannot just pop the sigcontext since we aligned the sp and 3955 * stuff after pushing it) 3956 */ 3957 3958 __get_user(old_usp, &sc->usp); 3959 phx_signal("old_usp 0x%lx", old_usp); 3960 3961 __PHX__ REALLY /* ??? */ 3962 wrusp(old_usp); 3963 regs->gpr[1] = old_usp; 3964 3965 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3966 * after this completes, but we don't use that mechanism. maybe we can 3967 * use it now ? 3968 */ 3969 3970 return err; 3971 3972 badframe: 3973 return 1; 3974 } 3975 #endif 3976 3977 /* Set up a signal frame. */ 3978 3979 static void setup_sigcontext(struct target_sigcontext *sc, 3980 CPUOpenRISCState *regs, 3981 unsigned long mask) 3982 { 3983 unsigned long usp = regs->gpr[1]; 3984 3985 /* copy the regs. they are first in sc so we can use sc directly */ 3986 3987 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 3988 3989 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 3990 the signal handler. The frametype will be restored to its previous 3991 value in restore_sigcontext. */ 3992 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 3993 3994 /* then some other stuff */ 3995 __put_user(mask, &sc->oldmask); 3996 __put_user(usp, &sc->usp); 3997 } 3998 3999 static inline unsigned long align_sigframe(unsigned long sp) 4000 { 4001 return sp & ~3UL; 4002 } 4003 4004 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 4005 CPUOpenRISCState *regs, 4006 size_t frame_size) 4007 { 4008 unsigned long sp = regs->gpr[1]; 4009 int onsigstack = on_sig_stack(sp); 4010 4011 /* redzone */ 4012 /* This is the X/Open sanctioned signal stack switching. */ 4013 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 4014 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4015 } 4016 4017 sp = align_sigframe(sp - frame_size); 4018 4019 /* 4020 * If we are on the alternate signal stack and would overflow it, don't. 4021 * Return an always-bogus address instead so we will die with SIGSEGV. 4022 */ 4023 4024 if (onsigstack && !likely(on_sig_stack(sp))) { 4025 return -1L; 4026 } 4027 4028 return sp; 4029 } 4030 4031 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4032 target_siginfo_t *info, 4033 target_sigset_t *set, CPUOpenRISCState *env) 4034 { 4035 int err = 0; 4036 abi_ulong frame_addr; 4037 unsigned long return_ip; 4038 struct target_rt_sigframe *frame; 4039 abi_ulong info_addr, uc_addr; 4040 4041 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4042 trace_user_setup_rt_frame(env, frame_addr); 4043 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4044 goto give_sigsegv; 4045 } 4046 4047 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4048 __put_user(info_addr, &frame->pinfo); 4049 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4050 __put_user(uc_addr, &frame->puc); 4051 4052 if (ka->sa_flags & SA_SIGINFO) { 4053 tswap_siginfo(&frame->info, info); 4054 } 4055 4056 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 4057 __put_user(0, &frame->uc.tuc_flags); 4058 __put_user(0, &frame->uc.tuc_link); 4059 __put_user(target_sigaltstack_used.ss_sp, 4060 &frame->uc.tuc_stack.ss_sp); 4061 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 4062 __put_user(target_sigaltstack_used.ss_size, 4063 &frame->uc.tuc_stack.ss_size); 4064 setup_sigcontext(&frame->sc, env, set->sig[0]); 4065 4066 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4067 4068 /* trampoline - the desired return ip is the retcode itself */ 4069 return_ip = (unsigned long)&frame->retcode; 4070 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4071 __put_user(0xa960, (short *)(frame->retcode + 0)); 4072 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4073 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4074 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4075 4076 if (err) { 4077 goto give_sigsegv; 4078 } 4079 4080 /* TODO what is the current->exec_domain stuff and invmap ? */ 4081 4082 /* Set up registers for signal handler */ 4083 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4084 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4085 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4086 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4087 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4088 4089 /* actually move the usp to reflect the stacked frame */ 4090 env->gpr[1] = (unsigned long)frame; 4091 4092 return; 4093 4094 give_sigsegv: 4095 unlock_user_struct(frame, frame_addr, 1); 4096 force_sigsegv(sig); 4097 } 4098 4099 long do_sigreturn(CPUOpenRISCState *env) 4100 { 4101 trace_user_do_sigreturn(env, 0); 4102 fprintf(stderr, "do_sigreturn: not implemented\n"); 4103 return -TARGET_ENOSYS; 4104 } 4105 4106 long do_rt_sigreturn(CPUOpenRISCState *env) 4107 { 4108 trace_user_do_rt_sigreturn(env, 0); 4109 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4110 return -TARGET_ENOSYS; 4111 } 4112 /* TARGET_OPENRISC */ 4113 4114 #elif defined(TARGET_S390X) 4115 4116 #define __NUM_GPRS 16 4117 #define __NUM_FPRS 16 4118 #define __NUM_ACRS 16 4119 4120 #define S390_SYSCALL_SIZE 2 4121 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4122 4123 #define _SIGCONTEXT_NSIG 64 4124 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4125 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4126 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4127 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4128 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4129 4130 typedef struct { 4131 target_psw_t psw; 4132 target_ulong gprs[__NUM_GPRS]; 4133 unsigned int acrs[__NUM_ACRS]; 4134 } target_s390_regs_common; 4135 4136 typedef struct { 4137 unsigned int fpc; 4138 double fprs[__NUM_FPRS]; 4139 } target_s390_fp_regs; 4140 4141 typedef struct { 4142 target_s390_regs_common regs; 4143 target_s390_fp_regs fpregs; 4144 } target_sigregs; 4145 4146 struct target_sigcontext { 4147 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4148 target_sigregs *sregs; 4149 }; 4150 4151 typedef struct { 4152 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4153 struct target_sigcontext sc; 4154 target_sigregs sregs; 4155 int signo; 4156 uint8_t retcode[S390_SYSCALL_SIZE]; 4157 } sigframe; 4158 4159 struct target_ucontext { 4160 target_ulong tuc_flags; 4161 struct target_ucontext *tuc_link; 4162 target_stack_t tuc_stack; 4163 target_sigregs tuc_mcontext; 4164 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4165 }; 4166 4167 typedef struct { 4168 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4169 uint8_t retcode[S390_SYSCALL_SIZE]; 4170 struct target_siginfo info; 4171 struct target_ucontext uc; 4172 } rt_sigframe; 4173 4174 static inline abi_ulong 4175 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4176 { 4177 abi_ulong sp; 4178 4179 /* Default to using normal stack */ 4180 sp = env->regs[15]; 4181 4182 /* This is the X/Open sanctioned signal stack switching. */ 4183 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4184 if (!sas_ss_flags(sp)) { 4185 sp = target_sigaltstack_used.ss_sp + 4186 target_sigaltstack_used.ss_size; 4187 } 4188 } 4189 4190 /* This is the legacy signal stack switching. */ 4191 else if (/* FIXME !user_mode(regs) */ 0 && 4192 !(ka->sa_flags & TARGET_SA_RESTORER) && 4193 ka->sa_restorer) { 4194 sp = (abi_ulong) ka->sa_restorer; 4195 } 4196 4197 return (sp - frame_size) & -8ul; 4198 } 4199 4200 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4201 { 4202 int i; 4203 //save_access_regs(current->thread.acrs); FIXME 4204 4205 /* Copy a 'clean' PSW mask to the user to avoid leaking 4206 information about whether PER is currently on. */ 4207 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4208 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4209 for (i = 0; i < 16; i++) { 4210 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4211 } 4212 for (i = 0; i < 16; i++) { 4213 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4214 } 4215 /* 4216 * We have to store the fp registers to current->thread.fp_regs 4217 * to merge them with the emulated registers. 4218 */ 4219 //save_fp_regs(¤t->thread.fp_regs); FIXME 4220 for (i = 0; i < 16; i++) { 4221 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4222 } 4223 } 4224 4225 static void setup_frame(int sig, struct target_sigaction *ka, 4226 target_sigset_t *set, CPUS390XState *env) 4227 { 4228 sigframe *frame; 4229 abi_ulong frame_addr; 4230 4231 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4232 trace_user_setup_frame(env, frame_addr); 4233 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4234 goto give_sigsegv; 4235 } 4236 4237 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4238 4239 save_sigregs(env, &frame->sregs); 4240 4241 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4242 (abi_ulong *)&frame->sc.sregs); 4243 4244 /* Set up to return from userspace. If provided, use a stub 4245 already in userspace. */ 4246 if (ka->sa_flags & TARGET_SA_RESTORER) { 4247 env->regs[14] = (unsigned long) 4248 ka->sa_restorer | PSW_ADDR_AMODE; 4249 } else { 4250 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4251 | PSW_ADDR_AMODE; 4252 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4253 (uint16_t *)(frame->retcode)); 4254 } 4255 4256 /* Set up backchain. */ 4257 __put_user(env->regs[15], (abi_ulong *) frame); 4258 4259 /* Set up registers for signal handler */ 4260 env->regs[15] = frame_addr; 4261 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4262 4263 env->regs[2] = sig; //map_signal(sig); 4264 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4265 4266 /* We forgot to include these in the sigcontext. 4267 To avoid breaking binary compatibility, they are passed as args. */ 4268 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4269 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4270 4271 /* Place signal number on stack to allow backtrace from handler. */ 4272 __put_user(env->regs[2], &frame->signo); 4273 unlock_user_struct(frame, frame_addr, 1); 4274 return; 4275 4276 give_sigsegv: 4277 force_sigsegv(sig); 4278 } 4279 4280 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4281 target_siginfo_t *info, 4282 target_sigset_t *set, CPUS390XState *env) 4283 { 4284 int i; 4285 rt_sigframe *frame; 4286 abi_ulong frame_addr; 4287 4288 frame_addr = get_sigframe(ka, env, sizeof *frame); 4289 trace_user_setup_rt_frame(env, frame_addr); 4290 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4291 goto give_sigsegv; 4292 } 4293 4294 tswap_siginfo(&frame->info, info); 4295 4296 /* Create the ucontext. */ 4297 __put_user(0, &frame->uc.tuc_flags); 4298 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4299 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4300 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4301 &frame->uc.tuc_stack.ss_flags); 4302 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4303 save_sigregs(env, &frame->uc.tuc_mcontext); 4304 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4305 __put_user((abi_ulong)set->sig[i], 4306 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4307 } 4308 4309 /* Set up to return from userspace. If provided, use a stub 4310 already in userspace. */ 4311 if (ka->sa_flags & TARGET_SA_RESTORER) { 4312 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4313 } else { 4314 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4315 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4316 (uint16_t *)(frame->retcode)); 4317 } 4318 4319 /* Set up backchain. */ 4320 __put_user(env->regs[15], (abi_ulong *) frame); 4321 4322 /* Set up registers for signal handler */ 4323 env->regs[15] = frame_addr; 4324 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4325 4326 env->regs[2] = sig; //map_signal(sig); 4327 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4328 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4329 return; 4330 4331 give_sigsegv: 4332 force_sigsegv(sig); 4333 } 4334 4335 static int 4336 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4337 { 4338 int err = 0; 4339 int i; 4340 4341 for (i = 0; i < 16; i++) { 4342 __get_user(env->regs[i], &sc->regs.gprs[i]); 4343 } 4344 4345 __get_user(env->psw.mask, &sc->regs.psw.mask); 4346 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4347 (unsigned long long)env->psw.addr); 4348 __get_user(env->psw.addr, &sc->regs.psw.addr); 4349 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4350 4351 for (i = 0; i < 16; i++) { 4352 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4353 } 4354 for (i = 0; i < 16; i++) { 4355 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4356 } 4357 4358 return err; 4359 } 4360 4361 long do_sigreturn(CPUS390XState *env) 4362 { 4363 sigframe *frame; 4364 abi_ulong frame_addr = env->regs[15]; 4365 target_sigset_t target_set; 4366 sigset_t set; 4367 4368 trace_user_do_sigreturn(env, frame_addr); 4369 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4370 goto badframe; 4371 } 4372 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4373 4374 target_to_host_sigset_internal(&set, &target_set); 4375 set_sigmask(&set); /* ~_BLOCKABLE? */ 4376 4377 if (restore_sigregs(env, &frame->sregs)) { 4378 goto badframe; 4379 } 4380 4381 unlock_user_struct(frame, frame_addr, 0); 4382 return -TARGET_QEMU_ESIGRETURN; 4383 4384 badframe: 4385 force_sig(TARGET_SIGSEGV); 4386 return 0; 4387 } 4388 4389 long do_rt_sigreturn(CPUS390XState *env) 4390 { 4391 rt_sigframe *frame; 4392 abi_ulong frame_addr = env->regs[15]; 4393 sigset_t set; 4394 4395 trace_user_do_rt_sigreturn(env, frame_addr); 4396 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4397 goto badframe; 4398 } 4399 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4400 4401 set_sigmask(&set); /* ~_BLOCKABLE? */ 4402 4403 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4404 goto badframe; 4405 } 4406 4407 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4408 get_sp_from_cpustate(env)) == -EFAULT) { 4409 goto badframe; 4410 } 4411 unlock_user_struct(frame, frame_addr, 0); 4412 return -TARGET_QEMU_ESIGRETURN; 4413 4414 badframe: 4415 unlock_user_struct(frame, frame_addr, 0); 4416 force_sig(TARGET_SIGSEGV); 4417 return 0; 4418 } 4419 4420 #elif defined(TARGET_PPC) 4421 4422 /* Size of dummy stack frame allocated when calling signal handler. 4423 See arch/powerpc/include/asm/ptrace.h. */ 4424 #if defined(TARGET_PPC64) 4425 #define SIGNAL_FRAMESIZE 128 4426 #else 4427 #define SIGNAL_FRAMESIZE 64 4428 #endif 4429 4430 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4431 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4432 struct target_mcontext { 4433 target_ulong mc_gregs[48]; 4434 /* Includes fpscr. */ 4435 uint64_t mc_fregs[33]; 4436 target_ulong mc_pad[2]; 4437 /* We need to handle Altivec and SPE at the same time, which no 4438 kernel needs to do. Fortunately, the kernel defines this bit to 4439 be Altivec-register-large all the time, rather than trying to 4440 twiddle it based on the specific platform. */ 4441 union { 4442 /* SPE vector registers. One extra for SPEFSCR. */ 4443 uint32_t spe[33]; 4444 /* Altivec vector registers. The packing of VSCR and VRSAVE 4445 varies depending on whether we're PPC64 or not: PPC64 splits 4446 them apart; PPC32 stuffs them together. */ 4447 #if defined(TARGET_PPC64) 4448 #define QEMU_NVRREG 34 4449 #else 4450 #define QEMU_NVRREG 33 4451 #endif 4452 ppc_avr_t altivec[QEMU_NVRREG]; 4453 #undef QEMU_NVRREG 4454 } mc_vregs __attribute__((__aligned__(16))); 4455 }; 4456 4457 /* See arch/powerpc/include/asm/sigcontext.h. */ 4458 struct target_sigcontext { 4459 target_ulong _unused[4]; 4460 int32_t signal; 4461 #if defined(TARGET_PPC64) 4462 int32_t pad0; 4463 #endif 4464 target_ulong handler; 4465 target_ulong oldmask; 4466 target_ulong regs; /* struct pt_regs __user * */ 4467 #if defined(TARGET_PPC64) 4468 struct target_mcontext mcontext; 4469 #endif 4470 }; 4471 4472 /* Indices for target_mcontext.mc_gregs, below. 4473 See arch/powerpc/include/asm/ptrace.h for details. */ 4474 enum { 4475 TARGET_PT_R0 = 0, 4476 TARGET_PT_R1 = 1, 4477 TARGET_PT_R2 = 2, 4478 TARGET_PT_R3 = 3, 4479 TARGET_PT_R4 = 4, 4480 TARGET_PT_R5 = 5, 4481 TARGET_PT_R6 = 6, 4482 TARGET_PT_R7 = 7, 4483 TARGET_PT_R8 = 8, 4484 TARGET_PT_R9 = 9, 4485 TARGET_PT_R10 = 10, 4486 TARGET_PT_R11 = 11, 4487 TARGET_PT_R12 = 12, 4488 TARGET_PT_R13 = 13, 4489 TARGET_PT_R14 = 14, 4490 TARGET_PT_R15 = 15, 4491 TARGET_PT_R16 = 16, 4492 TARGET_PT_R17 = 17, 4493 TARGET_PT_R18 = 18, 4494 TARGET_PT_R19 = 19, 4495 TARGET_PT_R20 = 20, 4496 TARGET_PT_R21 = 21, 4497 TARGET_PT_R22 = 22, 4498 TARGET_PT_R23 = 23, 4499 TARGET_PT_R24 = 24, 4500 TARGET_PT_R25 = 25, 4501 TARGET_PT_R26 = 26, 4502 TARGET_PT_R27 = 27, 4503 TARGET_PT_R28 = 28, 4504 TARGET_PT_R29 = 29, 4505 TARGET_PT_R30 = 30, 4506 TARGET_PT_R31 = 31, 4507 TARGET_PT_NIP = 32, 4508 TARGET_PT_MSR = 33, 4509 TARGET_PT_ORIG_R3 = 34, 4510 TARGET_PT_CTR = 35, 4511 TARGET_PT_LNK = 36, 4512 TARGET_PT_XER = 37, 4513 TARGET_PT_CCR = 38, 4514 /* Yes, there are two registers with #39. One is 64-bit only. */ 4515 TARGET_PT_MQ = 39, 4516 TARGET_PT_SOFTE = 39, 4517 TARGET_PT_TRAP = 40, 4518 TARGET_PT_DAR = 41, 4519 TARGET_PT_DSISR = 42, 4520 TARGET_PT_RESULT = 43, 4521 TARGET_PT_REGS_COUNT = 44 4522 }; 4523 4524 4525 struct target_ucontext { 4526 target_ulong tuc_flags; 4527 target_ulong tuc_link; /* struct ucontext __user * */ 4528 struct target_sigaltstack tuc_stack; 4529 #if !defined(TARGET_PPC64) 4530 int32_t tuc_pad[7]; 4531 target_ulong tuc_regs; /* struct mcontext __user * 4532 points to uc_mcontext field */ 4533 #endif 4534 target_sigset_t tuc_sigmask; 4535 #if defined(TARGET_PPC64) 4536 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4537 struct target_sigcontext tuc_sigcontext; 4538 #else 4539 int32_t tuc_maskext[30]; 4540 int32_t tuc_pad2[3]; 4541 struct target_mcontext tuc_mcontext; 4542 #endif 4543 }; 4544 4545 /* See arch/powerpc/kernel/signal_32.c. */ 4546 struct target_sigframe { 4547 struct target_sigcontext sctx; 4548 struct target_mcontext mctx; 4549 int32_t abigap[56]; 4550 }; 4551 4552 #if defined(TARGET_PPC64) 4553 4554 #define TARGET_TRAMP_SIZE 6 4555 4556 struct target_rt_sigframe { 4557 /* sys_rt_sigreturn requires the ucontext be the first field */ 4558 struct target_ucontext uc; 4559 target_ulong _unused[2]; 4560 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4561 target_ulong pinfo; /* struct siginfo __user * */ 4562 target_ulong puc; /* void __user * */ 4563 struct target_siginfo info; 4564 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4565 char abigap[288]; 4566 } __attribute__((aligned(16))); 4567 4568 #else 4569 4570 struct target_rt_sigframe { 4571 struct target_siginfo info; 4572 struct target_ucontext uc; 4573 int32_t abigap[56]; 4574 }; 4575 4576 #endif 4577 4578 #if defined(TARGET_PPC64) 4579 4580 struct target_func_ptr { 4581 target_ulong entry; 4582 target_ulong toc; 4583 }; 4584 4585 #endif 4586 4587 /* We use the mc_pad field for the signal return trampoline. */ 4588 #define tramp mc_pad 4589 4590 /* See arch/powerpc/kernel/signal.c. */ 4591 static target_ulong get_sigframe(struct target_sigaction *ka, 4592 CPUPPCState *env, 4593 int frame_size) 4594 { 4595 target_ulong oldsp; 4596 4597 oldsp = env->gpr[1]; 4598 4599 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4600 (sas_ss_flags(oldsp) == 0)) { 4601 oldsp = (target_sigaltstack_used.ss_sp 4602 + target_sigaltstack_used.ss_size); 4603 } 4604 4605 return (oldsp - frame_size) & ~0xFUL; 4606 } 4607 4608 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4609 { 4610 target_ulong msr = env->msr; 4611 int i; 4612 target_ulong ccr = 0; 4613 4614 /* In general, the kernel attempts to be intelligent about what it 4615 needs to save for Altivec/FP/SPE registers. We don't care that 4616 much, so we just go ahead and save everything. */ 4617 4618 /* Save general registers. */ 4619 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4620 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4621 } 4622 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4623 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4624 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4625 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4626 4627 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4628 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4629 } 4630 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4631 4632 /* Save Altivec registers if necessary. */ 4633 if (env->insns_flags & PPC_ALTIVEC) { 4634 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4635 ppc_avr_t *avr = &env->avr[i]; 4636 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4637 4638 __put_user(avr->u64[0], &vreg->u64[0]); 4639 __put_user(avr->u64[1], &vreg->u64[1]); 4640 } 4641 /* Set MSR_VR in the saved MSR value to indicate that 4642 frame->mc_vregs contains valid data. */ 4643 msr |= MSR_VR; 4644 __put_user((uint32_t)env->spr[SPR_VRSAVE], 4645 &frame->mc_vregs.altivec[32].u32[3]); 4646 } 4647 4648 /* Save floating point registers. */ 4649 if (env->insns_flags & PPC_FLOAT) { 4650 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4651 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4652 } 4653 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4654 } 4655 4656 /* Save SPE registers. The kernel only saves the high half. */ 4657 if (env->insns_flags & PPC_SPE) { 4658 #if defined(TARGET_PPC64) 4659 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4660 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4661 } 4662 #else 4663 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4664 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4665 } 4666 #endif 4667 /* Set MSR_SPE in the saved MSR value to indicate that 4668 frame->mc_vregs contains valid data. */ 4669 msr |= MSR_SPE; 4670 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4671 } 4672 4673 /* Store MSR. */ 4674 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4675 } 4676 4677 static void encode_trampoline(int sigret, uint32_t *tramp) 4678 { 4679 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4680 if (sigret) { 4681 __put_user(0x38000000 | sigret, &tramp[0]); 4682 __put_user(0x44000002, &tramp[1]); 4683 } 4684 } 4685 4686 static void restore_user_regs(CPUPPCState *env, 4687 struct target_mcontext *frame, int sig) 4688 { 4689 target_ulong save_r2 = 0; 4690 target_ulong msr; 4691 target_ulong ccr; 4692 4693 int i; 4694 4695 if (!sig) { 4696 save_r2 = env->gpr[2]; 4697 } 4698 4699 /* Restore general registers. */ 4700 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4701 __get_user(env->gpr[i], &frame->mc_gregs[i]); 4702 } 4703 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4704 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4705 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4706 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4707 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4708 4709 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4710 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4711 } 4712 4713 if (!sig) { 4714 env->gpr[2] = save_r2; 4715 } 4716 /* Restore MSR. */ 4717 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4718 4719 /* If doing signal return, restore the previous little-endian mode. */ 4720 if (sig) 4721 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 4722 4723 /* Restore Altivec registers if necessary. */ 4724 if (env->insns_flags & PPC_ALTIVEC) { 4725 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4726 ppc_avr_t *avr = &env->avr[i]; 4727 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4728 4729 __get_user(avr->u64[0], &vreg->u64[0]); 4730 __get_user(avr->u64[1], &vreg->u64[1]); 4731 } 4732 /* Set MSR_VEC in the saved MSR value to indicate that 4733 frame->mc_vregs contains valid data. */ 4734 __get_user(env->spr[SPR_VRSAVE], 4735 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3])); 4736 } 4737 4738 /* Restore floating point registers. */ 4739 if (env->insns_flags & PPC_FLOAT) { 4740 uint64_t fpscr; 4741 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4742 __get_user(env->fpr[i], &frame->mc_fregs[i]); 4743 } 4744 __get_user(fpscr, &frame->mc_fregs[32]); 4745 env->fpscr = (uint32_t) fpscr; 4746 } 4747 4748 /* Save SPE registers. The kernel only saves the high half. */ 4749 if (env->insns_flags & PPC_SPE) { 4750 #if defined(TARGET_PPC64) 4751 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4752 uint32_t hi; 4753 4754 __get_user(hi, &frame->mc_vregs.spe[i]); 4755 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4756 } 4757 #else 4758 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4759 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4760 } 4761 #endif 4762 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4763 } 4764 } 4765 4766 static void setup_frame(int sig, struct target_sigaction *ka, 4767 target_sigset_t *set, CPUPPCState *env) 4768 { 4769 struct target_sigframe *frame; 4770 struct target_sigcontext *sc; 4771 target_ulong frame_addr, newsp; 4772 int err = 0; 4773 #if defined(TARGET_PPC64) 4774 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4775 #endif 4776 4777 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4778 trace_user_setup_frame(env, frame_addr); 4779 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4780 goto sigsegv; 4781 sc = &frame->sctx; 4782 4783 __put_user(ka->_sa_handler, &sc->handler); 4784 __put_user(set->sig[0], &sc->oldmask); 4785 #if TARGET_ABI_BITS == 64 4786 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4787 #else 4788 __put_user(set->sig[1], &sc->_unused[3]); 4789 #endif 4790 __put_user(h2g(&frame->mctx), &sc->regs); 4791 __put_user(sig, &sc->signal); 4792 4793 /* Save user regs. */ 4794 save_user_regs(env, &frame->mctx); 4795 4796 /* Construct the trampoline code on the stack. */ 4797 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 4798 4799 /* The kernel checks for the presence of a VDSO here. We don't 4800 emulate a vdso, so use a sigreturn system call. */ 4801 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4802 4803 /* Turn off all fp exceptions. */ 4804 env->fpscr = 0; 4805 4806 /* Create a stack frame for the caller of the handler. */ 4807 newsp = frame_addr - SIGNAL_FRAMESIZE; 4808 err |= put_user(env->gpr[1], newsp, target_ulong); 4809 4810 if (err) 4811 goto sigsegv; 4812 4813 /* Set up registers for signal handler. */ 4814 env->gpr[1] = newsp; 4815 env->gpr[3] = sig; 4816 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4817 4818 #if defined(TARGET_PPC64) 4819 if (get_ppc64_abi(image) < 2) { 4820 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4821 struct target_func_ptr *handler = 4822 (struct target_func_ptr *)g2h(ka->_sa_handler); 4823 env->nip = tswapl(handler->entry); 4824 env->gpr[2] = tswapl(handler->toc); 4825 } else { 4826 /* ELFv2 PPC64 function pointers are entry points, but R12 4827 * must also be set */ 4828 env->nip = tswapl((target_ulong) ka->_sa_handler); 4829 env->gpr[12] = env->nip; 4830 } 4831 #else 4832 env->nip = (target_ulong) ka->_sa_handler; 4833 #endif 4834 4835 /* Signal handlers are entered in big-endian mode. */ 4836 env->msr &= ~(1ull << MSR_LE); 4837 4838 unlock_user_struct(frame, frame_addr, 1); 4839 return; 4840 4841 sigsegv: 4842 unlock_user_struct(frame, frame_addr, 1); 4843 force_sigsegv(sig); 4844 } 4845 4846 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4847 target_siginfo_t *info, 4848 target_sigset_t *set, CPUPPCState *env) 4849 { 4850 struct target_rt_sigframe *rt_sf; 4851 uint32_t *trampptr = 0; 4852 struct target_mcontext *mctx = 0; 4853 target_ulong rt_sf_addr, newsp = 0; 4854 int i, err = 0; 4855 #if defined(TARGET_PPC64) 4856 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4857 #endif 4858 4859 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4860 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4861 goto sigsegv; 4862 4863 tswap_siginfo(&rt_sf->info, info); 4864 4865 __put_user(0, &rt_sf->uc.tuc_flags); 4866 __put_user(0, &rt_sf->uc.tuc_link); 4867 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4868 &rt_sf->uc.tuc_stack.ss_sp); 4869 __put_user(sas_ss_flags(env->gpr[1]), 4870 &rt_sf->uc.tuc_stack.ss_flags); 4871 __put_user(target_sigaltstack_used.ss_size, 4872 &rt_sf->uc.tuc_stack.ss_size); 4873 #if !defined(TARGET_PPC64) 4874 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4875 &rt_sf->uc.tuc_regs); 4876 #endif 4877 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4878 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4879 } 4880 4881 #if defined(TARGET_PPC64) 4882 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 4883 trampptr = &rt_sf->trampoline[0]; 4884 #else 4885 mctx = &rt_sf->uc.tuc_mcontext; 4886 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 4887 #endif 4888 4889 save_user_regs(env, mctx); 4890 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 4891 4892 /* The kernel checks for the presence of a VDSO here. We don't 4893 emulate a vdso, so use a sigreturn system call. */ 4894 env->lr = (target_ulong) h2g(trampptr); 4895 4896 /* Turn off all fp exceptions. */ 4897 env->fpscr = 0; 4898 4899 /* Create a stack frame for the caller of the handler. */ 4900 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4901 err |= put_user(env->gpr[1], newsp, target_ulong); 4902 4903 if (err) 4904 goto sigsegv; 4905 4906 /* Set up registers for signal handler. */ 4907 env->gpr[1] = newsp; 4908 env->gpr[3] = (target_ulong) sig; 4909 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4910 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4911 env->gpr[6] = (target_ulong) h2g(rt_sf); 4912 4913 #if defined(TARGET_PPC64) 4914 if (get_ppc64_abi(image) < 2) { 4915 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4916 struct target_func_ptr *handler = 4917 (struct target_func_ptr *)g2h(ka->_sa_handler); 4918 env->nip = tswapl(handler->entry); 4919 env->gpr[2] = tswapl(handler->toc); 4920 } else { 4921 /* ELFv2 PPC64 function pointers are entry points, but R12 4922 * must also be set */ 4923 env->nip = tswapl((target_ulong) ka->_sa_handler); 4924 env->gpr[12] = env->nip; 4925 } 4926 #else 4927 env->nip = (target_ulong) ka->_sa_handler; 4928 #endif 4929 4930 /* Signal handlers are entered in big-endian mode. */ 4931 env->msr &= ~(1ull << MSR_LE); 4932 4933 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4934 return; 4935 4936 sigsegv: 4937 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4938 force_sigsegv(sig); 4939 4940 } 4941 4942 long do_sigreturn(CPUPPCState *env) 4943 { 4944 struct target_sigcontext *sc = NULL; 4945 struct target_mcontext *sr = NULL; 4946 target_ulong sr_addr = 0, sc_addr; 4947 sigset_t blocked; 4948 target_sigset_t set; 4949 4950 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4951 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4952 goto sigsegv; 4953 4954 #if defined(TARGET_PPC64) 4955 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 4956 #else 4957 __get_user(set.sig[0], &sc->oldmask); 4958 __get_user(set.sig[1], &sc->_unused[3]); 4959 #endif 4960 target_to_host_sigset_internal(&blocked, &set); 4961 set_sigmask(&blocked); 4962 4963 __get_user(sr_addr, &sc->regs); 4964 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4965 goto sigsegv; 4966 restore_user_regs(env, sr, 1); 4967 4968 unlock_user_struct(sr, sr_addr, 1); 4969 unlock_user_struct(sc, sc_addr, 1); 4970 return -TARGET_QEMU_ESIGRETURN; 4971 4972 sigsegv: 4973 unlock_user_struct(sr, sr_addr, 1); 4974 unlock_user_struct(sc, sc_addr, 1); 4975 force_sig(TARGET_SIGSEGV); 4976 return 0; 4977 } 4978 4979 /* See arch/powerpc/kernel/signal_32.c. */ 4980 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 4981 { 4982 struct target_mcontext *mcp; 4983 target_ulong mcp_addr; 4984 sigset_t blocked; 4985 target_sigset_t set; 4986 4987 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 4988 sizeof (set))) 4989 return 1; 4990 4991 #if defined(TARGET_PPC64) 4992 mcp_addr = h2g(ucp) + 4993 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 4994 #else 4995 __get_user(mcp_addr, &ucp->tuc_regs); 4996 #endif 4997 4998 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 4999 return 1; 5000 5001 target_to_host_sigset_internal(&blocked, &set); 5002 set_sigmask(&blocked); 5003 restore_user_regs(env, mcp, sig); 5004 5005 unlock_user_struct(mcp, mcp_addr, 1); 5006 return 0; 5007 } 5008 5009 long do_rt_sigreturn(CPUPPCState *env) 5010 { 5011 struct target_rt_sigframe *rt_sf = NULL; 5012 target_ulong rt_sf_addr; 5013 5014 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5015 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5016 goto sigsegv; 5017 5018 if (do_setcontext(&rt_sf->uc, env, 1)) 5019 goto sigsegv; 5020 5021 do_sigaltstack(rt_sf_addr 5022 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5023 0, env->gpr[1]); 5024 5025 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5026 return -TARGET_QEMU_ESIGRETURN; 5027 5028 sigsegv: 5029 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5030 force_sig(TARGET_SIGSEGV); 5031 return 0; 5032 } 5033 5034 #elif defined(TARGET_M68K) 5035 5036 struct target_sigcontext { 5037 abi_ulong sc_mask; 5038 abi_ulong sc_usp; 5039 abi_ulong sc_d0; 5040 abi_ulong sc_d1; 5041 abi_ulong sc_a0; 5042 abi_ulong sc_a1; 5043 unsigned short sc_sr; 5044 abi_ulong sc_pc; 5045 }; 5046 5047 struct target_sigframe 5048 { 5049 abi_ulong pretcode; 5050 int sig; 5051 int code; 5052 abi_ulong psc; 5053 char retcode[8]; 5054 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5055 struct target_sigcontext sc; 5056 }; 5057 5058 typedef int target_greg_t; 5059 #define TARGET_NGREG 18 5060 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5061 5062 typedef struct target_fpregset { 5063 int f_fpcntl[3]; 5064 int f_fpregs[8*3]; 5065 } target_fpregset_t; 5066 5067 struct target_mcontext { 5068 int version; 5069 target_gregset_t gregs; 5070 target_fpregset_t fpregs; 5071 }; 5072 5073 #define TARGET_MCONTEXT_VERSION 2 5074 5075 struct target_ucontext { 5076 abi_ulong tuc_flags; 5077 abi_ulong tuc_link; 5078 target_stack_t tuc_stack; 5079 struct target_mcontext tuc_mcontext; 5080 abi_long tuc_filler[80]; 5081 target_sigset_t tuc_sigmask; 5082 }; 5083 5084 struct target_rt_sigframe 5085 { 5086 abi_ulong pretcode; 5087 int sig; 5088 abi_ulong pinfo; 5089 abi_ulong puc; 5090 char retcode[8]; 5091 struct target_siginfo info; 5092 struct target_ucontext uc; 5093 }; 5094 5095 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5096 abi_ulong mask) 5097 { 5098 __put_user(mask, &sc->sc_mask); 5099 __put_user(env->aregs[7], &sc->sc_usp); 5100 __put_user(env->dregs[0], &sc->sc_d0); 5101 __put_user(env->dregs[1], &sc->sc_d1); 5102 __put_user(env->aregs[0], &sc->sc_a0); 5103 __put_user(env->aregs[1], &sc->sc_a1); 5104 __put_user(env->sr, &sc->sc_sr); 5105 __put_user(env->pc, &sc->sc_pc); 5106 } 5107 5108 static void 5109 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5110 { 5111 int temp; 5112 5113 __get_user(env->aregs[7], &sc->sc_usp); 5114 __get_user(env->dregs[0], &sc->sc_d0); 5115 __get_user(env->dregs[1], &sc->sc_d1); 5116 __get_user(env->aregs[0], &sc->sc_a0); 5117 __get_user(env->aregs[1], &sc->sc_a1); 5118 __get_user(env->pc, &sc->sc_pc); 5119 __get_user(temp, &sc->sc_sr); 5120 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5121 } 5122 5123 /* 5124 * Determine which stack to use.. 5125 */ 5126 static inline abi_ulong 5127 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5128 size_t frame_size) 5129 { 5130 unsigned long sp; 5131 5132 sp = regs->aregs[7]; 5133 5134 /* This is the X/Open sanctioned signal stack switching. */ 5135 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5136 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5137 } 5138 5139 return ((sp - frame_size) & -8UL); 5140 } 5141 5142 static void setup_frame(int sig, struct target_sigaction *ka, 5143 target_sigset_t *set, CPUM68KState *env) 5144 { 5145 struct target_sigframe *frame; 5146 abi_ulong frame_addr; 5147 abi_ulong retcode_addr; 5148 abi_ulong sc_addr; 5149 int i; 5150 5151 frame_addr = get_sigframe(ka, env, sizeof *frame); 5152 trace_user_setup_frame(env, frame_addr); 5153 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5154 goto give_sigsegv; 5155 } 5156 5157 __put_user(sig, &frame->sig); 5158 5159 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5160 __put_user(sc_addr, &frame->psc); 5161 5162 setup_sigcontext(&frame->sc, env, set->sig[0]); 5163 5164 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5165 __put_user(set->sig[i], &frame->extramask[i - 1]); 5166 } 5167 5168 /* Set up to return from userspace. */ 5169 5170 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5171 __put_user(retcode_addr, &frame->pretcode); 5172 5173 /* moveq #,d0; trap #0 */ 5174 5175 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5176 (uint32_t *)(frame->retcode)); 5177 5178 /* Set up to return from userspace */ 5179 5180 env->aregs[7] = frame_addr; 5181 env->pc = ka->_sa_handler; 5182 5183 unlock_user_struct(frame, frame_addr, 1); 5184 return; 5185 5186 give_sigsegv: 5187 force_sigsegv(sig); 5188 } 5189 5190 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5191 CPUM68KState *env) 5192 { 5193 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5194 5195 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5196 __put_user(env->dregs[0], &gregs[0]); 5197 __put_user(env->dregs[1], &gregs[1]); 5198 __put_user(env->dregs[2], &gregs[2]); 5199 __put_user(env->dregs[3], &gregs[3]); 5200 __put_user(env->dregs[4], &gregs[4]); 5201 __put_user(env->dregs[5], &gregs[5]); 5202 __put_user(env->dregs[6], &gregs[6]); 5203 __put_user(env->dregs[7], &gregs[7]); 5204 __put_user(env->aregs[0], &gregs[8]); 5205 __put_user(env->aregs[1], &gregs[9]); 5206 __put_user(env->aregs[2], &gregs[10]); 5207 __put_user(env->aregs[3], &gregs[11]); 5208 __put_user(env->aregs[4], &gregs[12]); 5209 __put_user(env->aregs[5], &gregs[13]); 5210 __put_user(env->aregs[6], &gregs[14]); 5211 __put_user(env->aregs[7], &gregs[15]); 5212 __put_user(env->pc, &gregs[16]); 5213 __put_user(env->sr, &gregs[17]); 5214 5215 return 0; 5216 } 5217 5218 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5219 struct target_ucontext *uc) 5220 { 5221 int temp; 5222 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5223 5224 __get_user(temp, &uc->tuc_mcontext.version); 5225 if (temp != TARGET_MCONTEXT_VERSION) 5226 goto badframe; 5227 5228 /* restore passed registers */ 5229 __get_user(env->dregs[0], &gregs[0]); 5230 __get_user(env->dregs[1], &gregs[1]); 5231 __get_user(env->dregs[2], &gregs[2]); 5232 __get_user(env->dregs[3], &gregs[3]); 5233 __get_user(env->dregs[4], &gregs[4]); 5234 __get_user(env->dregs[5], &gregs[5]); 5235 __get_user(env->dregs[6], &gregs[6]); 5236 __get_user(env->dregs[7], &gregs[7]); 5237 __get_user(env->aregs[0], &gregs[8]); 5238 __get_user(env->aregs[1], &gregs[9]); 5239 __get_user(env->aregs[2], &gregs[10]); 5240 __get_user(env->aregs[3], &gregs[11]); 5241 __get_user(env->aregs[4], &gregs[12]); 5242 __get_user(env->aregs[5], &gregs[13]); 5243 __get_user(env->aregs[6], &gregs[14]); 5244 __get_user(env->aregs[7], &gregs[15]); 5245 __get_user(env->pc, &gregs[16]); 5246 __get_user(temp, &gregs[17]); 5247 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5248 5249 return 0; 5250 5251 badframe: 5252 return 1; 5253 } 5254 5255 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5256 target_siginfo_t *info, 5257 target_sigset_t *set, CPUM68KState *env) 5258 { 5259 struct target_rt_sigframe *frame; 5260 abi_ulong frame_addr; 5261 abi_ulong retcode_addr; 5262 abi_ulong info_addr; 5263 abi_ulong uc_addr; 5264 int err = 0; 5265 int i; 5266 5267 frame_addr = get_sigframe(ka, env, sizeof *frame); 5268 trace_user_setup_rt_frame(env, frame_addr); 5269 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5270 goto give_sigsegv; 5271 } 5272 5273 __put_user(sig, &frame->sig); 5274 5275 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5276 __put_user(info_addr, &frame->pinfo); 5277 5278 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5279 __put_user(uc_addr, &frame->puc); 5280 5281 tswap_siginfo(&frame->info, info); 5282 5283 /* Create the ucontext */ 5284 5285 __put_user(0, &frame->uc.tuc_flags); 5286 __put_user(0, &frame->uc.tuc_link); 5287 __put_user(target_sigaltstack_used.ss_sp, 5288 &frame->uc.tuc_stack.ss_sp); 5289 __put_user(sas_ss_flags(env->aregs[7]), 5290 &frame->uc.tuc_stack.ss_flags); 5291 __put_user(target_sigaltstack_used.ss_size, 5292 &frame->uc.tuc_stack.ss_size); 5293 err |= target_rt_setup_ucontext(&frame->uc, env); 5294 5295 if (err) 5296 goto give_sigsegv; 5297 5298 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5299 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5300 } 5301 5302 /* Set up to return from userspace. */ 5303 5304 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5305 __put_user(retcode_addr, &frame->pretcode); 5306 5307 /* moveq #,d0; notb d0; trap #0 */ 5308 5309 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 5310 (uint32_t *)(frame->retcode + 0)); 5311 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); 5312 5313 if (err) 5314 goto give_sigsegv; 5315 5316 /* Set up to return from userspace */ 5317 5318 env->aregs[7] = frame_addr; 5319 env->pc = ka->_sa_handler; 5320 5321 unlock_user_struct(frame, frame_addr, 1); 5322 return; 5323 5324 give_sigsegv: 5325 unlock_user_struct(frame, frame_addr, 1); 5326 force_sigsegv(sig); 5327 } 5328 5329 long do_sigreturn(CPUM68KState *env) 5330 { 5331 struct target_sigframe *frame; 5332 abi_ulong frame_addr = env->aregs[7] - 4; 5333 target_sigset_t target_set; 5334 sigset_t set; 5335 int i; 5336 5337 trace_user_do_sigreturn(env, frame_addr); 5338 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5339 goto badframe; 5340 5341 /* set blocked signals */ 5342 5343 __get_user(target_set.sig[0], &frame->sc.sc_mask); 5344 5345 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5346 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 5347 } 5348 5349 target_to_host_sigset_internal(&set, &target_set); 5350 set_sigmask(&set); 5351 5352 /* restore registers */ 5353 5354 restore_sigcontext(env, &frame->sc); 5355 5356 unlock_user_struct(frame, frame_addr, 0); 5357 return -TARGET_QEMU_ESIGRETURN; 5358 5359 badframe: 5360 force_sig(TARGET_SIGSEGV); 5361 return 0; 5362 } 5363 5364 long do_rt_sigreturn(CPUM68KState *env) 5365 { 5366 struct target_rt_sigframe *frame; 5367 abi_ulong frame_addr = env->aregs[7] - 4; 5368 target_sigset_t target_set; 5369 sigset_t set; 5370 5371 trace_user_do_rt_sigreturn(env, frame_addr); 5372 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5373 goto badframe; 5374 5375 target_to_host_sigset_internal(&set, &target_set); 5376 set_sigmask(&set); 5377 5378 /* restore registers */ 5379 5380 if (target_rt_restore_ucontext(env, &frame->uc)) 5381 goto badframe; 5382 5383 if (do_sigaltstack(frame_addr + 5384 offsetof(struct target_rt_sigframe, uc.tuc_stack), 5385 0, get_sp_from_cpustate(env)) == -EFAULT) 5386 goto badframe; 5387 5388 unlock_user_struct(frame, frame_addr, 0); 5389 return -TARGET_QEMU_ESIGRETURN; 5390 5391 badframe: 5392 unlock_user_struct(frame, frame_addr, 0); 5393 force_sig(TARGET_SIGSEGV); 5394 return 0; 5395 } 5396 5397 #elif defined(TARGET_ALPHA) 5398 5399 struct target_sigcontext { 5400 abi_long sc_onstack; 5401 abi_long sc_mask; 5402 abi_long sc_pc; 5403 abi_long sc_ps; 5404 abi_long sc_regs[32]; 5405 abi_long sc_ownedfp; 5406 abi_long sc_fpregs[32]; 5407 abi_ulong sc_fpcr; 5408 abi_ulong sc_fp_control; 5409 abi_ulong sc_reserved1; 5410 abi_ulong sc_reserved2; 5411 abi_ulong sc_ssize; 5412 abi_ulong sc_sbase; 5413 abi_ulong sc_traparg_a0; 5414 abi_ulong sc_traparg_a1; 5415 abi_ulong sc_traparg_a2; 5416 abi_ulong sc_fp_trap_pc; 5417 abi_ulong sc_fp_trigger_sum; 5418 abi_ulong sc_fp_trigger_inst; 5419 }; 5420 5421 struct target_ucontext { 5422 abi_ulong tuc_flags; 5423 abi_ulong tuc_link; 5424 abi_ulong tuc_osf_sigmask; 5425 target_stack_t tuc_stack; 5426 struct target_sigcontext tuc_mcontext; 5427 target_sigset_t tuc_sigmask; 5428 }; 5429 5430 struct target_sigframe { 5431 struct target_sigcontext sc; 5432 unsigned int retcode[3]; 5433 }; 5434 5435 struct target_rt_sigframe { 5436 target_siginfo_t info; 5437 struct target_ucontext uc; 5438 unsigned int retcode[3]; 5439 }; 5440 5441 #define INSN_MOV_R30_R16 0x47fe0410 5442 #define INSN_LDI_R0 0x201f0000 5443 #define INSN_CALLSYS 0x00000083 5444 5445 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5446 abi_ulong frame_addr, target_sigset_t *set) 5447 { 5448 int i; 5449 5450 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5451 __put_user(set->sig[0], &sc->sc_mask); 5452 __put_user(env->pc, &sc->sc_pc); 5453 __put_user(8, &sc->sc_ps); 5454 5455 for (i = 0; i < 31; ++i) { 5456 __put_user(env->ir[i], &sc->sc_regs[i]); 5457 } 5458 __put_user(0, &sc->sc_regs[31]); 5459 5460 for (i = 0; i < 31; ++i) { 5461 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5462 } 5463 __put_user(0, &sc->sc_fpregs[31]); 5464 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5465 5466 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5467 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5468 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5469 } 5470 5471 static void restore_sigcontext(CPUAlphaState *env, 5472 struct target_sigcontext *sc) 5473 { 5474 uint64_t fpcr; 5475 int i; 5476 5477 __get_user(env->pc, &sc->sc_pc); 5478 5479 for (i = 0; i < 31; ++i) { 5480 __get_user(env->ir[i], &sc->sc_regs[i]); 5481 } 5482 for (i = 0; i < 31; ++i) { 5483 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5484 } 5485 5486 __get_user(fpcr, &sc->sc_fpcr); 5487 cpu_alpha_store_fpcr(env, fpcr); 5488 } 5489 5490 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5491 CPUAlphaState *env, 5492 unsigned long framesize) 5493 { 5494 abi_ulong sp = env->ir[IR_SP]; 5495 5496 /* This is the X/Open sanctioned signal stack switching. */ 5497 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5498 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5499 } 5500 return (sp - framesize) & -32; 5501 } 5502 5503 static void setup_frame(int sig, struct target_sigaction *ka, 5504 target_sigset_t *set, CPUAlphaState *env) 5505 { 5506 abi_ulong frame_addr, r26; 5507 struct target_sigframe *frame; 5508 int err = 0; 5509 5510 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5511 trace_user_setup_frame(env, frame_addr); 5512 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5513 goto give_sigsegv; 5514 } 5515 5516 setup_sigcontext(&frame->sc, env, frame_addr, set); 5517 5518 if (ka->sa_restorer) { 5519 r26 = ka->sa_restorer; 5520 } else { 5521 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5522 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5523 &frame->retcode[1]); 5524 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5525 /* imb() */ 5526 r26 = frame_addr; 5527 } 5528 5529 unlock_user_struct(frame, frame_addr, 1); 5530 5531 if (err) { 5532 give_sigsegv: 5533 force_sigsegv(sig); 5534 return; 5535 } 5536 5537 env->ir[IR_RA] = r26; 5538 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5539 env->ir[IR_A0] = sig; 5540 env->ir[IR_A1] = 0; 5541 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5542 env->ir[IR_SP] = frame_addr; 5543 } 5544 5545 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5546 target_siginfo_t *info, 5547 target_sigset_t *set, CPUAlphaState *env) 5548 { 5549 abi_ulong frame_addr, r26; 5550 struct target_rt_sigframe *frame; 5551 int i, err = 0; 5552 5553 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5554 trace_user_setup_rt_frame(env, frame_addr); 5555 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5556 goto give_sigsegv; 5557 } 5558 5559 tswap_siginfo(&frame->info, info); 5560 5561 __put_user(0, &frame->uc.tuc_flags); 5562 __put_user(0, &frame->uc.tuc_link); 5563 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5564 __put_user(target_sigaltstack_used.ss_sp, 5565 &frame->uc.tuc_stack.ss_sp); 5566 __put_user(sas_ss_flags(env->ir[IR_SP]), 5567 &frame->uc.tuc_stack.ss_flags); 5568 __put_user(target_sigaltstack_used.ss_size, 5569 &frame->uc.tuc_stack.ss_size); 5570 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5571 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5572 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5573 } 5574 5575 if (ka->sa_restorer) { 5576 r26 = ka->sa_restorer; 5577 } else { 5578 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5579 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5580 &frame->retcode[1]); 5581 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5582 /* imb(); */ 5583 r26 = frame_addr; 5584 } 5585 5586 if (err) { 5587 give_sigsegv: 5588 force_sigsegv(sig); 5589 return; 5590 } 5591 5592 env->ir[IR_RA] = r26; 5593 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5594 env->ir[IR_A0] = sig; 5595 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5596 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5597 env->ir[IR_SP] = frame_addr; 5598 } 5599 5600 long do_sigreturn(CPUAlphaState *env) 5601 { 5602 struct target_sigcontext *sc; 5603 abi_ulong sc_addr = env->ir[IR_A0]; 5604 target_sigset_t target_set; 5605 sigset_t set; 5606 5607 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5608 goto badframe; 5609 } 5610 5611 target_sigemptyset(&target_set); 5612 __get_user(target_set.sig[0], &sc->sc_mask); 5613 5614 target_to_host_sigset_internal(&set, &target_set); 5615 set_sigmask(&set); 5616 5617 restore_sigcontext(env, sc); 5618 unlock_user_struct(sc, sc_addr, 0); 5619 return -TARGET_QEMU_ESIGRETURN; 5620 5621 badframe: 5622 force_sig(TARGET_SIGSEGV); 5623 } 5624 5625 long do_rt_sigreturn(CPUAlphaState *env) 5626 { 5627 abi_ulong frame_addr = env->ir[IR_A0]; 5628 struct target_rt_sigframe *frame; 5629 sigset_t set; 5630 5631 trace_user_do_rt_sigreturn(env, frame_addr); 5632 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5633 goto badframe; 5634 } 5635 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5636 set_sigmask(&set); 5637 5638 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5639 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5640 uc.tuc_stack), 5641 0, env->ir[IR_SP]) == -EFAULT) { 5642 goto badframe; 5643 } 5644 5645 unlock_user_struct(frame, frame_addr, 0); 5646 return -TARGET_QEMU_ESIGRETURN; 5647 5648 5649 badframe: 5650 unlock_user_struct(frame, frame_addr, 0); 5651 force_sig(TARGET_SIGSEGV); 5652 } 5653 5654 #elif defined(TARGET_TILEGX) 5655 5656 struct target_sigcontext { 5657 union { 5658 /* General-purpose registers. */ 5659 abi_ulong gregs[56]; 5660 struct { 5661 abi_ulong __gregs[53]; 5662 abi_ulong tp; /* Aliases gregs[TREG_TP]. */ 5663 abi_ulong sp; /* Aliases gregs[TREG_SP]. */ 5664 abi_ulong lr; /* Aliases gregs[TREG_LR]. */ 5665 }; 5666 }; 5667 abi_ulong pc; /* Program counter. */ 5668 abi_ulong ics; /* In Interrupt Critical Section? */ 5669 abi_ulong faultnum; /* Fault number. */ 5670 abi_ulong pad[5]; 5671 }; 5672 5673 struct target_ucontext { 5674 abi_ulong tuc_flags; 5675 abi_ulong tuc_link; 5676 target_stack_t tuc_stack; 5677 struct target_sigcontext tuc_mcontext; 5678 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 5679 }; 5680 5681 struct target_rt_sigframe { 5682 unsigned char save_area[16]; /* caller save area */ 5683 struct target_siginfo info; 5684 struct target_ucontext uc; 5685 abi_ulong retcode[2]; 5686 }; 5687 5688 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */ 5689 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */ 5690 5691 5692 static void setup_sigcontext(struct target_sigcontext *sc, 5693 CPUArchState *env, int signo) 5694 { 5695 int i; 5696 5697 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5698 __put_user(env->regs[i], &sc->gregs[i]); 5699 } 5700 5701 __put_user(env->pc, &sc->pc); 5702 __put_user(0, &sc->ics); 5703 __put_user(signo, &sc->faultnum); 5704 } 5705 5706 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc) 5707 { 5708 int i; 5709 5710 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5711 __get_user(env->regs[i], &sc->gregs[i]); 5712 } 5713 5714 __get_user(env->pc, &sc->pc); 5715 } 5716 5717 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, 5718 size_t frame_size) 5719 { 5720 unsigned long sp = env->regs[TILEGX_R_SP]; 5721 5722 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) { 5723 return -1UL; 5724 } 5725 5726 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 5727 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5728 } 5729 5730 sp -= frame_size; 5731 sp &= -16UL; 5732 return sp; 5733 } 5734 5735 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5736 target_siginfo_t *info, 5737 target_sigset_t *set, CPUArchState *env) 5738 { 5739 abi_ulong frame_addr; 5740 struct target_rt_sigframe *frame; 5741 unsigned long restorer; 5742 5743 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5744 trace_user_setup_rt_frame(env, frame_addr); 5745 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5746 goto give_sigsegv; 5747 } 5748 5749 /* Always write at least the signal number for the stack backtracer. */ 5750 if (ka->sa_flags & TARGET_SA_SIGINFO) { 5751 /* At sigreturn time, restore the callee-save registers too. */ 5752 tswap_siginfo(&frame->info, info); 5753 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */ 5754 } else { 5755 __put_user(info->si_signo, &frame->info.si_signo); 5756 } 5757 5758 /* Create the ucontext. */ 5759 __put_user(0, &frame->uc.tuc_flags); 5760 __put_user(0, &frame->uc.tuc_link); 5761 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 5762 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]), 5763 &frame->uc.tuc_stack.ss_flags); 5764 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 5765 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo); 5766 5767 if (ka->sa_flags & TARGET_SA_RESTORER) { 5768 restorer = (unsigned long) ka->sa_restorer; 5769 } else { 5770 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]); 5771 __put_user(INSN_SWINT1, &frame->retcode[1]); 5772 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode); 5773 } 5774 env->pc = (unsigned long) ka->_sa_handler; 5775 env->regs[TILEGX_R_SP] = (unsigned long) frame; 5776 env->regs[TILEGX_R_LR] = restorer; 5777 env->regs[0] = (unsigned long) sig; 5778 env->regs[1] = (unsigned long) &frame->info; 5779 env->regs[2] = (unsigned long) &frame->uc; 5780 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */ 5781 5782 unlock_user_struct(frame, frame_addr, 1); 5783 return; 5784 5785 give_sigsegv: 5786 force_sigsegv(sig); 5787 } 5788 5789 long do_rt_sigreturn(CPUTLGState *env) 5790 { 5791 abi_ulong frame_addr = env->regs[TILEGX_R_SP]; 5792 struct target_rt_sigframe *frame; 5793 sigset_t set; 5794 5795 trace_user_do_rt_sigreturn(env, frame_addr); 5796 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5797 goto badframe; 5798 } 5799 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5800 set_sigmask(&set); 5801 5802 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5803 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5804 uc.tuc_stack), 5805 0, env->regs[TILEGX_R_SP]) == -EFAULT) { 5806 goto badframe; 5807 } 5808 5809 unlock_user_struct(frame, frame_addr, 0); 5810 return -TARGET_QEMU_ESIGRETURN; 5811 5812 5813 badframe: 5814 unlock_user_struct(frame, frame_addr, 0); 5815 force_sig(TARGET_SIGSEGV); 5816 } 5817 5818 #else 5819 5820 static void setup_frame(int sig, struct target_sigaction *ka, 5821 target_sigset_t *set, CPUArchState *env) 5822 { 5823 fprintf(stderr, "setup_frame: not implemented\n"); 5824 } 5825 5826 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5827 target_siginfo_t *info, 5828 target_sigset_t *set, CPUArchState *env) 5829 { 5830 fprintf(stderr, "setup_rt_frame: not implemented\n"); 5831 } 5832 5833 long do_sigreturn(CPUArchState *env) 5834 { 5835 fprintf(stderr, "do_sigreturn: not implemented\n"); 5836 return -TARGET_ENOSYS; 5837 } 5838 5839 long do_rt_sigreturn(CPUArchState *env) 5840 { 5841 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 5842 return -TARGET_ENOSYS; 5843 } 5844 5845 #endif 5846 5847 static void handle_pending_signal(CPUArchState *cpu_env, int sig, 5848 struct emulated_sigtable *k) 5849 { 5850 CPUState *cpu = ENV_GET_CPU(cpu_env); 5851 abi_ulong handler; 5852 sigset_t set; 5853 target_sigset_t target_old_set; 5854 struct target_sigaction *sa; 5855 TaskState *ts = cpu->opaque; 5856 5857 trace_user_handle_signal(cpu_env, sig); 5858 /* dequeue signal */ 5859 k->pending = 0; 5860 5861 sig = gdb_handlesig(cpu, sig); 5862 if (!sig) { 5863 sa = NULL; 5864 handler = TARGET_SIG_IGN; 5865 } else { 5866 sa = &sigact_table[sig - 1]; 5867 handler = sa->_sa_handler; 5868 } 5869 5870 if (do_strace) { 5871 print_taken_signal(sig, &k->info); 5872 } 5873 5874 if (handler == TARGET_SIG_DFL) { 5875 /* default handler : ignore some signal. The other are job control or fatal */ 5876 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { 5877 kill(getpid(),SIGSTOP); 5878 } else if (sig != TARGET_SIGCHLD && 5879 sig != TARGET_SIGURG && 5880 sig != TARGET_SIGWINCH && 5881 sig != TARGET_SIGCONT) { 5882 force_sig(sig); 5883 } 5884 } else if (handler == TARGET_SIG_IGN) { 5885 /* ignore sig */ 5886 } else if (handler == TARGET_SIG_ERR) { 5887 force_sig(sig); 5888 } else { 5889 /* compute the blocked signals during the handler execution */ 5890 sigset_t *blocked_set; 5891 5892 target_to_host_sigset(&set, &sa->sa_mask); 5893 /* SA_NODEFER indicates that the current signal should not be 5894 blocked during the handler */ 5895 if (!(sa->sa_flags & TARGET_SA_NODEFER)) 5896 sigaddset(&set, target_to_host_signal(sig)); 5897 5898 /* save the previous blocked signal state to restore it at the 5899 end of the signal execution (see do_sigreturn) */ 5900 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask); 5901 5902 /* block signals in the handler */ 5903 blocked_set = ts->in_sigsuspend ? 5904 &ts->sigsuspend_mask : &ts->signal_mask; 5905 sigorset(&ts->signal_mask, blocked_set, &set); 5906 ts->in_sigsuspend = 0; 5907 5908 /* if the CPU is in VM86 mode, we restore the 32 bit values */ 5909 #if defined(TARGET_I386) && !defined(TARGET_X86_64) 5910 { 5911 CPUX86State *env = cpu_env; 5912 if (env->eflags & VM_MASK) 5913 save_v86_state(env); 5914 } 5915 #endif 5916 /* prepare the stack frame of the virtual CPU */ 5917 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \ 5918 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) 5919 /* These targets do not have traditional signals. */ 5920 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 5921 #else 5922 if (sa->sa_flags & TARGET_SA_SIGINFO) 5923 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 5924 else 5925 setup_frame(sig, sa, &target_old_set, cpu_env); 5926 #endif 5927 if (sa->sa_flags & TARGET_SA_RESETHAND) { 5928 sa->_sa_handler = TARGET_SIG_DFL; 5929 } 5930 } 5931 } 5932 5933 void process_pending_signals(CPUArchState *cpu_env) 5934 { 5935 CPUState *cpu = ENV_GET_CPU(cpu_env); 5936 int sig; 5937 TaskState *ts = cpu->opaque; 5938 sigset_t set; 5939 sigset_t *blocked_set; 5940 5941 while (atomic_read(&ts->signal_pending)) { 5942 /* FIXME: This is not threadsafe. */ 5943 sigfillset(&set); 5944 sigprocmask(SIG_SETMASK, &set, 0); 5945 5946 restart_scan: 5947 sig = ts->sync_signal.pending; 5948 if (sig) { 5949 /* Synchronous signals are forced, 5950 * see force_sig_info() and callers in Linux 5951 * Note that not all of our queue_signal() calls in QEMU correspond 5952 * to force_sig_info() calls in Linux (some are send_sig_info()). 5953 * However it seems like a kernel bug to me to allow the process 5954 * to block a synchronous signal since it could then just end up 5955 * looping round and round indefinitely. 5956 */ 5957 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig]) 5958 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) { 5959 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]); 5960 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL; 5961 } 5962 5963 handle_pending_signal(cpu_env, sig, &ts->sync_signal); 5964 } 5965 5966 for (sig = 1; sig <= TARGET_NSIG; sig++) { 5967 blocked_set = ts->in_sigsuspend ? 5968 &ts->sigsuspend_mask : &ts->signal_mask; 5969 5970 if (ts->sigtab[sig - 1].pending && 5971 (!sigismember(blocked_set, 5972 target_to_host_signal_table[sig]))) { 5973 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]); 5974 /* Restart scan from the beginning, as handle_pending_signal 5975 * might have resulted in a new synchronous signal (eg SIGSEGV). 5976 */ 5977 goto restart_scan; 5978 } 5979 } 5980 5981 /* if no signal is pending, unblock signals and recheck (the act 5982 * of unblocking might cause us to take another host signal which 5983 * will set signal_pending again). 5984 */ 5985 atomic_set(&ts->signal_pending, 0); 5986 ts->in_sigsuspend = 0; 5987 set = ts->signal_mask; 5988 sigdelset(&set, SIGSEGV); 5989 sigdelset(&set, SIGBUS); 5990 sigprocmask(SIG_SETMASK, &set, 0); 5991 } 5992 ts->in_sigsuspend = 0; 5993 } 5994