1 /* 2 * Emulation of Linux signals 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/bitops.h" 21 #include <sys/ucontext.h> 22 #include <sys/resource.h> 23 24 #include "qemu.h" 25 #include "qemu-common.h" 26 #include "target_signal.h" 27 #include "trace.h" 28 29 static struct target_sigaltstack target_sigaltstack_used = { 30 .ss_sp = 0, 31 .ss_size = 0, 32 .ss_flags = TARGET_SS_DISABLE, 33 }; 34 35 static struct target_sigaction sigact_table[TARGET_NSIG]; 36 37 static void host_signal_handler(int host_signum, siginfo_t *info, 38 void *puc); 39 40 static uint8_t host_to_target_signal_table[_NSIG] = { 41 [SIGHUP] = TARGET_SIGHUP, 42 [SIGINT] = TARGET_SIGINT, 43 [SIGQUIT] = TARGET_SIGQUIT, 44 [SIGILL] = TARGET_SIGILL, 45 [SIGTRAP] = TARGET_SIGTRAP, 46 [SIGABRT] = TARGET_SIGABRT, 47 /* [SIGIOT] = TARGET_SIGIOT,*/ 48 [SIGBUS] = TARGET_SIGBUS, 49 [SIGFPE] = TARGET_SIGFPE, 50 [SIGKILL] = TARGET_SIGKILL, 51 [SIGUSR1] = TARGET_SIGUSR1, 52 [SIGSEGV] = TARGET_SIGSEGV, 53 [SIGUSR2] = TARGET_SIGUSR2, 54 [SIGPIPE] = TARGET_SIGPIPE, 55 [SIGALRM] = TARGET_SIGALRM, 56 [SIGTERM] = TARGET_SIGTERM, 57 #ifdef SIGSTKFLT 58 [SIGSTKFLT] = TARGET_SIGSTKFLT, 59 #endif 60 [SIGCHLD] = TARGET_SIGCHLD, 61 [SIGCONT] = TARGET_SIGCONT, 62 [SIGSTOP] = TARGET_SIGSTOP, 63 [SIGTSTP] = TARGET_SIGTSTP, 64 [SIGTTIN] = TARGET_SIGTTIN, 65 [SIGTTOU] = TARGET_SIGTTOU, 66 [SIGURG] = TARGET_SIGURG, 67 [SIGXCPU] = TARGET_SIGXCPU, 68 [SIGXFSZ] = TARGET_SIGXFSZ, 69 [SIGVTALRM] = TARGET_SIGVTALRM, 70 [SIGPROF] = TARGET_SIGPROF, 71 [SIGWINCH] = TARGET_SIGWINCH, 72 [SIGIO] = TARGET_SIGIO, 73 [SIGPWR] = TARGET_SIGPWR, 74 [SIGSYS] = TARGET_SIGSYS, 75 /* next signals stay the same */ 76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with 77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/ 78 To fix this properly we need to do manual signal delivery multiplexed 79 over a single host signal. */ 80 [__SIGRTMIN] = __SIGRTMAX, 81 [__SIGRTMAX] = __SIGRTMIN, 82 }; 83 static uint8_t target_to_host_signal_table[_NSIG]; 84 85 static inline int on_sig_stack(unsigned long sp) 86 { 87 return (sp - target_sigaltstack_used.ss_sp 88 < target_sigaltstack_used.ss_size); 89 } 90 91 static inline int sas_ss_flags(unsigned long sp) 92 { 93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE 94 : on_sig_stack(sp) ? SS_ONSTACK : 0); 95 } 96 97 int host_to_target_signal(int sig) 98 { 99 if (sig < 0 || sig >= _NSIG) 100 return sig; 101 return host_to_target_signal_table[sig]; 102 } 103 104 int target_to_host_signal(int sig) 105 { 106 if (sig < 0 || sig >= _NSIG) 107 return sig; 108 return target_to_host_signal_table[sig]; 109 } 110 111 static inline void target_sigemptyset(target_sigset_t *set) 112 { 113 memset(set, 0, sizeof(*set)); 114 } 115 116 static inline void target_sigaddset(target_sigset_t *set, int signum) 117 { 118 signum--; 119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 120 set->sig[signum / TARGET_NSIG_BPW] |= mask; 121 } 122 123 static inline int target_sigismember(const target_sigset_t *set, int signum) 124 { 125 signum--; 126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0); 128 } 129 130 static void host_to_target_sigset_internal(target_sigset_t *d, 131 const sigset_t *s) 132 { 133 int i; 134 target_sigemptyset(d); 135 for (i = 1; i <= TARGET_NSIG; i++) { 136 if (sigismember(s, i)) { 137 target_sigaddset(d, host_to_target_signal(i)); 138 } 139 } 140 } 141 142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) 143 { 144 target_sigset_t d1; 145 int i; 146 147 host_to_target_sigset_internal(&d1, s); 148 for(i = 0;i < TARGET_NSIG_WORDS; i++) 149 d->sig[i] = tswapal(d1.sig[i]); 150 } 151 152 static void target_to_host_sigset_internal(sigset_t *d, 153 const target_sigset_t *s) 154 { 155 int i; 156 sigemptyset(d); 157 for (i = 1; i <= TARGET_NSIG; i++) { 158 if (target_sigismember(s, i)) { 159 sigaddset(d, target_to_host_signal(i)); 160 } 161 } 162 } 163 164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) 165 { 166 target_sigset_t s1; 167 int i; 168 169 for(i = 0;i < TARGET_NSIG_WORDS; i++) 170 s1.sig[i] = tswapal(s->sig[i]); 171 target_to_host_sigset_internal(d, &s1); 172 } 173 174 void host_to_target_old_sigset(abi_ulong *old_sigset, 175 const sigset_t *sigset) 176 { 177 target_sigset_t d; 178 host_to_target_sigset(&d, sigset); 179 *old_sigset = d.sig[0]; 180 } 181 182 void target_to_host_old_sigset(sigset_t *sigset, 183 const abi_ulong *old_sigset) 184 { 185 target_sigset_t d; 186 int i; 187 188 d.sig[0] = *old_sigset; 189 for(i = 1;i < TARGET_NSIG_WORDS; i++) 190 d.sig[i] = 0; 191 target_to_host_sigset(sigset, &d); 192 } 193 194 int block_signals(void) 195 { 196 TaskState *ts = (TaskState *)thread_cpu->opaque; 197 sigset_t set; 198 199 /* It's OK to block everything including SIGSEGV, because we won't 200 * run any further guest code before unblocking signals in 201 * process_pending_signals(). 202 */ 203 sigfillset(&set); 204 sigprocmask(SIG_SETMASK, &set, 0); 205 206 return atomic_xchg(&ts->signal_pending, 1); 207 } 208 209 /* Wrapper for sigprocmask function 210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset 211 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if 212 * a signal was already pending and the syscall must be restarted, or 213 * 0 on success. 214 * If set is NULL, this is guaranteed not to fail. 215 */ 216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) 217 { 218 TaskState *ts = (TaskState *)thread_cpu->opaque; 219 220 if (oldset) { 221 *oldset = ts->signal_mask; 222 } 223 224 if (set) { 225 int i; 226 227 if (block_signals()) { 228 return -TARGET_ERESTARTSYS; 229 } 230 231 switch (how) { 232 case SIG_BLOCK: 233 sigorset(&ts->signal_mask, &ts->signal_mask, set); 234 break; 235 case SIG_UNBLOCK: 236 for (i = 1; i <= NSIG; ++i) { 237 if (sigismember(set, i)) { 238 sigdelset(&ts->signal_mask, i); 239 } 240 } 241 break; 242 case SIG_SETMASK: 243 ts->signal_mask = *set; 244 break; 245 default: 246 g_assert_not_reached(); 247 } 248 249 /* Silently ignore attempts to change blocking status of KILL or STOP */ 250 sigdelset(&ts->signal_mask, SIGKILL); 251 sigdelset(&ts->signal_mask, SIGSTOP); 252 } 253 return 0; 254 } 255 256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \ 257 !defined(TARGET_X86_64) 258 /* Just set the guest's signal mask to the specified value; the 259 * caller is assumed to have called block_signals() already. 260 */ 261 static void set_sigmask(const sigset_t *set) 262 { 263 TaskState *ts = (TaskState *)thread_cpu->opaque; 264 265 ts->signal_mask = *set; 266 } 267 #endif 268 269 /* siginfo conversion */ 270 271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, 272 const siginfo_t *info) 273 { 274 int sig = host_to_target_signal(info->si_signo); 275 int si_code = info->si_code; 276 int si_type; 277 tinfo->si_signo = sig; 278 tinfo->si_errno = 0; 279 tinfo->si_code = info->si_code; 280 281 /* This memset serves two purposes: 282 * (1) ensure we don't leak random junk to the guest later 283 * (2) placate false positives from gcc about fields 284 * being used uninitialized if it chooses to inline both this 285 * function and tswap_siginfo() into host_to_target_siginfo(). 286 */ 287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad)); 288 289 /* This is awkward, because we have to use a combination of 290 * the si_code and si_signo to figure out which of the union's 291 * members are valid. (Within the host kernel it is always possible 292 * to tell, but the kernel carefully avoids giving userspace the 293 * high 16 bits of si_code, so we don't have the information to 294 * do this the easy way...) We therefore make our best guess, 295 * bearing in mind that a guest can spoof most of the si_codes 296 * via rt_sigqueueinfo() if it likes. 297 * 298 * Once we have made our guess, we record it in the top 16 bits of 299 * the si_code, so that tswap_siginfo() later can use it. 300 * tswap_siginfo() will strip these top bits out before writing 301 * si_code to the guest (sign-extending the lower bits). 302 */ 303 304 switch (si_code) { 305 case SI_USER: 306 case SI_TKILL: 307 case SI_KERNEL: 308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel. 309 * These are the only unspoofable si_code values. 310 */ 311 tinfo->_sifields._kill._pid = info->si_pid; 312 tinfo->_sifields._kill._uid = info->si_uid; 313 si_type = QEMU_SI_KILL; 314 break; 315 default: 316 /* Everything else is spoofable. Make best guess based on signal */ 317 switch (sig) { 318 case TARGET_SIGCHLD: 319 tinfo->_sifields._sigchld._pid = info->si_pid; 320 tinfo->_sifields._sigchld._uid = info->si_uid; 321 tinfo->_sifields._sigchld._status 322 = host_to_target_waitstatus(info->si_status); 323 tinfo->_sifields._sigchld._utime = info->si_utime; 324 tinfo->_sifields._sigchld._stime = info->si_stime; 325 si_type = QEMU_SI_CHLD; 326 break; 327 case TARGET_SIGIO: 328 tinfo->_sifields._sigpoll._band = info->si_band; 329 tinfo->_sifields._sigpoll._fd = info->si_fd; 330 si_type = QEMU_SI_POLL; 331 break; 332 default: 333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */ 334 tinfo->_sifields._rt._pid = info->si_pid; 335 tinfo->_sifields._rt._uid = info->si_uid; 336 /* XXX: potential problem if 64 bit */ 337 tinfo->_sifields._rt._sigval.sival_ptr 338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr; 339 si_type = QEMU_SI_RT; 340 break; 341 } 342 break; 343 } 344 345 tinfo->si_code = deposit32(si_code, 16, 16, si_type); 346 } 347 348 static void tswap_siginfo(target_siginfo_t *tinfo, 349 const target_siginfo_t *info) 350 { 351 int si_type = extract32(info->si_code, 16, 16); 352 int si_code = sextract32(info->si_code, 0, 16); 353 354 __put_user(info->si_signo, &tinfo->si_signo); 355 __put_user(info->si_errno, &tinfo->si_errno); 356 __put_user(si_code, &tinfo->si_code); 357 358 /* We can use our internal marker of which fields in the structure 359 * are valid, rather than duplicating the guesswork of 360 * host_to_target_siginfo_noswap() here. 361 */ 362 switch (si_type) { 363 case QEMU_SI_KILL: 364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid); 365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid); 366 break; 367 case QEMU_SI_TIMER: 368 __put_user(info->_sifields._timer._timer1, 369 &tinfo->_sifields._timer._timer1); 370 __put_user(info->_sifields._timer._timer2, 371 &tinfo->_sifields._timer._timer2); 372 break; 373 case QEMU_SI_POLL: 374 __put_user(info->_sifields._sigpoll._band, 375 &tinfo->_sifields._sigpoll._band); 376 __put_user(info->_sifields._sigpoll._fd, 377 &tinfo->_sifields._sigpoll._fd); 378 break; 379 case QEMU_SI_FAULT: 380 __put_user(info->_sifields._sigfault._addr, 381 &tinfo->_sifields._sigfault._addr); 382 break; 383 case QEMU_SI_CHLD: 384 __put_user(info->_sifields._sigchld._pid, 385 &tinfo->_sifields._sigchld._pid); 386 __put_user(info->_sifields._sigchld._uid, 387 &tinfo->_sifields._sigchld._uid); 388 __put_user(info->_sifields._sigchld._status, 389 &tinfo->_sifields._sigchld._status); 390 __put_user(info->_sifields._sigchld._utime, 391 &tinfo->_sifields._sigchld._utime); 392 __put_user(info->_sifields._sigchld._stime, 393 &tinfo->_sifields._sigchld._stime); 394 break; 395 case QEMU_SI_RT: 396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid); 397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid); 398 __put_user(info->_sifields._rt._sigval.sival_ptr, 399 &tinfo->_sifields._rt._sigval.sival_ptr); 400 break; 401 default: 402 g_assert_not_reached(); 403 } 404 } 405 406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) 407 { 408 target_siginfo_t tgt_tmp; 409 host_to_target_siginfo_noswap(&tgt_tmp, info); 410 tswap_siginfo(tinfo, &tgt_tmp); 411 } 412 413 /* XXX: we support only POSIX RT signals are used. */ 414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */ 415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo) 416 { 417 /* This conversion is used only for the rt_sigqueueinfo syscall, 418 * and so we know that the _rt fields are the valid ones. 419 */ 420 abi_ulong sival_ptr; 421 422 __get_user(info->si_signo, &tinfo->si_signo); 423 __get_user(info->si_errno, &tinfo->si_errno); 424 __get_user(info->si_code, &tinfo->si_code); 425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid); 426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid); 427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr); 428 info->si_value.sival_ptr = (void *)(long)sival_ptr; 429 } 430 431 static int fatal_signal (int sig) 432 { 433 switch (sig) { 434 case TARGET_SIGCHLD: 435 case TARGET_SIGURG: 436 case TARGET_SIGWINCH: 437 /* Ignored by default. */ 438 return 0; 439 case TARGET_SIGCONT: 440 case TARGET_SIGSTOP: 441 case TARGET_SIGTSTP: 442 case TARGET_SIGTTIN: 443 case TARGET_SIGTTOU: 444 /* Job control signals. */ 445 return 0; 446 default: 447 return 1; 448 } 449 } 450 451 /* returns 1 if given signal should dump core if not handled */ 452 static int core_dump_signal(int sig) 453 { 454 switch (sig) { 455 case TARGET_SIGABRT: 456 case TARGET_SIGFPE: 457 case TARGET_SIGILL: 458 case TARGET_SIGQUIT: 459 case TARGET_SIGSEGV: 460 case TARGET_SIGTRAP: 461 case TARGET_SIGBUS: 462 return (1); 463 default: 464 return (0); 465 } 466 } 467 468 void signal_init(void) 469 { 470 TaskState *ts = (TaskState *)thread_cpu->opaque; 471 struct sigaction act; 472 struct sigaction oact; 473 int i, j; 474 int host_sig; 475 476 /* generate signal conversion tables */ 477 for(i = 1; i < _NSIG; i++) { 478 if (host_to_target_signal_table[i] == 0) 479 host_to_target_signal_table[i] = i; 480 } 481 for(i = 1; i < _NSIG; i++) { 482 j = host_to_target_signal_table[i]; 483 target_to_host_signal_table[j] = i; 484 } 485 486 /* Set the signal mask from the host mask. */ 487 sigprocmask(0, 0, &ts->signal_mask); 488 489 /* set all host signal handlers. ALL signals are blocked during 490 the handlers to serialize them. */ 491 memset(sigact_table, 0, sizeof(sigact_table)); 492 493 sigfillset(&act.sa_mask); 494 act.sa_flags = SA_SIGINFO; 495 act.sa_sigaction = host_signal_handler; 496 for(i = 1; i <= TARGET_NSIG; i++) { 497 host_sig = target_to_host_signal(i); 498 sigaction(host_sig, NULL, &oact); 499 if (oact.sa_sigaction == (void *)SIG_IGN) { 500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; 501 } else if (oact.sa_sigaction == (void *)SIG_DFL) { 502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL; 503 } 504 /* If there's already a handler installed then something has 505 gone horribly wrong, so don't even try to handle that case. */ 506 /* Install some handlers for our own use. We need at least 507 SIGSEGV and SIGBUS, to detect exceptions. We can not just 508 trap all signals because it affects syscall interrupt 509 behavior. But do trap all default-fatal signals. */ 510 if (fatal_signal (i)) 511 sigaction(host_sig, &act, NULL); 512 } 513 } 514 515 516 /* abort execution with signal */ 517 static void QEMU_NORETURN force_sig(int target_sig) 518 { 519 CPUState *cpu = thread_cpu; 520 CPUArchState *env = cpu->env_ptr; 521 TaskState *ts = (TaskState *)cpu->opaque; 522 int host_sig, core_dumped = 0; 523 struct sigaction act; 524 525 host_sig = target_to_host_signal(target_sig); 526 trace_user_force_sig(env, target_sig, host_sig); 527 gdb_signalled(env, target_sig); 528 529 /* dump core if supported by target binary format */ 530 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) { 531 stop_all_tasks(); 532 core_dumped = 533 ((*ts->bprm->core_dump)(target_sig, env) == 0); 534 } 535 if (core_dumped) { 536 /* we already dumped the core of target process, we don't want 537 * a coredump of qemu itself */ 538 struct rlimit nodump; 539 getrlimit(RLIMIT_CORE, &nodump); 540 nodump.rlim_cur=0; 541 setrlimit(RLIMIT_CORE, &nodump); 542 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n", 543 target_sig, strsignal(host_sig), "core dumped" ); 544 } 545 546 /* The proper exit code for dying from an uncaught signal is 547 * -<signal>. The kernel doesn't allow exit() or _exit() to pass 548 * a negative value. To get the proper exit code we need to 549 * actually die from an uncaught signal. Here the default signal 550 * handler is installed, we send ourself a signal and we wait for 551 * it to arrive. */ 552 sigfillset(&act.sa_mask); 553 act.sa_handler = SIG_DFL; 554 act.sa_flags = 0; 555 sigaction(host_sig, &act, NULL); 556 557 /* For some reason raise(host_sig) doesn't send the signal when 558 * statically linked on x86-64. */ 559 kill(getpid(), host_sig); 560 561 /* Make sure the signal isn't masked (just reuse the mask inside 562 of act) */ 563 sigdelset(&act.sa_mask, host_sig); 564 sigsuspend(&act.sa_mask); 565 566 /* unreachable */ 567 abort(); 568 } 569 570 /* queue a signal so that it will be send to the virtual CPU as soon 571 as possible */ 572 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info) 573 { 574 CPUState *cpu = ENV_GET_CPU(env); 575 TaskState *ts = cpu->opaque; 576 577 trace_user_queue_signal(env, sig); 578 579 /* Currently all callers define siginfo structures which 580 * use the _sifields._sigfault union member, so we can 581 * set the type here. If that changes we should push this 582 * out so the si_type is passed in by callers. 583 */ 584 info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT); 585 586 ts->sync_signal.info = *info; 587 ts->sync_signal.pending = sig; 588 /* signal that a new signal is pending */ 589 atomic_set(&ts->signal_pending, 1); 590 return 1; /* indicates that the signal was queued */ 591 } 592 593 #ifndef HAVE_SAFE_SYSCALL 594 static inline void rewind_if_in_safe_syscall(void *puc) 595 { 596 /* Default version: never rewind */ 597 } 598 #endif 599 600 static void host_signal_handler(int host_signum, siginfo_t *info, 601 void *puc) 602 { 603 CPUArchState *env = thread_cpu->env_ptr; 604 CPUState *cpu = ENV_GET_CPU(env); 605 TaskState *ts = cpu->opaque; 606 607 int sig; 608 target_siginfo_t tinfo; 609 ucontext_t *uc = puc; 610 struct emulated_sigtable *k; 611 612 /* the CPU emulator uses some host signals to detect exceptions, 613 we forward to it some signals */ 614 if ((host_signum == SIGSEGV || host_signum == SIGBUS) 615 && info->si_code > 0) { 616 if (cpu_signal_handler(host_signum, info, puc)) 617 return; 618 } 619 620 /* get target signal number */ 621 sig = host_to_target_signal(host_signum); 622 if (sig < 1 || sig > TARGET_NSIG) 623 return; 624 trace_user_host_signal(env, host_signum, sig); 625 626 rewind_if_in_safe_syscall(puc); 627 628 host_to_target_siginfo_noswap(&tinfo, info); 629 k = &ts->sigtab[sig - 1]; 630 k->info = tinfo; 631 k->pending = sig; 632 ts->signal_pending = 1; 633 634 /* Block host signals until target signal handler entered. We 635 * can't block SIGSEGV or SIGBUS while we're executing guest 636 * code in case the guest code provokes one in the window between 637 * now and it getting out to the main loop. Signals will be 638 * unblocked again in process_pending_signals(). 639 */ 640 sigfillset(&uc->uc_sigmask); 641 sigdelset(&uc->uc_sigmask, SIGSEGV); 642 sigdelset(&uc->uc_sigmask, SIGBUS); 643 644 /* interrupt the virtual CPU as soon as possible */ 645 cpu_exit(thread_cpu); 646 } 647 648 /* do_sigaltstack() returns target values and errnos. */ 649 /* compare linux/kernel/signal.c:do_sigaltstack() */ 650 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) 651 { 652 int ret; 653 struct target_sigaltstack oss; 654 655 /* XXX: test errors */ 656 if(uoss_addr) 657 { 658 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp); 659 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size); 660 __put_user(sas_ss_flags(sp), &oss.ss_flags); 661 } 662 663 if(uss_addr) 664 { 665 struct target_sigaltstack *uss; 666 struct target_sigaltstack ss; 667 size_t minstacksize = TARGET_MINSIGSTKSZ; 668 669 #if defined(TARGET_PPC64) 670 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */ 671 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 672 if (get_ppc64_abi(image) > 1) { 673 minstacksize = 4096; 674 } 675 #endif 676 677 ret = -TARGET_EFAULT; 678 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) { 679 goto out; 680 } 681 __get_user(ss.ss_sp, &uss->ss_sp); 682 __get_user(ss.ss_size, &uss->ss_size); 683 __get_user(ss.ss_flags, &uss->ss_flags); 684 unlock_user_struct(uss, uss_addr, 0); 685 686 ret = -TARGET_EPERM; 687 if (on_sig_stack(sp)) 688 goto out; 689 690 ret = -TARGET_EINVAL; 691 if (ss.ss_flags != TARGET_SS_DISABLE 692 && ss.ss_flags != TARGET_SS_ONSTACK 693 && ss.ss_flags != 0) 694 goto out; 695 696 if (ss.ss_flags == TARGET_SS_DISABLE) { 697 ss.ss_size = 0; 698 ss.ss_sp = 0; 699 } else { 700 ret = -TARGET_ENOMEM; 701 if (ss.ss_size < minstacksize) { 702 goto out; 703 } 704 } 705 706 target_sigaltstack_used.ss_sp = ss.ss_sp; 707 target_sigaltstack_used.ss_size = ss.ss_size; 708 } 709 710 if (uoss_addr) { 711 ret = -TARGET_EFAULT; 712 if (copy_to_user(uoss_addr, &oss, sizeof(oss))) 713 goto out; 714 } 715 716 ret = 0; 717 out: 718 return ret; 719 } 720 721 /* do_sigaction() return target values and host errnos */ 722 int do_sigaction(int sig, const struct target_sigaction *act, 723 struct target_sigaction *oact) 724 { 725 struct target_sigaction *k; 726 struct sigaction act1; 727 int host_sig; 728 int ret = 0; 729 730 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) { 731 return -TARGET_EINVAL; 732 } 733 734 if (block_signals()) { 735 return -TARGET_ERESTARTSYS; 736 } 737 738 k = &sigact_table[sig - 1]; 739 if (oact) { 740 __put_user(k->_sa_handler, &oact->_sa_handler); 741 __put_user(k->sa_flags, &oact->sa_flags); 742 #if !defined(TARGET_MIPS) 743 __put_user(k->sa_restorer, &oact->sa_restorer); 744 #endif 745 /* Not swapped. */ 746 oact->sa_mask = k->sa_mask; 747 } 748 if (act) { 749 /* FIXME: This is not threadsafe. */ 750 __get_user(k->_sa_handler, &act->_sa_handler); 751 __get_user(k->sa_flags, &act->sa_flags); 752 #if !defined(TARGET_MIPS) 753 __get_user(k->sa_restorer, &act->sa_restorer); 754 #endif 755 /* To be swapped in target_to_host_sigset. */ 756 k->sa_mask = act->sa_mask; 757 758 /* we update the host linux signal state */ 759 host_sig = target_to_host_signal(sig); 760 if (host_sig != SIGSEGV && host_sig != SIGBUS) { 761 sigfillset(&act1.sa_mask); 762 act1.sa_flags = SA_SIGINFO; 763 if (k->sa_flags & TARGET_SA_RESTART) 764 act1.sa_flags |= SA_RESTART; 765 /* NOTE: it is important to update the host kernel signal 766 ignore state to avoid getting unexpected interrupted 767 syscalls */ 768 if (k->_sa_handler == TARGET_SIG_IGN) { 769 act1.sa_sigaction = (void *)SIG_IGN; 770 } else if (k->_sa_handler == TARGET_SIG_DFL) { 771 if (fatal_signal (sig)) 772 act1.sa_sigaction = host_signal_handler; 773 else 774 act1.sa_sigaction = (void *)SIG_DFL; 775 } else { 776 act1.sa_sigaction = host_signal_handler; 777 } 778 ret = sigaction(host_sig, &act1, NULL); 779 } 780 } 781 return ret; 782 } 783 784 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32 785 786 /* from the Linux kernel */ 787 788 struct target_fpreg { 789 uint16_t significand[4]; 790 uint16_t exponent; 791 }; 792 793 struct target_fpxreg { 794 uint16_t significand[4]; 795 uint16_t exponent; 796 uint16_t padding[3]; 797 }; 798 799 struct target_xmmreg { 800 abi_ulong element[4]; 801 }; 802 803 struct target_fpstate { 804 /* Regular FPU environment */ 805 abi_ulong cw; 806 abi_ulong sw; 807 abi_ulong tag; 808 abi_ulong ipoff; 809 abi_ulong cssel; 810 abi_ulong dataoff; 811 abi_ulong datasel; 812 struct target_fpreg _st[8]; 813 uint16_t status; 814 uint16_t magic; /* 0xffff = regular FPU data only */ 815 816 /* FXSR FPU environment */ 817 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */ 818 abi_ulong mxcsr; 819 abi_ulong reserved; 820 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 821 struct target_xmmreg _xmm[8]; 822 abi_ulong padding[56]; 823 }; 824 825 #define X86_FXSR_MAGIC 0x0000 826 827 struct target_sigcontext { 828 uint16_t gs, __gsh; 829 uint16_t fs, __fsh; 830 uint16_t es, __esh; 831 uint16_t ds, __dsh; 832 abi_ulong edi; 833 abi_ulong esi; 834 abi_ulong ebp; 835 abi_ulong esp; 836 abi_ulong ebx; 837 abi_ulong edx; 838 abi_ulong ecx; 839 abi_ulong eax; 840 abi_ulong trapno; 841 abi_ulong err; 842 abi_ulong eip; 843 uint16_t cs, __csh; 844 abi_ulong eflags; 845 abi_ulong esp_at_signal; 846 uint16_t ss, __ssh; 847 abi_ulong fpstate; /* pointer */ 848 abi_ulong oldmask; 849 abi_ulong cr2; 850 }; 851 852 struct target_ucontext { 853 abi_ulong tuc_flags; 854 abi_ulong tuc_link; 855 target_stack_t tuc_stack; 856 struct target_sigcontext tuc_mcontext; 857 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 858 }; 859 860 struct sigframe 861 { 862 abi_ulong pretcode; 863 int sig; 864 struct target_sigcontext sc; 865 struct target_fpstate fpstate; 866 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 867 char retcode[8]; 868 }; 869 870 struct rt_sigframe 871 { 872 abi_ulong pretcode; 873 int sig; 874 abi_ulong pinfo; 875 abi_ulong puc; 876 struct target_siginfo info; 877 struct target_ucontext uc; 878 struct target_fpstate fpstate; 879 char retcode[8]; 880 }; 881 882 /* 883 * Set up a signal frame. 884 */ 885 886 /* XXX: save x87 state */ 887 static void setup_sigcontext(struct target_sigcontext *sc, 888 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, 889 abi_ulong fpstate_addr) 890 { 891 CPUState *cs = CPU(x86_env_get_cpu(env)); 892 uint16_t magic; 893 894 /* already locked in setup_frame() */ 895 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 896 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 897 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 898 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 899 __put_user(env->regs[R_EDI], &sc->edi); 900 __put_user(env->regs[R_ESI], &sc->esi); 901 __put_user(env->regs[R_EBP], &sc->ebp); 902 __put_user(env->regs[R_ESP], &sc->esp); 903 __put_user(env->regs[R_EBX], &sc->ebx); 904 __put_user(env->regs[R_EDX], &sc->edx); 905 __put_user(env->regs[R_ECX], &sc->ecx); 906 __put_user(env->regs[R_EAX], &sc->eax); 907 __put_user(cs->exception_index, &sc->trapno); 908 __put_user(env->error_code, &sc->err); 909 __put_user(env->eip, &sc->eip); 910 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 911 __put_user(env->eflags, &sc->eflags); 912 __put_user(env->regs[R_ESP], &sc->esp_at_signal); 913 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 914 915 cpu_x86_fsave(env, fpstate_addr, 1); 916 fpstate->status = fpstate->sw; 917 magic = 0xffff; 918 __put_user(magic, &fpstate->magic); 919 __put_user(fpstate_addr, &sc->fpstate); 920 921 /* non-iBCS2 extensions.. */ 922 __put_user(mask, &sc->oldmask); 923 __put_user(env->cr[2], &sc->cr2); 924 } 925 926 /* 927 * Determine which stack to use.. 928 */ 929 930 static inline abi_ulong 931 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) 932 { 933 unsigned long esp; 934 935 /* Default to using normal stack */ 936 esp = env->regs[R_ESP]; 937 /* This is the X/Open sanctioned signal stack switching. */ 938 if (ka->sa_flags & TARGET_SA_ONSTACK) { 939 if (sas_ss_flags(esp) == 0) { 940 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 941 } 942 } else { 943 944 /* This is the legacy signal stack switching. */ 945 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 946 !(ka->sa_flags & TARGET_SA_RESTORER) && 947 ka->sa_restorer) { 948 esp = (unsigned long) ka->sa_restorer; 949 } 950 } 951 return (esp - frame_size) & -8ul; 952 } 953 954 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 955 static void setup_frame(int sig, struct target_sigaction *ka, 956 target_sigset_t *set, CPUX86State *env) 957 { 958 abi_ulong frame_addr; 959 struct sigframe *frame; 960 int i; 961 962 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 963 trace_user_setup_frame(env, frame_addr); 964 965 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 966 goto give_sigsegv; 967 968 __put_user(sig, &frame->sig); 969 970 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 971 frame_addr + offsetof(struct sigframe, fpstate)); 972 973 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 974 __put_user(set->sig[i], &frame->extramask[i - 1]); 975 } 976 977 /* Set up to return from userspace. If provided, use a stub 978 already in userspace. */ 979 if (ka->sa_flags & TARGET_SA_RESTORER) { 980 __put_user(ka->sa_restorer, &frame->pretcode); 981 } else { 982 uint16_t val16; 983 abi_ulong retcode_addr; 984 retcode_addr = frame_addr + offsetof(struct sigframe, retcode); 985 __put_user(retcode_addr, &frame->pretcode); 986 /* This is popl %eax ; movl $,%eax ; int $0x80 */ 987 val16 = 0xb858; 988 __put_user(val16, (uint16_t *)(frame->retcode+0)); 989 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); 990 val16 = 0x80cd; 991 __put_user(val16, (uint16_t *)(frame->retcode+6)); 992 } 993 994 995 /* Set up registers for signal handler */ 996 env->regs[R_ESP] = frame_addr; 997 env->eip = ka->_sa_handler; 998 999 cpu_x86_load_seg(env, R_DS, __USER_DS); 1000 cpu_x86_load_seg(env, R_ES, __USER_DS); 1001 cpu_x86_load_seg(env, R_SS, __USER_DS); 1002 cpu_x86_load_seg(env, R_CS, __USER_CS); 1003 env->eflags &= ~TF_MASK; 1004 1005 unlock_user_struct(frame, frame_addr, 1); 1006 1007 return; 1008 1009 give_sigsegv: 1010 if (sig == TARGET_SIGSEGV) { 1011 ka->_sa_handler = TARGET_SIG_DFL; 1012 } 1013 force_sig(TARGET_SIGSEGV /* , current */); 1014 } 1015 1016 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */ 1017 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1018 target_siginfo_t *info, 1019 target_sigset_t *set, CPUX86State *env) 1020 { 1021 abi_ulong frame_addr, addr; 1022 struct rt_sigframe *frame; 1023 int i; 1024 1025 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1026 trace_user_setup_rt_frame(env, frame_addr); 1027 1028 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1029 goto give_sigsegv; 1030 1031 __put_user(sig, &frame->sig); 1032 addr = frame_addr + offsetof(struct rt_sigframe, info); 1033 __put_user(addr, &frame->pinfo); 1034 addr = frame_addr + offsetof(struct rt_sigframe, uc); 1035 __put_user(addr, &frame->puc); 1036 tswap_siginfo(&frame->info, info); 1037 1038 /* Create the ucontext. */ 1039 __put_user(0, &frame->uc.tuc_flags); 1040 __put_user(0, &frame->uc.tuc_link); 1041 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 1042 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 1043 &frame->uc.tuc_stack.ss_flags); 1044 __put_user(target_sigaltstack_used.ss_size, 1045 &frame->uc.tuc_stack.ss_size); 1046 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env, 1047 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate)); 1048 1049 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1050 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1051 } 1052 1053 /* Set up to return from userspace. If provided, use a stub 1054 already in userspace. */ 1055 if (ka->sa_flags & TARGET_SA_RESTORER) { 1056 __put_user(ka->sa_restorer, &frame->pretcode); 1057 } else { 1058 uint16_t val16; 1059 addr = frame_addr + offsetof(struct rt_sigframe, retcode); 1060 __put_user(addr, &frame->pretcode); 1061 /* This is movl $,%eax ; int $0x80 */ 1062 __put_user(0xb8, (char *)(frame->retcode+0)); 1063 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); 1064 val16 = 0x80cd; 1065 __put_user(val16, (uint16_t *)(frame->retcode+5)); 1066 } 1067 1068 /* Set up registers for signal handler */ 1069 env->regs[R_ESP] = frame_addr; 1070 env->eip = ka->_sa_handler; 1071 1072 cpu_x86_load_seg(env, R_DS, __USER_DS); 1073 cpu_x86_load_seg(env, R_ES, __USER_DS); 1074 cpu_x86_load_seg(env, R_SS, __USER_DS); 1075 cpu_x86_load_seg(env, R_CS, __USER_CS); 1076 env->eflags &= ~TF_MASK; 1077 1078 unlock_user_struct(frame, frame_addr, 1); 1079 1080 return; 1081 1082 give_sigsegv: 1083 if (sig == TARGET_SIGSEGV) { 1084 ka->_sa_handler = TARGET_SIG_DFL; 1085 } 1086 force_sig(TARGET_SIGSEGV /* , current */); 1087 } 1088 1089 static int 1090 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) 1091 { 1092 unsigned int err = 0; 1093 abi_ulong fpstate_addr; 1094 unsigned int tmpflags; 1095 1096 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); 1097 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); 1098 cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); 1099 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds)); 1100 1101 env->regs[R_EDI] = tswapl(sc->edi); 1102 env->regs[R_ESI] = tswapl(sc->esi); 1103 env->regs[R_EBP] = tswapl(sc->ebp); 1104 env->regs[R_ESP] = tswapl(sc->esp); 1105 env->regs[R_EBX] = tswapl(sc->ebx); 1106 env->regs[R_EDX] = tswapl(sc->edx); 1107 env->regs[R_ECX] = tswapl(sc->ecx); 1108 env->regs[R_EAX] = tswapl(sc->eax); 1109 env->eip = tswapl(sc->eip); 1110 1111 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); 1112 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); 1113 1114 tmpflags = tswapl(sc->eflags); 1115 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 1116 // regs->orig_eax = -1; /* disable syscall checks */ 1117 1118 fpstate_addr = tswapl(sc->fpstate); 1119 if (fpstate_addr != 0) { 1120 if (!access_ok(VERIFY_READ, fpstate_addr, 1121 sizeof(struct target_fpstate))) 1122 goto badframe; 1123 cpu_x86_frstor(env, fpstate_addr, 1); 1124 } 1125 1126 return err; 1127 badframe: 1128 return 1; 1129 } 1130 1131 long do_sigreturn(CPUX86State *env) 1132 { 1133 struct sigframe *frame; 1134 abi_ulong frame_addr = env->regs[R_ESP] - 8; 1135 target_sigset_t target_set; 1136 sigset_t set; 1137 int i; 1138 1139 trace_user_do_sigreturn(env, frame_addr); 1140 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1141 goto badframe; 1142 /* set blocked signals */ 1143 __get_user(target_set.sig[0], &frame->sc.oldmask); 1144 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1145 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 1146 } 1147 1148 target_to_host_sigset_internal(&set, &target_set); 1149 set_sigmask(&set); 1150 1151 /* restore registers */ 1152 if (restore_sigcontext(env, &frame->sc)) 1153 goto badframe; 1154 unlock_user_struct(frame, frame_addr, 0); 1155 return -TARGET_QEMU_ESIGRETURN; 1156 1157 badframe: 1158 unlock_user_struct(frame, frame_addr, 0); 1159 force_sig(TARGET_SIGSEGV); 1160 return 0; 1161 } 1162 1163 long do_rt_sigreturn(CPUX86State *env) 1164 { 1165 abi_ulong frame_addr; 1166 struct rt_sigframe *frame; 1167 sigset_t set; 1168 1169 frame_addr = env->regs[R_ESP] - 4; 1170 trace_user_do_rt_sigreturn(env, frame_addr); 1171 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1172 goto badframe; 1173 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 1174 set_sigmask(&set); 1175 1176 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 1177 goto badframe; 1178 } 1179 1180 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, 1181 get_sp_from_cpustate(env)) == -EFAULT) { 1182 goto badframe; 1183 } 1184 1185 unlock_user_struct(frame, frame_addr, 0); 1186 return -TARGET_QEMU_ESIGRETURN; 1187 1188 badframe: 1189 unlock_user_struct(frame, frame_addr, 0); 1190 force_sig(TARGET_SIGSEGV); 1191 return 0; 1192 } 1193 1194 #elif defined(TARGET_AARCH64) 1195 1196 struct target_sigcontext { 1197 uint64_t fault_address; 1198 /* AArch64 registers */ 1199 uint64_t regs[31]; 1200 uint64_t sp; 1201 uint64_t pc; 1202 uint64_t pstate; 1203 /* 4K reserved for FP/SIMD state and future expansion */ 1204 char __reserved[4096] __attribute__((__aligned__(16))); 1205 }; 1206 1207 struct target_ucontext { 1208 abi_ulong tuc_flags; 1209 abi_ulong tuc_link; 1210 target_stack_t tuc_stack; 1211 target_sigset_t tuc_sigmask; 1212 /* glibc uses a 1024-bit sigset_t */ 1213 char __unused[1024 / 8 - sizeof(target_sigset_t)]; 1214 /* last for future expansion */ 1215 struct target_sigcontext tuc_mcontext; 1216 }; 1217 1218 /* 1219 * Header to be used at the beginning of structures extending the user 1220 * context. Such structures must be placed after the rt_sigframe on the stack 1221 * and be 16-byte aligned. The last structure must be a dummy one with the 1222 * magic and size set to 0. 1223 */ 1224 struct target_aarch64_ctx { 1225 uint32_t magic; 1226 uint32_t size; 1227 }; 1228 1229 #define TARGET_FPSIMD_MAGIC 0x46508001 1230 1231 struct target_fpsimd_context { 1232 struct target_aarch64_ctx head; 1233 uint32_t fpsr; 1234 uint32_t fpcr; 1235 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */ 1236 }; 1237 1238 /* 1239 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to 1240 * user space as it will change with the addition of new context. User space 1241 * should check the magic/size information. 1242 */ 1243 struct target_aux_context { 1244 struct target_fpsimd_context fpsimd; 1245 /* additional context to be added before "end" */ 1246 struct target_aarch64_ctx end; 1247 }; 1248 1249 struct target_rt_sigframe { 1250 struct target_siginfo info; 1251 struct target_ucontext uc; 1252 uint64_t fp; 1253 uint64_t lr; 1254 uint32_t tramp[2]; 1255 }; 1256 1257 static int target_setup_sigframe(struct target_rt_sigframe *sf, 1258 CPUARMState *env, target_sigset_t *set) 1259 { 1260 int i; 1261 struct target_aux_context *aux = 1262 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1263 1264 /* set up the stack frame for unwinding */ 1265 __put_user(env->xregs[29], &sf->fp); 1266 __put_user(env->xregs[30], &sf->lr); 1267 1268 for (i = 0; i < 31; i++) { 1269 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1270 } 1271 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1272 __put_user(env->pc, &sf->uc.tuc_mcontext.pc); 1273 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate); 1274 1275 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address); 1276 1277 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 1278 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]); 1279 } 1280 1281 for (i = 0; i < 32; i++) { 1282 #ifdef TARGET_WORDS_BIGENDIAN 1283 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1284 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1285 #else 1286 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1287 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1288 #endif 1289 } 1290 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr); 1291 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr); 1292 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic); 1293 __put_user(sizeof(struct target_fpsimd_context), 1294 &aux->fpsimd.head.size); 1295 1296 /* set the "end" magic */ 1297 __put_user(0, &aux->end.magic); 1298 __put_user(0, &aux->end.size); 1299 1300 return 0; 1301 } 1302 1303 static int target_restore_sigframe(CPUARMState *env, 1304 struct target_rt_sigframe *sf) 1305 { 1306 sigset_t set; 1307 int i; 1308 struct target_aux_context *aux = 1309 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1310 uint32_t magic, size, fpsr, fpcr; 1311 uint64_t pstate; 1312 1313 target_to_host_sigset(&set, &sf->uc.tuc_sigmask); 1314 set_sigmask(&set); 1315 1316 for (i = 0; i < 31; i++) { 1317 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1318 } 1319 1320 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1321 __get_user(env->pc, &sf->uc.tuc_mcontext.pc); 1322 __get_user(pstate, &sf->uc.tuc_mcontext.pstate); 1323 pstate_write(env, pstate); 1324 1325 __get_user(magic, &aux->fpsimd.head.magic); 1326 __get_user(size, &aux->fpsimd.head.size); 1327 1328 if (magic != TARGET_FPSIMD_MAGIC 1329 || size != sizeof(struct target_fpsimd_context)) { 1330 return 1; 1331 } 1332 1333 for (i = 0; i < 32; i++) { 1334 #ifdef TARGET_WORDS_BIGENDIAN 1335 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1336 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1337 #else 1338 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1339 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1340 #endif 1341 } 1342 __get_user(fpsr, &aux->fpsimd.fpsr); 1343 vfp_set_fpsr(env, fpsr); 1344 __get_user(fpcr, &aux->fpsimd.fpcr); 1345 vfp_set_fpcr(env, fpcr); 1346 1347 return 0; 1348 } 1349 1350 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env) 1351 { 1352 abi_ulong sp; 1353 1354 sp = env->xregs[31]; 1355 1356 /* 1357 * This is the X/Open sanctioned signal stack switching. 1358 */ 1359 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1360 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1361 } 1362 1363 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15; 1364 1365 return sp; 1366 } 1367 1368 static void target_setup_frame(int usig, struct target_sigaction *ka, 1369 target_siginfo_t *info, target_sigset_t *set, 1370 CPUARMState *env) 1371 { 1372 struct target_rt_sigframe *frame; 1373 abi_ulong frame_addr, return_addr; 1374 1375 frame_addr = get_sigframe(ka, env); 1376 trace_user_setup_frame(env, frame_addr); 1377 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1378 goto give_sigsegv; 1379 } 1380 1381 __put_user(0, &frame->uc.tuc_flags); 1382 __put_user(0, &frame->uc.tuc_link); 1383 1384 __put_user(target_sigaltstack_used.ss_sp, 1385 &frame->uc.tuc_stack.ss_sp); 1386 __put_user(sas_ss_flags(env->xregs[31]), 1387 &frame->uc.tuc_stack.ss_flags); 1388 __put_user(target_sigaltstack_used.ss_size, 1389 &frame->uc.tuc_stack.ss_size); 1390 target_setup_sigframe(frame, env, set); 1391 if (ka->sa_flags & TARGET_SA_RESTORER) { 1392 return_addr = ka->sa_restorer; 1393 } else { 1394 /* mov x8,#__NR_rt_sigreturn; svc #0 */ 1395 __put_user(0xd2801168, &frame->tramp[0]); 1396 __put_user(0xd4000001, &frame->tramp[1]); 1397 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp); 1398 } 1399 env->xregs[0] = usig; 1400 env->xregs[31] = frame_addr; 1401 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp); 1402 env->pc = ka->_sa_handler; 1403 env->xregs[30] = return_addr; 1404 if (info) { 1405 tswap_siginfo(&frame->info, info); 1406 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); 1407 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 1408 } 1409 1410 unlock_user_struct(frame, frame_addr, 1); 1411 return; 1412 1413 give_sigsegv: 1414 unlock_user_struct(frame, frame_addr, 1); 1415 force_sig(TARGET_SIGSEGV); 1416 } 1417 1418 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1419 target_siginfo_t *info, target_sigset_t *set, 1420 CPUARMState *env) 1421 { 1422 target_setup_frame(sig, ka, info, set, env); 1423 } 1424 1425 static void setup_frame(int sig, struct target_sigaction *ka, 1426 target_sigset_t *set, CPUARMState *env) 1427 { 1428 target_setup_frame(sig, ka, 0, set, env); 1429 } 1430 1431 long do_rt_sigreturn(CPUARMState *env) 1432 { 1433 struct target_rt_sigframe *frame = NULL; 1434 abi_ulong frame_addr = env->xregs[31]; 1435 1436 trace_user_do_rt_sigreturn(env, frame_addr); 1437 if (frame_addr & 15) { 1438 goto badframe; 1439 } 1440 1441 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1442 goto badframe; 1443 } 1444 1445 if (target_restore_sigframe(env, frame)) { 1446 goto badframe; 1447 } 1448 1449 if (do_sigaltstack(frame_addr + 1450 offsetof(struct target_rt_sigframe, uc.tuc_stack), 1451 0, get_sp_from_cpustate(env)) == -EFAULT) { 1452 goto badframe; 1453 } 1454 1455 unlock_user_struct(frame, frame_addr, 0); 1456 return -TARGET_QEMU_ESIGRETURN; 1457 1458 badframe: 1459 unlock_user_struct(frame, frame_addr, 0); 1460 force_sig(TARGET_SIGSEGV); 1461 return 0; 1462 } 1463 1464 long do_sigreturn(CPUARMState *env) 1465 { 1466 return do_rt_sigreturn(env); 1467 } 1468 1469 #elif defined(TARGET_ARM) 1470 1471 struct target_sigcontext { 1472 abi_ulong trap_no; 1473 abi_ulong error_code; 1474 abi_ulong oldmask; 1475 abi_ulong arm_r0; 1476 abi_ulong arm_r1; 1477 abi_ulong arm_r2; 1478 abi_ulong arm_r3; 1479 abi_ulong arm_r4; 1480 abi_ulong arm_r5; 1481 abi_ulong arm_r6; 1482 abi_ulong arm_r7; 1483 abi_ulong arm_r8; 1484 abi_ulong arm_r9; 1485 abi_ulong arm_r10; 1486 abi_ulong arm_fp; 1487 abi_ulong arm_ip; 1488 abi_ulong arm_sp; 1489 abi_ulong arm_lr; 1490 abi_ulong arm_pc; 1491 abi_ulong arm_cpsr; 1492 abi_ulong fault_address; 1493 }; 1494 1495 struct target_ucontext_v1 { 1496 abi_ulong tuc_flags; 1497 abi_ulong tuc_link; 1498 target_stack_t tuc_stack; 1499 struct target_sigcontext tuc_mcontext; 1500 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1501 }; 1502 1503 struct target_ucontext_v2 { 1504 abi_ulong tuc_flags; 1505 abi_ulong tuc_link; 1506 target_stack_t tuc_stack; 1507 struct target_sigcontext tuc_mcontext; 1508 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1509 char __unused[128 - sizeof(target_sigset_t)]; 1510 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8))); 1511 }; 1512 1513 struct target_user_vfp { 1514 uint64_t fpregs[32]; 1515 abi_ulong fpscr; 1516 }; 1517 1518 struct target_user_vfp_exc { 1519 abi_ulong fpexc; 1520 abi_ulong fpinst; 1521 abi_ulong fpinst2; 1522 }; 1523 1524 struct target_vfp_sigframe { 1525 abi_ulong magic; 1526 abi_ulong size; 1527 struct target_user_vfp ufp; 1528 struct target_user_vfp_exc ufp_exc; 1529 } __attribute__((__aligned__(8))); 1530 1531 struct target_iwmmxt_sigframe { 1532 abi_ulong magic; 1533 abi_ulong size; 1534 uint64_t regs[16]; 1535 /* Note that not all the coprocessor control registers are stored here */ 1536 uint32_t wcssf; 1537 uint32_t wcasf; 1538 uint32_t wcgr0; 1539 uint32_t wcgr1; 1540 uint32_t wcgr2; 1541 uint32_t wcgr3; 1542 } __attribute__((__aligned__(8))); 1543 1544 #define TARGET_VFP_MAGIC 0x56465001 1545 #define TARGET_IWMMXT_MAGIC 0x12ef842a 1546 1547 struct sigframe_v1 1548 { 1549 struct target_sigcontext sc; 1550 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 1551 abi_ulong retcode; 1552 }; 1553 1554 struct sigframe_v2 1555 { 1556 struct target_ucontext_v2 uc; 1557 abi_ulong retcode; 1558 }; 1559 1560 struct rt_sigframe_v1 1561 { 1562 abi_ulong pinfo; 1563 abi_ulong puc; 1564 struct target_siginfo info; 1565 struct target_ucontext_v1 uc; 1566 abi_ulong retcode; 1567 }; 1568 1569 struct rt_sigframe_v2 1570 { 1571 struct target_siginfo info; 1572 struct target_ucontext_v2 uc; 1573 abi_ulong retcode; 1574 }; 1575 1576 #define TARGET_CONFIG_CPU_32 1 1577 1578 /* 1579 * For ARM syscalls, we encode the syscall number into the instruction. 1580 */ 1581 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) 1582 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) 1583 1584 /* 1585 * For Thumb syscalls, we pass the syscall number via r7. We therefore 1586 * need two 16-bit instructions. 1587 */ 1588 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 1589 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 1590 1591 static const abi_ulong retcodes[4] = { 1592 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 1593 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 1594 }; 1595 1596 1597 static inline int valid_user_regs(CPUARMState *regs) 1598 { 1599 return 1; 1600 } 1601 1602 static void 1603 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1604 CPUARMState *env, abi_ulong mask) 1605 { 1606 __put_user(env->regs[0], &sc->arm_r0); 1607 __put_user(env->regs[1], &sc->arm_r1); 1608 __put_user(env->regs[2], &sc->arm_r2); 1609 __put_user(env->regs[3], &sc->arm_r3); 1610 __put_user(env->regs[4], &sc->arm_r4); 1611 __put_user(env->regs[5], &sc->arm_r5); 1612 __put_user(env->regs[6], &sc->arm_r6); 1613 __put_user(env->regs[7], &sc->arm_r7); 1614 __put_user(env->regs[8], &sc->arm_r8); 1615 __put_user(env->regs[9], &sc->arm_r9); 1616 __put_user(env->regs[10], &sc->arm_r10); 1617 __put_user(env->regs[11], &sc->arm_fp); 1618 __put_user(env->regs[12], &sc->arm_ip); 1619 __put_user(env->regs[13], &sc->arm_sp); 1620 __put_user(env->regs[14], &sc->arm_lr); 1621 __put_user(env->regs[15], &sc->arm_pc); 1622 #ifdef TARGET_CONFIG_CPU_32 1623 __put_user(cpsr_read(env), &sc->arm_cpsr); 1624 #endif 1625 1626 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no); 1627 __put_user(/* current->thread.error_code */ 0, &sc->error_code); 1628 __put_user(/* current->thread.address */ 0, &sc->fault_address); 1629 __put_user(mask, &sc->oldmask); 1630 } 1631 1632 static inline abi_ulong 1633 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) 1634 { 1635 unsigned long sp = regs->regs[13]; 1636 1637 /* 1638 * This is the X/Open sanctioned signal stack switching. 1639 */ 1640 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1641 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1642 } 1643 /* 1644 * ATPCS B01 mandates 8-byte alignment 1645 */ 1646 return (sp - framesize) & ~7; 1647 } 1648 1649 static void 1650 setup_return(CPUARMState *env, struct target_sigaction *ka, 1651 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) 1652 { 1653 abi_ulong handler = ka->_sa_handler; 1654 abi_ulong retcode; 1655 int thumb = handler & 1; 1656 uint32_t cpsr = cpsr_read(env); 1657 1658 cpsr &= ~CPSR_IT; 1659 if (thumb) { 1660 cpsr |= CPSR_T; 1661 } else { 1662 cpsr &= ~CPSR_T; 1663 } 1664 1665 if (ka->sa_flags & TARGET_SA_RESTORER) { 1666 retcode = ka->sa_restorer; 1667 } else { 1668 unsigned int idx = thumb; 1669 1670 if (ka->sa_flags & TARGET_SA_SIGINFO) { 1671 idx += 2; 1672 } 1673 1674 __put_user(retcodes[idx], rc); 1675 1676 retcode = rc_addr + thumb; 1677 } 1678 1679 env->regs[0] = usig; 1680 env->regs[13] = frame_addr; 1681 env->regs[14] = retcode; 1682 env->regs[15] = handler & (thumb ? ~1 : ~3); 1683 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr); 1684 } 1685 1686 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) 1687 { 1688 int i; 1689 struct target_vfp_sigframe *vfpframe; 1690 vfpframe = (struct target_vfp_sigframe *)regspace; 1691 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic); 1692 __put_user(sizeof(*vfpframe), &vfpframe->size); 1693 for (i = 0; i < 32; i++) { 1694 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 1695 } 1696 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr); 1697 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc); 1698 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 1699 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 1700 return (abi_ulong*)(vfpframe+1); 1701 } 1702 1703 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, 1704 CPUARMState *env) 1705 { 1706 int i; 1707 struct target_iwmmxt_sigframe *iwmmxtframe; 1708 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 1709 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic); 1710 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size); 1711 for (i = 0; i < 16; i++) { 1712 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 1713 } 1714 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 1715 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); 1716 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 1717 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 1718 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 1719 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 1720 return (abi_ulong*)(iwmmxtframe+1); 1721 } 1722 1723 static void setup_sigframe_v2(struct target_ucontext_v2 *uc, 1724 target_sigset_t *set, CPUARMState *env) 1725 { 1726 struct target_sigaltstack stack; 1727 int i; 1728 abi_ulong *regspace; 1729 1730 /* Clear all the bits of the ucontext we don't use. */ 1731 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext)); 1732 1733 memset(&stack, 0, sizeof(stack)); 1734 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1735 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1736 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1737 memcpy(&uc->tuc_stack, &stack, sizeof(stack)); 1738 1739 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]); 1740 /* Save coprocessor signal frame. */ 1741 regspace = uc->tuc_regspace; 1742 if (arm_feature(env, ARM_FEATURE_VFP)) { 1743 regspace = setup_sigframe_v2_vfp(regspace, env); 1744 } 1745 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 1746 regspace = setup_sigframe_v2_iwmmxt(regspace, env); 1747 } 1748 1749 /* Write terminating magic word */ 1750 __put_user(0, regspace); 1751 1752 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1753 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]); 1754 } 1755 } 1756 1757 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 1758 static void setup_frame_v1(int usig, struct target_sigaction *ka, 1759 target_sigset_t *set, CPUARMState *regs) 1760 { 1761 struct sigframe_v1 *frame; 1762 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1763 int i; 1764 1765 trace_user_setup_frame(regs, frame_addr); 1766 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1767 return; 1768 } 1769 1770 setup_sigcontext(&frame->sc, regs, set->sig[0]); 1771 1772 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1773 __put_user(set->sig[i], &frame->extramask[i - 1]); 1774 } 1775 1776 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1777 frame_addr + offsetof(struct sigframe_v1, retcode)); 1778 1779 unlock_user_struct(frame, frame_addr, 1); 1780 } 1781 1782 static void setup_frame_v2(int usig, struct target_sigaction *ka, 1783 target_sigset_t *set, CPUARMState *regs) 1784 { 1785 struct sigframe_v2 *frame; 1786 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1787 1788 trace_user_setup_frame(regs, frame_addr); 1789 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1790 return; 1791 } 1792 1793 setup_sigframe_v2(&frame->uc, set, regs); 1794 1795 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1796 frame_addr + offsetof(struct sigframe_v2, retcode)); 1797 1798 unlock_user_struct(frame, frame_addr, 1); 1799 } 1800 1801 static void setup_frame(int usig, struct target_sigaction *ka, 1802 target_sigset_t *set, CPUARMState *regs) 1803 { 1804 if (get_osversion() >= 0x020612) { 1805 setup_frame_v2(usig, ka, set, regs); 1806 } else { 1807 setup_frame_v1(usig, ka, set, regs); 1808 } 1809 } 1810 1811 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 1812 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, 1813 target_siginfo_t *info, 1814 target_sigset_t *set, CPUARMState *env) 1815 { 1816 struct rt_sigframe_v1 *frame; 1817 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1818 struct target_sigaltstack stack; 1819 int i; 1820 abi_ulong info_addr, uc_addr; 1821 1822 trace_user_setup_rt_frame(env, frame_addr); 1823 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1824 return /* 1 */; 1825 } 1826 1827 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); 1828 __put_user(info_addr, &frame->pinfo); 1829 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); 1830 __put_user(uc_addr, &frame->puc); 1831 tswap_siginfo(&frame->info, info); 1832 1833 /* Clear all the bits of the ucontext we don't use. */ 1834 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 1835 1836 memset(&stack, 0, sizeof(stack)); 1837 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1838 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1839 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1840 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1841 1842 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 1843 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1844 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1845 } 1846 1847 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1848 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 1849 1850 env->regs[1] = info_addr; 1851 env->regs[2] = uc_addr; 1852 1853 unlock_user_struct(frame, frame_addr, 1); 1854 } 1855 1856 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 1857 target_siginfo_t *info, 1858 target_sigset_t *set, CPUARMState *env) 1859 { 1860 struct rt_sigframe_v2 *frame; 1861 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1862 abi_ulong info_addr, uc_addr; 1863 1864 trace_user_setup_rt_frame(env, frame_addr); 1865 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1866 return /* 1 */; 1867 } 1868 1869 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 1870 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 1871 tswap_siginfo(&frame->info, info); 1872 1873 setup_sigframe_v2(&frame->uc, set, env); 1874 1875 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1876 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 1877 1878 env->regs[1] = info_addr; 1879 env->regs[2] = uc_addr; 1880 1881 unlock_user_struct(frame, frame_addr, 1); 1882 } 1883 1884 static void setup_rt_frame(int usig, struct target_sigaction *ka, 1885 target_siginfo_t *info, 1886 target_sigset_t *set, CPUARMState *env) 1887 { 1888 if (get_osversion() >= 0x020612) { 1889 setup_rt_frame_v2(usig, ka, info, set, env); 1890 } else { 1891 setup_rt_frame_v1(usig, ka, info, set, env); 1892 } 1893 } 1894 1895 static int 1896 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 1897 { 1898 int err = 0; 1899 uint32_t cpsr; 1900 1901 __get_user(env->regs[0], &sc->arm_r0); 1902 __get_user(env->regs[1], &sc->arm_r1); 1903 __get_user(env->regs[2], &sc->arm_r2); 1904 __get_user(env->regs[3], &sc->arm_r3); 1905 __get_user(env->regs[4], &sc->arm_r4); 1906 __get_user(env->regs[5], &sc->arm_r5); 1907 __get_user(env->regs[6], &sc->arm_r6); 1908 __get_user(env->regs[7], &sc->arm_r7); 1909 __get_user(env->regs[8], &sc->arm_r8); 1910 __get_user(env->regs[9], &sc->arm_r9); 1911 __get_user(env->regs[10], &sc->arm_r10); 1912 __get_user(env->regs[11], &sc->arm_fp); 1913 __get_user(env->regs[12], &sc->arm_ip); 1914 __get_user(env->regs[13], &sc->arm_sp); 1915 __get_user(env->regs[14], &sc->arm_lr); 1916 __get_user(env->regs[15], &sc->arm_pc); 1917 #ifdef TARGET_CONFIG_CPU_32 1918 __get_user(cpsr, &sc->arm_cpsr); 1919 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 1920 #endif 1921 1922 err |= !valid_user_regs(env); 1923 1924 return err; 1925 } 1926 1927 static long do_sigreturn_v1(CPUARMState *env) 1928 { 1929 abi_ulong frame_addr; 1930 struct sigframe_v1 *frame = NULL; 1931 target_sigset_t set; 1932 sigset_t host_set; 1933 int i; 1934 1935 /* 1936 * Since we stacked the signal on a 64-bit boundary, 1937 * then 'sp' should be word aligned here. If it's 1938 * not, then the user is trying to mess with us. 1939 */ 1940 frame_addr = env->regs[13]; 1941 trace_user_do_sigreturn(env, frame_addr); 1942 if (frame_addr & 7) { 1943 goto badframe; 1944 } 1945 1946 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1947 goto badframe; 1948 } 1949 1950 __get_user(set.sig[0], &frame->sc.oldmask); 1951 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1952 __get_user(set.sig[i], &frame->extramask[i - 1]); 1953 } 1954 1955 target_to_host_sigset_internal(&host_set, &set); 1956 set_sigmask(&host_set); 1957 1958 if (restore_sigcontext(env, &frame->sc)) { 1959 goto badframe; 1960 } 1961 1962 #if 0 1963 /* Send SIGTRAP if we're single-stepping */ 1964 if (ptrace_cancel_bpt(current)) 1965 send_sig(SIGTRAP, current, 1); 1966 #endif 1967 unlock_user_struct(frame, frame_addr, 0); 1968 return -TARGET_QEMU_ESIGRETURN; 1969 1970 badframe: 1971 force_sig(TARGET_SIGSEGV /* , current */); 1972 return 0; 1973 } 1974 1975 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) 1976 { 1977 int i; 1978 abi_ulong magic, sz; 1979 uint32_t fpscr, fpexc; 1980 struct target_vfp_sigframe *vfpframe; 1981 vfpframe = (struct target_vfp_sigframe *)regspace; 1982 1983 __get_user(magic, &vfpframe->magic); 1984 __get_user(sz, &vfpframe->size); 1985 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) { 1986 return 0; 1987 } 1988 for (i = 0; i < 32; i++) { 1989 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 1990 } 1991 __get_user(fpscr, &vfpframe->ufp.fpscr); 1992 vfp_set_fpscr(env, fpscr); 1993 __get_user(fpexc, &vfpframe->ufp_exc.fpexc); 1994 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid 1995 * and the exception flag is cleared 1996 */ 1997 fpexc |= (1 << 30); 1998 fpexc &= ~((1 << 31) | (1 << 28)); 1999 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc; 2000 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2001 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2002 return (abi_ulong*)(vfpframe + 1); 2003 } 2004 2005 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, 2006 abi_ulong *regspace) 2007 { 2008 int i; 2009 abi_ulong magic, sz; 2010 struct target_iwmmxt_sigframe *iwmmxtframe; 2011 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2012 2013 __get_user(magic, &iwmmxtframe->magic); 2014 __get_user(sz, &iwmmxtframe->size); 2015 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { 2016 return 0; 2017 } 2018 for (i = 0; i < 16; i++) { 2019 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2020 } 2021 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2022 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); 2023 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2024 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2025 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2026 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2027 return (abi_ulong*)(iwmmxtframe + 1); 2028 } 2029 2030 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr, 2031 struct target_ucontext_v2 *uc) 2032 { 2033 sigset_t host_set; 2034 abi_ulong *regspace; 2035 2036 target_to_host_sigset(&host_set, &uc->tuc_sigmask); 2037 set_sigmask(&host_set); 2038 2039 if (restore_sigcontext(env, &uc->tuc_mcontext)) 2040 return 1; 2041 2042 /* Restore coprocessor signal frame */ 2043 regspace = uc->tuc_regspace; 2044 if (arm_feature(env, ARM_FEATURE_VFP)) { 2045 regspace = restore_sigframe_v2_vfp(env, regspace); 2046 if (!regspace) { 2047 return 1; 2048 } 2049 } 2050 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2051 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2052 if (!regspace) { 2053 return 1; 2054 } 2055 } 2056 2057 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2058 return 1; 2059 2060 #if 0 2061 /* Send SIGTRAP if we're single-stepping */ 2062 if (ptrace_cancel_bpt(current)) 2063 send_sig(SIGTRAP, current, 1); 2064 #endif 2065 2066 return 0; 2067 } 2068 2069 static long do_sigreturn_v2(CPUARMState *env) 2070 { 2071 abi_ulong frame_addr; 2072 struct sigframe_v2 *frame = NULL; 2073 2074 /* 2075 * Since we stacked the signal on a 64-bit boundary, 2076 * then 'sp' should be word aligned here. If it's 2077 * not, then the user is trying to mess with us. 2078 */ 2079 frame_addr = env->regs[13]; 2080 trace_user_do_sigreturn(env, frame_addr); 2081 if (frame_addr & 7) { 2082 goto badframe; 2083 } 2084 2085 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2086 goto badframe; 2087 } 2088 2089 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2090 goto badframe; 2091 } 2092 2093 unlock_user_struct(frame, frame_addr, 0); 2094 return -TARGET_QEMU_ESIGRETURN; 2095 2096 badframe: 2097 unlock_user_struct(frame, frame_addr, 0); 2098 force_sig(TARGET_SIGSEGV /* , current */); 2099 return 0; 2100 } 2101 2102 long do_sigreturn(CPUARMState *env) 2103 { 2104 if (get_osversion() >= 0x020612) { 2105 return do_sigreturn_v2(env); 2106 } else { 2107 return do_sigreturn_v1(env); 2108 } 2109 } 2110 2111 static long do_rt_sigreturn_v1(CPUARMState *env) 2112 { 2113 abi_ulong frame_addr; 2114 struct rt_sigframe_v1 *frame = NULL; 2115 sigset_t host_set; 2116 2117 /* 2118 * Since we stacked the signal on a 64-bit boundary, 2119 * then 'sp' should be word aligned here. If it's 2120 * not, then the user is trying to mess with us. 2121 */ 2122 frame_addr = env->regs[13]; 2123 trace_user_do_rt_sigreturn(env, frame_addr); 2124 if (frame_addr & 7) { 2125 goto badframe; 2126 } 2127 2128 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2129 goto badframe; 2130 } 2131 2132 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2133 set_sigmask(&host_set); 2134 2135 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2136 goto badframe; 2137 } 2138 2139 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2140 goto badframe; 2141 2142 #if 0 2143 /* Send SIGTRAP if we're single-stepping */ 2144 if (ptrace_cancel_bpt(current)) 2145 send_sig(SIGTRAP, current, 1); 2146 #endif 2147 unlock_user_struct(frame, frame_addr, 0); 2148 return -TARGET_QEMU_ESIGRETURN; 2149 2150 badframe: 2151 unlock_user_struct(frame, frame_addr, 0); 2152 force_sig(TARGET_SIGSEGV /* , current */); 2153 return 0; 2154 } 2155 2156 static long do_rt_sigreturn_v2(CPUARMState *env) 2157 { 2158 abi_ulong frame_addr; 2159 struct rt_sigframe_v2 *frame = NULL; 2160 2161 /* 2162 * Since we stacked the signal on a 64-bit boundary, 2163 * then 'sp' should be word aligned here. If it's 2164 * not, then the user is trying to mess with us. 2165 */ 2166 frame_addr = env->regs[13]; 2167 trace_user_do_rt_sigreturn(env, frame_addr); 2168 if (frame_addr & 7) { 2169 goto badframe; 2170 } 2171 2172 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2173 goto badframe; 2174 } 2175 2176 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2177 goto badframe; 2178 } 2179 2180 unlock_user_struct(frame, frame_addr, 0); 2181 return -TARGET_QEMU_ESIGRETURN; 2182 2183 badframe: 2184 unlock_user_struct(frame, frame_addr, 0); 2185 force_sig(TARGET_SIGSEGV /* , current */); 2186 return 0; 2187 } 2188 2189 long do_rt_sigreturn(CPUARMState *env) 2190 { 2191 if (get_osversion() >= 0x020612) { 2192 return do_rt_sigreturn_v2(env); 2193 } else { 2194 return do_rt_sigreturn_v1(env); 2195 } 2196 } 2197 2198 #elif defined(TARGET_SPARC) 2199 2200 #define __SUNOS_MAXWIN 31 2201 2202 /* This is what SunOS does, so shall I. */ 2203 struct target_sigcontext { 2204 abi_ulong sigc_onstack; /* state to restore */ 2205 2206 abi_ulong sigc_mask; /* sigmask to restore */ 2207 abi_ulong sigc_sp; /* stack pointer */ 2208 abi_ulong sigc_pc; /* program counter */ 2209 abi_ulong sigc_npc; /* next program counter */ 2210 abi_ulong sigc_psr; /* for condition codes etc */ 2211 abi_ulong sigc_g1; /* User uses these two registers */ 2212 abi_ulong sigc_o0; /* within the trampoline code. */ 2213 2214 /* Now comes information regarding the users window set 2215 * at the time of the signal. 2216 */ 2217 abi_ulong sigc_oswins; /* outstanding windows */ 2218 2219 /* stack ptrs for each regwin buf */ 2220 char *sigc_spbuf[__SUNOS_MAXWIN]; 2221 2222 /* Windows to restore after signal */ 2223 struct { 2224 abi_ulong locals[8]; 2225 abi_ulong ins[8]; 2226 } sigc_wbuf[__SUNOS_MAXWIN]; 2227 }; 2228 /* A Sparc stack frame */ 2229 struct sparc_stackf { 2230 abi_ulong locals[8]; 2231 abi_ulong ins[8]; 2232 /* It's simpler to treat fp and callers_pc as elements of ins[] 2233 * since we never need to access them ourselves. 2234 */ 2235 char *structptr; 2236 abi_ulong xargs[6]; 2237 abi_ulong xxargs[1]; 2238 }; 2239 2240 typedef struct { 2241 struct { 2242 abi_ulong psr; 2243 abi_ulong pc; 2244 abi_ulong npc; 2245 abi_ulong y; 2246 abi_ulong u_regs[16]; /* globals and ins */ 2247 } si_regs; 2248 int si_mask; 2249 } __siginfo_t; 2250 2251 typedef struct { 2252 abi_ulong si_float_regs[32]; 2253 unsigned long si_fsr; 2254 unsigned long si_fpqdepth; 2255 struct { 2256 unsigned long *insn_addr; 2257 unsigned long insn; 2258 } si_fpqueue [16]; 2259 } qemu_siginfo_fpu_t; 2260 2261 2262 struct target_signal_frame { 2263 struct sparc_stackf ss; 2264 __siginfo_t info; 2265 abi_ulong fpu_save; 2266 abi_ulong insns[2] __attribute__ ((aligned (8))); 2267 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2268 abi_ulong extra_size; /* Should be 0 */ 2269 qemu_siginfo_fpu_t fpu_state; 2270 }; 2271 struct target_rt_signal_frame { 2272 struct sparc_stackf ss; 2273 siginfo_t info; 2274 abi_ulong regs[20]; 2275 sigset_t mask; 2276 abi_ulong fpu_save; 2277 unsigned int insns[2]; 2278 stack_t stack; 2279 unsigned int extra_size; /* Should be 0 */ 2280 qemu_siginfo_fpu_t fpu_state; 2281 }; 2282 2283 #define UREG_O0 16 2284 #define UREG_O6 22 2285 #define UREG_I0 0 2286 #define UREG_I1 1 2287 #define UREG_I2 2 2288 #define UREG_I3 3 2289 #define UREG_I4 4 2290 #define UREG_I5 5 2291 #define UREG_I6 6 2292 #define UREG_I7 7 2293 #define UREG_L0 8 2294 #define UREG_FP UREG_I6 2295 #define UREG_SP UREG_O6 2296 2297 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2298 CPUSPARCState *env, 2299 unsigned long framesize) 2300 { 2301 abi_ulong sp; 2302 2303 sp = env->regwptr[UREG_FP]; 2304 2305 /* This is the X/Open sanctioned signal stack switching. */ 2306 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2307 if (!on_sig_stack(sp) 2308 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2309 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2310 } 2311 } 2312 return sp - framesize; 2313 } 2314 2315 static int 2316 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2317 { 2318 int err = 0, i; 2319 2320 __put_user(env->psr, &si->si_regs.psr); 2321 __put_user(env->pc, &si->si_regs.pc); 2322 __put_user(env->npc, &si->si_regs.npc); 2323 __put_user(env->y, &si->si_regs.y); 2324 for (i=0; i < 8; i++) { 2325 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2326 } 2327 for (i=0; i < 8; i++) { 2328 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2329 } 2330 __put_user(mask, &si->si_mask); 2331 return err; 2332 } 2333 2334 #if 0 2335 static int 2336 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2337 CPUSPARCState *env, unsigned long mask) 2338 { 2339 int err = 0; 2340 2341 __put_user(mask, &sc->sigc_mask); 2342 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2343 __put_user(env->pc, &sc->sigc_pc); 2344 __put_user(env->npc, &sc->sigc_npc); 2345 __put_user(env->psr, &sc->sigc_psr); 2346 __put_user(env->gregs[1], &sc->sigc_g1); 2347 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2348 2349 return err; 2350 } 2351 #endif 2352 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2353 2354 static void setup_frame(int sig, struct target_sigaction *ka, 2355 target_sigset_t *set, CPUSPARCState *env) 2356 { 2357 abi_ulong sf_addr; 2358 struct target_signal_frame *sf; 2359 int sigframe_size, err, i; 2360 2361 /* 1. Make sure everything is clean */ 2362 //synchronize_user_stack(); 2363 2364 sigframe_size = NF_ALIGNEDSZ; 2365 sf_addr = get_sigframe(ka, env, sigframe_size); 2366 trace_user_setup_frame(env, sf_addr); 2367 2368 sf = lock_user(VERIFY_WRITE, sf_addr, 2369 sizeof(struct target_signal_frame), 0); 2370 if (!sf) { 2371 goto sigsegv; 2372 } 2373 #if 0 2374 if (invalid_frame_pointer(sf, sigframe_size)) 2375 goto sigill_and_return; 2376 #endif 2377 /* 2. Save the current process state */ 2378 err = setup___siginfo(&sf->info, env, set->sig[0]); 2379 __put_user(0, &sf->extra_size); 2380 2381 //save_fpu_state(regs, &sf->fpu_state); 2382 //__put_user(&sf->fpu_state, &sf->fpu_save); 2383 2384 __put_user(set->sig[0], &sf->info.si_mask); 2385 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2386 __put_user(set->sig[i + 1], &sf->extramask[i]); 2387 } 2388 2389 for (i = 0; i < 8; i++) { 2390 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2391 } 2392 for (i = 0; i < 8; i++) { 2393 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2394 } 2395 if (err) 2396 goto sigsegv; 2397 2398 /* 3. signal handler back-trampoline and parameters */ 2399 env->regwptr[UREG_FP] = sf_addr; 2400 env->regwptr[UREG_I0] = sig; 2401 env->regwptr[UREG_I1] = sf_addr + 2402 offsetof(struct target_signal_frame, info); 2403 env->regwptr[UREG_I2] = sf_addr + 2404 offsetof(struct target_signal_frame, info); 2405 2406 /* 4. signal handler */ 2407 env->pc = ka->_sa_handler; 2408 env->npc = (env->pc + 4); 2409 /* 5. return to kernel instructions */ 2410 if (ka->sa_restorer) { 2411 env->regwptr[UREG_I7] = ka->sa_restorer; 2412 } else { 2413 uint32_t val32; 2414 2415 env->regwptr[UREG_I7] = sf_addr + 2416 offsetof(struct target_signal_frame, insns) - 2 * 4; 2417 2418 /* mov __NR_sigreturn, %g1 */ 2419 val32 = 0x821020d8; 2420 __put_user(val32, &sf->insns[0]); 2421 2422 /* t 0x10 */ 2423 val32 = 0x91d02010; 2424 __put_user(val32, &sf->insns[1]); 2425 if (err) 2426 goto sigsegv; 2427 2428 /* Flush instruction space. */ 2429 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2430 // tb_flush(env); 2431 } 2432 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2433 return; 2434 #if 0 2435 sigill_and_return: 2436 force_sig(TARGET_SIGILL); 2437 #endif 2438 sigsegv: 2439 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2440 force_sig(TARGET_SIGSEGV); 2441 } 2442 2443 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2444 target_siginfo_t *info, 2445 target_sigset_t *set, CPUSPARCState *env) 2446 { 2447 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2448 } 2449 2450 long do_sigreturn(CPUSPARCState *env) 2451 { 2452 abi_ulong sf_addr; 2453 struct target_signal_frame *sf; 2454 uint32_t up_psr, pc, npc; 2455 target_sigset_t set; 2456 sigset_t host_set; 2457 int err=0, i; 2458 2459 sf_addr = env->regwptr[UREG_FP]; 2460 trace_user_do_sigreturn(env, sf_addr); 2461 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2462 goto segv_and_exit; 2463 } 2464 2465 /* 1. Make sure we are not getting garbage from the user */ 2466 2467 if (sf_addr & 3) 2468 goto segv_and_exit; 2469 2470 __get_user(pc, &sf->info.si_regs.pc); 2471 __get_user(npc, &sf->info.si_regs.npc); 2472 2473 if ((pc | npc) & 3) { 2474 goto segv_and_exit; 2475 } 2476 2477 /* 2. Restore the state */ 2478 __get_user(up_psr, &sf->info.si_regs.psr); 2479 2480 /* User can only change condition codes and FPU enabling in %psr. */ 2481 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2482 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2483 2484 env->pc = pc; 2485 env->npc = npc; 2486 __get_user(env->y, &sf->info.si_regs.y); 2487 for (i=0; i < 8; i++) { 2488 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2489 } 2490 for (i=0; i < 8; i++) { 2491 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2492 } 2493 2494 /* FIXME: implement FPU save/restore: 2495 * __get_user(fpu_save, &sf->fpu_save); 2496 * if (fpu_save) 2497 * err |= restore_fpu_state(env, fpu_save); 2498 */ 2499 2500 /* This is pretty much atomic, no amount locking would prevent 2501 * the races which exist anyways. 2502 */ 2503 __get_user(set.sig[0], &sf->info.si_mask); 2504 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2505 __get_user(set.sig[i], &sf->extramask[i - 1]); 2506 } 2507 2508 target_to_host_sigset_internal(&host_set, &set); 2509 set_sigmask(&host_set); 2510 2511 if (err) { 2512 goto segv_and_exit; 2513 } 2514 unlock_user_struct(sf, sf_addr, 0); 2515 return -TARGET_QEMU_ESIGRETURN; 2516 2517 segv_and_exit: 2518 unlock_user_struct(sf, sf_addr, 0); 2519 force_sig(TARGET_SIGSEGV); 2520 } 2521 2522 long do_rt_sigreturn(CPUSPARCState *env) 2523 { 2524 trace_user_do_rt_sigreturn(env, 0); 2525 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2526 return -TARGET_ENOSYS; 2527 } 2528 2529 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2530 #define MC_TSTATE 0 2531 #define MC_PC 1 2532 #define MC_NPC 2 2533 #define MC_Y 3 2534 #define MC_G1 4 2535 #define MC_G2 5 2536 #define MC_G3 6 2537 #define MC_G4 7 2538 #define MC_G5 8 2539 #define MC_G6 9 2540 #define MC_G7 10 2541 #define MC_O0 11 2542 #define MC_O1 12 2543 #define MC_O2 13 2544 #define MC_O3 14 2545 #define MC_O4 15 2546 #define MC_O5 16 2547 #define MC_O6 17 2548 #define MC_O7 18 2549 #define MC_NGREG 19 2550 2551 typedef abi_ulong target_mc_greg_t; 2552 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2553 2554 struct target_mc_fq { 2555 abi_ulong *mcfq_addr; 2556 uint32_t mcfq_insn; 2557 }; 2558 2559 struct target_mc_fpu { 2560 union { 2561 uint32_t sregs[32]; 2562 uint64_t dregs[32]; 2563 //uint128_t qregs[16]; 2564 } mcfpu_fregs; 2565 abi_ulong mcfpu_fsr; 2566 abi_ulong mcfpu_fprs; 2567 abi_ulong mcfpu_gsr; 2568 struct target_mc_fq *mcfpu_fq; 2569 unsigned char mcfpu_qcnt; 2570 unsigned char mcfpu_qentsz; 2571 unsigned char mcfpu_enab; 2572 }; 2573 typedef struct target_mc_fpu target_mc_fpu_t; 2574 2575 typedef struct { 2576 target_mc_gregset_t mc_gregs; 2577 target_mc_greg_t mc_fp; 2578 target_mc_greg_t mc_i7; 2579 target_mc_fpu_t mc_fpregs; 2580 } target_mcontext_t; 2581 2582 struct target_ucontext { 2583 struct target_ucontext *tuc_link; 2584 abi_ulong tuc_flags; 2585 target_sigset_t tuc_sigmask; 2586 target_mcontext_t tuc_mcontext; 2587 }; 2588 2589 /* A V9 register window */ 2590 struct target_reg_window { 2591 abi_ulong locals[8]; 2592 abi_ulong ins[8]; 2593 }; 2594 2595 #define TARGET_STACK_BIAS 2047 2596 2597 /* {set, get}context() needed for 64-bit SparcLinux userland. */ 2598 void sparc64_set_context(CPUSPARCState *env) 2599 { 2600 abi_ulong ucp_addr; 2601 struct target_ucontext *ucp; 2602 target_mc_gregset_t *grp; 2603 abi_ulong pc, npc, tstate; 2604 abi_ulong fp, i7, w_addr; 2605 unsigned int i; 2606 2607 ucp_addr = env->regwptr[UREG_I0]; 2608 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2609 goto do_sigsegv; 2610 } 2611 grp = &ucp->tuc_mcontext.mc_gregs; 2612 __get_user(pc, &((*grp)[MC_PC])); 2613 __get_user(npc, &((*grp)[MC_NPC])); 2614 if ((pc | npc) & 3) { 2615 goto do_sigsegv; 2616 } 2617 if (env->regwptr[UREG_I1]) { 2618 target_sigset_t target_set; 2619 sigset_t set; 2620 2621 if (TARGET_NSIG_WORDS == 1) { 2622 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2623 } else { 2624 abi_ulong *src, *dst; 2625 src = ucp->tuc_sigmask.sig; 2626 dst = target_set.sig; 2627 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2628 __get_user(*dst, src); 2629 } 2630 } 2631 target_to_host_sigset_internal(&set, &target_set); 2632 set_sigmask(&set); 2633 } 2634 env->pc = pc; 2635 env->npc = npc; 2636 __get_user(env->y, &((*grp)[MC_Y])); 2637 __get_user(tstate, &((*grp)[MC_TSTATE])); 2638 env->asi = (tstate >> 24) & 0xff; 2639 cpu_put_ccr(env, tstate >> 32); 2640 cpu_put_cwp64(env, tstate & 0x1f); 2641 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2642 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2643 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2644 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2645 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2646 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2647 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2648 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2649 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2650 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2651 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2652 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2653 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2654 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2655 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2656 2657 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2658 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2659 2660 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2661 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2662 abi_ulong) != 0) { 2663 goto do_sigsegv; 2664 } 2665 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2666 abi_ulong) != 0) { 2667 goto do_sigsegv; 2668 } 2669 /* FIXME this does not match how the kernel handles the FPU in 2670 * its sparc64_set_context implementation. In particular the FPU 2671 * is only restored if fenab is non-zero in: 2672 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2673 */ 2674 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2675 { 2676 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2677 for (i = 0; i < 64; i++, src++) { 2678 if (i & 1) { 2679 __get_user(env->fpr[i/2].l.lower, src); 2680 } else { 2681 __get_user(env->fpr[i/2].l.upper, src); 2682 } 2683 } 2684 } 2685 __get_user(env->fsr, 2686 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2687 __get_user(env->gsr, 2688 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2689 unlock_user_struct(ucp, ucp_addr, 0); 2690 return; 2691 do_sigsegv: 2692 unlock_user_struct(ucp, ucp_addr, 0); 2693 force_sig(TARGET_SIGSEGV); 2694 } 2695 2696 void sparc64_get_context(CPUSPARCState *env) 2697 { 2698 abi_ulong ucp_addr; 2699 struct target_ucontext *ucp; 2700 target_mc_gregset_t *grp; 2701 target_mcontext_t *mcp; 2702 abi_ulong fp, i7, w_addr; 2703 int err; 2704 unsigned int i; 2705 target_sigset_t target_set; 2706 sigset_t set; 2707 2708 ucp_addr = env->regwptr[UREG_I0]; 2709 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2710 goto do_sigsegv; 2711 } 2712 2713 mcp = &ucp->tuc_mcontext; 2714 grp = &mcp->mc_gregs; 2715 2716 /* Skip over the trap instruction, first. */ 2717 env->pc = env->npc; 2718 env->npc += 4; 2719 2720 /* If we're only reading the signal mask then do_sigprocmask() 2721 * is guaranteed not to fail, which is important because we don't 2722 * have any way to signal a failure or restart this operation since 2723 * this is not a normal syscall. 2724 */ 2725 err = do_sigprocmask(0, NULL, &set); 2726 assert(err == 0); 2727 host_to_target_sigset_internal(&target_set, &set); 2728 if (TARGET_NSIG_WORDS == 1) { 2729 __put_user(target_set.sig[0], 2730 (abi_ulong *)&ucp->tuc_sigmask); 2731 } else { 2732 abi_ulong *src, *dst; 2733 src = target_set.sig; 2734 dst = ucp->tuc_sigmask.sig; 2735 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2736 __put_user(*src, dst); 2737 } 2738 if (err) 2739 goto do_sigsegv; 2740 } 2741 2742 /* XXX: tstate must be saved properly */ 2743 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2744 __put_user(env->pc, &((*grp)[MC_PC])); 2745 __put_user(env->npc, &((*grp)[MC_NPC])); 2746 __put_user(env->y, &((*grp)[MC_Y])); 2747 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2748 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2749 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2750 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2751 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2752 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2753 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2754 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2755 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2756 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2757 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2758 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2759 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2760 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2761 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2762 2763 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2764 fp = i7 = 0; 2765 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2766 abi_ulong) != 0) { 2767 goto do_sigsegv; 2768 } 2769 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2770 abi_ulong) != 0) { 2771 goto do_sigsegv; 2772 } 2773 __put_user(fp, &(mcp->mc_fp)); 2774 __put_user(i7, &(mcp->mc_i7)); 2775 2776 { 2777 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2778 for (i = 0; i < 64; i++, dst++) { 2779 if (i & 1) { 2780 __put_user(env->fpr[i/2].l.lower, dst); 2781 } else { 2782 __put_user(env->fpr[i/2].l.upper, dst); 2783 } 2784 } 2785 } 2786 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2787 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2788 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2789 2790 if (err) 2791 goto do_sigsegv; 2792 unlock_user_struct(ucp, ucp_addr, 1); 2793 return; 2794 do_sigsegv: 2795 unlock_user_struct(ucp, ucp_addr, 1); 2796 force_sig(TARGET_SIGSEGV); 2797 } 2798 #endif 2799 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2800 2801 # if defined(TARGET_ABI_MIPSO32) 2802 struct target_sigcontext { 2803 uint32_t sc_regmask; /* Unused */ 2804 uint32_t sc_status; 2805 uint64_t sc_pc; 2806 uint64_t sc_regs[32]; 2807 uint64_t sc_fpregs[32]; 2808 uint32_t sc_ownedfp; /* Unused */ 2809 uint32_t sc_fpc_csr; 2810 uint32_t sc_fpc_eir; /* Unused */ 2811 uint32_t sc_used_math; 2812 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2813 uint32_t pad0; 2814 uint64_t sc_mdhi; 2815 uint64_t sc_mdlo; 2816 target_ulong sc_hi1; /* Was sc_cause */ 2817 target_ulong sc_lo1; /* Was sc_badvaddr */ 2818 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2819 target_ulong sc_lo2; 2820 target_ulong sc_hi3; 2821 target_ulong sc_lo3; 2822 }; 2823 # else /* N32 || N64 */ 2824 struct target_sigcontext { 2825 uint64_t sc_regs[32]; 2826 uint64_t sc_fpregs[32]; 2827 uint64_t sc_mdhi; 2828 uint64_t sc_hi1; 2829 uint64_t sc_hi2; 2830 uint64_t sc_hi3; 2831 uint64_t sc_mdlo; 2832 uint64_t sc_lo1; 2833 uint64_t sc_lo2; 2834 uint64_t sc_lo3; 2835 uint64_t sc_pc; 2836 uint32_t sc_fpc_csr; 2837 uint32_t sc_used_math; 2838 uint32_t sc_dsp; 2839 uint32_t sc_reserved; 2840 }; 2841 # endif /* O32 */ 2842 2843 struct sigframe { 2844 uint32_t sf_ass[4]; /* argument save space for o32 */ 2845 uint32_t sf_code[2]; /* signal trampoline */ 2846 struct target_sigcontext sf_sc; 2847 target_sigset_t sf_mask; 2848 }; 2849 2850 struct target_ucontext { 2851 target_ulong tuc_flags; 2852 target_ulong tuc_link; 2853 target_stack_t tuc_stack; 2854 target_ulong pad0; 2855 struct target_sigcontext tuc_mcontext; 2856 target_sigset_t tuc_sigmask; 2857 }; 2858 2859 struct target_rt_sigframe { 2860 uint32_t rs_ass[4]; /* argument save space for o32 */ 2861 uint32_t rs_code[2]; /* signal trampoline */ 2862 struct target_siginfo rs_info; 2863 struct target_ucontext rs_uc; 2864 }; 2865 2866 /* Install trampoline to jump back from signal handler */ 2867 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2868 { 2869 int err = 0; 2870 2871 /* 2872 * Set up the return code ... 2873 * 2874 * li v0, __NR__foo_sigreturn 2875 * syscall 2876 */ 2877 2878 __put_user(0x24020000 + syscall, tramp + 0); 2879 __put_user(0x0000000c , tramp + 1); 2880 return err; 2881 } 2882 2883 static inline void setup_sigcontext(CPUMIPSState *regs, 2884 struct target_sigcontext *sc) 2885 { 2886 int i; 2887 2888 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2889 regs->hflags &= ~MIPS_HFLAG_BMASK; 2890 2891 __put_user(0, &sc->sc_regs[0]); 2892 for (i = 1; i < 32; ++i) { 2893 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2894 } 2895 2896 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2897 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2898 2899 /* Rather than checking for dsp existence, always copy. The storage 2900 would just be garbage otherwise. */ 2901 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2902 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2903 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2904 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2905 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2906 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2907 { 2908 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2909 __put_user(dsp, &sc->sc_dsp); 2910 } 2911 2912 __put_user(1, &sc->sc_used_math); 2913 2914 for (i = 0; i < 32; ++i) { 2915 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2916 } 2917 } 2918 2919 static inline void 2920 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2921 { 2922 int i; 2923 2924 __get_user(regs->CP0_EPC, &sc->sc_pc); 2925 2926 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2927 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2928 2929 for (i = 1; i < 32; ++i) { 2930 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2931 } 2932 2933 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2934 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2935 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2936 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2937 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2938 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2939 { 2940 uint32_t dsp; 2941 __get_user(dsp, &sc->sc_dsp); 2942 cpu_wrdsp(dsp, 0x3ff, regs); 2943 } 2944 2945 for (i = 0; i < 32; ++i) { 2946 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2947 } 2948 } 2949 2950 /* 2951 * Determine which stack to use.. 2952 */ 2953 static inline abi_ulong 2954 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 2955 { 2956 unsigned long sp; 2957 2958 /* Default to using normal stack */ 2959 sp = regs->active_tc.gpr[29]; 2960 2961 /* 2962 * FPU emulator may have its own trampoline active just 2963 * above the user stack, 16-bytes before the next lowest 2964 * 16 byte boundary. Try to avoid trashing it. 2965 */ 2966 sp -= 32; 2967 2968 /* This is the X/Open sanctioned signal stack switching. */ 2969 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 2970 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2971 } 2972 2973 return (sp - frame_size) & ~7; 2974 } 2975 2976 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 2977 { 2978 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 2979 env->hflags &= ~MIPS_HFLAG_M16; 2980 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 2981 env->active_tc.PC &= ~(target_ulong) 1; 2982 } 2983 } 2984 2985 # if defined(TARGET_ABI_MIPSO32) 2986 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 2987 static void setup_frame(int sig, struct target_sigaction * ka, 2988 target_sigset_t *set, CPUMIPSState *regs) 2989 { 2990 struct sigframe *frame; 2991 abi_ulong frame_addr; 2992 int i; 2993 2994 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 2995 trace_user_setup_frame(regs, frame_addr); 2996 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2997 goto give_sigsegv; 2998 } 2999 3000 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3001 3002 setup_sigcontext(regs, &frame->sf_sc); 3003 3004 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3005 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3006 } 3007 3008 /* 3009 * Arguments to signal handler: 3010 * 3011 * a0 = signal number 3012 * a1 = 0 (should be cause) 3013 * a2 = pointer to struct sigcontext 3014 * 3015 * $25 and PC point to the signal handler, $29 points to the 3016 * struct sigframe. 3017 */ 3018 regs->active_tc.gpr[ 4] = sig; 3019 regs->active_tc.gpr[ 5] = 0; 3020 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3021 regs->active_tc.gpr[29] = frame_addr; 3022 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3023 /* The original kernel code sets CP0_EPC to the handler 3024 * since it returns to userland using eret 3025 * we cannot do this here, and we must set PC directly */ 3026 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3027 mips_set_hflags_isa_mode_from_pc(regs); 3028 unlock_user_struct(frame, frame_addr, 1); 3029 return; 3030 3031 give_sigsegv: 3032 force_sig(TARGET_SIGSEGV/*, current*/); 3033 } 3034 3035 long do_sigreturn(CPUMIPSState *regs) 3036 { 3037 struct sigframe *frame; 3038 abi_ulong frame_addr; 3039 sigset_t blocked; 3040 target_sigset_t target_set; 3041 int i; 3042 3043 frame_addr = regs->active_tc.gpr[29]; 3044 trace_user_do_sigreturn(regs, frame_addr); 3045 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3046 goto badframe; 3047 3048 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3049 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3050 } 3051 3052 target_to_host_sigset_internal(&blocked, &target_set); 3053 set_sigmask(&blocked); 3054 3055 restore_sigcontext(regs, &frame->sf_sc); 3056 3057 #if 0 3058 /* 3059 * Don't let your children do this ... 3060 */ 3061 __asm__ __volatile__( 3062 "move\t$29, %0\n\t" 3063 "j\tsyscall_exit" 3064 :/* no outputs */ 3065 :"r" (®s)); 3066 /* Unreached */ 3067 #endif 3068 3069 regs->active_tc.PC = regs->CP0_EPC; 3070 mips_set_hflags_isa_mode_from_pc(regs); 3071 /* I am not sure this is right, but it seems to work 3072 * maybe a problem with nested signals ? */ 3073 regs->CP0_EPC = 0; 3074 return -TARGET_QEMU_ESIGRETURN; 3075 3076 badframe: 3077 force_sig(TARGET_SIGSEGV/*, current*/); 3078 return 0; 3079 } 3080 # endif /* O32 */ 3081 3082 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3083 target_siginfo_t *info, 3084 target_sigset_t *set, CPUMIPSState *env) 3085 { 3086 struct target_rt_sigframe *frame; 3087 abi_ulong frame_addr; 3088 int i; 3089 3090 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3091 trace_user_setup_rt_frame(env, frame_addr); 3092 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3093 goto give_sigsegv; 3094 } 3095 3096 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3097 3098 tswap_siginfo(&frame->rs_info, info); 3099 3100 __put_user(0, &frame->rs_uc.tuc_flags); 3101 __put_user(0, &frame->rs_uc.tuc_link); 3102 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3103 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3104 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3105 &frame->rs_uc.tuc_stack.ss_flags); 3106 3107 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3108 3109 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3110 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3111 } 3112 3113 /* 3114 * Arguments to signal handler: 3115 * 3116 * a0 = signal number 3117 * a1 = pointer to siginfo_t 3118 * a2 = pointer to struct ucontext 3119 * 3120 * $25 and PC point to the signal handler, $29 points to the 3121 * struct sigframe. 3122 */ 3123 env->active_tc.gpr[ 4] = sig; 3124 env->active_tc.gpr[ 5] = frame_addr 3125 + offsetof(struct target_rt_sigframe, rs_info); 3126 env->active_tc.gpr[ 6] = frame_addr 3127 + offsetof(struct target_rt_sigframe, rs_uc); 3128 env->active_tc.gpr[29] = frame_addr; 3129 env->active_tc.gpr[31] = frame_addr 3130 + offsetof(struct target_rt_sigframe, rs_code); 3131 /* The original kernel code sets CP0_EPC to the handler 3132 * since it returns to userland using eret 3133 * we cannot do this here, and we must set PC directly */ 3134 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3135 mips_set_hflags_isa_mode_from_pc(env); 3136 unlock_user_struct(frame, frame_addr, 1); 3137 return; 3138 3139 give_sigsegv: 3140 unlock_user_struct(frame, frame_addr, 1); 3141 force_sig(TARGET_SIGSEGV/*, current*/); 3142 } 3143 3144 long do_rt_sigreturn(CPUMIPSState *env) 3145 { 3146 struct target_rt_sigframe *frame; 3147 abi_ulong frame_addr; 3148 sigset_t blocked; 3149 3150 frame_addr = env->active_tc.gpr[29]; 3151 trace_user_do_rt_sigreturn(env, frame_addr); 3152 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3153 goto badframe; 3154 } 3155 3156 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3157 set_sigmask(&blocked); 3158 3159 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3160 3161 if (do_sigaltstack(frame_addr + 3162 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3163 0, get_sp_from_cpustate(env)) == -EFAULT) 3164 goto badframe; 3165 3166 env->active_tc.PC = env->CP0_EPC; 3167 mips_set_hflags_isa_mode_from_pc(env); 3168 /* I am not sure this is right, but it seems to work 3169 * maybe a problem with nested signals ? */ 3170 env->CP0_EPC = 0; 3171 return -TARGET_QEMU_ESIGRETURN; 3172 3173 badframe: 3174 force_sig(TARGET_SIGSEGV/*, current*/); 3175 return 0; 3176 } 3177 3178 #elif defined(TARGET_SH4) 3179 3180 /* 3181 * code and data structures from linux kernel: 3182 * include/asm-sh/sigcontext.h 3183 * arch/sh/kernel/signal.c 3184 */ 3185 3186 struct target_sigcontext { 3187 target_ulong oldmask; 3188 3189 /* CPU registers */ 3190 target_ulong sc_gregs[16]; 3191 target_ulong sc_pc; 3192 target_ulong sc_pr; 3193 target_ulong sc_sr; 3194 target_ulong sc_gbr; 3195 target_ulong sc_mach; 3196 target_ulong sc_macl; 3197 3198 /* FPU registers */ 3199 target_ulong sc_fpregs[16]; 3200 target_ulong sc_xfpregs[16]; 3201 unsigned int sc_fpscr; 3202 unsigned int sc_fpul; 3203 unsigned int sc_ownedfp; 3204 }; 3205 3206 struct target_sigframe 3207 { 3208 struct target_sigcontext sc; 3209 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3210 uint16_t retcode[3]; 3211 }; 3212 3213 3214 struct target_ucontext { 3215 target_ulong tuc_flags; 3216 struct target_ucontext *tuc_link; 3217 target_stack_t tuc_stack; 3218 struct target_sigcontext tuc_mcontext; 3219 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3220 }; 3221 3222 struct target_rt_sigframe 3223 { 3224 struct target_siginfo info; 3225 struct target_ucontext uc; 3226 uint16_t retcode[3]; 3227 }; 3228 3229 3230 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3231 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3232 3233 static abi_ulong get_sigframe(struct target_sigaction *ka, 3234 unsigned long sp, size_t frame_size) 3235 { 3236 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3237 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3238 } 3239 3240 return (sp - frame_size) & -8ul; 3241 } 3242 3243 static void setup_sigcontext(struct target_sigcontext *sc, 3244 CPUSH4State *regs, unsigned long mask) 3245 { 3246 int i; 3247 3248 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3249 COPY(gregs[0]); COPY(gregs[1]); 3250 COPY(gregs[2]); COPY(gregs[3]); 3251 COPY(gregs[4]); COPY(gregs[5]); 3252 COPY(gregs[6]); COPY(gregs[7]); 3253 COPY(gregs[8]); COPY(gregs[9]); 3254 COPY(gregs[10]); COPY(gregs[11]); 3255 COPY(gregs[12]); COPY(gregs[13]); 3256 COPY(gregs[14]); COPY(gregs[15]); 3257 COPY(gbr); COPY(mach); 3258 COPY(macl); COPY(pr); 3259 COPY(sr); COPY(pc); 3260 #undef COPY 3261 3262 for (i=0; i<16; i++) { 3263 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3264 } 3265 __put_user(regs->fpscr, &sc->sc_fpscr); 3266 __put_user(regs->fpul, &sc->sc_fpul); 3267 3268 /* non-iBCS2 extensions.. */ 3269 __put_user(mask, &sc->oldmask); 3270 } 3271 3272 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3273 { 3274 int i; 3275 3276 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3277 COPY(gregs[0]); COPY(gregs[1]); 3278 COPY(gregs[2]); COPY(gregs[3]); 3279 COPY(gregs[4]); COPY(gregs[5]); 3280 COPY(gregs[6]); COPY(gregs[7]); 3281 COPY(gregs[8]); COPY(gregs[9]); 3282 COPY(gregs[10]); COPY(gregs[11]); 3283 COPY(gregs[12]); COPY(gregs[13]); 3284 COPY(gregs[14]); COPY(gregs[15]); 3285 COPY(gbr); COPY(mach); 3286 COPY(macl); COPY(pr); 3287 COPY(sr); COPY(pc); 3288 #undef COPY 3289 3290 for (i=0; i<16; i++) { 3291 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3292 } 3293 __get_user(regs->fpscr, &sc->sc_fpscr); 3294 __get_user(regs->fpul, &sc->sc_fpul); 3295 3296 regs->tra = -1; /* disable syscall checks */ 3297 } 3298 3299 static void setup_frame(int sig, struct target_sigaction *ka, 3300 target_sigset_t *set, CPUSH4State *regs) 3301 { 3302 struct target_sigframe *frame; 3303 abi_ulong frame_addr; 3304 int i; 3305 3306 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3307 trace_user_setup_frame(regs, frame_addr); 3308 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3309 goto give_sigsegv; 3310 } 3311 3312 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3313 3314 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3315 __put_user(set->sig[i + 1], &frame->extramask[i]); 3316 } 3317 3318 /* Set up to return from userspace. If provided, use a stub 3319 already in userspace. */ 3320 if (ka->sa_flags & TARGET_SA_RESTORER) { 3321 regs->pr = (unsigned long) ka->sa_restorer; 3322 } else { 3323 /* Generate return code (system call to sigreturn) */ 3324 abi_ulong retcode_addr = frame_addr + 3325 offsetof(struct target_sigframe, retcode); 3326 __put_user(MOVW(2), &frame->retcode[0]); 3327 __put_user(TRAP_NOARG, &frame->retcode[1]); 3328 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3329 regs->pr = (unsigned long) retcode_addr; 3330 } 3331 3332 /* Set up registers for signal handler */ 3333 regs->gregs[15] = frame_addr; 3334 regs->gregs[4] = sig; /* Arg for signal handler */ 3335 regs->gregs[5] = 0; 3336 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3337 regs->pc = (unsigned long) ka->_sa_handler; 3338 3339 unlock_user_struct(frame, frame_addr, 1); 3340 return; 3341 3342 give_sigsegv: 3343 unlock_user_struct(frame, frame_addr, 1); 3344 force_sig(TARGET_SIGSEGV); 3345 } 3346 3347 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3348 target_siginfo_t *info, 3349 target_sigset_t *set, CPUSH4State *regs) 3350 { 3351 struct target_rt_sigframe *frame; 3352 abi_ulong frame_addr; 3353 int i; 3354 3355 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3356 trace_user_setup_rt_frame(regs, frame_addr); 3357 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3358 goto give_sigsegv; 3359 } 3360 3361 tswap_siginfo(&frame->info, info); 3362 3363 /* Create the ucontext. */ 3364 __put_user(0, &frame->uc.tuc_flags); 3365 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3366 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3367 &frame->uc.tuc_stack.ss_sp); 3368 __put_user(sas_ss_flags(regs->gregs[15]), 3369 &frame->uc.tuc_stack.ss_flags); 3370 __put_user(target_sigaltstack_used.ss_size, 3371 &frame->uc.tuc_stack.ss_size); 3372 setup_sigcontext(&frame->uc.tuc_mcontext, 3373 regs, set->sig[0]); 3374 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3375 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3376 } 3377 3378 /* Set up to return from userspace. If provided, use a stub 3379 already in userspace. */ 3380 if (ka->sa_flags & TARGET_SA_RESTORER) { 3381 regs->pr = (unsigned long) ka->sa_restorer; 3382 } else { 3383 /* Generate return code (system call to sigreturn) */ 3384 abi_ulong retcode_addr = frame_addr + 3385 offsetof(struct target_rt_sigframe, retcode); 3386 __put_user(MOVW(2), &frame->retcode[0]); 3387 __put_user(TRAP_NOARG, &frame->retcode[1]); 3388 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3389 regs->pr = (unsigned long) retcode_addr; 3390 } 3391 3392 /* Set up registers for signal handler */ 3393 regs->gregs[15] = frame_addr; 3394 regs->gregs[4] = sig; /* Arg for signal handler */ 3395 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3396 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3397 regs->pc = (unsigned long) ka->_sa_handler; 3398 3399 unlock_user_struct(frame, frame_addr, 1); 3400 return; 3401 3402 give_sigsegv: 3403 unlock_user_struct(frame, frame_addr, 1); 3404 force_sig(TARGET_SIGSEGV); 3405 } 3406 3407 long do_sigreturn(CPUSH4State *regs) 3408 { 3409 struct target_sigframe *frame; 3410 abi_ulong frame_addr; 3411 sigset_t blocked; 3412 target_sigset_t target_set; 3413 int i; 3414 int err = 0; 3415 3416 frame_addr = regs->gregs[15]; 3417 trace_user_do_sigreturn(regs, frame_addr); 3418 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3419 goto badframe; 3420 } 3421 3422 __get_user(target_set.sig[0], &frame->sc.oldmask); 3423 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3424 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3425 } 3426 3427 if (err) 3428 goto badframe; 3429 3430 target_to_host_sigset_internal(&blocked, &target_set); 3431 set_sigmask(&blocked); 3432 3433 restore_sigcontext(regs, &frame->sc); 3434 3435 unlock_user_struct(frame, frame_addr, 0); 3436 return -TARGET_QEMU_ESIGRETURN; 3437 3438 badframe: 3439 unlock_user_struct(frame, frame_addr, 0); 3440 force_sig(TARGET_SIGSEGV); 3441 return 0; 3442 } 3443 3444 long do_rt_sigreturn(CPUSH4State *regs) 3445 { 3446 struct target_rt_sigframe *frame; 3447 abi_ulong frame_addr; 3448 sigset_t blocked; 3449 3450 frame_addr = regs->gregs[15]; 3451 trace_user_do_rt_sigreturn(regs, frame_addr); 3452 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3453 goto badframe; 3454 } 3455 3456 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3457 set_sigmask(&blocked); 3458 3459 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3460 3461 if (do_sigaltstack(frame_addr + 3462 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3463 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3464 goto badframe; 3465 } 3466 3467 unlock_user_struct(frame, frame_addr, 0); 3468 return -TARGET_QEMU_ESIGRETURN; 3469 3470 badframe: 3471 unlock_user_struct(frame, frame_addr, 0); 3472 force_sig(TARGET_SIGSEGV); 3473 return 0; 3474 } 3475 #elif defined(TARGET_MICROBLAZE) 3476 3477 struct target_sigcontext { 3478 struct target_pt_regs regs; /* needs to be first */ 3479 uint32_t oldmask; 3480 }; 3481 3482 struct target_stack_t { 3483 abi_ulong ss_sp; 3484 int ss_flags; 3485 unsigned int ss_size; 3486 }; 3487 3488 struct target_ucontext { 3489 abi_ulong tuc_flags; 3490 abi_ulong tuc_link; 3491 struct target_stack_t tuc_stack; 3492 struct target_sigcontext tuc_mcontext; 3493 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3494 }; 3495 3496 /* Signal frames. */ 3497 struct target_signal_frame { 3498 struct target_ucontext uc; 3499 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3500 uint32_t tramp[2]; 3501 }; 3502 3503 struct rt_signal_frame { 3504 siginfo_t info; 3505 struct ucontext uc; 3506 uint32_t tramp[2]; 3507 }; 3508 3509 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3510 { 3511 __put_user(env->regs[0], &sc->regs.r0); 3512 __put_user(env->regs[1], &sc->regs.r1); 3513 __put_user(env->regs[2], &sc->regs.r2); 3514 __put_user(env->regs[3], &sc->regs.r3); 3515 __put_user(env->regs[4], &sc->regs.r4); 3516 __put_user(env->regs[5], &sc->regs.r5); 3517 __put_user(env->regs[6], &sc->regs.r6); 3518 __put_user(env->regs[7], &sc->regs.r7); 3519 __put_user(env->regs[8], &sc->regs.r8); 3520 __put_user(env->regs[9], &sc->regs.r9); 3521 __put_user(env->regs[10], &sc->regs.r10); 3522 __put_user(env->regs[11], &sc->regs.r11); 3523 __put_user(env->regs[12], &sc->regs.r12); 3524 __put_user(env->regs[13], &sc->regs.r13); 3525 __put_user(env->regs[14], &sc->regs.r14); 3526 __put_user(env->regs[15], &sc->regs.r15); 3527 __put_user(env->regs[16], &sc->regs.r16); 3528 __put_user(env->regs[17], &sc->regs.r17); 3529 __put_user(env->regs[18], &sc->regs.r18); 3530 __put_user(env->regs[19], &sc->regs.r19); 3531 __put_user(env->regs[20], &sc->regs.r20); 3532 __put_user(env->regs[21], &sc->regs.r21); 3533 __put_user(env->regs[22], &sc->regs.r22); 3534 __put_user(env->regs[23], &sc->regs.r23); 3535 __put_user(env->regs[24], &sc->regs.r24); 3536 __put_user(env->regs[25], &sc->regs.r25); 3537 __put_user(env->regs[26], &sc->regs.r26); 3538 __put_user(env->regs[27], &sc->regs.r27); 3539 __put_user(env->regs[28], &sc->regs.r28); 3540 __put_user(env->regs[29], &sc->regs.r29); 3541 __put_user(env->regs[30], &sc->regs.r30); 3542 __put_user(env->regs[31], &sc->regs.r31); 3543 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3544 } 3545 3546 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3547 { 3548 __get_user(env->regs[0], &sc->regs.r0); 3549 __get_user(env->regs[1], &sc->regs.r1); 3550 __get_user(env->regs[2], &sc->regs.r2); 3551 __get_user(env->regs[3], &sc->regs.r3); 3552 __get_user(env->regs[4], &sc->regs.r4); 3553 __get_user(env->regs[5], &sc->regs.r5); 3554 __get_user(env->regs[6], &sc->regs.r6); 3555 __get_user(env->regs[7], &sc->regs.r7); 3556 __get_user(env->regs[8], &sc->regs.r8); 3557 __get_user(env->regs[9], &sc->regs.r9); 3558 __get_user(env->regs[10], &sc->regs.r10); 3559 __get_user(env->regs[11], &sc->regs.r11); 3560 __get_user(env->regs[12], &sc->regs.r12); 3561 __get_user(env->regs[13], &sc->regs.r13); 3562 __get_user(env->regs[14], &sc->regs.r14); 3563 __get_user(env->regs[15], &sc->regs.r15); 3564 __get_user(env->regs[16], &sc->regs.r16); 3565 __get_user(env->regs[17], &sc->regs.r17); 3566 __get_user(env->regs[18], &sc->regs.r18); 3567 __get_user(env->regs[19], &sc->regs.r19); 3568 __get_user(env->regs[20], &sc->regs.r20); 3569 __get_user(env->regs[21], &sc->regs.r21); 3570 __get_user(env->regs[22], &sc->regs.r22); 3571 __get_user(env->regs[23], &sc->regs.r23); 3572 __get_user(env->regs[24], &sc->regs.r24); 3573 __get_user(env->regs[25], &sc->regs.r25); 3574 __get_user(env->regs[26], &sc->regs.r26); 3575 __get_user(env->regs[27], &sc->regs.r27); 3576 __get_user(env->regs[28], &sc->regs.r28); 3577 __get_user(env->regs[29], &sc->regs.r29); 3578 __get_user(env->regs[30], &sc->regs.r30); 3579 __get_user(env->regs[31], &sc->regs.r31); 3580 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3581 } 3582 3583 static abi_ulong get_sigframe(struct target_sigaction *ka, 3584 CPUMBState *env, int frame_size) 3585 { 3586 abi_ulong sp = env->regs[1]; 3587 3588 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3589 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3590 } 3591 3592 return ((sp - frame_size) & -8UL); 3593 } 3594 3595 static void setup_frame(int sig, struct target_sigaction *ka, 3596 target_sigset_t *set, CPUMBState *env) 3597 { 3598 struct target_signal_frame *frame; 3599 abi_ulong frame_addr; 3600 int i; 3601 3602 frame_addr = get_sigframe(ka, env, sizeof *frame); 3603 trace_user_setup_frame(env, frame_addr); 3604 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3605 goto badframe; 3606 3607 /* Save the mask. */ 3608 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3609 3610 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3611 __put_user(set->sig[i], &frame->extramask[i - 1]); 3612 } 3613 3614 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3615 3616 /* Set up to return from userspace. If provided, use a stub 3617 already in userspace. */ 3618 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3619 if (ka->sa_flags & TARGET_SA_RESTORER) { 3620 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3621 } else { 3622 uint32_t t; 3623 /* Note, these encodings are _big endian_! */ 3624 /* addi r12, r0, __NR_sigreturn */ 3625 t = 0x31800000UL | TARGET_NR_sigreturn; 3626 __put_user(t, frame->tramp + 0); 3627 /* brki r14, 0x8 */ 3628 t = 0xb9cc0008UL; 3629 __put_user(t, frame->tramp + 1); 3630 3631 /* Return from sighandler will jump to the tramp. 3632 Negative 8 offset because return is rtsd r15, 8 */ 3633 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3634 - 8; 3635 } 3636 3637 /* Set up registers for signal handler */ 3638 env->regs[1] = frame_addr; 3639 /* Signal handler args: */ 3640 env->regs[5] = sig; /* Arg 0: signum */ 3641 env->regs[6] = 0; 3642 /* arg 1: sigcontext */ 3643 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3644 3645 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3646 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3647 3648 unlock_user_struct(frame, frame_addr, 1); 3649 return; 3650 badframe: 3651 force_sig(TARGET_SIGSEGV); 3652 } 3653 3654 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3655 target_siginfo_t *info, 3656 target_sigset_t *set, CPUMBState *env) 3657 { 3658 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3659 } 3660 3661 long do_sigreturn(CPUMBState *env) 3662 { 3663 struct target_signal_frame *frame; 3664 abi_ulong frame_addr; 3665 target_sigset_t target_set; 3666 sigset_t set; 3667 int i; 3668 3669 frame_addr = env->regs[R_SP]; 3670 trace_user_do_sigreturn(env, frame_addr); 3671 /* Make sure the guest isn't playing games. */ 3672 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3673 goto badframe; 3674 3675 /* Restore blocked signals */ 3676 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3677 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3678 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3679 } 3680 target_to_host_sigset_internal(&set, &target_set); 3681 set_sigmask(&set); 3682 3683 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3684 /* We got here through a sigreturn syscall, our path back is via an 3685 rtb insn so setup r14 for that. */ 3686 env->regs[14] = env->sregs[SR_PC]; 3687 3688 unlock_user_struct(frame, frame_addr, 0); 3689 return -TARGET_QEMU_ESIGRETURN; 3690 badframe: 3691 force_sig(TARGET_SIGSEGV); 3692 } 3693 3694 long do_rt_sigreturn(CPUMBState *env) 3695 { 3696 trace_user_do_rt_sigreturn(env, 0); 3697 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3698 return -TARGET_ENOSYS; 3699 } 3700 3701 #elif defined(TARGET_CRIS) 3702 3703 struct target_sigcontext { 3704 struct target_pt_regs regs; /* needs to be first */ 3705 uint32_t oldmask; 3706 uint32_t usp; /* usp before stacking this gunk on it */ 3707 }; 3708 3709 /* Signal frames. */ 3710 struct target_signal_frame { 3711 struct target_sigcontext sc; 3712 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3713 uint16_t retcode[4]; /* Trampoline code. */ 3714 }; 3715 3716 struct rt_signal_frame { 3717 siginfo_t *pinfo; 3718 void *puc; 3719 siginfo_t info; 3720 struct ucontext uc; 3721 uint16_t retcode[4]; /* Trampoline code. */ 3722 }; 3723 3724 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3725 { 3726 __put_user(env->regs[0], &sc->regs.r0); 3727 __put_user(env->regs[1], &sc->regs.r1); 3728 __put_user(env->regs[2], &sc->regs.r2); 3729 __put_user(env->regs[3], &sc->regs.r3); 3730 __put_user(env->regs[4], &sc->regs.r4); 3731 __put_user(env->regs[5], &sc->regs.r5); 3732 __put_user(env->regs[6], &sc->regs.r6); 3733 __put_user(env->regs[7], &sc->regs.r7); 3734 __put_user(env->regs[8], &sc->regs.r8); 3735 __put_user(env->regs[9], &sc->regs.r9); 3736 __put_user(env->regs[10], &sc->regs.r10); 3737 __put_user(env->regs[11], &sc->regs.r11); 3738 __put_user(env->regs[12], &sc->regs.r12); 3739 __put_user(env->regs[13], &sc->regs.r13); 3740 __put_user(env->regs[14], &sc->usp); 3741 __put_user(env->regs[15], &sc->regs.acr); 3742 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3743 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3744 __put_user(env->pc, &sc->regs.erp); 3745 } 3746 3747 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3748 { 3749 __get_user(env->regs[0], &sc->regs.r0); 3750 __get_user(env->regs[1], &sc->regs.r1); 3751 __get_user(env->regs[2], &sc->regs.r2); 3752 __get_user(env->regs[3], &sc->regs.r3); 3753 __get_user(env->regs[4], &sc->regs.r4); 3754 __get_user(env->regs[5], &sc->regs.r5); 3755 __get_user(env->regs[6], &sc->regs.r6); 3756 __get_user(env->regs[7], &sc->regs.r7); 3757 __get_user(env->regs[8], &sc->regs.r8); 3758 __get_user(env->regs[9], &sc->regs.r9); 3759 __get_user(env->regs[10], &sc->regs.r10); 3760 __get_user(env->regs[11], &sc->regs.r11); 3761 __get_user(env->regs[12], &sc->regs.r12); 3762 __get_user(env->regs[13], &sc->regs.r13); 3763 __get_user(env->regs[14], &sc->usp); 3764 __get_user(env->regs[15], &sc->regs.acr); 3765 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3766 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3767 __get_user(env->pc, &sc->regs.erp); 3768 } 3769 3770 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3771 { 3772 abi_ulong sp; 3773 /* Align the stack downwards to 4. */ 3774 sp = (env->regs[R_SP] & ~3); 3775 return sp - framesize; 3776 } 3777 3778 static void setup_frame(int sig, struct target_sigaction *ka, 3779 target_sigset_t *set, CPUCRISState *env) 3780 { 3781 struct target_signal_frame *frame; 3782 abi_ulong frame_addr; 3783 int i; 3784 3785 frame_addr = get_sigframe(env, sizeof *frame); 3786 trace_user_setup_frame(env, frame_addr); 3787 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3788 goto badframe; 3789 3790 /* 3791 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3792 * use this trampoline anymore but it sets it up for GDB. 3793 * In QEMU, using the trampoline simplifies things a bit so we use it. 3794 * 3795 * This is movu.w __NR_sigreturn, r9; break 13; 3796 */ 3797 __put_user(0x9c5f, frame->retcode+0); 3798 __put_user(TARGET_NR_sigreturn, 3799 frame->retcode + 1); 3800 __put_user(0xe93d, frame->retcode + 2); 3801 3802 /* Save the mask. */ 3803 __put_user(set->sig[0], &frame->sc.oldmask); 3804 3805 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3806 __put_user(set->sig[i], &frame->extramask[i - 1]); 3807 } 3808 3809 setup_sigcontext(&frame->sc, env); 3810 3811 /* Move the stack and setup the arguments for the handler. */ 3812 env->regs[R_SP] = frame_addr; 3813 env->regs[10] = sig; 3814 env->pc = (unsigned long) ka->_sa_handler; 3815 /* Link SRP so the guest returns through the trampoline. */ 3816 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3817 3818 unlock_user_struct(frame, frame_addr, 1); 3819 return; 3820 badframe: 3821 force_sig(TARGET_SIGSEGV); 3822 } 3823 3824 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3825 target_siginfo_t *info, 3826 target_sigset_t *set, CPUCRISState *env) 3827 { 3828 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3829 } 3830 3831 long do_sigreturn(CPUCRISState *env) 3832 { 3833 struct target_signal_frame *frame; 3834 abi_ulong frame_addr; 3835 target_sigset_t target_set; 3836 sigset_t set; 3837 int i; 3838 3839 frame_addr = env->regs[R_SP]; 3840 trace_user_do_sigreturn(env, frame_addr); 3841 /* Make sure the guest isn't playing games. */ 3842 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3843 goto badframe; 3844 } 3845 3846 /* Restore blocked signals */ 3847 __get_user(target_set.sig[0], &frame->sc.oldmask); 3848 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3849 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3850 } 3851 target_to_host_sigset_internal(&set, &target_set); 3852 set_sigmask(&set); 3853 3854 restore_sigcontext(&frame->sc, env); 3855 unlock_user_struct(frame, frame_addr, 0); 3856 return -TARGET_QEMU_ESIGRETURN; 3857 badframe: 3858 force_sig(TARGET_SIGSEGV); 3859 } 3860 3861 long do_rt_sigreturn(CPUCRISState *env) 3862 { 3863 trace_user_do_rt_sigreturn(env, 0); 3864 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3865 return -TARGET_ENOSYS; 3866 } 3867 3868 #elif defined(TARGET_OPENRISC) 3869 3870 struct target_sigcontext { 3871 struct target_pt_regs regs; 3872 abi_ulong oldmask; 3873 abi_ulong usp; 3874 }; 3875 3876 struct target_ucontext { 3877 abi_ulong tuc_flags; 3878 abi_ulong tuc_link; 3879 target_stack_t tuc_stack; 3880 struct target_sigcontext tuc_mcontext; 3881 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3882 }; 3883 3884 struct target_rt_sigframe { 3885 abi_ulong pinfo; 3886 uint64_t puc; 3887 struct target_siginfo info; 3888 struct target_sigcontext sc; 3889 struct target_ucontext uc; 3890 unsigned char retcode[16]; /* trampoline code */ 3891 }; 3892 3893 /* This is the asm-generic/ucontext.h version */ 3894 #if 0 3895 static int restore_sigcontext(CPUOpenRISCState *regs, 3896 struct target_sigcontext *sc) 3897 { 3898 unsigned int err = 0; 3899 unsigned long old_usp; 3900 3901 /* Alwys make any pending restarted system call return -EINTR */ 3902 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3903 3904 /* restore the regs from &sc->regs (same as sc, since regs is first) 3905 * (sc is already checked for VERIFY_READ since the sigframe was 3906 * checked in sys_sigreturn previously) 3907 */ 3908 3909 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3910 goto badframe; 3911 } 3912 3913 /* make sure the U-flag is set so user-mode cannot fool us */ 3914 3915 regs->sr &= ~SR_SM; 3916 3917 /* restore the old USP as it was before we stacked the sc etc. 3918 * (we cannot just pop the sigcontext since we aligned the sp and 3919 * stuff after pushing it) 3920 */ 3921 3922 __get_user(old_usp, &sc->usp); 3923 phx_signal("old_usp 0x%lx", old_usp); 3924 3925 __PHX__ REALLY /* ??? */ 3926 wrusp(old_usp); 3927 regs->gpr[1] = old_usp; 3928 3929 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3930 * after this completes, but we don't use that mechanism. maybe we can 3931 * use it now ? 3932 */ 3933 3934 return err; 3935 3936 badframe: 3937 return 1; 3938 } 3939 #endif 3940 3941 /* Set up a signal frame. */ 3942 3943 static void setup_sigcontext(struct target_sigcontext *sc, 3944 CPUOpenRISCState *regs, 3945 unsigned long mask) 3946 { 3947 unsigned long usp = regs->gpr[1]; 3948 3949 /* copy the regs. they are first in sc so we can use sc directly */ 3950 3951 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 3952 3953 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 3954 the signal handler. The frametype will be restored to its previous 3955 value in restore_sigcontext. */ 3956 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 3957 3958 /* then some other stuff */ 3959 __put_user(mask, &sc->oldmask); 3960 __put_user(usp, &sc->usp); 3961 } 3962 3963 static inline unsigned long align_sigframe(unsigned long sp) 3964 { 3965 return sp & ~3UL; 3966 } 3967 3968 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 3969 CPUOpenRISCState *regs, 3970 size_t frame_size) 3971 { 3972 unsigned long sp = regs->gpr[1]; 3973 int onsigstack = on_sig_stack(sp); 3974 3975 /* redzone */ 3976 /* This is the X/Open sanctioned signal stack switching. */ 3977 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 3978 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3979 } 3980 3981 sp = align_sigframe(sp - frame_size); 3982 3983 /* 3984 * If we are on the alternate signal stack and would overflow it, don't. 3985 * Return an always-bogus address instead so we will die with SIGSEGV. 3986 */ 3987 3988 if (onsigstack && !likely(on_sig_stack(sp))) { 3989 return -1L; 3990 } 3991 3992 return sp; 3993 } 3994 3995 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3996 target_siginfo_t *info, 3997 target_sigset_t *set, CPUOpenRISCState *env) 3998 { 3999 int err = 0; 4000 abi_ulong frame_addr; 4001 unsigned long return_ip; 4002 struct target_rt_sigframe *frame; 4003 abi_ulong info_addr, uc_addr; 4004 4005 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4006 trace_user_setup_rt_frame(env, frame_addr); 4007 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4008 goto give_sigsegv; 4009 } 4010 4011 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4012 __put_user(info_addr, &frame->pinfo); 4013 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4014 __put_user(uc_addr, &frame->puc); 4015 4016 if (ka->sa_flags & SA_SIGINFO) { 4017 tswap_siginfo(&frame->info, info); 4018 } 4019 4020 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 4021 __put_user(0, &frame->uc.tuc_flags); 4022 __put_user(0, &frame->uc.tuc_link); 4023 __put_user(target_sigaltstack_used.ss_sp, 4024 &frame->uc.tuc_stack.ss_sp); 4025 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 4026 __put_user(target_sigaltstack_used.ss_size, 4027 &frame->uc.tuc_stack.ss_size); 4028 setup_sigcontext(&frame->sc, env, set->sig[0]); 4029 4030 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4031 4032 /* trampoline - the desired return ip is the retcode itself */ 4033 return_ip = (unsigned long)&frame->retcode; 4034 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4035 __put_user(0xa960, (short *)(frame->retcode + 0)); 4036 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4037 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4038 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4039 4040 if (err) { 4041 goto give_sigsegv; 4042 } 4043 4044 /* TODO what is the current->exec_domain stuff and invmap ? */ 4045 4046 /* Set up registers for signal handler */ 4047 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4048 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4049 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4050 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4051 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4052 4053 /* actually move the usp to reflect the stacked frame */ 4054 env->gpr[1] = (unsigned long)frame; 4055 4056 return; 4057 4058 give_sigsegv: 4059 unlock_user_struct(frame, frame_addr, 1); 4060 if (sig == TARGET_SIGSEGV) { 4061 ka->_sa_handler = TARGET_SIG_DFL; 4062 } 4063 force_sig(TARGET_SIGSEGV); 4064 } 4065 4066 long do_sigreturn(CPUOpenRISCState *env) 4067 { 4068 trace_user_do_sigreturn(env, 0); 4069 fprintf(stderr, "do_sigreturn: not implemented\n"); 4070 return -TARGET_ENOSYS; 4071 } 4072 4073 long do_rt_sigreturn(CPUOpenRISCState *env) 4074 { 4075 trace_user_do_rt_sigreturn(env, 0); 4076 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4077 return -TARGET_ENOSYS; 4078 } 4079 /* TARGET_OPENRISC */ 4080 4081 #elif defined(TARGET_S390X) 4082 4083 #define __NUM_GPRS 16 4084 #define __NUM_FPRS 16 4085 #define __NUM_ACRS 16 4086 4087 #define S390_SYSCALL_SIZE 2 4088 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4089 4090 #define _SIGCONTEXT_NSIG 64 4091 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4092 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4093 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4094 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4095 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4096 4097 typedef struct { 4098 target_psw_t psw; 4099 target_ulong gprs[__NUM_GPRS]; 4100 unsigned int acrs[__NUM_ACRS]; 4101 } target_s390_regs_common; 4102 4103 typedef struct { 4104 unsigned int fpc; 4105 double fprs[__NUM_FPRS]; 4106 } target_s390_fp_regs; 4107 4108 typedef struct { 4109 target_s390_regs_common regs; 4110 target_s390_fp_regs fpregs; 4111 } target_sigregs; 4112 4113 struct target_sigcontext { 4114 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4115 target_sigregs *sregs; 4116 }; 4117 4118 typedef struct { 4119 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4120 struct target_sigcontext sc; 4121 target_sigregs sregs; 4122 int signo; 4123 uint8_t retcode[S390_SYSCALL_SIZE]; 4124 } sigframe; 4125 4126 struct target_ucontext { 4127 target_ulong tuc_flags; 4128 struct target_ucontext *tuc_link; 4129 target_stack_t tuc_stack; 4130 target_sigregs tuc_mcontext; 4131 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4132 }; 4133 4134 typedef struct { 4135 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4136 uint8_t retcode[S390_SYSCALL_SIZE]; 4137 struct target_siginfo info; 4138 struct target_ucontext uc; 4139 } rt_sigframe; 4140 4141 static inline abi_ulong 4142 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4143 { 4144 abi_ulong sp; 4145 4146 /* Default to using normal stack */ 4147 sp = env->regs[15]; 4148 4149 /* This is the X/Open sanctioned signal stack switching. */ 4150 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4151 if (!sas_ss_flags(sp)) { 4152 sp = target_sigaltstack_used.ss_sp + 4153 target_sigaltstack_used.ss_size; 4154 } 4155 } 4156 4157 /* This is the legacy signal stack switching. */ 4158 else if (/* FIXME !user_mode(regs) */ 0 && 4159 !(ka->sa_flags & TARGET_SA_RESTORER) && 4160 ka->sa_restorer) { 4161 sp = (abi_ulong) ka->sa_restorer; 4162 } 4163 4164 return (sp - frame_size) & -8ul; 4165 } 4166 4167 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4168 { 4169 int i; 4170 //save_access_regs(current->thread.acrs); FIXME 4171 4172 /* Copy a 'clean' PSW mask to the user to avoid leaking 4173 information about whether PER is currently on. */ 4174 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4175 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4176 for (i = 0; i < 16; i++) { 4177 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4178 } 4179 for (i = 0; i < 16; i++) { 4180 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4181 } 4182 /* 4183 * We have to store the fp registers to current->thread.fp_regs 4184 * to merge them with the emulated registers. 4185 */ 4186 //save_fp_regs(¤t->thread.fp_regs); FIXME 4187 for (i = 0; i < 16; i++) { 4188 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4189 } 4190 } 4191 4192 static void setup_frame(int sig, struct target_sigaction *ka, 4193 target_sigset_t *set, CPUS390XState *env) 4194 { 4195 sigframe *frame; 4196 abi_ulong frame_addr; 4197 4198 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4199 trace_user_setup_frame(env, frame_addr); 4200 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4201 goto give_sigsegv; 4202 } 4203 4204 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4205 4206 save_sigregs(env, &frame->sregs); 4207 4208 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4209 (abi_ulong *)&frame->sc.sregs); 4210 4211 /* Set up to return from userspace. If provided, use a stub 4212 already in userspace. */ 4213 if (ka->sa_flags & TARGET_SA_RESTORER) { 4214 env->regs[14] = (unsigned long) 4215 ka->sa_restorer | PSW_ADDR_AMODE; 4216 } else { 4217 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4218 | PSW_ADDR_AMODE; 4219 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4220 (uint16_t *)(frame->retcode)); 4221 } 4222 4223 /* Set up backchain. */ 4224 __put_user(env->regs[15], (abi_ulong *) frame); 4225 4226 /* Set up registers for signal handler */ 4227 env->regs[15] = frame_addr; 4228 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4229 4230 env->regs[2] = sig; //map_signal(sig); 4231 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4232 4233 /* We forgot to include these in the sigcontext. 4234 To avoid breaking binary compatibility, they are passed as args. */ 4235 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4236 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4237 4238 /* Place signal number on stack to allow backtrace from handler. */ 4239 __put_user(env->regs[2], (int *) &frame->signo); 4240 unlock_user_struct(frame, frame_addr, 1); 4241 return; 4242 4243 give_sigsegv: 4244 force_sig(TARGET_SIGSEGV); 4245 } 4246 4247 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4248 target_siginfo_t *info, 4249 target_sigset_t *set, CPUS390XState *env) 4250 { 4251 int i; 4252 rt_sigframe *frame; 4253 abi_ulong frame_addr; 4254 4255 frame_addr = get_sigframe(ka, env, sizeof *frame); 4256 trace_user_setup_rt_frame(env, frame_addr); 4257 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4258 goto give_sigsegv; 4259 } 4260 4261 tswap_siginfo(&frame->info, info); 4262 4263 /* Create the ucontext. */ 4264 __put_user(0, &frame->uc.tuc_flags); 4265 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4266 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4267 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4268 &frame->uc.tuc_stack.ss_flags); 4269 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4270 save_sigregs(env, &frame->uc.tuc_mcontext); 4271 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4272 __put_user((abi_ulong)set->sig[i], 4273 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4274 } 4275 4276 /* Set up to return from userspace. If provided, use a stub 4277 already in userspace. */ 4278 if (ka->sa_flags & TARGET_SA_RESTORER) { 4279 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4280 } else { 4281 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4282 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4283 (uint16_t *)(frame->retcode)); 4284 } 4285 4286 /* Set up backchain. */ 4287 __put_user(env->regs[15], (abi_ulong *) frame); 4288 4289 /* Set up registers for signal handler */ 4290 env->regs[15] = frame_addr; 4291 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4292 4293 env->regs[2] = sig; //map_signal(sig); 4294 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4295 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4296 return; 4297 4298 give_sigsegv: 4299 force_sig(TARGET_SIGSEGV); 4300 } 4301 4302 static int 4303 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4304 { 4305 int err = 0; 4306 int i; 4307 4308 for (i = 0; i < 16; i++) { 4309 __get_user(env->regs[i], &sc->regs.gprs[i]); 4310 } 4311 4312 __get_user(env->psw.mask, &sc->regs.psw.mask); 4313 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4314 (unsigned long long)env->psw.addr); 4315 __get_user(env->psw.addr, &sc->regs.psw.addr); 4316 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4317 4318 for (i = 0; i < 16; i++) { 4319 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4320 } 4321 for (i = 0; i < 16; i++) { 4322 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4323 } 4324 4325 return err; 4326 } 4327 4328 long do_sigreturn(CPUS390XState *env) 4329 { 4330 sigframe *frame; 4331 abi_ulong frame_addr = env->regs[15]; 4332 target_sigset_t target_set; 4333 sigset_t set; 4334 4335 trace_user_do_sigreturn(env, frame_addr); 4336 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4337 goto badframe; 4338 } 4339 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4340 4341 target_to_host_sigset_internal(&set, &target_set); 4342 set_sigmask(&set); /* ~_BLOCKABLE? */ 4343 4344 if (restore_sigregs(env, &frame->sregs)) { 4345 goto badframe; 4346 } 4347 4348 unlock_user_struct(frame, frame_addr, 0); 4349 return -TARGET_QEMU_ESIGRETURN; 4350 4351 badframe: 4352 force_sig(TARGET_SIGSEGV); 4353 return 0; 4354 } 4355 4356 long do_rt_sigreturn(CPUS390XState *env) 4357 { 4358 rt_sigframe *frame; 4359 abi_ulong frame_addr = env->regs[15]; 4360 sigset_t set; 4361 4362 trace_user_do_rt_sigreturn(env, frame_addr); 4363 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4364 goto badframe; 4365 } 4366 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4367 4368 set_sigmask(&set); /* ~_BLOCKABLE? */ 4369 4370 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4371 goto badframe; 4372 } 4373 4374 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4375 get_sp_from_cpustate(env)) == -EFAULT) { 4376 goto badframe; 4377 } 4378 unlock_user_struct(frame, frame_addr, 0); 4379 return -TARGET_QEMU_ESIGRETURN; 4380 4381 badframe: 4382 unlock_user_struct(frame, frame_addr, 0); 4383 force_sig(TARGET_SIGSEGV); 4384 return 0; 4385 } 4386 4387 #elif defined(TARGET_PPC) 4388 4389 /* Size of dummy stack frame allocated when calling signal handler. 4390 See arch/powerpc/include/asm/ptrace.h. */ 4391 #if defined(TARGET_PPC64) 4392 #define SIGNAL_FRAMESIZE 128 4393 #else 4394 #define SIGNAL_FRAMESIZE 64 4395 #endif 4396 4397 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4398 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4399 struct target_mcontext { 4400 target_ulong mc_gregs[48]; 4401 /* Includes fpscr. */ 4402 uint64_t mc_fregs[33]; 4403 target_ulong mc_pad[2]; 4404 /* We need to handle Altivec and SPE at the same time, which no 4405 kernel needs to do. Fortunately, the kernel defines this bit to 4406 be Altivec-register-large all the time, rather than trying to 4407 twiddle it based on the specific platform. */ 4408 union { 4409 /* SPE vector registers. One extra for SPEFSCR. */ 4410 uint32_t spe[33]; 4411 /* Altivec vector registers. The packing of VSCR and VRSAVE 4412 varies depending on whether we're PPC64 or not: PPC64 splits 4413 them apart; PPC32 stuffs them together. */ 4414 #if defined(TARGET_PPC64) 4415 #define QEMU_NVRREG 34 4416 #else 4417 #define QEMU_NVRREG 33 4418 #endif 4419 ppc_avr_t altivec[QEMU_NVRREG]; 4420 #undef QEMU_NVRREG 4421 } mc_vregs __attribute__((__aligned__(16))); 4422 }; 4423 4424 /* See arch/powerpc/include/asm/sigcontext.h. */ 4425 struct target_sigcontext { 4426 target_ulong _unused[4]; 4427 int32_t signal; 4428 #if defined(TARGET_PPC64) 4429 int32_t pad0; 4430 #endif 4431 target_ulong handler; 4432 target_ulong oldmask; 4433 target_ulong regs; /* struct pt_regs __user * */ 4434 #if defined(TARGET_PPC64) 4435 struct target_mcontext mcontext; 4436 #endif 4437 }; 4438 4439 /* Indices for target_mcontext.mc_gregs, below. 4440 See arch/powerpc/include/asm/ptrace.h for details. */ 4441 enum { 4442 TARGET_PT_R0 = 0, 4443 TARGET_PT_R1 = 1, 4444 TARGET_PT_R2 = 2, 4445 TARGET_PT_R3 = 3, 4446 TARGET_PT_R4 = 4, 4447 TARGET_PT_R5 = 5, 4448 TARGET_PT_R6 = 6, 4449 TARGET_PT_R7 = 7, 4450 TARGET_PT_R8 = 8, 4451 TARGET_PT_R9 = 9, 4452 TARGET_PT_R10 = 10, 4453 TARGET_PT_R11 = 11, 4454 TARGET_PT_R12 = 12, 4455 TARGET_PT_R13 = 13, 4456 TARGET_PT_R14 = 14, 4457 TARGET_PT_R15 = 15, 4458 TARGET_PT_R16 = 16, 4459 TARGET_PT_R17 = 17, 4460 TARGET_PT_R18 = 18, 4461 TARGET_PT_R19 = 19, 4462 TARGET_PT_R20 = 20, 4463 TARGET_PT_R21 = 21, 4464 TARGET_PT_R22 = 22, 4465 TARGET_PT_R23 = 23, 4466 TARGET_PT_R24 = 24, 4467 TARGET_PT_R25 = 25, 4468 TARGET_PT_R26 = 26, 4469 TARGET_PT_R27 = 27, 4470 TARGET_PT_R28 = 28, 4471 TARGET_PT_R29 = 29, 4472 TARGET_PT_R30 = 30, 4473 TARGET_PT_R31 = 31, 4474 TARGET_PT_NIP = 32, 4475 TARGET_PT_MSR = 33, 4476 TARGET_PT_ORIG_R3 = 34, 4477 TARGET_PT_CTR = 35, 4478 TARGET_PT_LNK = 36, 4479 TARGET_PT_XER = 37, 4480 TARGET_PT_CCR = 38, 4481 /* Yes, there are two registers with #39. One is 64-bit only. */ 4482 TARGET_PT_MQ = 39, 4483 TARGET_PT_SOFTE = 39, 4484 TARGET_PT_TRAP = 40, 4485 TARGET_PT_DAR = 41, 4486 TARGET_PT_DSISR = 42, 4487 TARGET_PT_RESULT = 43, 4488 TARGET_PT_REGS_COUNT = 44 4489 }; 4490 4491 4492 struct target_ucontext { 4493 target_ulong tuc_flags; 4494 target_ulong tuc_link; /* struct ucontext __user * */ 4495 struct target_sigaltstack tuc_stack; 4496 #if !defined(TARGET_PPC64) 4497 int32_t tuc_pad[7]; 4498 target_ulong tuc_regs; /* struct mcontext __user * 4499 points to uc_mcontext field */ 4500 #endif 4501 target_sigset_t tuc_sigmask; 4502 #if defined(TARGET_PPC64) 4503 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4504 struct target_sigcontext tuc_sigcontext; 4505 #else 4506 int32_t tuc_maskext[30]; 4507 int32_t tuc_pad2[3]; 4508 struct target_mcontext tuc_mcontext; 4509 #endif 4510 }; 4511 4512 /* See arch/powerpc/kernel/signal_32.c. */ 4513 struct target_sigframe { 4514 struct target_sigcontext sctx; 4515 struct target_mcontext mctx; 4516 int32_t abigap[56]; 4517 }; 4518 4519 #if defined(TARGET_PPC64) 4520 4521 #define TARGET_TRAMP_SIZE 6 4522 4523 struct target_rt_sigframe { 4524 /* sys_rt_sigreturn requires the ucontext be the first field */ 4525 struct target_ucontext uc; 4526 target_ulong _unused[2]; 4527 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4528 target_ulong pinfo; /* struct siginfo __user * */ 4529 target_ulong puc; /* void __user * */ 4530 struct target_siginfo info; 4531 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4532 char abigap[288]; 4533 } __attribute__((aligned(16))); 4534 4535 #else 4536 4537 struct target_rt_sigframe { 4538 struct target_siginfo info; 4539 struct target_ucontext uc; 4540 int32_t abigap[56]; 4541 }; 4542 4543 #endif 4544 4545 #if defined(TARGET_PPC64) 4546 4547 struct target_func_ptr { 4548 target_ulong entry; 4549 target_ulong toc; 4550 }; 4551 4552 #endif 4553 4554 /* We use the mc_pad field for the signal return trampoline. */ 4555 #define tramp mc_pad 4556 4557 /* See arch/powerpc/kernel/signal.c. */ 4558 static target_ulong get_sigframe(struct target_sigaction *ka, 4559 CPUPPCState *env, 4560 int frame_size) 4561 { 4562 target_ulong oldsp; 4563 4564 oldsp = env->gpr[1]; 4565 4566 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4567 (sas_ss_flags(oldsp) == 0)) { 4568 oldsp = (target_sigaltstack_used.ss_sp 4569 + target_sigaltstack_used.ss_size); 4570 } 4571 4572 return (oldsp - frame_size) & ~0xFUL; 4573 } 4574 4575 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4576 { 4577 target_ulong msr = env->msr; 4578 int i; 4579 target_ulong ccr = 0; 4580 4581 /* In general, the kernel attempts to be intelligent about what it 4582 needs to save for Altivec/FP/SPE registers. We don't care that 4583 much, so we just go ahead and save everything. */ 4584 4585 /* Save general registers. */ 4586 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4587 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4588 } 4589 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4590 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4591 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4592 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4593 4594 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4595 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4596 } 4597 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4598 4599 /* Save Altivec registers if necessary. */ 4600 if (env->insns_flags & PPC_ALTIVEC) { 4601 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4602 ppc_avr_t *avr = &env->avr[i]; 4603 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4604 4605 __put_user(avr->u64[0], &vreg->u64[0]); 4606 __put_user(avr->u64[1], &vreg->u64[1]); 4607 } 4608 /* Set MSR_VR in the saved MSR value to indicate that 4609 frame->mc_vregs contains valid data. */ 4610 msr |= MSR_VR; 4611 __put_user((uint32_t)env->spr[SPR_VRSAVE], 4612 &frame->mc_vregs.altivec[32].u32[3]); 4613 } 4614 4615 /* Save floating point registers. */ 4616 if (env->insns_flags & PPC_FLOAT) { 4617 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4618 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4619 } 4620 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4621 } 4622 4623 /* Save SPE registers. The kernel only saves the high half. */ 4624 if (env->insns_flags & PPC_SPE) { 4625 #if defined(TARGET_PPC64) 4626 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4627 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4628 } 4629 #else 4630 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4631 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4632 } 4633 #endif 4634 /* Set MSR_SPE in the saved MSR value to indicate that 4635 frame->mc_vregs contains valid data. */ 4636 msr |= MSR_SPE; 4637 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4638 } 4639 4640 /* Store MSR. */ 4641 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4642 } 4643 4644 static void encode_trampoline(int sigret, uint32_t *tramp) 4645 { 4646 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4647 if (sigret) { 4648 __put_user(0x38000000 | sigret, &tramp[0]); 4649 __put_user(0x44000002, &tramp[1]); 4650 } 4651 } 4652 4653 static void restore_user_regs(CPUPPCState *env, 4654 struct target_mcontext *frame, int sig) 4655 { 4656 target_ulong save_r2 = 0; 4657 target_ulong msr; 4658 target_ulong ccr; 4659 4660 int i; 4661 4662 if (!sig) { 4663 save_r2 = env->gpr[2]; 4664 } 4665 4666 /* Restore general registers. */ 4667 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4668 __get_user(env->gpr[i], &frame->mc_gregs[i]); 4669 } 4670 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4671 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4672 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4673 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4674 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4675 4676 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4677 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4678 } 4679 4680 if (!sig) { 4681 env->gpr[2] = save_r2; 4682 } 4683 /* Restore MSR. */ 4684 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4685 4686 /* If doing signal return, restore the previous little-endian mode. */ 4687 if (sig) 4688 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 4689 4690 /* Restore Altivec registers if necessary. */ 4691 if (env->insns_flags & PPC_ALTIVEC) { 4692 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4693 ppc_avr_t *avr = &env->avr[i]; 4694 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4695 4696 __get_user(avr->u64[0], &vreg->u64[0]); 4697 __get_user(avr->u64[1], &vreg->u64[1]); 4698 } 4699 /* Set MSR_VEC in the saved MSR value to indicate that 4700 frame->mc_vregs contains valid data. */ 4701 __get_user(env->spr[SPR_VRSAVE], 4702 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3])); 4703 } 4704 4705 /* Restore floating point registers. */ 4706 if (env->insns_flags & PPC_FLOAT) { 4707 uint64_t fpscr; 4708 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4709 __get_user(env->fpr[i], &frame->mc_fregs[i]); 4710 } 4711 __get_user(fpscr, &frame->mc_fregs[32]); 4712 env->fpscr = (uint32_t) fpscr; 4713 } 4714 4715 /* Save SPE registers. The kernel only saves the high half. */ 4716 if (env->insns_flags & PPC_SPE) { 4717 #if defined(TARGET_PPC64) 4718 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4719 uint32_t hi; 4720 4721 __get_user(hi, &frame->mc_vregs.spe[i]); 4722 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4723 } 4724 #else 4725 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4726 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4727 } 4728 #endif 4729 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4730 } 4731 } 4732 4733 static void setup_frame(int sig, struct target_sigaction *ka, 4734 target_sigset_t *set, CPUPPCState *env) 4735 { 4736 struct target_sigframe *frame; 4737 struct target_sigcontext *sc; 4738 target_ulong frame_addr, newsp; 4739 int err = 0; 4740 #if defined(TARGET_PPC64) 4741 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4742 #endif 4743 4744 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4745 trace_user_setup_frame(env, frame_addr); 4746 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4747 goto sigsegv; 4748 sc = &frame->sctx; 4749 4750 __put_user(ka->_sa_handler, &sc->handler); 4751 __put_user(set->sig[0], &sc->oldmask); 4752 #if TARGET_ABI_BITS == 64 4753 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4754 #else 4755 __put_user(set->sig[1], &sc->_unused[3]); 4756 #endif 4757 __put_user(h2g(&frame->mctx), &sc->regs); 4758 __put_user(sig, &sc->signal); 4759 4760 /* Save user regs. */ 4761 save_user_regs(env, &frame->mctx); 4762 4763 /* Construct the trampoline code on the stack. */ 4764 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 4765 4766 /* The kernel checks for the presence of a VDSO here. We don't 4767 emulate a vdso, so use a sigreturn system call. */ 4768 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4769 4770 /* Turn off all fp exceptions. */ 4771 env->fpscr = 0; 4772 4773 /* Create a stack frame for the caller of the handler. */ 4774 newsp = frame_addr - SIGNAL_FRAMESIZE; 4775 err |= put_user(env->gpr[1], newsp, target_ulong); 4776 4777 if (err) 4778 goto sigsegv; 4779 4780 /* Set up registers for signal handler. */ 4781 env->gpr[1] = newsp; 4782 env->gpr[3] = sig; 4783 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4784 4785 #if defined(TARGET_PPC64) 4786 if (get_ppc64_abi(image) < 2) { 4787 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4788 struct target_func_ptr *handler = 4789 (struct target_func_ptr *)g2h(ka->_sa_handler); 4790 env->nip = tswapl(handler->entry); 4791 env->gpr[2] = tswapl(handler->toc); 4792 } else { 4793 /* ELFv2 PPC64 function pointers are entry points, but R12 4794 * must also be set */ 4795 env->nip = tswapl((target_ulong) ka->_sa_handler); 4796 env->gpr[12] = env->nip; 4797 } 4798 #else 4799 env->nip = (target_ulong) ka->_sa_handler; 4800 #endif 4801 4802 /* Signal handlers are entered in big-endian mode. */ 4803 env->msr &= ~(1ull << MSR_LE); 4804 4805 unlock_user_struct(frame, frame_addr, 1); 4806 return; 4807 4808 sigsegv: 4809 unlock_user_struct(frame, frame_addr, 1); 4810 force_sig(TARGET_SIGSEGV); 4811 } 4812 4813 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4814 target_siginfo_t *info, 4815 target_sigset_t *set, CPUPPCState *env) 4816 { 4817 struct target_rt_sigframe *rt_sf; 4818 uint32_t *trampptr = 0; 4819 struct target_mcontext *mctx = 0; 4820 target_ulong rt_sf_addr, newsp = 0; 4821 int i, err = 0; 4822 #if defined(TARGET_PPC64) 4823 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4824 #endif 4825 4826 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4827 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4828 goto sigsegv; 4829 4830 tswap_siginfo(&rt_sf->info, info); 4831 4832 __put_user(0, &rt_sf->uc.tuc_flags); 4833 __put_user(0, &rt_sf->uc.tuc_link); 4834 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4835 &rt_sf->uc.tuc_stack.ss_sp); 4836 __put_user(sas_ss_flags(env->gpr[1]), 4837 &rt_sf->uc.tuc_stack.ss_flags); 4838 __put_user(target_sigaltstack_used.ss_size, 4839 &rt_sf->uc.tuc_stack.ss_size); 4840 #if !defined(TARGET_PPC64) 4841 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4842 &rt_sf->uc.tuc_regs); 4843 #endif 4844 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4845 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4846 } 4847 4848 #if defined(TARGET_PPC64) 4849 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 4850 trampptr = &rt_sf->trampoline[0]; 4851 #else 4852 mctx = &rt_sf->uc.tuc_mcontext; 4853 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 4854 #endif 4855 4856 save_user_regs(env, mctx); 4857 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 4858 4859 /* The kernel checks for the presence of a VDSO here. We don't 4860 emulate a vdso, so use a sigreturn system call. */ 4861 env->lr = (target_ulong) h2g(trampptr); 4862 4863 /* Turn off all fp exceptions. */ 4864 env->fpscr = 0; 4865 4866 /* Create a stack frame for the caller of the handler. */ 4867 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4868 err |= put_user(env->gpr[1], newsp, target_ulong); 4869 4870 if (err) 4871 goto sigsegv; 4872 4873 /* Set up registers for signal handler. */ 4874 env->gpr[1] = newsp; 4875 env->gpr[3] = (target_ulong) sig; 4876 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4877 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4878 env->gpr[6] = (target_ulong) h2g(rt_sf); 4879 4880 #if defined(TARGET_PPC64) 4881 if (get_ppc64_abi(image) < 2) { 4882 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4883 struct target_func_ptr *handler = 4884 (struct target_func_ptr *)g2h(ka->_sa_handler); 4885 env->nip = tswapl(handler->entry); 4886 env->gpr[2] = tswapl(handler->toc); 4887 } else { 4888 /* ELFv2 PPC64 function pointers are entry points, but R12 4889 * must also be set */ 4890 env->nip = tswapl((target_ulong) ka->_sa_handler); 4891 env->gpr[12] = env->nip; 4892 } 4893 #else 4894 env->nip = (target_ulong) ka->_sa_handler; 4895 #endif 4896 4897 /* Signal handlers are entered in big-endian mode. */ 4898 env->msr &= ~(1ull << MSR_LE); 4899 4900 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4901 return; 4902 4903 sigsegv: 4904 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4905 force_sig(TARGET_SIGSEGV); 4906 4907 } 4908 4909 long do_sigreturn(CPUPPCState *env) 4910 { 4911 struct target_sigcontext *sc = NULL; 4912 struct target_mcontext *sr = NULL; 4913 target_ulong sr_addr = 0, sc_addr; 4914 sigset_t blocked; 4915 target_sigset_t set; 4916 4917 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4918 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4919 goto sigsegv; 4920 4921 #if defined(TARGET_PPC64) 4922 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 4923 #else 4924 __get_user(set.sig[0], &sc->oldmask); 4925 __get_user(set.sig[1], &sc->_unused[3]); 4926 #endif 4927 target_to_host_sigset_internal(&blocked, &set); 4928 set_sigmask(&blocked); 4929 4930 __get_user(sr_addr, &sc->regs); 4931 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4932 goto sigsegv; 4933 restore_user_regs(env, sr, 1); 4934 4935 unlock_user_struct(sr, sr_addr, 1); 4936 unlock_user_struct(sc, sc_addr, 1); 4937 return -TARGET_QEMU_ESIGRETURN; 4938 4939 sigsegv: 4940 unlock_user_struct(sr, sr_addr, 1); 4941 unlock_user_struct(sc, sc_addr, 1); 4942 force_sig(TARGET_SIGSEGV); 4943 return 0; 4944 } 4945 4946 /* See arch/powerpc/kernel/signal_32.c. */ 4947 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 4948 { 4949 struct target_mcontext *mcp; 4950 target_ulong mcp_addr; 4951 sigset_t blocked; 4952 target_sigset_t set; 4953 4954 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 4955 sizeof (set))) 4956 return 1; 4957 4958 #if defined(TARGET_PPC64) 4959 mcp_addr = h2g(ucp) + 4960 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 4961 #else 4962 __get_user(mcp_addr, &ucp->tuc_regs); 4963 #endif 4964 4965 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 4966 return 1; 4967 4968 target_to_host_sigset_internal(&blocked, &set); 4969 set_sigmask(&blocked); 4970 restore_user_regs(env, mcp, sig); 4971 4972 unlock_user_struct(mcp, mcp_addr, 1); 4973 return 0; 4974 } 4975 4976 long do_rt_sigreturn(CPUPPCState *env) 4977 { 4978 struct target_rt_sigframe *rt_sf = NULL; 4979 target_ulong rt_sf_addr; 4980 4981 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 4982 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 4983 goto sigsegv; 4984 4985 if (do_setcontext(&rt_sf->uc, env, 1)) 4986 goto sigsegv; 4987 4988 do_sigaltstack(rt_sf_addr 4989 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 4990 0, env->gpr[1]); 4991 4992 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4993 return -TARGET_QEMU_ESIGRETURN; 4994 4995 sigsegv: 4996 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4997 force_sig(TARGET_SIGSEGV); 4998 return 0; 4999 } 5000 5001 #elif defined(TARGET_M68K) 5002 5003 struct target_sigcontext { 5004 abi_ulong sc_mask; 5005 abi_ulong sc_usp; 5006 abi_ulong sc_d0; 5007 abi_ulong sc_d1; 5008 abi_ulong sc_a0; 5009 abi_ulong sc_a1; 5010 unsigned short sc_sr; 5011 abi_ulong sc_pc; 5012 }; 5013 5014 struct target_sigframe 5015 { 5016 abi_ulong pretcode; 5017 int sig; 5018 int code; 5019 abi_ulong psc; 5020 char retcode[8]; 5021 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5022 struct target_sigcontext sc; 5023 }; 5024 5025 typedef int target_greg_t; 5026 #define TARGET_NGREG 18 5027 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5028 5029 typedef struct target_fpregset { 5030 int f_fpcntl[3]; 5031 int f_fpregs[8*3]; 5032 } target_fpregset_t; 5033 5034 struct target_mcontext { 5035 int version; 5036 target_gregset_t gregs; 5037 target_fpregset_t fpregs; 5038 }; 5039 5040 #define TARGET_MCONTEXT_VERSION 2 5041 5042 struct target_ucontext { 5043 abi_ulong tuc_flags; 5044 abi_ulong tuc_link; 5045 target_stack_t tuc_stack; 5046 struct target_mcontext tuc_mcontext; 5047 abi_long tuc_filler[80]; 5048 target_sigset_t tuc_sigmask; 5049 }; 5050 5051 struct target_rt_sigframe 5052 { 5053 abi_ulong pretcode; 5054 int sig; 5055 abi_ulong pinfo; 5056 abi_ulong puc; 5057 char retcode[8]; 5058 struct target_siginfo info; 5059 struct target_ucontext uc; 5060 }; 5061 5062 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5063 abi_ulong mask) 5064 { 5065 __put_user(mask, &sc->sc_mask); 5066 __put_user(env->aregs[7], &sc->sc_usp); 5067 __put_user(env->dregs[0], &sc->sc_d0); 5068 __put_user(env->dregs[1], &sc->sc_d1); 5069 __put_user(env->aregs[0], &sc->sc_a0); 5070 __put_user(env->aregs[1], &sc->sc_a1); 5071 __put_user(env->sr, &sc->sc_sr); 5072 __put_user(env->pc, &sc->sc_pc); 5073 } 5074 5075 static void 5076 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5077 { 5078 int temp; 5079 5080 __get_user(env->aregs[7], &sc->sc_usp); 5081 __get_user(env->dregs[0], &sc->sc_d0); 5082 __get_user(env->dregs[1], &sc->sc_d1); 5083 __get_user(env->aregs[0], &sc->sc_a0); 5084 __get_user(env->aregs[1], &sc->sc_a1); 5085 __get_user(env->pc, &sc->sc_pc); 5086 __get_user(temp, &sc->sc_sr); 5087 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5088 } 5089 5090 /* 5091 * Determine which stack to use.. 5092 */ 5093 static inline abi_ulong 5094 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5095 size_t frame_size) 5096 { 5097 unsigned long sp; 5098 5099 sp = regs->aregs[7]; 5100 5101 /* This is the X/Open sanctioned signal stack switching. */ 5102 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5103 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5104 } 5105 5106 return ((sp - frame_size) & -8UL); 5107 } 5108 5109 static void setup_frame(int sig, struct target_sigaction *ka, 5110 target_sigset_t *set, CPUM68KState *env) 5111 { 5112 struct target_sigframe *frame; 5113 abi_ulong frame_addr; 5114 abi_ulong retcode_addr; 5115 abi_ulong sc_addr; 5116 int i; 5117 5118 frame_addr = get_sigframe(ka, env, sizeof *frame); 5119 trace_user_setup_frame(env, frame_addr); 5120 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5121 goto give_sigsegv; 5122 } 5123 5124 __put_user(sig, &frame->sig); 5125 5126 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5127 __put_user(sc_addr, &frame->psc); 5128 5129 setup_sigcontext(&frame->sc, env, set->sig[0]); 5130 5131 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5132 __put_user(set->sig[i], &frame->extramask[i - 1]); 5133 } 5134 5135 /* Set up to return from userspace. */ 5136 5137 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5138 __put_user(retcode_addr, &frame->pretcode); 5139 5140 /* moveq #,d0; trap #0 */ 5141 5142 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5143 (uint32_t *)(frame->retcode)); 5144 5145 /* Set up to return from userspace */ 5146 5147 env->aregs[7] = frame_addr; 5148 env->pc = ka->_sa_handler; 5149 5150 unlock_user_struct(frame, frame_addr, 1); 5151 return; 5152 5153 give_sigsegv: 5154 force_sig(TARGET_SIGSEGV); 5155 } 5156 5157 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5158 CPUM68KState *env) 5159 { 5160 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5161 5162 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5163 __put_user(env->dregs[0], &gregs[0]); 5164 __put_user(env->dregs[1], &gregs[1]); 5165 __put_user(env->dregs[2], &gregs[2]); 5166 __put_user(env->dregs[3], &gregs[3]); 5167 __put_user(env->dregs[4], &gregs[4]); 5168 __put_user(env->dregs[5], &gregs[5]); 5169 __put_user(env->dregs[6], &gregs[6]); 5170 __put_user(env->dregs[7], &gregs[7]); 5171 __put_user(env->aregs[0], &gregs[8]); 5172 __put_user(env->aregs[1], &gregs[9]); 5173 __put_user(env->aregs[2], &gregs[10]); 5174 __put_user(env->aregs[3], &gregs[11]); 5175 __put_user(env->aregs[4], &gregs[12]); 5176 __put_user(env->aregs[5], &gregs[13]); 5177 __put_user(env->aregs[6], &gregs[14]); 5178 __put_user(env->aregs[7], &gregs[15]); 5179 __put_user(env->pc, &gregs[16]); 5180 __put_user(env->sr, &gregs[17]); 5181 5182 return 0; 5183 } 5184 5185 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5186 struct target_ucontext *uc) 5187 { 5188 int temp; 5189 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5190 5191 __get_user(temp, &uc->tuc_mcontext.version); 5192 if (temp != TARGET_MCONTEXT_VERSION) 5193 goto badframe; 5194 5195 /* restore passed registers */ 5196 __get_user(env->dregs[0], &gregs[0]); 5197 __get_user(env->dregs[1], &gregs[1]); 5198 __get_user(env->dregs[2], &gregs[2]); 5199 __get_user(env->dregs[3], &gregs[3]); 5200 __get_user(env->dregs[4], &gregs[4]); 5201 __get_user(env->dregs[5], &gregs[5]); 5202 __get_user(env->dregs[6], &gregs[6]); 5203 __get_user(env->dregs[7], &gregs[7]); 5204 __get_user(env->aregs[0], &gregs[8]); 5205 __get_user(env->aregs[1], &gregs[9]); 5206 __get_user(env->aregs[2], &gregs[10]); 5207 __get_user(env->aregs[3], &gregs[11]); 5208 __get_user(env->aregs[4], &gregs[12]); 5209 __get_user(env->aregs[5], &gregs[13]); 5210 __get_user(env->aregs[6], &gregs[14]); 5211 __get_user(env->aregs[7], &gregs[15]); 5212 __get_user(env->pc, &gregs[16]); 5213 __get_user(temp, &gregs[17]); 5214 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5215 5216 return 0; 5217 5218 badframe: 5219 return 1; 5220 } 5221 5222 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5223 target_siginfo_t *info, 5224 target_sigset_t *set, CPUM68KState *env) 5225 { 5226 struct target_rt_sigframe *frame; 5227 abi_ulong frame_addr; 5228 abi_ulong retcode_addr; 5229 abi_ulong info_addr; 5230 abi_ulong uc_addr; 5231 int err = 0; 5232 int i; 5233 5234 frame_addr = get_sigframe(ka, env, sizeof *frame); 5235 trace_user_setup_rt_frame(env, frame_addr); 5236 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5237 goto give_sigsegv; 5238 } 5239 5240 __put_user(sig, &frame->sig); 5241 5242 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5243 __put_user(info_addr, &frame->pinfo); 5244 5245 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5246 __put_user(uc_addr, &frame->puc); 5247 5248 tswap_siginfo(&frame->info, info); 5249 5250 /* Create the ucontext */ 5251 5252 __put_user(0, &frame->uc.tuc_flags); 5253 __put_user(0, &frame->uc.tuc_link); 5254 __put_user(target_sigaltstack_used.ss_sp, 5255 &frame->uc.tuc_stack.ss_sp); 5256 __put_user(sas_ss_flags(env->aregs[7]), 5257 &frame->uc.tuc_stack.ss_flags); 5258 __put_user(target_sigaltstack_used.ss_size, 5259 &frame->uc.tuc_stack.ss_size); 5260 err |= target_rt_setup_ucontext(&frame->uc, env); 5261 5262 if (err) 5263 goto give_sigsegv; 5264 5265 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5266 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5267 } 5268 5269 /* Set up to return from userspace. */ 5270 5271 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5272 __put_user(retcode_addr, &frame->pretcode); 5273 5274 /* moveq #,d0; notb d0; trap #0 */ 5275 5276 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 5277 (uint32_t *)(frame->retcode + 0)); 5278 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); 5279 5280 if (err) 5281 goto give_sigsegv; 5282 5283 /* Set up to return from userspace */ 5284 5285 env->aregs[7] = frame_addr; 5286 env->pc = ka->_sa_handler; 5287 5288 unlock_user_struct(frame, frame_addr, 1); 5289 return; 5290 5291 give_sigsegv: 5292 unlock_user_struct(frame, frame_addr, 1); 5293 force_sig(TARGET_SIGSEGV); 5294 } 5295 5296 long do_sigreturn(CPUM68KState *env) 5297 { 5298 struct target_sigframe *frame; 5299 abi_ulong frame_addr = env->aregs[7] - 4; 5300 target_sigset_t target_set; 5301 sigset_t set; 5302 int i; 5303 5304 trace_user_do_sigreturn(env, frame_addr); 5305 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5306 goto badframe; 5307 5308 /* set blocked signals */ 5309 5310 __get_user(target_set.sig[0], &frame->sc.sc_mask); 5311 5312 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5313 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 5314 } 5315 5316 target_to_host_sigset_internal(&set, &target_set); 5317 set_sigmask(&set); 5318 5319 /* restore registers */ 5320 5321 restore_sigcontext(env, &frame->sc); 5322 5323 unlock_user_struct(frame, frame_addr, 0); 5324 return -TARGET_QEMU_ESIGRETURN; 5325 5326 badframe: 5327 force_sig(TARGET_SIGSEGV); 5328 return 0; 5329 } 5330 5331 long do_rt_sigreturn(CPUM68KState *env) 5332 { 5333 struct target_rt_sigframe *frame; 5334 abi_ulong frame_addr = env->aregs[7] - 4; 5335 target_sigset_t target_set; 5336 sigset_t set; 5337 5338 trace_user_do_rt_sigreturn(env, frame_addr); 5339 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5340 goto badframe; 5341 5342 target_to_host_sigset_internal(&set, &target_set); 5343 set_sigmask(&set); 5344 5345 /* restore registers */ 5346 5347 if (target_rt_restore_ucontext(env, &frame->uc)) 5348 goto badframe; 5349 5350 if (do_sigaltstack(frame_addr + 5351 offsetof(struct target_rt_sigframe, uc.tuc_stack), 5352 0, get_sp_from_cpustate(env)) == -EFAULT) 5353 goto badframe; 5354 5355 unlock_user_struct(frame, frame_addr, 0); 5356 return -TARGET_QEMU_ESIGRETURN; 5357 5358 badframe: 5359 unlock_user_struct(frame, frame_addr, 0); 5360 force_sig(TARGET_SIGSEGV); 5361 return 0; 5362 } 5363 5364 #elif defined(TARGET_ALPHA) 5365 5366 struct target_sigcontext { 5367 abi_long sc_onstack; 5368 abi_long sc_mask; 5369 abi_long sc_pc; 5370 abi_long sc_ps; 5371 abi_long sc_regs[32]; 5372 abi_long sc_ownedfp; 5373 abi_long sc_fpregs[32]; 5374 abi_ulong sc_fpcr; 5375 abi_ulong sc_fp_control; 5376 abi_ulong sc_reserved1; 5377 abi_ulong sc_reserved2; 5378 abi_ulong sc_ssize; 5379 abi_ulong sc_sbase; 5380 abi_ulong sc_traparg_a0; 5381 abi_ulong sc_traparg_a1; 5382 abi_ulong sc_traparg_a2; 5383 abi_ulong sc_fp_trap_pc; 5384 abi_ulong sc_fp_trigger_sum; 5385 abi_ulong sc_fp_trigger_inst; 5386 }; 5387 5388 struct target_ucontext { 5389 abi_ulong tuc_flags; 5390 abi_ulong tuc_link; 5391 abi_ulong tuc_osf_sigmask; 5392 target_stack_t tuc_stack; 5393 struct target_sigcontext tuc_mcontext; 5394 target_sigset_t tuc_sigmask; 5395 }; 5396 5397 struct target_sigframe { 5398 struct target_sigcontext sc; 5399 unsigned int retcode[3]; 5400 }; 5401 5402 struct target_rt_sigframe { 5403 target_siginfo_t info; 5404 struct target_ucontext uc; 5405 unsigned int retcode[3]; 5406 }; 5407 5408 #define INSN_MOV_R30_R16 0x47fe0410 5409 #define INSN_LDI_R0 0x201f0000 5410 #define INSN_CALLSYS 0x00000083 5411 5412 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5413 abi_ulong frame_addr, target_sigset_t *set) 5414 { 5415 int i; 5416 5417 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5418 __put_user(set->sig[0], &sc->sc_mask); 5419 __put_user(env->pc, &sc->sc_pc); 5420 __put_user(8, &sc->sc_ps); 5421 5422 for (i = 0; i < 31; ++i) { 5423 __put_user(env->ir[i], &sc->sc_regs[i]); 5424 } 5425 __put_user(0, &sc->sc_regs[31]); 5426 5427 for (i = 0; i < 31; ++i) { 5428 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5429 } 5430 __put_user(0, &sc->sc_fpregs[31]); 5431 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5432 5433 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5434 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5435 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5436 } 5437 5438 static void restore_sigcontext(CPUAlphaState *env, 5439 struct target_sigcontext *sc) 5440 { 5441 uint64_t fpcr; 5442 int i; 5443 5444 __get_user(env->pc, &sc->sc_pc); 5445 5446 for (i = 0; i < 31; ++i) { 5447 __get_user(env->ir[i], &sc->sc_regs[i]); 5448 } 5449 for (i = 0; i < 31; ++i) { 5450 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5451 } 5452 5453 __get_user(fpcr, &sc->sc_fpcr); 5454 cpu_alpha_store_fpcr(env, fpcr); 5455 } 5456 5457 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5458 CPUAlphaState *env, 5459 unsigned long framesize) 5460 { 5461 abi_ulong sp = env->ir[IR_SP]; 5462 5463 /* This is the X/Open sanctioned signal stack switching. */ 5464 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5465 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5466 } 5467 return (sp - framesize) & -32; 5468 } 5469 5470 static void setup_frame(int sig, struct target_sigaction *ka, 5471 target_sigset_t *set, CPUAlphaState *env) 5472 { 5473 abi_ulong frame_addr, r26; 5474 struct target_sigframe *frame; 5475 int err = 0; 5476 5477 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5478 trace_user_setup_frame(env, frame_addr); 5479 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5480 goto give_sigsegv; 5481 } 5482 5483 setup_sigcontext(&frame->sc, env, frame_addr, set); 5484 5485 if (ka->sa_restorer) { 5486 r26 = ka->sa_restorer; 5487 } else { 5488 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5489 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5490 &frame->retcode[1]); 5491 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5492 /* imb() */ 5493 r26 = frame_addr; 5494 } 5495 5496 unlock_user_struct(frame, frame_addr, 1); 5497 5498 if (err) { 5499 give_sigsegv: 5500 if (sig == TARGET_SIGSEGV) { 5501 ka->_sa_handler = TARGET_SIG_DFL; 5502 } 5503 force_sig(TARGET_SIGSEGV); 5504 } 5505 5506 env->ir[IR_RA] = r26; 5507 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5508 env->ir[IR_A0] = sig; 5509 env->ir[IR_A1] = 0; 5510 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5511 env->ir[IR_SP] = frame_addr; 5512 } 5513 5514 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5515 target_siginfo_t *info, 5516 target_sigset_t *set, CPUAlphaState *env) 5517 { 5518 abi_ulong frame_addr, r26; 5519 struct target_rt_sigframe *frame; 5520 int i, err = 0; 5521 5522 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5523 trace_user_setup_rt_frame(env, frame_addr); 5524 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5525 goto give_sigsegv; 5526 } 5527 5528 tswap_siginfo(&frame->info, info); 5529 5530 __put_user(0, &frame->uc.tuc_flags); 5531 __put_user(0, &frame->uc.tuc_link); 5532 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5533 __put_user(target_sigaltstack_used.ss_sp, 5534 &frame->uc.tuc_stack.ss_sp); 5535 __put_user(sas_ss_flags(env->ir[IR_SP]), 5536 &frame->uc.tuc_stack.ss_flags); 5537 __put_user(target_sigaltstack_used.ss_size, 5538 &frame->uc.tuc_stack.ss_size); 5539 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5540 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5541 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5542 } 5543 5544 if (ka->sa_restorer) { 5545 r26 = ka->sa_restorer; 5546 } else { 5547 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5548 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5549 &frame->retcode[1]); 5550 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5551 /* imb(); */ 5552 r26 = frame_addr; 5553 } 5554 5555 if (err) { 5556 give_sigsegv: 5557 if (sig == TARGET_SIGSEGV) { 5558 ka->_sa_handler = TARGET_SIG_DFL; 5559 } 5560 force_sig(TARGET_SIGSEGV); 5561 } 5562 5563 env->ir[IR_RA] = r26; 5564 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5565 env->ir[IR_A0] = sig; 5566 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5567 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5568 env->ir[IR_SP] = frame_addr; 5569 } 5570 5571 long do_sigreturn(CPUAlphaState *env) 5572 { 5573 struct target_sigcontext *sc; 5574 abi_ulong sc_addr = env->ir[IR_A0]; 5575 target_sigset_t target_set; 5576 sigset_t set; 5577 5578 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5579 goto badframe; 5580 } 5581 5582 target_sigemptyset(&target_set); 5583 __get_user(target_set.sig[0], &sc->sc_mask); 5584 5585 target_to_host_sigset_internal(&set, &target_set); 5586 set_sigmask(&set); 5587 5588 restore_sigcontext(env, sc); 5589 unlock_user_struct(sc, sc_addr, 0); 5590 return -TARGET_QEMU_ESIGRETURN; 5591 5592 badframe: 5593 force_sig(TARGET_SIGSEGV); 5594 } 5595 5596 long do_rt_sigreturn(CPUAlphaState *env) 5597 { 5598 abi_ulong frame_addr = env->ir[IR_A0]; 5599 struct target_rt_sigframe *frame; 5600 sigset_t set; 5601 5602 trace_user_do_rt_sigreturn(env, frame_addr); 5603 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5604 goto badframe; 5605 } 5606 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5607 set_sigmask(&set); 5608 5609 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5610 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5611 uc.tuc_stack), 5612 0, env->ir[IR_SP]) == -EFAULT) { 5613 goto badframe; 5614 } 5615 5616 unlock_user_struct(frame, frame_addr, 0); 5617 return -TARGET_QEMU_ESIGRETURN; 5618 5619 5620 badframe: 5621 unlock_user_struct(frame, frame_addr, 0); 5622 force_sig(TARGET_SIGSEGV); 5623 } 5624 5625 #elif defined(TARGET_TILEGX) 5626 5627 struct target_sigcontext { 5628 union { 5629 /* General-purpose registers. */ 5630 abi_ulong gregs[56]; 5631 struct { 5632 abi_ulong __gregs[53]; 5633 abi_ulong tp; /* Aliases gregs[TREG_TP]. */ 5634 abi_ulong sp; /* Aliases gregs[TREG_SP]. */ 5635 abi_ulong lr; /* Aliases gregs[TREG_LR]. */ 5636 }; 5637 }; 5638 abi_ulong pc; /* Program counter. */ 5639 abi_ulong ics; /* In Interrupt Critical Section? */ 5640 abi_ulong faultnum; /* Fault number. */ 5641 abi_ulong pad[5]; 5642 }; 5643 5644 struct target_ucontext { 5645 abi_ulong tuc_flags; 5646 abi_ulong tuc_link; 5647 target_stack_t tuc_stack; 5648 struct target_sigcontext tuc_mcontext; 5649 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 5650 }; 5651 5652 struct target_rt_sigframe { 5653 unsigned char save_area[16]; /* caller save area */ 5654 struct target_siginfo info; 5655 struct target_ucontext uc; 5656 abi_ulong retcode[2]; 5657 }; 5658 5659 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */ 5660 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */ 5661 5662 5663 static void setup_sigcontext(struct target_sigcontext *sc, 5664 CPUArchState *env, int signo) 5665 { 5666 int i; 5667 5668 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5669 __put_user(env->regs[i], &sc->gregs[i]); 5670 } 5671 5672 __put_user(env->pc, &sc->pc); 5673 __put_user(0, &sc->ics); 5674 __put_user(signo, &sc->faultnum); 5675 } 5676 5677 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc) 5678 { 5679 int i; 5680 5681 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5682 __get_user(env->regs[i], &sc->gregs[i]); 5683 } 5684 5685 __get_user(env->pc, &sc->pc); 5686 } 5687 5688 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, 5689 size_t frame_size) 5690 { 5691 unsigned long sp = env->regs[TILEGX_R_SP]; 5692 5693 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) { 5694 return -1UL; 5695 } 5696 5697 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 5698 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5699 } 5700 5701 sp -= frame_size; 5702 sp &= -16UL; 5703 return sp; 5704 } 5705 5706 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5707 target_siginfo_t *info, 5708 target_sigset_t *set, CPUArchState *env) 5709 { 5710 abi_ulong frame_addr; 5711 struct target_rt_sigframe *frame; 5712 unsigned long restorer; 5713 5714 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5715 trace_user_setup_rt_frame(env, frame_addr); 5716 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5717 goto give_sigsegv; 5718 } 5719 5720 /* Always write at least the signal number for the stack backtracer. */ 5721 if (ka->sa_flags & TARGET_SA_SIGINFO) { 5722 /* At sigreturn time, restore the callee-save registers too. */ 5723 tswap_siginfo(&frame->info, info); 5724 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */ 5725 } else { 5726 __put_user(info->si_signo, &frame->info.si_signo); 5727 } 5728 5729 /* Create the ucontext. */ 5730 __put_user(0, &frame->uc.tuc_flags); 5731 __put_user(0, &frame->uc.tuc_link); 5732 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 5733 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]), 5734 &frame->uc.tuc_stack.ss_flags); 5735 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 5736 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo); 5737 5738 if (ka->sa_flags & TARGET_SA_RESTORER) { 5739 restorer = (unsigned long) ka->sa_restorer; 5740 } else { 5741 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]); 5742 __put_user(INSN_SWINT1, &frame->retcode[1]); 5743 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode); 5744 } 5745 env->pc = (unsigned long) ka->_sa_handler; 5746 env->regs[TILEGX_R_SP] = (unsigned long) frame; 5747 env->regs[TILEGX_R_LR] = restorer; 5748 env->regs[0] = (unsigned long) sig; 5749 env->regs[1] = (unsigned long) &frame->info; 5750 env->regs[2] = (unsigned long) &frame->uc; 5751 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */ 5752 5753 unlock_user_struct(frame, frame_addr, 1); 5754 return; 5755 5756 give_sigsegv: 5757 if (sig == TARGET_SIGSEGV) { 5758 ka->_sa_handler = TARGET_SIG_DFL; 5759 } 5760 force_sig(TARGET_SIGSEGV /* , current */); 5761 } 5762 5763 long do_rt_sigreturn(CPUTLGState *env) 5764 { 5765 abi_ulong frame_addr = env->regs[TILEGX_R_SP]; 5766 struct target_rt_sigframe *frame; 5767 sigset_t set; 5768 5769 trace_user_do_rt_sigreturn(env, frame_addr); 5770 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5771 goto badframe; 5772 } 5773 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5774 set_sigmask(&set); 5775 5776 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5777 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5778 uc.tuc_stack), 5779 0, env->regs[TILEGX_R_SP]) == -EFAULT) { 5780 goto badframe; 5781 } 5782 5783 unlock_user_struct(frame, frame_addr, 0); 5784 return -TARGET_QEMU_ESIGRETURN; 5785 5786 5787 badframe: 5788 unlock_user_struct(frame, frame_addr, 0); 5789 force_sig(TARGET_SIGSEGV); 5790 } 5791 5792 #else 5793 5794 static void setup_frame(int sig, struct target_sigaction *ka, 5795 target_sigset_t *set, CPUArchState *env) 5796 { 5797 fprintf(stderr, "setup_frame: not implemented\n"); 5798 } 5799 5800 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5801 target_siginfo_t *info, 5802 target_sigset_t *set, CPUArchState *env) 5803 { 5804 fprintf(stderr, "setup_rt_frame: not implemented\n"); 5805 } 5806 5807 long do_sigreturn(CPUArchState *env) 5808 { 5809 fprintf(stderr, "do_sigreturn: not implemented\n"); 5810 return -TARGET_ENOSYS; 5811 } 5812 5813 long do_rt_sigreturn(CPUArchState *env) 5814 { 5815 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 5816 return -TARGET_ENOSYS; 5817 } 5818 5819 #endif 5820 5821 static void handle_pending_signal(CPUArchState *cpu_env, int sig) 5822 { 5823 CPUState *cpu = ENV_GET_CPU(cpu_env); 5824 abi_ulong handler; 5825 sigset_t set; 5826 target_sigset_t target_old_set; 5827 struct target_sigaction *sa; 5828 TaskState *ts = cpu->opaque; 5829 struct emulated_sigtable *k = &ts->sigtab[sig - 1]; 5830 5831 trace_user_handle_signal(cpu_env, sig); 5832 /* dequeue signal */ 5833 k->pending = 0; 5834 5835 sig = gdb_handlesig(cpu, sig); 5836 if (!sig) { 5837 sa = NULL; 5838 handler = TARGET_SIG_IGN; 5839 } else { 5840 sa = &sigact_table[sig - 1]; 5841 handler = sa->_sa_handler; 5842 } 5843 5844 if (handler == TARGET_SIG_DFL) { 5845 /* default handler : ignore some signal. The other are job control or fatal */ 5846 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { 5847 kill(getpid(),SIGSTOP); 5848 } else if (sig != TARGET_SIGCHLD && 5849 sig != TARGET_SIGURG && 5850 sig != TARGET_SIGWINCH && 5851 sig != TARGET_SIGCONT) { 5852 force_sig(sig); 5853 } 5854 } else if (handler == TARGET_SIG_IGN) { 5855 /* ignore sig */ 5856 } else if (handler == TARGET_SIG_ERR) { 5857 force_sig(sig); 5858 } else { 5859 /* compute the blocked signals during the handler execution */ 5860 sigset_t *blocked_set; 5861 5862 target_to_host_sigset(&set, &sa->sa_mask); 5863 /* SA_NODEFER indicates that the current signal should not be 5864 blocked during the handler */ 5865 if (!(sa->sa_flags & TARGET_SA_NODEFER)) 5866 sigaddset(&set, target_to_host_signal(sig)); 5867 5868 /* save the previous blocked signal state to restore it at the 5869 end of the signal execution (see do_sigreturn) */ 5870 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask); 5871 5872 /* block signals in the handler */ 5873 blocked_set = ts->in_sigsuspend ? 5874 &ts->sigsuspend_mask : &ts->signal_mask; 5875 sigorset(&ts->signal_mask, blocked_set, &set); 5876 ts->in_sigsuspend = 0; 5877 5878 /* if the CPU is in VM86 mode, we restore the 32 bit values */ 5879 #if defined(TARGET_I386) && !defined(TARGET_X86_64) 5880 { 5881 CPUX86State *env = cpu_env; 5882 if (env->eflags & VM_MASK) 5883 save_v86_state(env); 5884 } 5885 #endif 5886 /* prepare the stack frame of the virtual CPU */ 5887 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \ 5888 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) 5889 /* These targets do not have traditional signals. */ 5890 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 5891 #else 5892 if (sa->sa_flags & TARGET_SA_SIGINFO) 5893 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 5894 else 5895 setup_frame(sig, sa, &target_old_set, cpu_env); 5896 #endif 5897 if (sa->sa_flags & TARGET_SA_RESETHAND) { 5898 sa->_sa_handler = TARGET_SIG_DFL; 5899 } 5900 } 5901 } 5902 5903 void process_pending_signals(CPUArchState *cpu_env) 5904 { 5905 CPUState *cpu = ENV_GET_CPU(cpu_env); 5906 int sig; 5907 TaskState *ts = cpu->opaque; 5908 sigset_t set; 5909 sigset_t *blocked_set; 5910 5911 while (atomic_read(&ts->signal_pending)) { 5912 /* FIXME: This is not threadsafe. */ 5913 sigfillset(&set); 5914 sigprocmask(SIG_SETMASK, &set, 0); 5915 5916 sig = ts->sync_signal.pending; 5917 if (sig) { 5918 /* Synchronous signals are forced, 5919 * see force_sig_info() and callers in Linux 5920 * Note that not all of our queue_signal() calls in QEMU correspond 5921 * to force_sig_info() calls in Linux (some are send_sig_info()). 5922 * However it seems like a kernel bug to me to allow the process 5923 * to block a synchronous signal since it could then just end up 5924 * looping round and round indefinitely. 5925 */ 5926 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig]) 5927 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) { 5928 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]); 5929 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL; 5930 } 5931 5932 handle_pending_signal(cpu_env, sig); 5933 } 5934 5935 for (sig = 1; sig <= TARGET_NSIG; sig++) { 5936 blocked_set = ts->in_sigsuspend ? 5937 &ts->sigsuspend_mask : &ts->signal_mask; 5938 5939 if (ts->sigtab[sig - 1].pending && 5940 (!sigismember(blocked_set, 5941 target_to_host_signal_table[sig]))) { 5942 handle_pending_signal(cpu_env, sig); 5943 /* Restart scan from the beginning */ 5944 sig = 1; 5945 } 5946 } 5947 5948 /* if no signal is pending, unblock signals and recheck (the act 5949 * of unblocking might cause us to take another host signal which 5950 * will set signal_pending again). 5951 */ 5952 atomic_set(&ts->signal_pending, 0); 5953 ts->in_sigsuspend = 0; 5954 set = ts->signal_mask; 5955 sigdelset(&set, SIGSEGV); 5956 sigdelset(&set, SIGBUS); 5957 sigprocmask(SIG_SETMASK, &set, 0); 5958 } 5959 ts->in_sigsuspend = 0; 5960 } 5961