/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ?
SS_ONSTACK : 0); 95 } 96 97 int host_to_target_signal(int sig) 98 { 99 if (sig < 0 || sig >= _NSIG) 100 return sig; 101 return host_to_target_signal_table[sig]; 102 } 103 104 int target_to_host_signal(int sig) 105 { 106 if (sig < 0 || sig >= _NSIG) 107 return sig; 108 return target_to_host_signal_table[sig]; 109 } 110 111 static inline void target_sigemptyset(target_sigset_t *set) 112 { 113 memset(set, 0, sizeof(*set)); 114 } 115 116 static inline void target_sigaddset(target_sigset_t *set, int signum) 117 { 118 signum--; 119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 120 set->sig[signum / TARGET_NSIG_BPW] |= mask; 121 } 122 123 static inline int target_sigismember(const target_sigset_t *set, int signum) 124 { 125 signum--; 126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0); 128 } 129 130 static void host_to_target_sigset_internal(target_sigset_t *d, 131 const sigset_t *s) 132 { 133 int i; 134 target_sigemptyset(d); 135 for (i = 1; i <= TARGET_NSIG; i++) { 136 if (sigismember(s, i)) { 137 target_sigaddset(d, host_to_target_signal(i)); 138 } 139 } 140 } 141 142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) 143 { 144 target_sigset_t d1; 145 int i; 146 147 host_to_target_sigset_internal(&d1, s); 148 for(i = 0;i < TARGET_NSIG_WORDS; i++) 149 d->sig[i] = tswapal(d1.sig[i]); 150 } 151 152 static void target_to_host_sigset_internal(sigset_t *d, 153 const target_sigset_t *s) 154 { 155 int i; 156 sigemptyset(d); 157 for (i = 1; i <= TARGET_NSIG; i++) { 158 if (target_sigismember(s, i)) { 159 sigaddset(d, target_to_host_signal(i)); 160 } 161 } 162 } 163 164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) 165 { 166 target_sigset_t s1; 167 int i; 168 169 for(i = 0;i < TARGET_NSIG_WORDS; i++) 170 s1.sig[i] = tswapal(s->sig[i]); 171 target_to_host_sigset_internal(d, &s1); 172 } 173 174 void host_to_target_old_sigset(abi_ulong *old_sigset, 175 const sigset_t *sigset) 176 { 177 target_sigset_t d; 178 host_to_target_sigset(&d, sigset); 179 *old_sigset = d.sig[0]; 180 } 181 182 void target_to_host_old_sigset(sigset_t *sigset, 183 const abi_ulong *old_sigset) 184 { 185 target_sigset_t d; 186 int i; 187 188 d.sig[0] = *old_sigset; 189 for(i = 1;i < TARGET_NSIG_WORDS; i++) 190 d.sig[i] = 0; 191 target_to_host_sigset(sigset, &d); 192 } 193 194 int block_signals(void) 195 { 196 TaskState *ts = (TaskState *)thread_cpu->opaque; 197 sigset_t set; 198 199 /* It's OK to block everything including SIGSEGV, because we won't 200 * run any further guest code before unblocking signals in 201 * process_pending_signals(). 202 */ 203 sigfillset(&set); 204 sigprocmask(SIG_SETMASK, &set, 0); 205 206 return atomic_xchg(&ts->signal_pending, 1); 207 } 208 209 /* Wrapper for sigprocmask function 210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset 211 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if 212 * a signal was already pending and the syscall must be restarted, or 213 * 0 on success. 214 * If set is NULL, this is guaranteed not to fail. 
215 */ 216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) 217 { 218 TaskState *ts = (TaskState *)thread_cpu->opaque; 219 220 if (oldset) { 221 *oldset = ts->signal_mask; 222 } 223 224 if (set) { 225 int i; 226 227 if (block_signals()) { 228 return -TARGET_ERESTARTSYS; 229 } 230 231 switch (how) { 232 case SIG_BLOCK: 233 sigorset(&ts->signal_mask, &ts->signal_mask, set); 234 break; 235 case SIG_UNBLOCK: 236 for (i = 1; i <= NSIG; ++i) { 237 if (sigismember(set, i)) { 238 sigdelset(&ts->signal_mask, i); 239 } 240 } 241 break; 242 case SIG_SETMASK: 243 ts->signal_mask = *set; 244 break; 245 default: 246 g_assert_not_reached(); 247 } 248 249 /* Silently ignore attempts to change blocking status of KILL or STOP */ 250 sigdelset(&ts->signal_mask, SIGKILL); 251 sigdelset(&ts->signal_mask, SIGSTOP); 252 } 253 return 0; 254 } 255 256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \ 257 !defined(TARGET_X86_64) 258 /* Just set the guest's signal mask to the specified value; the 259 * caller is assumed to have called block_signals() already. 260 */ 261 static void set_sigmask(const sigset_t *set) 262 { 263 TaskState *ts = (TaskState *)thread_cpu->opaque; 264 265 ts->signal_mask = *set; 266 } 267 #endif 268 269 /* siginfo conversion */ 270 271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, 272 const siginfo_t *info) 273 { 274 int sig = host_to_target_signal(info->si_signo); 275 int si_code = info->si_code; 276 int si_type; 277 tinfo->si_signo = sig; 278 tinfo->si_errno = 0; 279 tinfo->si_code = info->si_code; 280 281 /* This memset serves two purposes: 282 * (1) ensure we don't leak random junk to the guest later 283 * (2) placate false positives from gcc about fields 284 * being used uninitialized if it chooses to inline both this 285 * function and tswap_siginfo() into host_to_target_siginfo(). 286 */ 287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad)); 288 289 /* This is awkward, because we have to use a combination of 290 * the si_code and si_signo to figure out which of the union's 291 * members are valid. (Within the host kernel it is always possible 292 * to tell, but the kernel carefully avoids giving userspace the 293 * high 16 bits of si_code, so we don't have the information to 294 * do this the easy way...) We therefore make our best guess, 295 * bearing in mind that a guest can spoof most of the si_codes 296 * via rt_sigqueueinfo() if it likes. 297 * 298 * Once we have made our guess, we record it in the top 16 bits of 299 * the si_code, so that tswap_siginfo() later can use it. 300 * tswap_siginfo() will strip these top bits out before writing 301 * si_code to the guest (sign-extending the lower bits). 302 */ 303 304 switch (si_code) { 305 case SI_USER: 306 case SI_TKILL: 307 case SI_KERNEL: 308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel. 309 * These are the only unspoofable si_code values. 310 */ 311 tinfo->_sifields._kill._pid = info->si_pid; 312 tinfo->_sifields._kill._uid = info->si_uid; 313 si_type = QEMU_SI_KILL; 314 break; 315 default: 316 /* Everything else is spoofable. 
Make best guess based on signal */ 317 switch (sig) { 318 case TARGET_SIGCHLD: 319 tinfo->_sifields._sigchld._pid = info->si_pid; 320 tinfo->_sifields._sigchld._uid = info->si_uid; 321 tinfo->_sifields._sigchld._status 322 = host_to_target_waitstatus(info->si_status); 323 tinfo->_sifields._sigchld._utime = info->si_utime; 324 tinfo->_sifields._sigchld._stime = info->si_stime; 325 si_type = QEMU_SI_CHLD; 326 break; 327 case TARGET_SIGIO: 328 tinfo->_sifields._sigpoll._band = info->si_band; 329 tinfo->_sifields._sigpoll._fd = info->si_fd; 330 si_type = QEMU_SI_POLL; 331 break; 332 default: 333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */ 334 tinfo->_sifields._rt._pid = info->si_pid; 335 tinfo->_sifields._rt._uid = info->si_uid; 336 /* XXX: potential problem if 64 bit */ 337 tinfo->_sifields._rt._sigval.sival_ptr 338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr; 339 si_type = QEMU_SI_RT; 340 break; 341 } 342 break; 343 } 344 345 tinfo->si_code = deposit32(si_code, 16, 16, si_type); 346 } 347 348 static void tswap_siginfo(target_siginfo_t *tinfo, 349 const target_siginfo_t *info) 350 { 351 int si_type = extract32(info->si_code, 16, 16); 352 int si_code = sextract32(info->si_code, 0, 16); 353 354 __put_user(info->si_signo, &tinfo->si_signo); 355 __put_user(info->si_errno, &tinfo->si_errno); 356 __put_user(si_code, &tinfo->si_code); 357 358 /* We can use our internal marker of which fields in the structure 359 * are valid, rather than duplicating the guesswork of 360 * host_to_target_siginfo_noswap() here. 361 */ 362 switch (si_type) { 363 case QEMU_SI_KILL: 364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid); 365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid); 366 break; 367 case QEMU_SI_TIMER: 368 __put_user(info->_sifields._timer._timer1, 369 &tinfo->_sifields._timer._timer1); 370 __put_user(info->_sifields._timer._timer2, 371 &tinfo->_sifields._timer._timer2); 372 break; 373 case QEMU_SI_POLL: 374 __put_user(info->_sifields._sigpoll._band, 375 &tinfo->_sifields._sigpoll._band); 376 __put_user(info->_sifields._sigpoll._fd, 377 &tinfo->_sifields._sigpoll._fd); 378 break; 379 case QEMU_SI_FAULT: 380 __put_user(info->_sifields._sigfault._addr, 381 &tinfo->_sifields._sigfault._addr); 382 break; 383 case QEMU_SI_CHLD: 384 __put_user(info->_sifields._sigchld._pid, 385 &tinfo->_sifields._sigchld._pid); 386 __put_user(info->_sifields._sigchld._uid, 387 &tinfo->_sifields._sigchld._uid); 388 __put_user(info->_sifields._sigchld._status, 389 &tinfo->_sifields._sigchld._status); 390 __put_user(info->_sifields._sigchld._utime, 391 &tinfo->_sifields._sigchld._utime); 392 __put_user(info->_sifields._sigchld._stime, 393 &tinfo->_sifields._sigchld._stime); 394 break; 395 case QEMU_SI_RT: 396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid); 397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid); 398 __put_user(info->_sifields._rt._sigval.sival_ptr, 399 &tinfo->_sifields._rt._sigval.sival_ptr); 400 break; 401 default: 402 g_assert_not_reached(); 403 } 404 } 405 406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) 407 { 408 target_siginfo_t tgt_tmp; 409 host_to_target_siginfo_noswap(&tgt_tmp, info); 410 tswap_siginfo(tinfo, &tgt_tmp); 411 } 412 413 /* XXX: we support only POSIX RT signals are used. 
*/ 414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */ 415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo) 416 { 417 /* This conversion is used only for the rt_sigqueueinfo syscall, 418 * and so we know that the _rt fields are the valid ones. 419 */ 420 abi_ulong sival_ptr; 421 422 __get_user(info->si_signo, &tinfo->si_signo); 423 __get_user(info->si_errno, &tinfo->si_errno); 424 __get_user(info->si_code, &tinfo->si_code); 425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid); 426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid); 427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr); 428 info->si_value.sival_ptr = (void *)(long)sival_ptr; 429 } 430 431 static int fatal_signal (int sig) 432 { 433 switch (sig) { 434 case TARGET_SIGCHLD: 435 case TARGET_SIGURG: 436 case TARGET_SIGWINCH: 437 /* Ignored by default. */ 438 return 0; 439 case TARGET_SIGCONT: 440 case TARGET_SIGSTOP: 441 case TARGET_SIGTSTP: 442 case TARGET_SIGTTIN: 443 case TARGET_SIGTTOU: 444 /* Job control signals. */ 445 return 0; 446 default: 447 return 1; 448 } 449 } 450 451 /* returns 1 if given signal should dump core if not handled */ 452 static int core_dump_signal(int sig) 453 { 454 switch (sig) { 455 case TARGET_SIGABRT: 456 case TARGET_SIGFPE: 457 case TARGET_SIGILL: 458 case TARGET_SIGQUIT: 459 case TARGET_SIGSEGV: 460 case TARGET_SIGTRAP: 461 case TARGET_SIGBUS: 462 return (1); 463 default: 464 return (0); 465 } 466 } 467 468 void signal_init(void) 469 { 470 TaskState *ts = (TaskState *)thread_cpu->opaque; 471 struct sigaction act; 472 struct sigaction oact; 473 int i, j; 474 int host_sig; 475 476 /* generate signal conversion tables */ 477 for(i = 1; i < _NSIG; i++) { 478 if (host_to_target_signal_table[i] == 0) 479 host_to_target_signal_table[i] = i; 480 } 481 for(i = 1; i < _NSIG; i++) { 482 j = host_to_target_signal_table[i]; 483 target_to_host_signal_table[j] = i; 484 } 485 486 /* Set the signal mask from the host mask. */ 487 sigprocmask(0, 0, &ts->signal_mask); 488 489 /* set all host signal handlers. ALL signals are blocked during 490 the handlers to serialize them. */ 491 memset(sigact_table, 0, sizeof(sigact_table)); 492 493 sigfillset(&act.sa_mask); 494 act.sa_flags = SA_SIGINFO; 495 act.sa_sigaction = host_signal_handler; 496 for(i = 1; i <= TARGET_NSIG; i++) { 497 host_sig = target_to_host_signal(i); 498 sigaction(host_sig, NULL, &oact); 499 if (oact.sa_sigaction == (void *)SIG_IGN) { 500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; 501 } else if (oact.sa_sigaction == (void *)SIG_DFL) { 502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL; 503 } 504 /* If there's already a handler installed then something has 505 gone horribly wrong, so don't even try to handle that case. */ 506 /* Install some handlers for our own use. We need at least 507 SIGSEGV and SIGBUS, to detect exceptions. We can not just 508 trap all signals because it affects syscall interrupt 509 behavior. But do trap all default-fatal signals. 
         */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}


/* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
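    /* Record the signal as pending in the per-thread sigtab; the main
     * loop will notice signal_pending and deliver it from
     * process_pending_signals(). */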
625 k = &ts->sigtab[sig - 1]; 626 k->info = tinfo; 627 k->pending = sig; 628 ts->signal_pending = 1; 629 630 /* Block host signals until target signal handler entered. We 631 * can't block SIGSEGV or SIGBUS while we're executing guest 632 * code in case the guest code provokes one in the window between 633 * now and it getting out to the main loop. Signals will be 634 * unblocked again in process_pending_signals(). 635 * 636 * WARNING: we cannot use sigfillset() here because the uc_sigmask 637 * field is a kernel sigset_t, which is much smaller than the 638 * libc sigset_t which sigfillset() operates on. Using sigfillset() 639 * would write 0xff bytes off the end of the structure and trash 640 * data on the struct. 641 * We can't use sizeof(uc->uc_sigmask) either, because the libc 642 * headers define the struct field with the wrong (too large) type. 643 */ 644 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE); 645 sigdelset(&uc->uc_sigmask, SIGSEGV); 646 sigdelset(&uc->uc_sigmask, SIGBUS); 647 648 /* interrupt the virtual CPU as soon as possible */ 649 cpu_exit(thread_cpu); 650 } 651 652 /* do_sigaltstack() returns target values and errnos. */ 653 /* compare linux/kernel/signal.c:do_sigaltstack() */ 654 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) 655 { 656 int ret; 657 struct target_sigaltstack oss; 658 659 /* XXX: test errors */ 660 if(uoss_addr) 661 { 662 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp); 663 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size); 664 __put_user(sas_ss_flags(sp), &oss.ss_flags); 665 } 666 667 if(uss_addr) 668 { 669 struct target_sigaltstack *uss; 670 struct target_sigaltstack ss; 671 size_t minstacksize = TARGET_MINSIGSTKSZ; 672 673 #if defined(TARGET_PPC64) 674 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */ 675 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 676 if (get_ppc64_abi(image) > 1) { 677 minstacksize = 4096; 678 } 679 #endif 680 681 ret = -TARGET_EFAULT; 682 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) { 683 goto out; 684 } 685 __get_user(ss.ss_sp, &uss->ss_sp); 686 __get_user(ss.ss_size, &uss->ss_size); 687 __get_user(ss.ss_flags, &uss->ss_flags); 688 unlock_user_struct(uss, uss_addr, 0); 689 690 ret = -TARGET_EPERM; 691 if (on_sig_stack(sp)) 692 goto out; 693 694 ret = -TARGET_EINVAL; 695 if (ss.ss_flags != TARGET_SS_DISABLE 696 && ss.ss_flags != TARGET_SS_ONSTACK 697 && ss.ss_flags != 0) 698 goto out; 699 700 if (ss.ss_flags == TARGET_SS_DISABLE) { 701 ss.ss_size = 0; 702 ss.ss_sp = 0; 703 } else { 704 ret = -TARGET_ENOMEM; 705 if (ss.ss_size < minstacksize) { 706 goto out; 707 } 708 } 709 710 target_sigaltstack_used.ss_sp = ss.ss_sp; 711 target_sigaltstack_used.ss_size = ss.ss_size; 712 } 713 714 if (uoss_addr) { 715 ret = -TARGET_EFAULT; 716 if (copy_to_user(uoss_addr, &oss, sizeof(oss))) 717 goto out; 718 } 719 720 ret = 0; 721 out: 722 return ret; 723 } 724 725 /* do_sigaction() return target values and host errnos */ 726 int do_sigaction(int sig, const struct target_sigaction *act, 727 struct target_sigaction *oact) 728 { 729 struct target_sigaction *k; 730 struct sigaction act1; 731 int host_sig; 732 int ret = 0; 733 734 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) { 735 return -TARGET_EINVAL; 736 } 737 738 if (block_signals()) { 739 return -TARGET_ERESTARTSYS; 740 } 741 742 k = &sigact_table[sig - 1]; 743 if (oact) { 744 __put_user(k->_sa_handler, &oact->_sa_handler); 745 __put_user(k->sa_flags, 
&oact->sa_flags); 746 #if !defined(TARGET_MIPS) 747 __put_user(k->sa_restorer, &oact->sa_restorer); 748 #endif 749 /* Not swapped. */ 750 oact->sa_mask = k->sa_mask; 751 } 752 if (act) { 753 /* FIXME: This is not threadsafe. */ 754 __get_user(k->_sa_handler, &act->_sa_handler); 755 __get_user(k->sa_flags, &act->sa_flags); 756 #if !defined(TARGET_MIPS) 757 __get_user(k->sa_restorer, &act->sa_restorer); 758 #endif 759 /* To be swapped in target_to_host_sigset. */ 760 k->sa_mask = act->sa_mask; 761 762 /* we update the host linux signal state */ 763 host_sig = target_to_host_signal(sig); 764 if (host_sig != SIGSEGV && host_sig != SIGBUS) { 765 sigfillset(&act1.sa_mask); 766 act1.sa_flags = SA_SIGINFO; 767 if (k->sa_flags & TARGET_SA_RESTART) 768 act1.sa_flags |= SA_RESTART; 769 /* NOTE: it is important to update the host kernel signal 770 ignore state to avoid getting unexpected interrupted 771 syscalls */ 772 if (k->_sa_handler == TARGET_SIG_IGN) { 773 act1.sa_sigaction = (void *)SIG_IGN; 774 } else if (k->_sa_handler == TARGET_SIG_DFL) { 775 if (fatal_signal (sig)) 776 act1.sa_sigaction = host_signal_handler; 777 else 778 act1.sa_sigaction = (void *)SIG_DFL; 779 } else { 780 act1.sa_sigaction = host_signal_handler; 781 } 782 ret = sigaction(host_sig, &act1, NULL); 783 } 784 } 785 return ret; 786 } 787 788 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32 789 790 /* from the Linux kernel */ 791 792 struct target_fpreg { 793 uint16_t significand[4]; 794 uint16_t exponent; 795 }; 796 797 struct target_fpxreg { 798 uint16_t significand[4]; 799 uint16_t exponent; 800 uint16_t padding[3]; 801 }; 802 803 struct target_xmmreg { 804 abi_ulong element[4]; 805 }; 806 807 struct target_fpstate { 808 /* Regular FPU environment */ 809 abi_ulong cw; 810 abi_ulong sw; 811 abi_ulong tag; 812 abi_ulong ipoff; 813 abi_ulong cssel; 814 abi_ulong dataoff; 815 abi_ulong datasel; 816 struct target_fpreg _st[8]; 817 uint16_t status; 818 uint16_t magic; /* 0xffff = regular FPU data only */ 819 820 /* FXSR FPU environment */ 821 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */ 822 abi_ulong mxcsr; 823 abi_ulong reserved; 824 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 825 struct target_xmmreg _xmm[8]; 826 abi_ulong padding[56]; 827 }; 828 829 #define X86_FXSR_MAGIC 0x0000 830 831 struct target_sigcontext { 832 uint16_t gs, __gsh; 833 uint16_t fs, __fsh; 834 uint16_t es, __esh; 835 uint16_t ds, __dsh; 836 abi_ulong edi; 837 abi_ulong esi; 838 abi_ulong ebp; 839 abi_ulong esp; 840 abi_ulong ebx; 841 abi_ulong edx; 842 abi_ulong ecx; 843 abi_ulong eax; 844 abi_ulong trapno; 845 abi_ulong err; 846 abi_ulong eip; 847 uint16_t cs, __csh; 848 abi_ulong eflags; 849 abi_ulong esp_at_signal; 850 uint16_t ss, __ssh; 851 abi_ulong fpstate; /* pointer */ 852 abi_ulong oldmask; 853 abi_ulong cr2; 854 }; 855 856 struct target_ucontext { 857 abi_ulong tuc_flags; 858 abi_ulong tuc_link; 859 target_stack_t tuc_stack; 860 struct target_sigcontext tuc_mcontext; 861 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 862 }; 863 864 struct sigframe 865 { 866 abi_ulong pretcode; 867 int sig; 868 struct target_sigcontext sc; 869 struct target_fpstate fpstate; 870 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 871 char retcode[8]; 872 }; 873 874 struct rt_sigframe 875 { 876 abi_ulong pretcode; 877 int sig; 878 abi_ulong pinfo; 879 abi_ulong puc; 880 struct target_siginfo info; 881 struct target_ucontext uc; 882 struct target_fpstate fpstate; 883 char retcode[8]; 884 }; 885 886 /* 887 * Set up a signal 
frame. 888 */ 889 890 /* XXX: save x87 state */ 891 static void setup_sigcontext(struct target_sigcontext *sc, 892 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, 893 abi_ulong fpstate_addr) 894 { 895 CPUState *cs = CPU(x86_env_get_cpu(env)); 896 uint16_t magic; 897 898 /* already locked in setup_frame() */ 899 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 900 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 901 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 902 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 903 __put_user(env->regs[R_EDI], &sc->edi); 904 __put_user(env->regs[R_ESI], &sc->esi); 905 __put_user(env->regs[R_EBP], &sc->ebp); 906 __put_user(env->regs[R_ESP], &sc->esp); 907 __put_user(env->regs[R_EBX], &sc->ebx); 908 __put_user(env->regs[R_EDX], &sc->edx); 909 __put_user(env->regs[R_ECX], &sc->ecx); 910 __put_user(env->regs[R_EAX], &sc->eax); 911 __put_user(cs->exception_index, &sc->trapno); 912 __put_user(env->error_code, &sc->err); 913 __put_user(env->eip, &sc->eip); 914 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 915 __put_user(env->eflags, &sc->eflags); 916 __put_user(env->regs[R_ESP], &sc->esp_at_signal); 917 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 918 919 cpu_x86_fsave(env, fpstate_addr, 1); 920 fpstate->status = fpstate->sw; 921 magic = 0xffff; 922 __put_user(magic, &fpstate->magic); 923 __put_user(fpstate_addr, &sc->fpstate); 924 925 /* non-iBCS2 extensions.. */ 926 __put_user(mask, &sc->oldmask); 927 __put_user(env->cr[2], &sc->cr2); 928 } 929 930 /* 931 * Determine which stack to use.. 932 */ 933 934 static inline abi_ulong 935 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) 936 { 937 unsigned long esp; 938 939 /* Default to using normal stack */ 940 esp = env->regs[R_ESP]; 941 /* This is the X/Open sanctioned signal stack switching. */ 942 if (ka->sa_flags & TARGET_SA_ONSTACK) { 943 if (sas_ss_flags(esp) == 0) { 944 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 945 } 946 } else { 947 948 /* This is the legacy signal stack switching. */ 949 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 950 !(ka->sa_flags & TARGET_SA_RESTORER) && 951 ka->sa_restorer) { 952 esp = (unsigned long) ka->sa_restorer; 953 } 954 } 955 return (esp - frame_size) & -8ul; 956 } 957 958 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 959 static void setup_frame(int sig, struct target_sigaction *ka, 960 target_sigset_t *set, CPUX86State *env) 961 { 962 abi_ulong frame_addr; 963 struct sigframe *frame; 964 int i; 965 966 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 967 trace_user_setup_frame(env, frame_addr); 968 969 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 970 goto give_sigsegv; 971 972 __put_user(sig, &frame->sig); 973 974 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 975 frame_addr + offsetof(struct sigframe, fpstate)); 976 977 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 978 __put_user(set->sig[i], &frame->extramask[i - 1]); 979 } 980 981 /* Set up to return from userspace. If provided, use a stub 982 already in userspace. 
*/ 983 if (ka->sa_flags & TARGET_SA_RESTORER) { 984 __put_user(ka->sa_restorer, &frame->pretcode); 985 } else { 986 uint16_t val16; 987 abi_ulong retcode_addr; 988 retcode_addr = frame_addr + offsetof(struct sigframe, retcode); 989 __put_user(retcode_addr, &frame->pretcode); 990 /* This is popl %eax ; movl $,%eax ; int $0x80 */ 991 val16 = 0xb858; 992 __put_user(val16, (uint16_t *)(frame->retcode+0)); 993 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); 994 val16 = 0x80cd; 995 __put_user(val16, (uint16_t *)(frame->retcode+6)); 996 } 997 998 999 /* Set up registers for signal handler */ 1000 env->regs[R_ESP] = frame_addr; 1001 env->eip = ka->_sa_handler; 1002 1003 cpu_x86_load_seg(env, R_DS, __USER_DS); 1004 cpu_x86_load_seg(env, R_ES, __USER_DS); 1005 cpu_x86_load_seg(env, R_SS, __USER_DS); 1006 cpu_x86_load_seg(env, R_CS, __USER_CS); 1007 env->eflags &= ~TF_MASK; 1008 1009 unlock_user_struct(frame, frame_addr, 1); 1010 1011 return; 1012 1013 give_sigsegv: 1014 if (sig == TARGET_SIGSEGV) { 1015 ka->_sa_handler = TARGET_SIG_DFL; 1016 } 1017 force_sig(TARGET_SIGSEGV /* , current */); 1018 } 1019 1020 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */ 1021 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1022 target_siginfo_t *info, 1023 target_sigset_t *set, CPUX86State *env) 1024 { 1025 abi_ulong frame_addr, addr; 1026 struct rt_sigframe *frame; 1027 int i; 1028 1029 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1030 trace_user_setup_rt_frame(env, frame_addr); 1031 1032 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1033 goto give_sigsegv; 1034 1035 __put_user(sig, &frame->sig); 1036 addr = frame_addr + offsetof(struct rt_sigframe, info); 1037 __put_user(addr, &frame->pinfo); 1038 addr = frame_addr + offsetof(struct rt_sigframe, uc); 1039 __put_user(addr, &frame->puc); 1040 tswap_siginfo(&frame->info, info); 1041 1042 /* Create the ucontext. */ 1043 __put_user(0, &frame->uc.tuc_flags); 1044 __put_user(0, &frame->uc.tuc_link); 1045 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 1046 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 1047 &frame->uc.tuc_stack.ss_flags); 1048 __put_user(target_sigaltstack_used.ss_size, 1049 &frame->uc.tuc_stack.ss_size); 1050 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env, 1051 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate)); 1052 1053 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1054 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1055 } 1056 1057 /* Set up to return from userspace. If provided, use a stub 1058 already in userspace. 
*/ 1059 if (ka->sa_flags & TARGET_SA_RESTORER) { 1060 __put_user(ka->sa_restorer, &frame->pretcode); 1061 } else { 1062 uint16_t val16; 1063 addr = frame_addr + offsetof(struct rt_sigframe, retcode); 1064 __put_user(addr, &frame->pretcode); 1065 /* This is movl $,%eax ; int $0x80 */ 1066 __put_user(0xb8, (char *)(frame->retcode+0)); 1067 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); 1068 val16 = 0x80cd; 1069 __put_user(val16, (uint16_t *)(frame->retcode+5)); 1070 } 1071 1072 /* Set up registers for signal handler */ 1073 env->regs[R_ESP] = frame_addr; 1074 env->eip = ka->_sa_handler; 1075 1076 cpu_x86_load_seg(env, R_DS, __USER_DS); 1077 cpu_x86_load_seg(env, R_ES, __USER_DS); 1078 cpu_x86_load_seg(env, R_SS, __USER_DS); 1079 cpu_x86_load_seg(env, R_CS, __USER_CS); 1080 env->eflags &= ~TF_MASK; 1081 1082 unlock_user_struct(frame, frame_addr, 1); 1083 1084 return; 1085 1086 give_sigsegv: 1087 if (sig == TARGET_SIGSEGV) { 1088 ka->_sa_handler = TARGET_SIG_DFL; 1089 } 1090 force_sig(TARGET_SIGSEGV /* , current */); 1091 } 1092 1093 static int 1094 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) 1095 { 1096 unsigned int err = 0; 1097 abi_ulong fpstate_addr; 1098 unsigned int tmpflags; 1099 1100 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); 1101 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); 1102 cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); 1103 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds)); 1104 1105 env->regs[R_EDI] = tswapl(sc->edi); 1106 env->regs[R_ESI] = tswapl(sc->esi); 1107 env->regs[R_EBP] = tswapl(sc->ebp); 1108 env->regs[R_ESP] = tswapl(sc->esp); 1109 env->regs[R_EBX] = tswapl(sc->ebx); 1110 env->regs[R_EDX] = tswapl(sc->edx); 1111 env->regs[R_ECX] = tswapl(sc->ecx); 1112 env->regs[R_EAX] = tswapl(sc->eax); 1113 env->eip = tswapl(sc->eip); 1114 1115 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); 1116 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); 1117 1118 tmpflags = tswapl(sc->eflags); 1119 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 1120 // regs->orig_eax = -1; /* disable syscall checks */ 1121 1122 fpstate_addr = tswapl(sc->fpstate); 1123 if (fpstate_addr != 0) { 1124 if (!access_ok(VERIFY_READ, fpstate_addr, 1125 sizeof(struct target_fpstate))) 1126 goto badframe; 1127 cpu_x86_frstor(env, fpstate_addr, 1); 1128 } 1129 1130 return err; 1131 badframe: 1132 return 1; 1133 } 1134 1135 long do_sigreturn(CPUX86State *env) 1136 { 1137 struct sigframe *frame; 1138 abi_ulong frame_addr = env->regs[R_ESP] - 8; 1139 target_sigset_t target_set; 1140 sigset_t set; 1141 int i; 1142 1143 trace_user_do_sigreturn(env, frame_addr); 1144 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1145 goto badframe; 1146 /* set blocked signals */ 1147 __get_user(target_set.sig[0], &frame->sc.oldmask); 1148 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1149 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 1150 } 1151 1152 target_to_host_sigset_internal(&set, &target_set); 1153 set_sigmask(&set); 1154 1155 /* restore registers */ 1156 if (restore_sigcontext(env, &frame->sc)) 1157 goto badframe; 1158 unlock_user_struct(frame, frame_addr, 0); 1159 return -TARGET_QEMU_ESIGRETURN; 1160 1161 badframe: 1162 unlock_user_struct(frame, frame_addr, 0); 1163 force_sig(TARGET_SIGSEGV); 1164 return 0; 1165 } 1166 1167 long do_rt_sigreturn(CPUX86State *env) 1168 { 1169 abi_ulong frame_addr; 1170 struct rt_sigframe *frame; 1171 sigset_t set; 1172 1173 frame_addr = env->regs[R_ESP] - 4; 1174 trace_user_do_rt_sigreturn(env, frame_addr); 1175 if 
(!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1176 goto badframe; 1177 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 1178 set_sigmask(&set); 1179 1180 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 1181 goto badframe; 1182 } 1183 1184 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, 1185 get_sp_from_cpustate(env)) == -EFAULT) { 1186 goto badframe; 1187 } 1188 1189 unlock_user_struct(frame, frame_addr, 0); 1190 return -TARGET_QEMU_ESIGRETURN; 1191 1192 badframe: 1193 unlock_user_struct(frame, frame_addr, 0); 1194 force_sig(TARGET_SIGSEGV); 1195 return 0; 1196 } 1197 1198 #elif defined(TARGET_AARCH64) 1199 1200 struct target_sigcontext { 1201 uint64_t fault_address; 1202 /* AArch64 registers */ 1203 uint64_t regs[31]; 1204 uint64_t sp; 1205 uint64_t pc; 1206 uint64_t pstate; 1207 /* 4K reserved for FP/SIMD state and future expansion */ 1208 char __reserved[4096] __attribute__((__aligned__(16))); 1209 }; 1210 1211 struct target_ucontext { 1212 abi_ulong tuc_flags; 1213 abi_ulong tuc_link; 1214 target_stack_t tuc_stack; 1215 target_sigset_t tuc_sigmask; 1216 /* glibc uses a 1024-bit sigset_t */ 1217 char __unused[1024 / 8 - sizeof(target_sigset_t)]; 1218 /* last for future expansion */ 1219 struct target_sigcontext tuc_mcontext; 1220 }; 1221 1222 /* 1223 * Header to be used at the beginning of structures extending the user 1224 * context. Such structures must be placed after the rt_sigframe on the stack 1225 * and be 16-byte aligned. The last structure must be a dummy one with the 1226 * magic and size set to 0. 1227 */ 1228 struct target_aarch64_ctx { 1229 uint32_t magic; 1230 uint32_t size; 1231 }; 1232 1233 #define TARGET_FPSIMD_MAGIC 0x46508001 1234 1235 struct target_fpsimd_context { 1236 struct target_aarch64_ctx head; 1237 uint32_t fpsr; 1238 uint32_t fpcr; 1239 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */ 1240 }; 1241 1242 /* 1243 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to 1244 * user space as it will change with the addition of new context. User space 1245 * should check the magic/size information. 
1246 */ 1247 struct target_aux_context { 1248 struct target_fpsimd_context fpsimd; 1249 /* additional context to be added before "end" */ 1250 struct target_aarch64_ctx end; 1251 }; 1252 1253 struct target_rt_sigframe { 1254 struct target_siginfo info; 1255 struct target_ucontext uc; 1256 uint64_t fp; 1257 uint64_t lr; 1258 uint32_t tramp[2]; 1259 }; 1260 1261 static int target_setup_sigframe(struct target_rt_sigframe *sf, 1262 CPUARMState *env, target_sigset_t *set) 1263 { 1264 int i; 1265 struct target_aux_context *aux = 1266 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1267 1268 /* set up the stack frame for unwinding */ 1269 __put_user(env->xregs[29], &sf->fp); 1270 __put_user(env->xregs[30], &sf->lr); 1271 1272 for (i = 0; i < 31; i++) { 1273 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1274 } 1275 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1276 __put_user(env->pc, &sf->uc.tuc_mcontext.pc); 1277 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate); 1278 1279 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address); 1280 1281 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 1282 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]); 1283 } 1284 1285 for (i = 0; i < 32; i++) { 1286 #ifdef TARGET_WORDS_BIGENDIAN 1287 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1288 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1289 #else 1290 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1291 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1292 #endif 1293 } 1294 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr); 1295 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr); 1296 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic); 1297 __put_user(sizeof(struct target_fpsimd_context), 1298 &aux->fpsimd.head.size); 1299 1300 /* set the "end" magic */ 1301 __put_user(0, &aux->end.magic); 1302 __put_user(0, &aux->end.size); 1303 1304 return 0; 1305 } 1306 1307 static int target_restore_sigframe(CPUARMState *env, 1308 struct target_rt_sigframe *sf) 1309 { 1310 sigset_t set; 1311 int i; 1312 struct target_aux_context *aux = 1313 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1314 uint32_t magic, size, fpsr, fpcr; 1315 uint64_t pstate; 1316 1317 target_to_host_sigset(&set, &sf->uc.tuc_sigmask); 1318 set_sigmask(&set); 1319 1320 for (i = 0; i < 31; i++) { 1321 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1322 } 1323 1324 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1325 __get_user(env->pc, &sf->uc.tuc_mcontext.pc); 1326 __get_user(pstate, &sf->uc.tuc_mcontext.pstate); 1327 pstate_write(env, pstate); 1328 1329 __get_user(magic, &aux->fpsimd.head.magic); 1330 __get_user(size, &aux->fpsimd.head.size); 1331 1332 if (magic != TARGET_FPSIMD_MAGIC 1333 || size != sizeof(struct target_fpsimd_context)) { 1334 return 1; 1335 } 1336 1337 for (i = 0; i < 32; i++) { 1338 #ifdef TARGET_WORDS_BIGENDIAN 1339 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1340 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1341 #else 1342 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1343 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1344 #endif 1345 } 1346 __get_user(fpsr, &aux->fpsimd.fpsr); 1347 vfp_set_fpsr(env, fpsr); 1348 __get_user(fpcr, &aux->fpsimd.fpcr); 1349 vfp_set_fpcr(env, fpcr); 1350 1351 return 0; 1352 } 1353 1354 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env) 1355 { 1356 
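    /* On AArch64, env->xregs[31] holds the guest stack pointer. */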
abi_ulong sp; 1357 1358 sp = env->xregs[31]; 1359 1360 /* 1361 * This is the X/Open sanctioned signal stack switching. 1362 */ 1363 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1364 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1365 } 1366 1367 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15; 1368 1369 return sp; 1370 } 1371 1372 static void target_setup_frame(int usig, struct target_sigaction *ka, 1373 target_siginfo_t *info, target_sigset_t *set, 1374 CPUARMState *env) 1375 { 1376 struct target_rt_sigframe *frame; 1377 abi_ulong frame_addr, return_addr; 1378 1379 frame_addr = get_sigframe(ka, env); 1380 trace_user_setup_frame(env, frame_addr); 1381 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1382 goto give_sigsegv; 1383 } 1384 1385 __put_user(0, &frame->uc.tuc_flags); 1386 __put_user(0, &frame->uc.tuc_link); 1387 1388 __put_user(target_sigaltstack_used.ss_sp, 1389 &frame->uc.tuc_stack.ss_sp); 1390 __put_user(sas_ss_flags(env->xregs[31]), 1391 &frame->uc.tuc_stack.ss_flags); 1392 __put_user(target_sigaltstack_used.ss_size, 1393 &frame->uc.tuc_stack.ss_size); 1394 target_setup_sigframe(frame, env, set); 1395 if (ka->sa_flags & TARGET_SA_RESTORER) { 1396 return_addr = ka->sa_restorer; 1397 } else { 1398 /* mov x8,#__NR_rt_sigreturn; svc #0 */ 1399 __put_user(0xd2801168, &frame->tramp[0]); 1400 __put_user(0xd4000001, &frame->tramp[1]); 1401 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp); 1402 } 1403 env->xregs[0] = usig; 1404 env->xregs[31] = frame_addr; 1405 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp); 1406 env->pc = ka->_sa_handler; 1407 env->xregs[30] = return_addr; 1408 if (info) { 1409 tswap_siginfo(&frame->info, info); 1410 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); 1411 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 1412 } 1413 1414 unlock_user_struct(frame, frame_addr, 1); 1415 return; 1416 1417 give_sigsegv: 1418 unlock_user_struct(frame, frame_addr, 1); 1419 force_sig(TARGET_SIGSEGV); 1420 } 1421 1422 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1423 target_siginfo_t *info, target_sigset_t *set, 1424 CPUARMState *env) 1425 { 1426 target_setup_frame(sig, ka, info, set, env); 1427 } 1428 1429 static void setup_frame(int sig, struct target_sigaction *ka, 1430 target_sigset_t *set, CPUARMState *env) 1431 { 1432 target_setup_frame(sig, ka, 0, set, env); 1433 } 1434 1435 long do_rt_sigreturn(CPUARMState *env) 1436 { 1437 struct target_rt_sigframe *frame = NULL; 1438 abi_ulong frame_addr = env->xregs[31]; 1439 1440 trace_user_do_rt_sigreturn(env, frame_addr); 1441 if (frame_addr & 15) { 1442 goto badframe; 1443 } 1444 1445 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1446 goto badframe; 1447 } 1448 1449 if (target_restore_sigframe(env, frame)) { 1450 goto badframe; 1451 } 1452 1453 if (do_sigaltstack(frame_addr + 1454 offsetof(struct target_rt_sigframe, uc.tuc_stack), 1455 0, get_sp_from_cpustate(env)) == -EFAULT) { 1456 goto badframe; 1457 } 1458 1459 unlock_user_struct(frame, frame_addr, 0); 1460 return -TARGET_QEMU_ESIGRETURN; 1461 1462 badframe: 1463 unlock_user_struct(frame, frame_addr, 0); 1464 force_sig(TARGET_SIGSEGV); 1465 return 0; 1466 } 1467 1468 long do_sigreturn(CPUARMState *env) 1469 { 1470 return do_rt_sigreturn(env); 1471 } 1472 1473 #elif defined(TARGET_ARM) 1474 1475 struct target_sigcontext { 1476 abi_ulong trap_no; 1477 abi_ulong error_code; 1478 abi_ulong oldmask; 
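    /* saved general-purpose registers, CPSR and fault address follow */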
1479 abi_ulong arm_r0; 1480 abi_ulong arm_r1; 1481 abi_ulong arm_r2; 1482 abi_ulong arm_r3; 1483 abi_ulong arm_r4; 1484 abi_ulong arm_r5; 1485 abi_ulong arm_r6; 1486 abi_ulong arm_r7; 1487 abi_ulong arm_r8; 1488 abi_ulong arm_r9; 1489 abi_ulong arm_r10; 1490 abi_ulong arm_fp; 1491 abi_ulong arm_ip; 1492 abi_ulong arm_sp; 1493 abi_ulong arm_lr; 1494 abi_ulong arm_pc; 1495 abi_ulong arm_cpsr; 1496 abi_ulong fault_address; 1497 }; 1498 1499 struct target_ucontext_v1 { 1500 abi_ulong tuc_flags; 1501 abi_ulong tuc_link; 1502 target_stack_t tuc_stack; 1503 struct target_sigcontext tuc_mcontext; 1504 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1505 }; 1506 1507 struct target_ucontext_v2 { 1508 abi_ulong tuc_flags; 1509 abi_ulong tuc_link; 1510 target_stack_t tuc_stack; 1511 struct target_sigcontext tuc_mcontext; 1512 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1513 char __unused[128 - sizeof(target_sigset_t)]; 1514 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8))); 1515 }; 1516 1517 struct target_user_vfp { 1518 uint64_t fpregs[32]; 1519 abi_ulong fpscr; 1520 }; 1521 1522 struct target_user_vfp_exc { 1523 abi_ulong fpexc; 1524 abi_ulong fpinst; 1525 abi_ulong fpinst2; 1526 }; 1527 1528 struct target_vfp_sigframe { 1529 abi_ulong magic; 1530 abi_ulong size; 1531 struct target_user_vfp ufp; 1532 struct target_user_vfp_exc ufp_exc; 1533 } __attribute__((__aligned__(8))); 1534 1535 struct target_iwmmxt_sigframe { 1536 abi_ulong magic; 1537 abi_ulong size; 1538 uint64_t regs[16]; 1539 /* Note that not all the coprocessor control registers are stored here */ 1540 uint32_t wcssf; 1541 uint32_t wcasf; 1542 uint32_t wcgr0; 1543 uint32_t wcgr1; 1544 uint32_t wcgr2; 1545 uint32_t wcgr3; 1546 } __attribute__((__aligned__(8))); 1547 1548 #define TARGET_VFP_MAGIC 0x56465001 1549 #define TARGET_IWMMXT_MAGIC 0x12ef842a 1550 1551 struct sigframe_v1 1552 { 1553 struct target_sigcontext sc; 1554 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 1555 abi_ulong retcode; 1556 }; 1557 1558 struct sigframe_v2 1559 { 1560 struct target_ucontext_v2 uc; 1561 abi_ulong retcode; 1562 }; 1563 1564 struct rt_sigframe_v1 1565 { 1566 abi_ulong pinfo; 1567 abi_ulong puc; 1568 struct target_siginfo info; 1569 struct target_ucontext_v1 uc; 1570 abi_ulong retcode; 1571 }; 1572 1573 struct rt_sigframe_v2 1574 { 1575 struct target_siginfo info; 1576 struct target_ucontext_v2 uc; 1577 abi_ulong retcode; 1578 }; 1579 1580 #define TARGET_CONFIG_CPU_32 1 1581 1582 /* 1583 * For ARM syscalls, we encode the syscall number into the instruction. 1584 */ 1585 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) 1586 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) 1587 1588 /* 1589 * For Thumb syscalls, we pass the syscall number via r7. We therefore 1590 * need two 16-bit instructions. 
1591 */ 1592 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 1593 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 1594 1595 static const abi_ulong retcodes[4] = { 1596 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 1597 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 1598 }; 1599 1600 1601 static inline int valid_user_regs(CPUARMState *regs) 1602 { 1603 return 1; 1604 } 1605 1606 static void 1607 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1608 CPUARMState *env, abi_ulong mask) 1609 { 1610 __put_user(env->regs[0], &sc->arm_r0); 1611 __put_user(env->regs[1], &sc->arm_r1); 1612 __put_user(env->regs[2], &sc->arm_r2); 1613 __put_user(env->regs[3], &sc->arm_r3); 1614 __put_user(env->regs[4], &sc->arm_r4); 1615 __put_user(env->regs[5], &sc->arm_r5); 1616 __put_user(env->regs[6], &sc->arm_r6); 1617 __put_user(env->regs[7], &sc->arm_r7); 1618 __put_user(env->regs[8], &sc->arm_r8); 1619 __put_user(env->regs[9], &sc->arm_r9); 1620 __put_user(env->regs[10], &sc->arm_r10); 1621 __put_user(env->regs[11], &sc->arm_fp); 1622 __put_user(env->regs[12], &sc->arm_ip); 1623 __put_user(env->regs[13], &sc->arm_sp); 1624 __put_user(env->regs[14], &sc->arm_lr); 1625 __put_user(env->regs[15], &sc->arm_pc); 1626 #ifdef TARGET_CONFIG_CPU_32 1627 __put_user(cpsr_read(env), &sc->arm_cpsr); 1628 #endif 1629 1630 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no); 1631 __put_user(/* current->thread.error_code */ 0, &sc->error_code); 1632 __put_user(/* current->thread.address */ 0, &sc->fault_address); 1633 __put_user(mask, &sc->oldmask); 1634 } 1635 1636 static inline abi_ulong 1637 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) 1638 { 1639 unsigned long sp = regs->regs[13]; 1640 1641 /* 1642 * This is the X/Open sanctioned signal stack switching. 1643 */ 1644 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1645 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1646 } 1647 /* 1648 * ATPCS B01 mandates 8-byte alignment 1649 */ 1650 return (sp - framesize) & ~7; 1651 } 1652 1653 static void 1654 setup_return(CPUARMState *env, struct target_sigaction *ka, 1655 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) 1656 { 1657 abi_ulong handler = ka->_sa_handler; 1658 abi_ulong retcode; 1659 int thumb = handler & 1; 1660 uint32_t cpsr = cpsr_read(env); 1661 1662 cpsr &= ~CPSR_IT; 1663 if (thumb) { 1664 cpsr |= CPSR_T; 1665 } else { 1666 cpsr &= ~CPSR_T; 1667 } 1668 1669 if (ka->sa_flags & TARGET_SA_RESTORER) { 1670 retcode = ka->sa_restorer; 1671 } else { 1672 unsigned int idx = thumb; 1673 1674 if (ka->sa_flags & TARGET_SA_SIGINFO) { 1675 idx += 2; 1676 } 1677 1678 __put_user(retcodes[idx], rc); 1679 1680 retcode = rc_addr + thumb; 1681 } 1682 1683 env->regs[0] = usig; 1684 env->regs[13] = frame_addr; 1685 env->regs[14] = retcode; 1686 env->regs[15] = handler & (thumb ? 
~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}

static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe+1);
}

static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.
*/ 1745 regspace = uc->tuc_regspace; 1746 if (arm_feature(env, ARM_FEATURE_VFP)) { 1747 regspace = setup_sigframe_v2_vfp(regspace, env); 1748 } 1749 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 1750 regspace = setup_sigframe_v2_iwmmxt(regspace, env); 1751 } 1752 1753 /* Write terminating magic word */ 1754 __put_user(0, regspace); 1755 1756 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1757 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]); 1758 } 1759 } 1760 1761 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 1762 static void setup_frame_v1(int usig, struct target_sigaction *ka, 1763 target_sigset_t *set, CPUARMState *regs) 1764 { 1765 struct sigframe_v1 *frame; 1766 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1767 int i; 1768 1769 trace_user_setup_frame(regs, frame_addr); 1770 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1771 return; 1772 } 1773 1774 setup_sigcontext(&frame->sc, regs, set->sig[0]); 1775 1776 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1777 __put_user(set->sig[i], &frame->extramask[i - 1]); 1778 } 1779 1780 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1781 frame_addr + offsetof(struct sigframe_v1, retcode)); 1782 1783 unlock_user_struct(frame, frame_addr, 1); 1784 } 1785 1786 static void setup_frame_v2(int usig, struct target_sigaction *ka, 1787 target_sigset_t *set, CPUARMState *regs) 1788 { 1789 struct sigframe_v2 *frame; 1790 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1791 1792 trace_user_setup_frame(regs, frame_addr); 1793 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1794 return; 1795 } 1796 1797 setup_sigframe_v2(&frame->uc, set, regs); 1798 1799 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1800 frame_addr + offsetof(struct sigframe_v2, retcode)); 1801 1802 unlock_user_struct(frame, frame_addr, 1); 1803 } 1804 1805 static void setup_frame(int usig, struct target_sigaction *ka, 1806 target_sigset_t *set, CPUARMState *regs) 1807 { 1808 if (get_osversion() >= 0x020612) { 1809 setup_frame_v2(usig, ka, set, regs); 1810 } else { 1811 setup_frame_v1(usig, ka, set, regs); 1812 } 1813 } 1814 1815 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 1816 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, 1817 target_siginfo_t *info, 1818 target_sigset_t *set, CPUARMState *env) 1819 { 1820 struct rt_sigframe_v1 *frame; 1821 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1822 struct target_sigaltstack stack; 1823 int i; 1824 abi_ulong info_addr, uc_addr; 1825 1826 trace_user_setup_rt_frame(env, frame_addr); 1827 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1828 return /* 1 */; 1829 } 1830 1831 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); 1832 __put_user(info_addr, &frame->pinfo); 1833 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); 1834 __put_user(uc_addr, &frame->puc); 1835 tswap_siginfo(&frame->info, info); 1836 1837 /* Clear all the bits of the ucontext we don't use. 
*/ 1838 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 1839 1840 memset(&stack, 0, sizeof(stack)); 1841 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1842 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1843 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1844 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1845 1846 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 1847 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1848 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1849 } 1850 1851 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1852 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 1853 1854 env->regs[1] = info_addr; 1855 env->regs[2] = uc_addr; 1856 1857 unlock_user_struct(frame, frame_addr, 1); 1858 } 1859 1860 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 1861 target_siginfo_t *info, 1862 target_sigset_t *set, CPUARMState *env) 1863 { 1864 struct rt_sigframe_v2 *frame; 1865 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1866 abi_ulong info_addr, uc_addr; 1867 1868 trace_user_setup_rt_frame(env, frame_addr); 1869 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1870 return /* 1 */; 1871 } 1872 1873 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 1874 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 1875 tswap_siginfo(&frame->info, info); 1876 1877 setup_sigframe_v2(&frame->uc, set, env); 1878 1879 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1880 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 1881 1882 env->regs[1] = info_addr; 1883 env->regs[2] = uc_addr; 1884 1885 unlock_user_struct(frame, frame_addr, 1); 1886 } 1887 1888 static void setup_rt_frame(int usig, struct target_sigaction *ka, 1889 target_siginfo_t *info, 1890 target_sigset_t *set, CPUARMState *env) 1891 { 1892 if (get_osversion() >= 0x020612) { 1893 setup_rt_frame_v2(usig, ka, info, set, env); 1894 } else { 1895 setup_rt_frame_v1(usig, ka, info, set, env); 1896 } 1897 } 1898 1899 static int 1900 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 1901 { 1902 int err = 0; 1903 uint32_t cpsr; 1904 1905 __get_user(env->regs[0], &sc->arm_r0); 1906 __get_user(env->regs[1], &sc->arm_r1); 1907 __get_user(env->regs[2], &sc->arm_r2); 1908 __get_user(env->regs[3], &sc->arm_r3); 1909 __get_user(env->regs[4], &sc->arm_r4); 1910 __get_user(env->regs[5], &sc->arm_r5); 1911 __get_user(env->regs[6], &sc->arm_r6); 1912 __get_user(env->regs[7], &sc->arm_r7); 1913 __get_user(env->regs[8], &sc->arm_r8); 1914 __get_user(env->regs[9], &sc->arm_r9); 1915 __get_user(env->regs[10], &sc->arm_r10); 1916 __get_user(env->regs[11], &sc->arm_fp); 1917 __get_user(env->regs[12], &sc->arm_ip); 1918 __get_user(env->regs[13], &sc->arm_sp); 1919 __get_user(env->regs[14], &sc->arm_lr); 1920 __get_user(env->regs[15], &sc->arm_pc); 1921 #ifdef TARGET_CONFIG_CPU_32 1922 __get_user(cpsr, &sc->arm_cpsr); 1923 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 1924 #endif 1925 1926 err |= !valid_user_regs(env); 1927 1928 return err; 1929 } 1930 1931 static long do_sigreturn_v1(CPUARMState *env) 1932 { 1933 abi_ulong frame_addr; 1934 struct sigframe_v1 *frame = NULL; 1935 target_sigset_t set; 1936 sigset_t host_set; 1937 int i; 1938 1939 /* 1940 * Since we stacked the signal on a 64-bit boundary, 1941 * then 'sp' should be word aligned here. If it's 1942 * not, then the user is trying to mess with us. 
1943 */ 1944 frame_addr = env->regs[13]; 1945 trace_user_do_sigreturn(env, frame_addr); 1946 if (frame_addr & 7) { 1947 goto badframe; 1948 } 1949 1950 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1951 goto badframe; 1952 } 1953 1954 __get_user(set.sig[0], &frame->sc.oldmask); 1955 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1956 __get_user(set.sig[i], &frame->extramask[i - 1]); 1957 } 1958 1959 target_to_host_sigset_internal(&host_set, &set); 1960 set_sigmask(&host_set); 1961 1962 if (restore_sigcontext(env, &frame->sc)) { 1963 goto badframe; 1964 } 1965 1966 #if 0 1967 /* Send SIGTRAP if we're single-stepping */ 1968 if (ptrace_cancel_bpt(current)) 1969 send_sig(SIGTRAP, current, 1); 1970 #endif 1971 unlock_user_struct(frame, frame_addr, 0); 1972 return -TARGET_QEMU_ESIGRETURN; 1973 1974 badframe: 1975 force_sig(TARGET_SIGSEGV /* , current */); 1976 return 0; 1977 } 1978 1979 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) 1980 { 1981 int i; 1982 abi_ulong magic, sz; 1983 uint32_t fpscr, fpexc; 1984 struct target_vfp_sigframe *vfpframe; 1985 vfpframe = (struct target_vfp_sigframe *)regspace; 1986 1987 __get_user(magic, &vfpframe->magic); 1988 __get_user(sz, &vfpframe->size); 1989 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) { 1990 return 0; 1991 } 1992 for (i = 0; i < 32; i++) { 1993 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 1994 } 1995 __get_user(fpscr, &vfpframe->ufp.fpscr); 1996 vfp_set_fpscr(env, fpscr); 1997 __get_user(fpexc, &vfpframe->ufp_exc.fpexc); 1998 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid 1999 * and the exception flag is cleared 2000 */ 2001 fpexc |= (1 << 30); 2002 fpexc &= ~((1 << 31) | (1 << 28)); 2003 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc; 2004 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2005 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2006 return (abi_ulong*)(vfpframe + 1); 2007 } 2008 2009 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, 2010 abi_ulong *regspace) 2011 { 2012 int i; 2013 abi_ulong magic, sz; 2014 struct target_iwmmxt_sigframe *iwmmxtframe; 2015 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2016 2017 __get_user(magic, &iwmmxtframe->magic); 2018 __get_user(sz, &iwmmxtframe->size); 2019 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { 2020 return 0; 2021 } 2022 for (i = 0; i < 16; i++) { 2023 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2024 } 2025 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2026 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf); 2027 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2028 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2029 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2030 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2031 return (abi_ulong*)(iwmmxtframe + 1); 2032 } 2033 2034 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr, 2035 struct target_ucontext_v2 *uc) 2036 { 2037 sigset_t host_set; 2038 abi_ulong *regspace; 2039 2040 target_to_host_sigset(&host_set, &uc->tuc_sigmask); 2041 set_sigmask(&host_set); 2042 2043 if (restore_sigcontext(env, &uc->tuc_mcontext)) 2044 return 1; 2045 2046 /* Restore coprocessor signal frame */ 2047 regspace = uc->tuc_regspace; 2048 if (arm_feature(env, ARM_FEATURE_VFP)) { 2049 regspace = restore_sigframe_v2_vfp(env, regspace); 2050 if
(!regspace) { 2051 return 1; 2052 } 2053 } 2054 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2055 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2056 if (!regspace) { 2057 return 1; 2058 } 2059 } 2060 2061 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2062 return 1; 2063 2064 #if 0 2065 /* Send SIGTRAP if we're single-stepping */ 2066 if (ptrace_cancel_bpt(current)) 2067 send_sig(SIGTRAP, current, 1); 2068 #endif 2069 2070 return 0; 2071 } 2072 2073 static long do_sigreturn_v2(CPUARMState *env) 2074 { 2075 abi_ulong frame_addr; 2076 struct sigframe_v2 *frame = NULL; 2077 2078 /* 2079 * Since we stacked the signal on a 64-bit boundary, 2080 * then 'sp' should be word aligned here. If it's 2081 * not, then the user is trying to mess with us. 2082 */ 2083 frame_addr = env->regs[13]; 2084 trace_user_do_sigreturn(env, frame_addr); 2085 if (frame_addr & 7) { 2086 goto badframe; 2087 } 2088 2089 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2090 goto badframe; 2091 } 2092 2093 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2094 goto badframe; 2095 } 2096 2097 unlock_user_struct(frame, frame_addr, 0); 2098 return -TARGET_QEMU_ESIGRETURN; 2099 2100 badframe: 2101 unlock_user_struct(frame, frame_addr, 0); 2102 force_sig(TARGET_SIGSEGV /* , current */); 2103 return 0; 2104 } 2105 2106 long do_sigreturn(CPUARMState *env) 2107 { 2108 if (get_osversion() >= 0x020612) { 2109 return do_sigreturn_v2(env); 2110 } else { 2111 return do_sigreturn_v1(env); 2112 } 2113 } 2114 2115 static long do_rt_sigreturn_v1(CPUARMState *env) 2116 { 2117 abi_ulong frame_addr; 2118 struct rt_sigframe_v1 *frame = NULL; 2119 sigset_t host_set; 2120 2121 /* 2122 * Since we stacked the signal on a 64-bit boundary, 2123 * then 'sp' should be word aligned here. If it's 2124 * not, then the user is trying to mess with us. 2125 */ 2126 frame_addr = env->regs[13]; 2127 trace_user_do_rt_sigreturn(env, frame_addr); 2128 if (frame_addr & 7) { 2129 goto badframe; 2130 } 2131 2132 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2133 goto badframe; 2134 } 2135 2136 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2137 set_sigmask(&host_set); 2138 2139 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2140 goto badframe; 2141 } 2142 2143 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2144 goto badframe; 2145 2146 #if 0 2147 /* Send SIGTRAP if we're single-stepping */ 2148 if (ptrace_cancel_bpt(current)) 2149 send_sig(SIGTRAP, current, 1); 2150 #endif 2151 unlock_user_struct(frame, frame_addr, 0); 2152 return -TARGET_QEMU_ESIGRETURN; 2153 2154 badframe: 2155 unlock_user_struct(frame, frame_addr, 0); 2156 force_sig(TARGET_SIGSEGV /* , current */); 2157 return 0; 2158 } 2159 2160 static long do_rt_sigreturn_v2(CPUARMState *env) 2161 { 2162 abi_ulong frame_addr; 2163 struct rt_sigframe_v2 *frame = NULL; 2164 2165 /* 2166 * Since we stacked the signal on a 64-bit boundary, 2167 * then 'sp' should be word aligned here. If it's 2168 * not, then the user is trying to mess with us. 
2169 */ 2170 frame_addr = env->regs[13]; 2171 trace_user_do_rt_sigreturn(env, frame_addr); 2172 if (frame_addr & 7) { 2173 goto badframe; 2174 } 2175 2176 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2177 goto badframe; 2178 } 2179 2180 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2181 goto badframe; 2182 } 2183 2184 unlock_user_struct(frame, frame_addr, 0); 2185 return -TARGET_QEMU_ESIGRETURN; 2186 2187 badframe: 2188 unlock_user_struct(frame, frame_addr, 0); 2189 force_sig(TARGET_SIGSEGV /* , current */); 2190 return 0; 2191 } 2192 2193 long do_rt_sigreturn(CPUARMState *env) 2194 { 2195 if (get_osversion() >= 0x020612) { 2196 return do_rt_sigreturn_v2(env); 2197 } else { 2198 return do_rt_sigreturn_v1(env); 2199 } 2200 } 2201 2202 #elif defined(TARGET_SPARC) 2203 2204 #define __SUNOS_MAXWIN 31 2205 2206 /* This is what SunOS does, so shall I. */ 2207 struct target_sigcontext { 2208 abi_ulong sigc_onstack; /* state to restore */ 2209 2210 abi_ulong sigc_mask; /* sigmask to restore */ 2211 abi_ulong sigc_sp; /* stack pointer */ 2212 abi_ulong sigc_pc; /* program counter */ 2213 abi_ulong sigc_npc; /* next program counter */ 2214 abi_ulong sigc_psr; /* for condition codes etc */ 2215 abi_ulong sigc_g1; /* User uses these two registers */ 2216 abi_ulong sigc_o0; /* within the trampoline code. */ 2217 2218 /* Now comes information regarding the users window set 2219 * at the time of the signal. 2220 */ 2221 abi_ulong sigc_oswins; /* outstanding windows */ 2222 2223 /* stack ptrs for each regwin buf */ 2224 char *sigc_spbuf[__SUNOS_MAXWIN]; 2225 2226 /* Windows to restore after signal */ 2227 struct { 2228 abi_ulong locals[8]; 2229 abi_ulong ins[8]; 2230 } sigc_wbuf[__SUNOS_MAXWIN]; 2231 }; 2232 /* A Sparc stack frame */ 2233 struct sparc_stackf { 2234 abi_ulong locals[8]; 2235 abi_ulong ins[8]; 2236 /* It's simpler to treat fp and callers_pc as elements of ins[] 2237 * since we never need to access them ourselves. 
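* (In the SPARC convention the register window save area is sixteen words, eight locals followed by eight ins, with %i6 doubling as the frame pointer and %i7 holding the caller's return address; that is why ins[6] and ins[7] can stand in for fp and callers_pc here.)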
2238 */ 2239 char *structptr; 2240 abi_ulong xargs[6]; 2241 abi_ulong xxargs[1]; 2242 }; 2243 2244 typedef struct { 2245 struct { 2246 abi_ulong psr; 2247 abi_ulong pc; 2248 abi_ulong npc; 2249 abi_ulong y; 2250 abi_ulong u_regs[16]; /* globals and ins */ 2251 } si_regs; 2252 int si_mask; 2253 } __siginfo_t; 2254 2255 typedef struct { 2256 abi_ulong si_float_regs[32]; 2257 unsigned long si_fsr; 2258 unsigned long si_fpqdepth; 2259 struct { 2260 unsigned long *insn_addr; 2261 unsigned long insn; 2262 } si_fpqueue [16]; 2263 } qemu_siginfo_fpu_t; 2264 2265 2266 struct target_signal_frame { 2267 struct sparc_stackf ss; 2268 __siginfo_t info; 2269 abi_ulong fpu_save; 2270 abi_ulong insns[2] __attribute__ ((aligned (8))); 2271 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2272 abi_ulong extra_size; /* Should be 0 */ 2273 qemu_siginfo_fpu_t fpu_state; 2274 }; 2275 struct target_rt_signal_frame { 2276 struct sparc_stackf ss; 2277 siginfo_t info; 2278 abi_ulong regs[20]; 2279 sigset_t mask; 2280 abi_ulong fpu_save; 2281 unsigned int insns[2]; 2282 stack_t stack; 2283 unsigned int extra_size; /* Should be 0 */ 2284 qemu_siginfo_fpu_t fpu_state; 2285 }; 2286 2287 #define UREG_O0 16 2288 #define UREG_O6 22 2289 #define UREG_I0 0 2290 #define UREG_I1 1 2291 #define UREG_I2 2 2292 #define UREG_I3 3 2293 #define UREG_I4 4 2294 #define UREG_I5 5 2295 #define UREG_I6 6 2296 #define UREG_I7 7 2297 #define UREG_L0 8 2298 #define UREG_FP UREG_I6 2299 #define UREG_SP UREG_O6 2300 2301 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2302 CPUSPARCState *env, 2303 unsigned long framesize) 2304 { 2305 abi_ulong sp; 2306 2307 sp = env->regwptr[UREG_FP]; 2308 2309 /* This is the X/Open sanctioned signal stack switching. */ 2310 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2311 if (!on_sig_stack(sp) 2312 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2313 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2314 } 2315 } 2316 return sp - framesize; 2317 } 2318 2319 static int 2320 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2321 { 2322 int err = 0, i; 2323 2324 __put_user(env->psr, &si->si_regs.psr); 2325 __put_user(env->pc, &si->si_regs.pc); 2326 __put_user(env->npc, &si->si_regs.npc); 2327 __put_user(env->y, &si->si_regs.y); 2328 for (i=0; i < 8; i++) { 2329 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2330 } 2331 for (i=0; i < 8; i++) { 2332 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2333 } 2334 __put_user(mask, &si->si_mask); 2335 return err; 2336 } 2337 2338 #if 0 2339 static int 2340 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2341 CPUSPARCState *env, unsigned long mask) 2342 { 2343 int err = 0; 2344 2345 __put_user(mask, &sc->sigc_mask); 2346 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2347 __put_user(env->pc, &sc->sigc_pc); 2348 __put_user(env->npc, &sc->sigc_npc); 2349 __put_user(env->psr, &sc->sigc_psr); 2350 __put_user(env->gregs[1], &sc->sigc_g1); 2351 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2352 2353 return err; 2354 } 2355 #endif 2356 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2357 2358 static void setup_frame(int sig, struct target_sigaction *ka, 2359 target_sigset_t *set, CPUSPARCState *env) 2360 { 2361 abi_ulong sf_addr; 2362 struct target_signal_frame *sf; 2363 int sigframe_size, err, i; 2364 2365 /* 1. 
Make sure everything is clean */ 2366 //synchronize_user_stack(); 2367 2368 sigframe_size = NF_ALIGNEDSZ; 2369 sf_addr = get_sigframe(ka, env, sigframe_size); 2370 trace_user_setup_frame(env, sf_addr); 2371 2372 sf = lock_user(VERIFY_WRITE, sf_addr, 2373 sizeof(struct target_signal_frame), 0); 2374 if (!sf) { 2375 goto sigsegv; 2376 } 2377 #if 0 2378 if (invalid_frame_pointer(sf, sigframe_size)) 2379 goto sigill_and_return; 2380 #endif 2381 /* 2. Save the current process state */ 2382 err = setup___siginfo(&sf->info, env, set->sig[0]); 2383 __put_user(0, &sf->extra_size); 2384 2385 //save_fpu_state(regs, &sf->fpu_state); 2386 //__put_user(&sf->fpu_state, &sf->fpu_save); 2387 2388 __put_user(set->sig[0], &sf->info.si_mask); 2389 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2390 __put_user(set->sig[i + 1], &sf->extramask[i]); 2391 } 2392 2393 for (i = 0; i < 8; i++) { 2394 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2395 } 2396 for (i = 0; i < 8; i++) { 2397 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2398 } 2399 if (err) 2400 goto sigsegv; 2401 2402 /* 3. signal handler back-trampoline and parameters */ 2403 env->regwptr[UREG_FP] = sf_addr; 2404 env->regwptr[UREG_I0] = sig; 2405 env->regwptr[UREG_I1] = sf_addr + 2406 offsetof(struct target_signal_frame, info); 2407 env->regwptr[UREG_I2] = sf_addr + 2408 offsetof(struct target_signal_frame, info); 2409 2410 /* 4. signal handler */ 2411 env->pc = ka->_sa_handler; 2412 env->npc = (env->pc + 4); 2413 /* 5. return to kernel instructions */ 2414 if (ka->sa_restorer) { 2415 env->regwptr[UREG_I7] = ka->sa_restorer; 2416 } else { 2417 uint32_t val32; 2418 2419 env->regwptr[UREG_I7] = sf_addr + 2420 offsetof(struct target_signal_frame, insns) - 2 * 4; 2421 2422 /* mov __NR_sigreturn, %g1 */ 2423 val32 = 0x821020d8; 2424 __put_user(val32, &sf->insns[0]); 2425 2426 /* t 0x10 */ 2427 val32 = 0x91d02010; 2428 __put_user(val32, &sf->insns[1]); 2429 if (err) 2430 goto sigsegv; 2431 2432 /* Flush instruction space. */ 2433 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2434 // tb_flush(env); 2435 } 2436 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2437 return; 2438 #if 0 2439 sigill_and_return: 2440 force_sig(TARGET_SIGILL); 2441 #endif 2442 sigsegv: 2443 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2444 force_sig(TARGET_SIGSEGV); 2445 } 2446 2447 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2448 target_siginfo_t *info, 2449 target_sigset_t *set, CPUSPARCState *env) 2450 { 2451 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2452 } 2453 2454 long do_sigreturn(CPUSPARCState *env) 2455 { 2456 abi_ulong sf_addr; 2457 struct target_signal_frame *sf; 2458 uint32_t up_psr, pc, npc; 2459 target_sigset_t set; 2460 sigset_t host_set; 2461 int err=0, i; 2462 2463 sf_addr = env->regwptr[UREG_FP]; 2464 trace_user_do_sigreturn(env, sf_addr); 2465 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2466 goto segv_and_exit; 2467 } 2468 2469 /* 1. Make sure we are not getting garbage from the user */ 2470 2471 if (sf_addr & 3) 2472 goto segv_and_exit; 2473 2474 __get_user(pc, &sf->info.si_regs.pc); 2475 __get_user(npc, &sf->info.si_regs.npc); 2476 2477 if ((pc | npc) & 3) { 2478 goto segv_and_exit; 2479 } 2480 2481 /* 2. Restore the state */ 2482 __get_user(up_psr, &sf->info.si_regs.psr); 2483 2484 /* User can only change condition codes and FPU enabling in %psr. 
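(Concretely, the assignment below takes only the PSR_ICC bits, the integer condition codes, from the user-supplied value and keeps every other field of %psr, including the privileged ones, from the current env->psr; PSR_EF is left commented out, presumably because FPU save/restore is not implemented here yet, as the FIXME further down notes.)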
*/ 2485 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2486 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2487 2488 env->pc = pc; 2489 env->npc = npc; 2490 __get_user(env->y, &sf->info.si_regs.y); 2491 for (i=0; i < 8; i++) { 2492 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2493 } 2494 for (i=0; i < 8; i++) { 2495 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2496 } 2497 2498 /* FIXME: implement FPU save/restore: 2499 * __get_user(fpu_save, &sf->fpu_save); 2500 * if (fpu_save) 2501 * err |= restore_fpu_state(env, fpu_save); 2502 */ 2503 2504 /* This is pretty much atomic, no amount locking would prevent 2505 * the races which exist anyways. 2506 */ 2507 __get_user(set.sig[0], &sf->info.si_mask); 2508 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2509 __get_user(set.sig[i], &sf->extramask[i - 1]); 2510 } 2511 2512 target_to_host_sigset_internal(&host_set, &set); 2513 set_sigmask(&host_set); 2514 2515 if (err) { 2516 goto segv_and_exit; 2517 } 2518 unlock_user_struct(sf, sf_addr, 0); 2519 return -TARGET_QEMU_ESIGRETURN; 2520 2521 segv_and_exit: 2522 unlock_user_struct(sf, sf_addr, 0); 2523 force_sig(TARGET_SIGSEGV); 2524 } 2525 2526 long do_rt_sigreturn(CPUSPARCState *env) 2527 { 2528 trace_user_do_rt_sigreturn(env, 0); 2529 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2530 return -TARGET_ENOSYS; 2531 } 2532 2533 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2534 #define MC_TSTATE 0 2535 #define MC_PC 1 2536 #define MC_NPC 2 2537 #define MC_Y 3 2538 #define MC_G1 4 2539 #define MC_G2 5 2540 #define MC_G3 6 2541 #define MC_G4 7 2542 #define MC_G5 8 2543 #define MC_G6 9 2544 #define MC_G7 10 2545 #define MC_O0 11 2546 #define MC_O1 12 2547 #define MC_O2 13 2548 #define MC_O3 14 2549 #define MC_O4 15 2550 #define MC_O5 16 2551 #define MC_O6 17 2552 #define MC_O7 18 2553 #define MC_NGREG 19 2554 2555 typedef abi_ulong target_mc_greg_t; 2556 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2557 2558 struct target_mc_fq { 2559 abi_ulong *mcfq_addr; 2560 uint32_t mcfq_insn; 2561 }; 2562 2563 struct target_mc_fpu { 2564 union { 2565 uint32_t sregs[32]; 2566 uint64_t dregs[32]; 2567 //uint128_t qregs[16]; 2568 } mcfpu_fregs; 2569 abi_ulong mcfpu_fsr; 2570 abi_ulong mcfpu_fprs; 2571 abi_ulong mcfpu_gsr; 2572 struct target_mc_fq *mcfpu_fq; 2573 unsigned char mcfpu_qcnt; 2574 unsigned char mcfpu_qentsz; 2575 unsigned char mcfpu_enab; 2576 }; 2577 typedef struct target_mc_fpu target_mc_fpu_t; 2578 2579 typedef struct { 2580 target_mc_gregset_t mc_gregs; 2581 target_mc_greg_t mc_fp; 2582 target_mc_greg_t mc_i7; 2583 target_mc_fpu_t mc_fpregs; 2584 } target_mcontext_t; 2585 2586 struct target_ucontext { 2587 struct target_ucontext *tuc_link; 2588 abi_ulong tuc_flags; 2589 target_sigset_t tuc_sigmask; 2590 target_mcontext_t tuc_mcontext; 2591 }; 2592 2593 /* A V9 register window */ 2594 struct target_reg_window { 2595 abi_ulong locals[8]; 2596 abi_ulong ins[8]; 2597 }; 2598 2599 #define TARGET_STACK_BIAS 2047 2600 2601 /* {set, get}context() needed for 64-bit SparcLinux userland. 
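(These are not part of signal delivery itself; they back the guest's getcontext/setcontext traps, dispatched from the cpu loop rather than from a sigreturn, with the address of the target ucontext taken below from the first argument register, regwptr[UREG_I0].)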
*/ 2602 void sparc64_set_context(CPUSPARCState *env) 2603 { 2604 abi_ulong ucp_addr; 2605 struct target_ucontext *ucp; 2606 target_mc_gregset_t *grp; 2607 abi_ulong pc, npc, tstate; 2608 abi_ulong fp, i7, w_addr; 2609 unsigned int i; 2610 2611 ucp_addr = env->regwptr[UREG_I0]; 2612 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2613 goto do_sigsegv; 2614 } 2615 grp = &ucp->tuc_mcontext.mc_gregs; 2616 __get_user(pc, &((*grp)[MC_PC])); 2617 __get_user(npc, &((*grp)[MC_NPC])); 2618 if ((pc | npc) & 3) { 2619 goto do_sigsegv; 2620 } 2621 if (env->regwptr[UREG_I1]) { 2622 target_sigset_t target_set; 2623 sigset_t set; 2624 2625 if (TARGET_NSIG_WORDS == 1) { 2626 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2627 } else { 2628 abi_ulong *src, *dst; 2629 src = ucp->tuc_sigmask.sig; 2630 dst = target_set.sig; 2631 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2632 __get_user(*dst, src); 2633 } 2634 } 2635 target_to_host_sigset_internal(&set, &target_set); 2636 set_sigmask(&set); 2637 } 2638 env->pc = pc; 2639 env->npc = npc; 2640 __get_user(env->y, &((*grp)[MC_Y])); 2641 __get_user(tstate, &((*grp)[MC_TSTATE])); 2642 env->asi = (tstate >> 24) & 0xff; 2643 cpu_put_ccr(env, tstate >> 32); 2644 cpu_put_cwp64(env, tstate & 0x1f); 2645 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2646 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2647 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2648 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2649 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2650 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2651 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2652 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2653 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2654 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2655 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2656 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2657 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2658 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2659 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2660 2661 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2662 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2663 2664 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2665 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2666 abi_ulong) != 0) { 2667 goto do_sigsegv; 2668 } 2669 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2670 abi_ulong) != 0) { 2671 goto do_sigsegv; 2672 } 2673 /* FIXME this does not match how the kernel handles the FPU in 2674 * its sparc64_set_context implementation. 
In particular the FPU 2675 * is only restored if fenab is non-zero in: 2676 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2677 */ 2678 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2679 { 2680 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2681 for (i = 0; i < 64; i++, src++) { 2682 if (i & 1) { 2683 __get_user(env->fpr[i/2].l.lower, src); 2684 } else { 2685 __get_user(env->fpr[i/2].l.upper, src); 2686 } 2687 } 2688 } 2689 __get_user(env->fsr, 2690 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2691 __get_user(env->gsr, 2692 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2693 unlock_user_struct(ucp, ucp_addr, 0); 2694 return; 2695 do_sigsegv: 2696 unlock_user_struct(ucp, ucp_addr, 0); 2697 force_sig(TARGET_SIGSEGV); 2698 } 2699 2700 void sparc64_get_context(CPUSPARCState *env) 2701 { 2702 abi_ulong ucp_addr; 2703 struct target_ucontext *ucp; 2704 target_mc_gregset_t *grp; 2705 target_mcontext_t *mcp; 2706 abi_ulong fp, i7, w_addr; 2707 int err; 2708 unsigned int i; 2709 target_sigset_t target_set; 2710 sigset_t set; 2711 2712 ucp_addr = env->regwptr[UREG_I0]; 2713 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2714 goto do_sigsegv; 2715 } 2716 2717 mcp = &ucp->tuc_mcontext; 2718 grp = &mcp->mc_gregs; 2719 2720 /* Skip over the trap instruction, first. */ 2721 env->pc = env->npc; 2722 env->npc += 4; 2723 2724 /* If we're only reading the signal mask then do_sigprocmask() 2725 * is guaranteed not to fail, which is important because we don't 2726 * have any way to signal a failure or restart this operation since 2727 * this is not a normal syscall. 2728 */ 2729 err = do_sigprocmask(0, NULL, &set); 2730 assert(err == 0); 2731 host_to_target_sigset_internal(&target_set, &set); 2732 if (TARGET_NSIG_WORDS == 1) { 2733 __put_user(target_set.sig[0], 2734 (abi_ulong *)&ucp->tuc_sigmask); 2735 } else { 2736 abi_ulong *src, *dst; 2737 src = target_set.sig; 2738 dst = ucp->tuc_sigmask.sig; 2739 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2740 __put_user(*src, dst); 2741 } 2742 if (err) 2743 goto do_sigsegv; 2744 } 2745 2746 /* XXX: tstate must be saved properly */ 2747 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2748 __put_user(env->pc, &((*grp)[MC_PC])); 2749 __put_user(env->npc, &((*grp)[MC_NPC])); 2750 __put_user(env->y, &((*grp)[MC_Y])); 2751 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2752 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2753 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2754 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2755 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2756 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2757 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2758 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2759 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2760 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2761 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2762 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2763 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2764 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2765 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2766 2767 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2768 fp = i7 = 0; 2769 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2770 abi_ulong) != 0) { 2771 goto do_sigsegv; 2772 } 2773 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2774 abi_ulong) != 0) { 2775 goto do_sigsegv; 2776 } 2777 __put_user(fp, &(mcp->mc_fp)); 2778 __put_user(i7, 
&(mcp->mc_i7)); 2779 2780 { 2781 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2782 for (i = 0; i < 64; i++, dst++) { 2783 if (i & 1) { 2784 __put_user(env->fpr[i/2].l.lower, dst); 2785 } else { 2786 __put_user(env->fpr[i/2].l.upper, dst); 2787 } 2788 } 2789 } 2790 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2791 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2792 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2793 2794 if (err) 2795 goto do_sigsegv; 2796 unlock_user_struct(ucp, ucp_addr, 1); 2797 return; 2798 do_sigsegv: 2799 unlock_user_struct(ucp, ucp_addr, 1); 2800 force_sig(TARGET_SIGSEGV); 2801 } 2802 #endif 2803 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2804 2805 # if defined(TARGET_ABI_MIPSO32) 2806 struct target_sigcontext { 2807 uint32_t sc_regmask; /* Unused */ 2808 uint32_t sc_status; 2809 uint64_t sc_pc; 2810 uint64_t sc_regs[32]; 2811 uint64_t sc_fpregs[32]; 2812 uint32_t sc_ownedfp; /* Unused */ 2813 uint32_t sc_fpc_csr; 2814 uint32_t sc_fpc_eir; /* Unused */ 2815 uint32_t sc_used_math; 2816 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2817 uint32_t pad0; 2818 uint64_t sc_mdhi; 2819 uint64_t sc_mdlo; 2820 target_ulong sc_hi1; /* Was sc_cause */ 2821 target_ulong sc_lo1; /* Was sc_badvaddr */ 2822 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2823 target_ulong sc_lo2; 2824 target_ulong sc_hi3; 2825 target_ulong sc_lo3; 2826 }; 2827 # else /* N32 || N64 */ 2828 struct target_sigcontext { 2829 uint64_t sc_regs[32]; 2830 uint64_t sc_fpregs[32]; 2831 uint64_t sc_mdhi; 2832 uint64_t sc_hi1; 2833 uint64_t sc_hi2; 2834 uint64_t sc_hi3; 2835 uint64_t sc_mdlo; 2836 uint64_t sc_lo1; 2837 uint64_t sc_lo2; 2838 uint64_t sc_lo3; 2839 uint64_t sc_pc; 2840 uint32_t sc_fpc_csr; 2841 uint32_t sc_used_math; 2842 uint32_t sc_dsp; 2843 uint32_t sc_reserved; 2844 }; 2845 # endif /* O32 */ 2846 2847 struct sigframe { 2848 uint32_t sf_ass[4]; /* argument save space for o32 */ 2849 uint32_t sf_code[2]; /* signal trampoline */ 2850 struct target_sigcontext sf_sc; 2851 target_sigset_t sf_mask; 2852 }; 2853 2854 struct target_ucontext { 2855 target_ulong tuc_flags; 2856 target_ulong tuc_link; 2857 target_stack_t tuc_stack; 2858 target_ulong pad0; 2859 struct target_sigcontext tuc_mcontext; 2860 target_sigset_t tuc_sigmask; 2861 }; 2862 2863 struct target_rt_sigframe { 2864 uint32_t rs_ass[4]; /* argument save space for o32 */ 2865 uint32_t rs_code[2]; /* signal trampoline */ 2866 struct target_siginfo rs_info; 2867 struct target_ucontext rs_uc; 2868 }; 2869 2870 /* Install trampoline to jump back from signal handler */ 2871 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2872 { 2873 int err = 0; 2874 2875 /* 2876 * Set up the return code ... 2877 * 2878 * li v0, __NR__foo_sigreturn 2879 * syscall 2880 */ 2881 2882 __put_user(0x24020000 + syscall, tramp + 0); 2883 __put_user(0x0000000c , tramp + 1); 2884 return err; 2885 } 2886 2887 static inline void setup_sigcontext(CPUMIPSState *regs, 2888 struct target_sigcontext *sc) 2889 { 2890 int i; 2891 2892 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2893 regs->hflags &= ~MIPS_HFLAG_BMASK; 2894 2895 __put_user(0, &sc->sc_regs[0]); 2896 for (i = 1; i < 32; ++i) { 2897 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2898 } 2899 2900 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2901 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2902 2903 /* Rather than checking for dsp existence, always copy. The storage 2904 would just be garbage otherwise. 
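(HI1-HI3/LO1-LO3 are the extra accumulators defined by the MIPS DSP ASE and sc_dsp holds DSPControl, read below through cpu_rddsp() with a mask selecting all of its fields; copying them unconditionally keeps the sigcontext layout identical whether or not the CPU actually has the ASE.)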
*/ 2905 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2906 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2907 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2908 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2909 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2910 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2911 { 2912 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2913 __put_user(dsp, &sc->sc_dsp); 2914 } 2915 2916 __put_user(1, &sc->sc_used_math); 2917 2918 for (i = 0; i < 32; ++i) { 2919 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2920 } 2921 } 2922 2923 static inline void 2924 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2925 { 2926 int i; 2927 2928 __get_user(regs->CP0_EPC, &sc->sc_pc); 2929 2930 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2931 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2932 2933 for (i = 1; i < 32; ++i) { 2934 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2935 } 2936 2937 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2938 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2939 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2940 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2941 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2942 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2943 { 2944 uint32_t dsp; 2945 __get_user(dsp, &sc->sc_dsp); 2946 cpu_wrdsp(dsp, 0x3ff, regs); 2947 } 2948 2949 for (i = 0; i < 32; ++i) { 2950 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2951 } 2952 } 2953 2954 /* 2955 * Determine which stack to use.. 2956 */ 2957 static inline abi_ulong 2958 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 2959 { 2960 unsigned long sp; 2961 2962 /* Default to using normal stack */ 2963 sp = regs->active_tc.gpr[29]; 2964 2965 /* 2966 * FPU emulator may have its own trampoline active just 2967 * above the user stack, 16-bytes before the next lowest 2968 * 16 byte boundary. Try to avoid trashing it. 2969 */ 2970 sp -= 32; 2971 2972 /* This is the X/Open sanctioned signal stack switching. */ 2973 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 2974 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2975 } 2976 2977 return (sp - frame_size) & ~7; 2978 } 2979 2980 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 2981 { 2982 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 2983 env->hflags &= ~MIPS_HFLAG_M16; 2984 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 2985 env->active_tc.PC &= ~(target_ulong) 1; 2986 } 2987 } 2988 2989 # if defined(TARGET_ABI_MIPSO32) 2990 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 2991 static void setup_frame(int sig, struct target_sigaction * ka, 2992 target_sigset_t *set, CPUMIPSState *regs) 2993 { 2994 struct sigframe *frame; 2995 abi_ulong frame_addr; 2996 int i; 2997 2998 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 2999 trace_user_setup_frame(regs, frame_addr); 3000 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3001 goto give_sigsegv; 3002 } 3003 3004 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3005 3006 setup_sigcontext(regs, &frame->sf_sc); 3007 3008 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3009 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3010 } 3011 3012 /* 3013 * Arguments to signal handler: 3014 * 3015 * a0 = signal number 3016 * a1 = 0 (should be cause) 3017 * a2 = pointer to struct sigcontext 3018 * 3019 * $25 and PC point to the signal handler, $29 points to the 3020 * struct sigframe. 
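* (So the guest handler effectively sees the classic o32 prototype, roughly void handler(int signo, int code, struct sigcontext *sc); loading the handler address into $25 (t9) as well lets PIC handlers recompute $gp in their prologue.)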
3021 */ 3022 regs->active_tc.gpr[ 4] = sig; 3023 regs->active_tc.gpr[ 5] = 0; 3024 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3025 regs->active_tc.gpr[29] = frame_addr; 3026 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3027 /* The original kernel code sets CP0_EPC to the handler 3028 * since it returns to userland using eret 3029 * we cannot do this here, and we must set PC directly */ 3030 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3031 mips_set_hflags_isa_mode_from_pc(regs); 3032 unlock_user_struct(frame, frame_addr, 1); 3033 return; 3034 3035 give_sigsegv: 3036 force_sig(TARGET_SIGSEGV/*, current*/); 3037 } 3038 3039 long do_sigreturn(CPUMIPSState *regs) 3040 { 3041 struct sigframe *frame; 3042 abi_ulong frame_addr; 3043 sigset_t blocked; 3044 target_sigset_t target_set; 3045 int i; 3046 3047 frame_addr = regs->active_tc.gpr[29]; 3048 trace_user_do_sigreturn(regs, frame_addr); 3049 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3050 goto badframe; 3051 3052 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3053 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3054 } 3055 3056 target_to_host_sigset_internal(&blocked, &target_set); 3057 set_sigmask(&blocked); 3058 3059 restore_sigcontext(regs, &frame->sf_sc); 3060 3061 #if 0 3062 /* 3063 * Don't let your children do this ... 3064 */ 3065 __asm__ __volatile__( 3066 "move\t$29, %0\n\t" 3067 "j\tsyscall_exit" 3068 :/* no outputs */ 3069 :"r" (®s)); 3070 /* Unreached */ 3071 #endif 3072 3073 regs->active_tc.PC = regs->CP0_EPC; 3074 mips_set_hflags_isa_mode_from_pc(regs); 3075 /* I am not sure this is right, but it seems to work 3076 * maybe a problem with nested signals ? */ 3077 regs->CP0_EPC = 0; 3078 return -TARGET_QEMU_ESIGRETURN; 3079 3080 badframe: 3081 force_sig(TARGET_SIGSEGV/*, current*/); 3082 return 0; 3083 } 3084 # endif /* O32 */ 3085 3086 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3087 target_siginfo_t *info, 3088 target_sigset_t *set, CPUMIPSState *env) 3089 { 3090 struct target_rt_sigframe *frame; 3091 abi_ulong frame_addr; 3092 int i; 3093 3094 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3095 trace_user_setup_rt_frame(env, frame_addr); 3096 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3097 goto give_sigsegv; 3098 } 3099 3100 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3101 3102 tswap_siginfo(&frame->rs_info, info); 3103 3104 __put_user(0, &frame->rs_uc.tuc_flags); 3105 __put_user(0, &frame->rs_uc.tuc_link); 3106 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3107 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3108 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3109 &frame->rs_uc.tuc_stack.ss_flags); 3110 3111 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3112 3113 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3114 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3115 } 3116 3117 /* 3118 * Arguments to signal handler: 3119 * 3120 * a0 = signal number 3121 * a1 = pointer to siginfo_t 3122 * a2 = pointer to struct ucontext 3123 * 3124 * $25 and PC point to the signal handler, $29 points to the 3125 * struct sigframe. 
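* (This is the SA_SIGINFO convention, roughly void handler(int signo, siginfo_t *info, void *ucontext), with both pointers referring to the rs_info and rs_uc copies placed in the frame above.)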
3126 */ 3127 env->active_tc.gpr[ 4] = sig; 3128 env->active_tc.gpr[ 5] = frame_addr 3129 + offsetof(struct target_rt_sigframe, rs_info); 3130 env->active_tc.gpr[ 6] = frame_addr 3131 + offsetof(struct target_rt_sigframe, rs_uc); 3132 env->active_tc.gpr[29] = frame_addr; 3133 env->active_tc.gpr[31] = frame_addr 3134 + offsetof(struct target_rt_sigframe, rs_code); 3135 /* The original kernel code sets CP0_EPC to the handler 3136 * since it returns to userland using eret 3137 * we cannot do this here, and we must set PC directly */ 3138 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3139 mips_set_hflags_isa_mode_from_pc(env); 3140 unlock_user_struct(frame, frame_addr, 1); 3141 return; 3142 3143 give_sigsegv: 3144 unlock_user_struct(frame, frame_addr, 1); 3145 force_sig(TARGET_SIGSEGV/*, current*/); 3146 } 3147 3148 long do_rt_sigreturn(CPUMIPSState *env) 3149 { 3150 struct target_rt_sigframe *frame; 3151 abi_ulong frame_addr; 3152 sigset_t blocked; 3153 3154 frame_addr = env->active_tc.gpr[29]; 3155 trace_user_do_rt_sigreturn(env, frame_addr); 3156 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3157 goto badframe; 3158 } 3159 3160 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3161 set_sigmask(&blocked); 3162 3163 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3164 3165 if (do_sigaltstack(frame_addr + 3166 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3167 0, get_sp_from_cpustate(env)) == -EFAULT) 3168 goto badframe; 3169 3170 env->active_tc.PC = env->CP0_EPC; 3171 mips_set_hflags_isa_mode_from_pc(env); 3172 /* I am not sure this is right, but it seems to work 3173 * maybe a problem with nested signals ? */ 3174 env->CP0_EPC = 0; 3175 return -TARGET_QEMU_ESIGRETURN; 3176 3177 badframe: 3178 force_sig(TARGET_SIGSEGV/*, current*/); 3179 return 0; 3180 } 3181 3182 #elif defined(TARGET_SH4) 3183 3184 /* 3185 * code and data structures from linux kernel: 3186 * include/asm-sh/sigcontext.h 3187 * arch/sh/kernel/signal.c 3188 */ 3189 3190 struct target_sigcontext { 3191 target_ulong oldmask; 3192 3193 /* CPU registers */ 3194 target_ulong sc_gregs[16]; 3195 target_ulong sc_pc; 3196 target_ulong sc_pr; 3197 target_ulong sc_sr; 3198 target_ulong sc_gbr; 3199 target_ulong sc_mach; 3200 target_ulong sc_macl; 3201 3202 /* FPU registers */ 3203 target_ulong sc_fpregs[16]; 3204 target_ulong sc_xfpregs[16]; 3205 unsigned int sc_fpscr; 3206 unsigned int sc_fpul; 3207 unsigned int sc_ownedfp; 3208 }; 3209 3210 struct target_sigframe 3211 { 3212 struct target_sigcontext sc; 3213 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3214 uint16_t retcode[3]; 3215 }; 3216 3217 3218 struct target_ucontext { 3219 target_ulong tuc_flags; 3220 struct target_ucontext *tuc_link; 3221 target_stack_t tuc_stack; 3222 struct target_sigcontext tuc_mcontext; 3223 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3224 }; 3225 3226 struct target_rt_sigframe 3227 { 3228 struct target_siginfo info; 3229 struct target_ucontext uc; 3230 uint16_t retcode[3]; 3231 }; 3232 3233 3234 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3235 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3236 3237 static abi_ulong get_sigframe(struct target_sigaction *ka, 3238 unsigned long sp, size_t frame_size) 3239 { 3240 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3241 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3242 } 3243 3244 return (sp - frame_size) & -8ul; 3245 } 3246 3247 static void 
setup_sigcontext(struct target_sigcontext *sc, 3248 CPUSH4State *regs, unsigned long mask) 3249 { 3250 int i; 3251 3252 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3253 COPY(gregs[0]); COPY(gregs[1]); 3254 COPY(gregs[2]); COPY(gregs[3]); 3255 COPY(gregs[4]); COPY(gregs[5]); 3256 COPY(gregs[6]); COPY(gregs[7]); 3257 COPY(gregs[8]); COPY(gregs[9]); 3258 COPY(gregs[10]); COPY(gregs[11]); 3259 COPY(gregs[12]); COPY(gregs[13]); 3260 COPY(gregs[14]); COPY(gregs[15]); 3261 COPY(gbr); COPY(mach); 3262 COPY(macl); COPY(pr); 3263 COPY(sr); COPY(pc); 3264 #undef COPY 3265 3266 for (i=0; i<16; i++) { 3267 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3268 } 3269 __put_user(regs->fpscr, &sc->sc_fpscr); 3270 __put_user(regs->fpul, &sc->sc_fpul); 3271 3272 /* non-iBCS2 extensions.. */ 3273 __put_user(mask, &sc->oldmask); 3274 } 3275 3276 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3277 { 3278 int i; 3279 3280 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3281 COPY(gregs[0]); COPY(gregs[1]); 3282 COPY(gregs[2]); COPY(gregs[3]); 3283 COPY(gregs[4]); COPY(gregs[5]); 3284 COPY(gregs[6]); COPY(gregs[7]); 3285 COPY(gregs[8]); COPY(gregs[9]); 3286 COPY(gregs[10]); COPY(gregs[11]); 3287 COPY(gregs[12]); COPY(gregs[13]); 3288 COPY(gregs[14]); COPY(gregs[15]); 3289 COPY(gbr); COPY(mach); 3290 COPY(macl); COPY(pr); 3291 COPY(sr); COPY(pc); 3292 #undef COPY 3293 3294 for (i=0; i<16; i++) { 3295 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3296 } 3297 __get_user(regs->fpscr, &sc->sc_fpscr); 3298 __get_user(regs->fpul, &sc->sc_fpul); 3299 3300 regs->tra = -1; /* disable syscall checks */ 3301 } 3302 3303 static void setup_frame(int sig, struct target_sigaction *ka, 3304 target_sigset_t *set, CPUSH4State *regs) 3305 { 3306 struct target_sigframe *frame; 3307 abi_ulong frame_addr; 3308 int i; 3309 3310 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3311 trace_user_setup_frame(regs, frame_addr); 3312 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3313 goto give_sigsegv; 3314 } 3315 3316 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3317 3318 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3319 __put_user(set->sig[i + 1], &frame->extramask[i]); 3320 } 3321 3322 /* Set up to return from userspace. If provided, use a stub 3323 already in userspace. 
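(Otherwise the stub is generated in the frame itself: decoded, the three halfwords written below are a mov.w @(disp,PC),r3 whose PC-relative load pulls the syscall number stored at retcode[2] into r3, a trapa #0x10, i.e. the no-argument syscall trap, and the sigreturn number itself as data; pr is then pointed at the stub so that returning from the handler re-enters the kernel through sigreturn.)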
*/ 3324 if (ka->sa_flags & TARGET_SA_RESTORER) { 3325 regs->pr = (unsigned long) ka->sa_restorer; 3326 } else { 3327 /* Generate return code (system call to sigreturn) */ 3328 abi_ulong retcode_addr = frame_addr + 3329 offsetof(struct target_sigframe, retcode); 3330 __put_user(MOVW(2), &frame->retcode[0]); 3331 __put_user(TRAP_NOARG, &frame->retcode[1]); 3332 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3333 regs->pr = (unsigned long) retcode_addr; 3334 } 3335 3336 /* Set up registers for signal handler */ 3337 regs->gregs[15] = frame_addr; 3338 regs->gregs[4] = sig; /* Arg for signal handler */ 3339 regs->gregs[5] = 0; 3340 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc); 3341 regs->pc = (unsigned long) ka->_sa_handler; 3342 3343 unlock_user_struct(frame, frame_addr, 1); 3344 return; 3345 3346 give_sigsegv: 3347 unlock_user_struct(frame, frame_addr, 1); 3348 force_sig(TARGET_SIGSEGV); 3349 } 3350 3351 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3352 target_siginfo_t *info, 3353 target_sigset_t *set, CPUSH4State *regs) 3354 { 3355 struct target_rt_sigframe *frame; 3356 abi_ulong frame_addr; 3357 int i; 3358 3359 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3360 trace_user_setup_rt_frame(regs, frame_addr); 3361 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3362 goto give_sigsegv; 3363 } 3364 3365 tswap_siginfo(&frame->info, info); 3366 3367 /* Create the ucontext. */ 3368 __put_user(0, &frame->uc.tuc_flags); 3369 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3370 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3371 &frame->uc.tuc_stack.ss_sp); 3372 __put_user(sas_ss_flags(regs->gregs[15]), 3373 &frame->uc.tuc_stack.ss_flags); 3374 __put_user(target_sigaltstack_used.ss_size, 3375 &frame->uc.tuc_stack.ss_size); 3376 setup_sigcontext(&frame->uc.tuc_mcontext, 3377 regs, set->sig[0]); 3378 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3379 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3380 } 3381 3382 /* Set up to return from userspace. If provided, use a stub 3383 already in userspace.
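(Same generated stub as in setup_frame() above, except that the number stored at retcode[2] is TARGET_NR_rt_sigreturn.)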
*/ 3384 if (ka->sa_flags & TARGET_SA_RESTORER) { 3385 regs->pr = (unsigned long) ka->sa_restorer; 3386 } else { 3387 /* Generate return code (system call to sigreturn) */ 3388 abi_ulong retcode_addr = frame_addr + 3389 offsetof(struct target_rt_sigframe, retcode); 3390 __put_user(MOVW(2), &frame->retcode[0]); 3391 __put_user(TRAP_NOARG, &frame->retcode[1]); 3392 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3393 regs->pr = (unsigned long) retcode_addr; 3394 } 3395 3396 /* Set up registers for signal handler */ 3397 regs->gregs[15] = frame_addr; 3398 regs->gregs[4] = sig; /* Arg for signal handler */ 3399 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3400 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3401 regs->pc = (unsigned long) ka->_sa_handler; 3402 3403 unlock_user_struct(frame, frame_addr, 1); 3404 return; 3405 3406 give_sigsegv: 3407 unlock_user_struct(frame, frame_addr, 1); 3408 force_sig(TARGET_SIGSEGV); 3409 } 3410 3411 long do_sigreturn(CPUSH4State *regs) 3412 { 3413 struct target_sigframe *frame; 3414 abi_ulong frame_addr; 3415 sigset_t blocked; 3416 target_sigset_t target_set; 3417 int i; 3418 int err = 0; 3419 3420 frame_addr = regs->gregs[15]; 3421 trace_user_do_sigreturn(regs, frame_addr); 3422 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3423 goto badframe; 3424 } 3425 3426 __get_user(target_set.sig[0], &frame->sc.oldmask); 3427 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3428 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3429 } 3430 3431 if (err) 3432 goto badframe; 3433 3434 target_to_host_sigset_internal(&blocked, &target_set); 3435 set_sigmask(&blocked); 3436 3437 restore_sigcontext(regs, &frame->sc); 3438 3439 unlock_user_struct(frame, frame_addr, 0); 3440 return -TARGET_QEMU_ESIGRETURN; 3441 3442 badframe: 3443 unlock_user_struct(frame, frame_addr, 0); 3444 force_sig(TARGET_SIGSEGV); 3445 return 0; 3446 } 3447 3448 long do_rt_sigreturn(CPUSH4State *regs) 3449 { 3450 struct target_rt_sigframe *frame; 3451 abi_ulong frame_addr; 3452 sigset_t blocked; 3453 3454 frame_addr = regs->gregs[15]; 3455 trace_user_do_rt_sigreturn(regs, frame_addr); 3456 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3457 goto badframe; 3458 } 3459 3460 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3461 set_sigmask(&blocked); 3462 3463 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3464 3465 if (do_sigaltstack(frame_addr + 3466 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3467 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3468 goto badframe; 3469 } 3470 3471 unlock_user_struct(frame, frame_addr, 0); 3472 return -TARGET_QEMU_ESIGRETURN; 3473 3474 badframe: 3475 unlock_user_struct(frame, frame_addr, 0); 3476 force_sig(TARGET_SIGSEGV); 3477 return 0; 3478 } 3479 #elif defined(TARGET_MICROBLAZE) 3480 3481 struct target_sigcontext { 3482 struct target_pt_regs regs; /* needs to be first */ 3483 uint32_t oldmask; 3484 }; 3485 3486 struct target_stack_t { 3487 abi_ulong ss_sp; 3488 int ss_flags; 3489 unsigned int ss_size; 3490 }; 3491 3492 struct target_ucontext { 3493 abi_ulong tuc_flags; 3494 abi_ulong tuc_link; 3495 struct target_stack_t tuc_stack; 3496 struct target_sigcontext tuc_mcontext; 3497 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3498 }; 3499 3500 /* Signal frames. 
*/ 3501 struct target_signal_frame { 3502 struct target_ucontext uc; 3503 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3504 uint32_t tramp[2]; 3505 }; 3506 3507 struct rt_signal_frame { 3508 siginfo_t info; 3509 struct ucontext uc; 3510 uint32_t tramp[2]; 3511 }; 3512 3513 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3514 { 3515 __put_user(env->regs[0], &sc->regs.r0); 3516 __put_user(env->regs[1], &sc->regs.r1); 3517 __put_user(env->regs[2], &sc->regs.r2); 3518 __put_user(env->regs[3], &sc->regs.r3); 3519 __put_user(env->regs[4], &sc->regs.r4); 3520 __put_user(env->regs[5], &sc->regs.r5); 3521 __put_user(env->regs[6], &sc->regs.r6); 3522 __put_user(env->regs[7], &sc->regs.r7); 3523 __put_user(env->regs[8], &sc->regs.r8); 3524 __put_user(env->regs[9], &sc->regs.r9); 3525 __put_user(env->regs[10], &sc->regs.r10); 3526 __put_user(env->regs[11], &sc->regs.r11); 3527 __put_user(env->regs[12], &sc->regs.r12); 3528 __put_user(env->regs[13], &sc->regs.r13); 3529 __put_user(env->regs[14], &sc->regs.r14); 3530 __put_user(env->regs[15], &sc->regs.r15); 3531 __put_user(env->regs[16], &sc->regs.r16); 3532 __put_user(env->regs[17], &sc->regs.r17); 3533 __put_user(env->regs[18], &sc->regs.r18); 3534 __put_user(env->regs[19], &sc->regs.r19); 3535 __put_user(env->regs[20], &sc->regs.r20); 3536 __put_user(env->regs[21], &sc->regs.r21); 3537 __put_user(env->regs[22], &sc->regs.r22); 3538 __put_user(env->regs[23], &sc->regs.r23); 3539 __put_user(env->regs[24], &sc->regs.r24); 3540 __put_user(env->regs[25], &sc->regs.r25); 3541 __put_user(env->regs[26], &sc->regs.r26); 3542 __put_user(env->regs[27], &sc->regs.r27); 3543 __put_user(env->regs[28], &sc->regs.r28); 3544 __put_user(env->regs[29], &sc->regs.r29); 3545 __put_user(env->regs[30], &sc->regs.r30); 3546 __put_user(env->regs[31], &sc->regs.r31); 3547 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3548 } 3549 3550 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3551 { 3552 __get_user(env->regs[0], &sc->regs.r0); 3553 __get_user(env->regs[1], &sc->regs.r1); 3554 __get_user(env->regs[2], &sc->regs.r2); 3555 __get_user(env->regs[3], &sc->regs.r3); 3556 __get_user(env->regs[4], &sc->regs.r4); 3557 __get_user(env->regs[5], &sc->regs.r5); 3558 __get_user(env->regs[6], &sc->regs.r6); 3559 __get_user(env->regs[7], &sc->regs.r7); 3560 __get_user(env->regs[8], &sc->regs.r8); 3561 __get_user(env->regs[9], &sc->regs.r9); 3562 __get_user(env->regs[10], &sc->regs.r10); 3563 __get_user(env->regs[11], &sc->regs.r11); 3564 __get_user(env->regs[12], &sc->regs.r12); 3565 __get_user(env->regs[13], &sc->regs.r13); 3566 __get_user(env->regs[14], &sc->regs.r14); 3567 __get_user(env->regs[15], &sc->regs.r15); 3568 __get_user(env->regs[16], &sc->regs.r16); 3569 __get_user(env->regs[17], &sc->regs.r17); 3570 __get_user(env->regs[18], &sc->regs.r18); 3571 __get_user(env->regs[19], &sc->regs.r19); 3572 __get_user(env->regs[20], &sc->regs.r20); 3573 __get_user(env->regs[21], &sc->regs.r21); 3574 __get_user(env->regs[22], &sc->regs.r22); 3575 __get_user(env->regs[23], &sc->regs.r23); 3576 __get_user(env->regs[24], &sc->regs.r24); 3577 __get_user(env->regs[25], &sc->regs.r25); 3578 __get_user(env->regs[26], &sc->regs.r26); 3579 __get_user(env->regs[27], &sc->regs.r27); 3580 __get_user(env->regs[28], &sc->regs.r28); 3581 __get_user(env->regs[29], &sc->regs.r29); 3582 __get_user(env->regs[30], &sc->regs.r30); 3583 __get_user(env->regs[31], &sc->regs.r31); 3584 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3585 } 3586 3587 static 
abi_ulong get_sigframe(struct target_sigaction *ka, 3588 CPUMBState *env, int frame_size) 3589 { 3590 abi_ulong sp = env->regs[1]; 3591 3592 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3593 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3594 } 3595 3596 return ((sp - frame_size) & -8UL); 3597 } 3598 3599 static void setup_frame(int sig, struct target_sigaction *ka, 3600 target_sigset_t *set, CPUMBState *env) 3601 { 3602 struct target_signal_frame *frame; 3603 abi_ulong frame_addr; 3604 int i; 3605 3606 frame_addr = get_sigframe(ka, env, sizeof *frame); 3607 trace_user_setup_frame(env, frame_addr); 3608 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3609 goto badframe; 3610 3611 /* Save the mask. */ 3612 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3613 3614 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3615 __put_user(set->sig[i], &frame->extramask[i - 1]); 3616 } 3617 3618 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3619 3620 /* Set up to return from userspace. If provided, use a stub 3621 already in userspace. */ 3622 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3623 if (ka->sa_flags & TARGET_SA_RESTORER) { 3624 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3625 } else { 3626 uint32_t t; 3627 /* Note, these encodings are _big endian_! */ 3628 /* addi r12, r0, __NR_sigreturn */ 3629 t = 0x31800000UL | TARGET_NR_sigreturn; 3630 __put_user(t, frame->tramp + 0); 3631 /* brki r14, 0x8 */ 3632 t = 0xb9cc0008UL; 3633 __put_user(t, frame->tramp + 1); 3634 3635 /* Return from sighandler will jump to the tramp. 3636 Negative 8 offset because return is rtsd r15, 8 */ 3637 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3638 - 8; 3639 } 3640 3641 /* Set up registers for signal handler */ 3642 env->regs[1] = frame_addr; 3643 /* Signal handler args: */ 3644 env->regs[5] = sig; /* Arg 0: signum */ 3645 env->regs[6] = 0; 3646 /* arg 1: sigcontext */ 3647 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3648 3649 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3650 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3651 3652 unlock_user_struct(frame, frame_addr, 1); 3653 return; 3654 badframe: 3655 force_sig(TARGET_SIGSEGV); 3656 } 3657 3658 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3659 target_siginfo_t *info, 3660 target_sigset_t *set, CPUMBState *env) 3661 { 3662 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3663 } 3664 3665 long do_sigreturn(CPUMBState *env) 3666 { 3667 struct target_signal_frame *frame; 3668 abi_ulong frame_addr; 3669 target_sigset_t target_set; 3670 sigset_t set; 3671 int i; 3672 3673 frame_addr = env->regs[R_SP]; 3674 trace_user_do_sigreturn(env, frame_addr); 3675 /* Make sure the guest isn't playing games. */ 3676 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3677 goto badframe; 3678 3679 /* Restore blocked signals */ 3680 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3681 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3682 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3683 } 3684 target_to_host_sigset_internal(&set, &target_set); 3685 set_sigmask(&set); 3686 3687 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3688 /* We got here through a sigreturn syscall, our path back is via an 3689 rtb insn so setup r14 for that. 
*/ 3690 env->regs[14] = env->sregs[SR_PC]; 3691 3692 unlock_user_struct(frame, frame_addr, 0); 3693 return -TARGET_QEMU_ESIGRETURN; 3694 badframe: 3695 force_sig(TARGET_SIGSEGV); 3696 } 3697 3698 long do_rt_sigreturn(CPUMBState *env) 3699 { 3700 trace_user_do_rt_sigreturn(env, 0); 3701 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3702 return -TARGET_ENOSYS; 3703 } 3704 3705 #elif defined(TARGET_CRIS) 3706 3707 struct target_sigcontext { 3708 struct target_pt_regs regs; /* needs to be first */ 3709 uint32_t oldmask; 3710 uint32_t usp; /* usp before stacking this gunk on it */ 3711 }; 3712 3713 /* Signal frames. */ 3714 struct target_signal_frame { 3715 struct target_sigcontext sc; 3716 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3717 uint16_t retcode[4]; /* Trampoline code. */ 3718 }; 3719 3720 struct rt_signal_frame { 3721 siginfo_t *pinfo; 3722 void *puc; 3723 siginfo_t info; 3724 struct ucontext uc; 3725 uint16_t retcode[4]; /* Trampoline code. */ 3726 }; 3727 3728 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3729 { 3730 __put_user(env->regs[0], &sc->regs.r0); 3731 __put_user(env->regs[1], &sc->regs.r1); 3732 __put_user(env->regs[2], &sc->regs.r2); 3733 __put_user(env->regs[3], &sc->regs.r3); 3734 __put_user(env->regs[4], &sc->regs.r4); 3735 __put_user(env->regs[5], &sc->regs.r5); 3736 __put_user(env->regs[6], &sc->regs.r6); 3737 __put_user(env->regs[7], &sc->regs.r7); 3738 __put_user(env->regs[8], &sc->regs.r8); 3739 __put_user(env->regs[9], &sc->regs.r9); 3740 __put_user(env->regs[10], &sc->regs.r10); 3741 __put_user(env->regs[11], &sc->regs.r11); 3742 __put_user(env->regs[12], &sc->regs.r12); 3743 __put_user(env->regs[13], &sc->regs.r13); 3744 __put_user(env->regs[14], &sc->usp); 3745 __put_user(env->regs[15], &sc->regs.acr); 3746 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3747 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3748 __put_user(env->pc, &sc->regs.erp); 3749 } 3750 3751 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3752 { 3753 __get_user(env->regs[0], &sc->regs.r0); 3754 __get_user(env->regs[1], &sc->regs.r1); 3755 __get_user(env->regs[2], &sc->regs.r2); 3756 __get_user(env->regs[3], &sc->regs.r3); 3757 __get_user(env->regs[4], &sc->regs.r4); 3758 __get_user(env->regs[5], &sc->regs.r5); 3759 __get_user(env->regs[6], &sc->regs.r6); 3760 __get_user(env->regs[7], &sc->regs.r7); 3761 __get_user(env->regs[8], &sc->regs.r8); 3762 __get_user(env->regs[9], &sc->regs.r9); 3763 __get_user(env->regs[10], &sc->regs.r10); 3764 __get_user(env->regs[11], &sc->regs.r11); 3765 __get_user(env->regs[12], &sc->regs.r12); 3766 __get_user(env->regs[13], &sc->regs.r13); 3767 __get_user(env->regs[14], &sc->usp); 3768 __get_user(env->regs[15], &sc->regs.acr); 3769 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3770 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3771 __get_user(env->pc, &sc->regs.erp); 3772 } 3773 3774 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3775 { 3776 abi_ulong sp; 3777 /* Align the stack downwards to 4. 
*/ 3778 sp = (env->regs[R_SP] & ~3); 3779 return sp - framesize; 3780 } 3781 3782 static void setup_frame(int sig, struct target_sigaction *ka, 3783 target_sigset_t *set, CPUCRISState *env) 3784 { 3785 struct target_signal_frame *frame; 3786 abi_ulong frame_addr; 3787 int i; 3788 3789 frame_addr = get_sigframe(env, sizeof *frame); 3790 trace_user_setup_frame(env, frame_addr); 3791 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3792 goto badframe; 3793 3794 /* 3795 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3796 * use this trampoline anymore but it sets it up for GDB. 3797 * In QEMU, using the trampoline simplifies things a bit so we use it. 3798 * 3799 * This is movu.w __NR_sigreturn, r9; break 13; 3800 */ 3801 __put_user(0x9c5f, frame->retcode+0); 3802 __put_user(TARGET_NR_sigreturn, 3803 frame->retcode + 1); 3804 __put_user(0xe93d, frame->retcode + 2); 3805 3806 /* Save the mask. */ 3807 __put_user(set->sig[0], &frame->sc.oldmask); 3808 3809 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3810 __put_user(set->sig[i], &frame->extramask[i - 1]); 3811 } 3812 3813 setup_sigcontext(&frame->sc, env); 3814 3815 /* Move the stack and setup the arguments for the handler. */ 3816 env->regs[R_SP] = frame_addr; 3817 env->regs[10] = sig; 3818 env->pc = (unsigned long) ka->_sa_handler; 3819 /* Link SRP so the guest returns through the trampoline. */ 3820 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3821 3822 unlock_user_struct(frame, frame_addr, 1); 3823 return; 3824 badframe: 3825 force_sig(TARGET_SIGSEGV); 3826 } 3827 3828 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3829 target_siginfo_t *info, 3830 target_sigset_t *set, CPUCRISState *env) 3831 { 3832 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3833 } 3834 3835 long do_sigreturn(CPUCRISState *env) 3836 { 3837 struct target_signal_frame *frame; 3838 abi_ulong frame_addr; 3839 target_sigset_t target_set; 3840 sigset_t set; 3841 int i; 3842 3843 frame_addr = env->regs[R_SP]; 3844 trace_user_do_sigreturn(env, frame_addr); 3845 /* Make sure the guest isn't playing games. 
*/ 3846 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3847 goto badframe; 3848 } 3849 3850 /* Restore blocked signals */ 3851 __get_user(target_set.sig[0], &frame->sc.oldmask); 3852 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3853 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3854 } 3855 target_to_host_sigset_internal(&set, &target_set); 3856 set_sigmask(&set); 3857 3858 restore_sigcontext(&frame->sc, env); 3859 unlock_user_struct(frame, frame_addr, 0); 3860 return -TARGET_QEMU_ESIGRETURN; 3861 badframe: 3862 force_sig(TARGET_SIGSEGV); 3863 } 3864 3865 long do_rt_sigreturn(CPUCRISState *env) 3866 { 3867 trace_user_do_rt_sigreturn(env, 0); 3868 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3869 return -TARGET_ENOSYS; 3870 } 3871 3872 #elif defined(TARGET_OPENRISC) 3873 3874 struct target_sigcontext { 3875 struct target_pt_regs regs; 3876 abi_ulong oldmask; 3877 abi_ulong usp; 3878 }; 3879 3880 struct target_ucontext { 3881 abi_ulong tuc_flags; 3882 abi_ulong tuc_link; 3883 target_stack_t tuc_stack; 3884 struct target_sigcontext tuc_mcontext; 3885 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3886 }; 3887 3888 struct target_rt_sigframe { 3889 abi_ulong pinfo; 3890 uint64_t puc; 3891 struct target_siginfo info; 3892 struct target_sigcontext sc; 3893 struct target_ucontext uc; 3894 unsigned char retcode[16]; /* trampoline code */ 3895 }; 3896 3897 /* This is the asm-generic/ucontext.h version */ 3898 #if 0 3899 static int restore_sigcontext(CPUOpenRISCState *regs, 3900 struct target_sigcontext *sc) 3901 { 3902 unsigned int err = 0; 3903 unsigned long old_usp; 3904 3905 /* Alwys make any pending restarted system call return -EINTR */ 3906 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3907 3908 /* restore the regs from &sc->regs (same as sc, since regs is first) 3909 * (sc is already checked for VERIFY_READ since the sigframe was 3910 * checked in sys_sigreturn previously) 3911 */ 3912 3913 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3914 goto badframe; 3915 } 3916 3917 /* make sure the U-flag is set so user-mode cannot fool us */ 3918 3919 regs->sr &= ~SR_SM; 3920 3921 /* restore the old USP as it was before we stacked the sc etc. 3922 * (we cannot just pop the sigcontext since we aligned the sp and 3923 * stuff after pushing it) 3924 */ 3925 3926 __get_user(old_usp, &sc->usp); 3927 phx_signal("old_usp 0x%lx", old_usp); 3928 3929 __PHX__ REALLY /* ??? */ 3930 wrusp(old_usp); 3931 regs->gpr[1] = old_usp; 3932 3933 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3934 * after this completes, but we don't use that mechanism. maybe we can 3935 * use it now ? 3936 */ 3937 3938 return err; 3939 3940 badframe: 3941 return 1; 3942 } 3943 #endif 3944 3945 /* Set up a signal frame. */ 3946 3947 static void setup_sigcontext(struct target_sigcontext *sc, 3948 CPUOpenRISCState *regs, 3949 unsigned long mask) 3950 { 3951 unsigned long usp = regs->gpr[1]; 3952 3953 /* copy the regs. they are first in sc so we can use sc directly */ 3954 3955 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 3956 3957 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 3958 the signal handler. The frametype will be restored to its previous 3959 value in restore_sigcontext. 
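(The CRIS_FRAME_NORMAL reference appears to be inherited from the CRIS port this code was adapted from; the assignment is left commented out below and QEMU does not track a frame type here.)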
*/ 3960 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 3961 3962 /* then some other stuff */ 3963 __put_user(mask, &sc->oldmask); 3964 __put_user(usp, &sc->usp); 3965 } 3966 3967 static inline unsigned long align_sigframe(unsigned long sp) 3968 { 3969 return sp & ~3UL; 3970 } 3971 3972 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 3973 CPUOpenRISCState *regs, 3974 size_t frame_size) 3975 { 3976 unsigned long sp = regs->gpr[1]; 3977 int onsigstack = on_sig_stack(sp); 3978 3979 /* redzone */ 3980 /* This is the X/Open sanctioned signal stack switching. */ 3981 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 3982 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3983 } 3984 3985 sp = align_sigframe(sp - frame_size); 3986 3987 /* 3988 * If we are on the alternate signal stack and would overflow it, don't. 3989 * Return an always-bogus address instead so we will die with SIGSEGV. 3990 */ 3991 3992 if (onsigstack && !likely(on_sig_stack(sp))) { 3993 return -1L; 3994 } 3995 3996 return sp; 3997 } 3998 3999 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4000 target_siginfo_t *info, 4001 target_sigset_t *set, CPUOpenRISCState *env) 4002 { 4003 int err = 0; 4004 abi_ulong frame_addr; 4005 unsigned long return_ip; 4006 struct target_rt_sigframe *frame; 4007 abi_ulong info_addr, uc_addr; 4008 4009 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4010 trace_user_setup_rt_frame(env, frame_addr); 4011 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4012 goto give_sigsegv; 4013 } 4014 4015 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4016 __put_user(info_addr, &frame->pinfo); 4017 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4018 __put_user(uc_addr, &frame->puc); 4019 4020 if (ka->sa_flags & SA_SIGINFO) { 4021 tswap_siginfo(&frame->info, info); 4022 } 4023 4024 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 4025 __put_user(0, &frame->uc.tuc_flags); 4026 __put_user(0, &frame->uc.tuc_link); 4027 __put_user(target_sigaltstack_used.ss_sp, 4028 &frame->uc.tuc_stack.ss_sp); 4029 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 4030 __put_user(target_sigaltstack_used.ss_size, 4031 &frame->uc.tuc_stack.ss_size); 4032 setup_sigcontext(&frame->sc, env, set->sig[0]); 4033 4034 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4035 4036 /* trampoline - the desired return ip is the retcode itself */ 4037 return_ip = (unsigned long)&frame->retcode; 4038 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4039 __put_user(0xa960, (short *)(frame->retcode + 0)); 4040 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4041 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4042 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4043 4044 if (err) { 4045 goto give_sigsegv; 4046 } 4047 4048 /* TODO what is the current->exec_domain stuff and invmap ? 
*/ 4049 4050 /* Set up registers for signal handler */ 4051 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4052 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4053 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4054 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4055 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4056 4057 /* actually move the usp to reflect the stacked frame */ 4058 env->gpr[1] = (unsigned long)frame; 4059 4060 return; 4061 4062 give_sigsegv: 4063 unlock_user_struct(frame, frame_addr, 1); 4064 if (sig == TARGET_SIGSEGV) { 4065 ka->_sa_handler = TARGET_SIG_DFL; 4066 } 4067 force_sig(TARGET_SIGSEGV); 4068 } 4069 4070 long do_sigreturn(CPUOpenRISCState *env) 4071 { 4072 trace_user_do_sigreturn(env, 0); 4073 fprintf(stderr, "do_sigreturn: not implemented\n"); 4074 return -TARGET_ENOSYS; 4075 } 4076 4077 long do_rt_sigreturn(CPUOpenRISCState *env) 4078 { 4079 trace_user_do_rt_sigreturn(env, 0); 4080 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4081 return -TARGET_ENOSYS; 4082 } 4083 /* TARGET_OPENRISC */ 4084 4085 #elif defined(TARGET_S390X) 4086 4087 #define __NUM_GPRS 16 4088 #define __NUM_FPRS 16 4089 #define __NUM_ACRS 16 4090 4091 #define S390_SYSCALL_SIZE 2 4092 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4093 4094 #define _SIGCONTEXT_NSIG 64 4095 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4096 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4097 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4098 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4099 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4100 4101 typedef struct { 4102 target_psw_t psw; 4103 target_ulong gprs[__NUM_GPRS]; 4104 unsigned int acrs[__NUM_ACRS]; 4105 } target_s390_regs_common; 4106 4107 typedef struct { 4108 unsigned int fpc; 4109 double fprs[__NUM_FPRS]; 4110 } target_s390_fp_regs; 4111 4112 typedef struct { 4113 target_s390_regs_common regs; 4114 target_s390_fp_regs fpregs; 4115 } target_sigregs; 4116 4117 struct target_sigcontext { 4118 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4119 target_sigregs *sregs; 4120 }; 4121 4122 typedef struct { 4123 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4124 struct target_sigcontext sc; 4125 target_sigregs sregs; 4126 int signo; 4127 uint8_t retcode[S390_SYSCALL_SIZE]; 4128 } sigframe; 4129 4130 struct target_ucontext { 4131 target_ulong tuc_flags; 4132 struct target_ucontext *tuc_link; 4133 target_stack_t tuc_stack; 4134 target_sigregs tuc_mcontext; 4135 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4136 }; 4137 4138 typedef struct { 4139 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4140 uint8_t retcode[S390_SYSCALL_SIZE]; 4141 struct target_siginfo info; 4142 struct target_ucontext uc; 4143 } rt_sigframe; 4144 4145 static inline abi_ulong 4146 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4147 { 4148 abi_ulong sp; 4149 4150 /* Default to using normal stack */ 4151 sp = env->regs[15]; 4152 4153 /* This is the X/Open sanctioned signal stack switching. */ 4154 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4155 if (!sas_ss_flags(sp)) { 4156 sp = target_sigaltstack_used.ss_sp + 4157 target_sigaltstack_used.ss_size; 4158 } 4159 } 4160 4161 /* This is the legacy signal stack switching. 
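In the kernel this branch only applied when the signal interrupted kernel-mode execution (note the disabled !user_mode(regs) check), with sa_restorer supplying the stack pointer; user-mode emulation never takes it, hence the hard-coded 0.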
*/ 4162 else if (/* FIXME !user_mode(regs) */ 0 && 4163 !(ka->sa_flags & TARGET_SA_RESTORER) && 4164 ka->sa_restorer) { 4165 sp = (abi_ulong) ka->sa_restorer; 4166 } 4167 4168 return (sp - frame_size) & -8ul; 4169 } 4170 4171 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4172 { 4173 int i; 4174 //save_access_regs(current->thread.acrs); FIXME 4175 4176 /* Copy a 'clean' PSW mask to the user to avoid leaking 4177 information about whether PER is currently on. */ 4178 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4179 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4180 for (i = 0; i < 16; i++) { 4181 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4182 } 4183 for (i = 0; i < 16; i++) { 4184 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4185 } 4186 /* 4187 * We have to store the fp registers to current->thread.fp_regs 4188 * to merge them with the emulated registers. 4189 */ 4190 //save_fp_regs(&current->thread.fp_regs); FIXME 4191 for (i = 0; i < 16; i++) { 4192 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4193 } 4194 } 4195 4196 static void setup_frame(int sig, struct target_sigaction *ka, 4197 target_sigset_t *set, CPUS390XState *env) 4198 { 4199 sigframe *frame; 4200 abi_ulong frame_addr; 4201 4202 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4203 trace_user_setup_frame(env, frame_addr); 4204 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4205 goto give_sigsegv; 4206 } 4207 4208 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4209 4210 save_sigregs(env, &frame->sregs); 4211 4212 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4213 (abi_ulong *)&frame->sc.sregs); 4214 4215 /* Set up to return from userspace. If provided, use a stub 4216 already in userspace. */ 4217 if (ka->sa_flags & TARGET_SA_RESTORER) { 4218 env->regs[14] = (unsigned long) 4219 ka->sa_restorer | PSW_ADDR_AMODE; 4220 } else { 4221 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4222 | PSW_ADDR_AMODE; 4223 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4224 (uint16_t *)(frame->retcode)); 4225 } 4226 4227 /* Set up backchain. */ 4228 __put_user(env->regs[15], (abi_ulong *) frame); 4229 4230 /* Set up registers for signal handler */ 4231 env->regs[15] = frame_addr; 4232 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4233 4234 env->regs[2] = sig; //map_signal(sig); 4235 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4236 4237 /* We forgot to include these in the sigcontext. 4238 To avoid breaking binary compatibility, they are passed as args. */ 4239 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4240 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4241 4242 /* Place signal number on stack to allow backtrace from handler. */ 4243 __put_user(env->regs[2], &frame->signo); 4244 unlock_user_struct(frame, frame_addr, 1); 4245 return; 4246 4247 give_sigsegv: 4248 force_sig(TARGET_SIGSEGV); 4249 } 4250 4251 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4252 target_siginfo_t *info, 4253 target_sigset_t *set, CPUS390XState *env) 4254 { 4255 int i; 4256 rt_sigframe *frame; 4257 abi_ulong frame_addr; 4258 4259 frame_addr = get_sigframe(ka, env, sizeof *frame); 4260 trace_user_setup_rt_frame(env, frame_addr); 4261 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4262 goto give_sigsegv; 4263 } 4264 4265 tswap_siginfo(&frame->info, info); 4266 4267 /* Create the ucontext.
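It carries the flags, the link, the sigaltstack description, the saved machine state and the full signal mask, matching the layout of struct target_ucontext above.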
*/ 4268 __put_user(0, &frame->uc.tuc_flags); 4269 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4270 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4271 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4272 &frame->uc.tuc_stack.ss_flags); 4273 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4274 save_sigregs(env, &frame->uc.tuc_mcontext); 4275 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4276 __put_user((abi_ulong)set->sig[i], 4277 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4278 } 4279 4280 /* Set up to return from userspace. If provided, use a stub 4281 already in userspace. */ 4282 if (ka->sa_flags & TARGET_SA_RESTORER) { 4283 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4284 } else { 4285 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4286 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4287 (uint16_t *)(frame->retcode)); 4288 } 4289 4290 /* Set up backchain. */ 4291 __put_user(env->regs[15], (abi_ulong *) frame); 4292 4293 /* Set up registers for signal handler */ 4294 env->regs[15] = frame_addr; 4295 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4296 4297 env->regs[2] = sig; //map_signal(sig); 4298 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4299 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4300 return; 4301 4302 give_sigsegv: 4303 force_sig(TARGET_SIGSEGV); 4304 } 4305 4306 static int 4307 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4308 { 4309 int err = 0; 4310 int i; 4311 4312 for (i = 0; i < 16; i++) { 4313 __get_user(env->regs[i], &sc->regs.gprs[i]); 4314 } 4315 4316 __get_user(env->psw.mask, &sc->regs.psw.mask); 4317 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4318 (unsigned long long)env->psw.addr); 4319 __get_user(env->psw.addr, &sc->regs.psw.addr); 4320 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4321 4322 for (i = 0; i < 16; i++) { 4323 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4324 } 4325 for (i = 0; i < 16; i++) { 4326 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4327 } 4328 4329 return err; 4330 } 4331 4332 long do_sigreturn(CPUS390XState *env) 4333 { 4334 sigframe *frame; 4335 abi_ulong frame_addr = env->regs[15]; 4336 target_sigset_t target_set; 4337 sigset_t set; 4338 4339 trace_user_do_sigreturn(env, frame_addr); 4340 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4341 goto badframe; 4342 } 4343 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4344 4345 target_to_host_sigset_internal(&set, &target_set); 4346 set_sigmask(&set); /* ~_BLOCKABLE? */ 4347 4348 if (restore_sigregs(env, &frame->sregs)) { 4349 goto badframe; 4350 } 4351 4352 unlock_user_struct(frame, frame_addr, 0); 4353 return -TARGET_QEMU_ESIGRETURN; 4354 4355 badframe: 4356 force_sig(TARGET_SIGSEGV); 4357 return 0; 4358 } 4359 4360 long do_rt_sigreturn(CPUS390XState *env) 4361 { 4362 rt_sigframe *frame; 4363 abi_ulong frame_addr = env->regs[15]; 4364 sigset_t set; 4365 4366 trace_user_do_rt_sigreturn(env, frame_addr); 4367 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4368 goto badframe; 4369 } 4370 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4371 4372 set_sigmask(&set); /* ~_BLOCKABLE? 
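_BLOCKABLE in the kernel strips SIGKILL and SIGSTOP from the restored mask; no explicit filtering is done here, but the host sigprocmask() silently ignores attempts to block those two signals anyway.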
*/ 4373 4374 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4375 goto badframe; 4376 } 4377 4378 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4379 get_sp_from_cpustate(env)) == -EFAULT) { 4380 goto badframe; 4381 } 4382 unlock_user_struct(frame, frame_addr, 0); 4383 return -TARGET_QEMU_ESIGRETURN; 4384 4385 badframe: 4386 unlock_user_struct(frame, frame_addr, 0); 4387 force_sig(TARGET_SIGSEGV); 4388 return 0; 4389 } 4390 4391 #elif defined(TARGET_PPC) 4392 4393 /* Size of dummy stack frame allocated when calling signal handler. 4394 See arch/powerpc/include/asm/ptrace.h. */ 4395 #if defined(TARGET_PPC64) 4396 #define SIGNAL_FRAMESIZE 128 4397 #else 4398 #define SIGNAL_FRAMESIZE 64 4399 #endif 4400 4401 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4402 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4403 struct target_mcontext { 4404 target_ulong mc_gregs[48]; 4405 /* Includes fpscr. */ 4406 uint64_t mc_fregs[33]; 4407 target_ulong mc_pad[2]; 4408 /* We need to handle Altivec and SPE at the same time, which no 4409 kernel needs to do. Fortunately, the kernel defines this bit to 4410 be Altivec-register-large all the time, rather than trying to 4411 twiddle it based on the specific platform. */ 4412 union { 4413 /* SPE vector registers. One extra for SPEFSCR. */ 4414 uint32_t spe[33]; 4415 /* Altivec vector registers. The packing of VSCR and VRSAVE 4416 varies depending on whether we're PPC64 or not: PPC64 splits 4417 them apart; PPC32 stuffs them together. */ 4418 #if defined(TARGET_PPC64) 4419 #define QEMU_NVRREG 34 4420 #else 4421 #define QEMU_NVRREG 33 4422 #endif 4423 ppc_avr_t altivec[QEMU_NVRREG]; 4424 #undef QEMU_NVRREG 4425 } mc_vregs __attribute__((__aligned__(16))); 4426 }; 4427 4428 /* See arch/powerpc/include/asm/sigcontext.h. */ 4429 struct target_sigcontext { 4430 target_ulong _unused[4]; 4431 int32_t signal; 4432 #if defined(TARGET_PPC64) 4433 int32_t pad0; 4434 #endif 4435 target_ulong handler; 4436 target_ulong oldmask; 4437 target_ulong regs; /* struct pt_regs __user * */ 4438 #if defined(TARGET_PPC64) 4439 struct target_mcontext mcontext; 4440 #endif 4441 }; 4442 4443 /* Indices for target_mcontext.mc_gregs, below. 4444 See arch/powerpc/include/asm/ptrace.h for details. */ 4445 enum { 4446 TARGET_PT_R0 = 0, 4447 TARGET_PT_R1 = 1, 4448 TARGET_PT_R2 = 2, 4449 TARGET_PT_R3 = 3, 4450 TARGET_PT_R4 = 4, 4451 TARGET_PT_R5 = 5, 4452 TARGET_PT_R6 = 6, 4453 TARGET_PT_R7 = 7, 4454 TARGET_PT_R8 = 8, 4455 TARGET_PT_R9 = 9, 4456 TARGET_PT_R10 = 10, 4457 TARGET_PT_R11 = 11, 4458 TARGET_PT_R12 = 12, 4459 TARGET_PT_R13 = 13, 4460 TARGET_PT_R14 = 14, 4461 TARGET_PT_R15 = 15, 4462 TARGET_PT_R16 = 16, 4463 TARGET_PT_R17 = 17, 4464 TARGET_PT_R18 = 18, 4465 TARGET_PT_R19 = 19, 4466 TARGET_PT_R20 = 20, 4467 TARGET_PT_R21 = 21, 4468 TARGET_PT_R22 = 22, 4469 TARGET_PT_R23 = 23, 4470 TARGET_PT_R24 = 24, 4471 TARGET_PT_R25 = 25, 4472 TARGET_PT_R26 = 26, 4473 TARGET_PT_R27 = 27, 4474 TARGET_PT_R28 = 28, 4475 TARGET_PT_R29 = 29, 4476 TARGET_PT_R30 = 30, 4477 TARGET_PT_R31 = 31, 4478 TARGET_PT_NIP = 32, 4479 TARGET_PT_MSR = 33, 4480 TARGET_PT_ORIG_R3 = 34, 4481 TARGET_PT_CTR = 35, 4482 TARGET_PT_LNK = 36, 4483 TARGET_PT_XER = 37, 4484 TARGET_PT_CCR = 38, 4485 /* Yes, there are two registers with #39. One is 64-bit only. 
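TARGET_PT_MQ is the old 32-bit-only MQ register, while TARGET_PT_SOFTE, the 64-bit interrupt soft-enable flag, reuses the same slot.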
*/ 4486 TARGET_PT_MQ = 39, 4487 TARGET_PT_SOFTE = 39, 4488 TARGET_PT_TRAP = 40, 4489 TARGET_PT_DAR = 41, 4490 TARGET_PT_DSISR = 42, 4491 TARGET_PT_RESULT = 43, 4492 TARGET_PT_REGS_COUNT = 44 4493 }; 4494 4495 4496 struct target_ucontext { 4497 target_ulong tuc_flags; 4498 target_ulong tuc_link; /* struct ucontext __user * */ 4499 struct target_sigaltstack tuc_stack; 4500 #if !defined(TARGET_PPC64) 4501 int32_t tuc_pad[7]; 4502 target_ulong tuc_regs; /* struct mcontext __user * 4503 points to uc_mcontext field */ 4504 #endif 4505 target_sigset_t tuc_sigmask; 4506 #if defined(TARGET_PPC64) 4507 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4508 struct target_sigcontext tuc_sigcontext; 4509 #else 4510 int32_t tuc_maskext[30]; 4511 int32_t tuc_pad2[3]; 4512 struct target_mcontext tuc_mcontext; 4513 #endif 4514 }; 4515 4516 /* See arch/powerpc/kernel/signal_32.c. */ 4517 struct target_sigframe { 4518 struct target_sigcontext sctx; 4519 struct target_mcontext mctx; 4520 int32_t abigap[56]; 4521 }; 4522 4523 #if defined(TARGET_PPC64) 4524 4525 #define TARGET_TRAMP_SIZE 6 4526 4527 struct target_rt_sigframe { 4528 /* sys_rt_sigreturn requires the ucontext be the first field */ 4529 struct target_ucontext uc; 4530 target_ulong _unused[2]; 4531 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4532 target_ulong pinfo; /* struct siginfo __user * */ 4533 target_ulong puc; /* void __user * */ 4534 struct target_siginfo info; 4535 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4536 char abigap[288]; 4537 } __attribute__((aligned(16))); 4538 4539 #else 4540 4541 struct target_rt_sigframe { 4542 struct target_siginfo info; 4543 struct target_ucontext uc; 4544 int32_t abigap[56]; 4545 }; 4546 4547 #endif 4548 4549 #if defined(TARGET_PPC64) 4550 4551 struct target_func_ptr { 4552 target_ulong entry; 4553 target_ulong toc; 4554 }; 4555 4556 #endif 4557 4558 /* We use the mc_pad field for the signal return trampoline. */ 4559 #define tramp mc_pad 4560 4561 /* See arch/powerpc/kernel/signal.c. */ 4562 static target_ulong get_sigframe(struct target_sigaction *ka, 4563 CPUPPCState *env, 4564 int frame_size) 4565 { 4566 target_ulong oldsp; 4567 4568 oldsp = env->gpr[1]; 4569 4570 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4571 (sas_ss_flags(oldsp) == 0)) { 4572 oldsp = (target_sigaltstack_used.ss_sp 4573 + target_sigaltstack_used.ss_size); 4574 } 4575 4576 return (oldsp - frame_size) & ~0xFUL; 4577 } 4578 4579 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4580 { 4581 target_ulong msr = env->msr; 4582 int i; 4583 target_ulong ccr = 0; 4584 4585 /* In general, the kernel attempts to be intelligent about what it 4586 needs to save for Altivec/FP/SPE registers. We don't care that 4587 much, so we just go ahead and save everything. */ 4588 4589 /* Save general registers. */ 4590 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4591 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4592 } 4593 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4594 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4595 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4596 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4597 4598 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4599 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4600 } 4601 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4602 4603 /* Save Altivec registers if necessary. 
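The kernel only bothers when the task has actually used the vector unit; as noted at the top of save_user_regs(), we simply save them whenever the CPU model implements Altivec.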
*/ 4604 if (env->insns_flags & PPC_ALTIVEC) { 4605 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4606 ppc_avr_t *avr = &env->avr[i]; 4607 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4608 4609 __put_user(avr->u64[0], &vreg->u64[0]); 4610 __put_user(avr->u64[1], &vreg->u64[1]); 4611 } 4612 /* Set MSR_VR in the saved MSR value to indicate that 4613 frame->mc_vregs contains valid data. */ 4614 msr |= MSR_VR; 4615 __put_user((uint32_t)env->spr[SPR_VRSAVE], 4616 &frame->mc_vregs.altivec[32].u32[3]); 4617 } 4618 4619 /* Save floating point registers. */ 4620 if (env->insns_flags & PPC_FLOAT) { 4621 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4622 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4623 } 4624 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4625 } 4626 4627 /* Save SPE registers. The kernel only saves the high half. */ 4628 if (env->insns_flags & PPC_SPE) { 4629 #if defined(TARGET_PPC64) 4630 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4631 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4632 } 4633 #else 4634 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4635 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4636 } 4637 #endif 4638 /* Set MSR_SPE in the saved MSR value to indicate that 4639 frame->mc_vregs contains valid data. */ 4640 msr |= MSR_SPE; 4641 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4642 } 4643 4644 /* Store MSR. */ 4645 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4646 } 4647 4648 static void encode_trampoline(int sigret, uint32_t *tramp) 4649 { 4650 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4651 if (sigret) { 4652 __put_user(0x38000000 | sigret, &tramp[0]); 4653 __put_user(0x44000002, &tramp[1]); 4654 } 4655 } 4656 4657 static void restore_user_regs(CPUPPCState *env, 4658 struct target_mcontext *frame, int sig) 4659 { 4660 target_ulong save_r2 = 0; 4661 target_ulong msr; 4662 target_ulong ccr; 4663 4664 int i; 4665 4666 if (!sig) { 4667 save_r2 = env->gpr[2]; 4668 } 4669 4670 /* Restore general registers. */ 4671 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4672 __get_user(env->gpr[i], &frame->mc_gregs[i]); 4673 } 4674 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4675 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4676 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4677 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4678 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4679 4680 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4681 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4682 } 4683 4684 if (!sig) { 4685 env->gpr[2] = save_r2; 4686 } 4687 /* Restore MSR. */ 4688 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4689 4690 /* If doing signal return, restore the previous little-endian mode. */ 4691 if (sig) 4692 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 4693 4694 /* Restore Altivec registers if necessary. */ 4695 if (env->insns_flags & PPC_ALTIVEC) { 4696 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4697 ppc_avr_t *avr = &env->avr[i]; 4698 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4699 4700 __get_user(avr->u64[0], &vreg->u64[0]); 4701 __get_user(avr->u64[1], &vreg->u64[1]); 4702 } 4703 /* Set MSR_VEC in the saved MSR value to indicate that 4704 frame->mc_vregs contains valid data. */ 4705 __get_user(env->spr[SPR_VRSAVE], 4706 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3])); 4707 } 4708 4709 /* Restore floating point registers. 
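The FPSCR is stored as the 33rd entry of mc_fregs; only its low 32 bits are kept when it is read back (note the cast below).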
*/ 4710 if (env->insns_flags & PPC_FLOAT) { 4711 uint64_t fpscr; 4712 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4713 __get_user(env->fpr[i], &frame->mc_fregs[i]); 4714 } 4715 __get_user(fpscr, &frame->mc_fregs[32]); 4716 env->fpscr = (uint32_t) fpscr; 4717 } 4718 4719 /* Save SPE registers. The kernel only saves the high half. */ 4720 if (env->insns_flags & PPC_SPE) { 4721 #if defined(TARGET_PPC64) 4722 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4723 uint32_t hi; 4724 4725 __get_user(hi, &frame->mc_vregs.spe[i]); 4726 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4727 } 4728 #else 4729 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4730 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4731 } 4732 #endif 4733 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4734 } 4735 } 4736 4737 static void setup_frame(int sig, struct target_sigaction *ka, 4738 target_sigset_t *set, CPUPPCState *env) 4739 { 4740 struct target_sigframe *frame; 4741 struct target_sigcontext *sc; 4742 target_ulong frame_addr, newsp; 4743 int err = 0; 4744 #if defined(TARGET_PPC64) 4745 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4746 #endif 4747 4748 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4749 trace_user_setup_frame(env, frame_addr); 4750 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4751 goto sigsegv; 4752 sc = &frame->sctx; 4753 4754 __put_user(ka->_sa_handler, &sc->handler); 4755 __put_user(set->sig[0], &sc->oldmask); 4756 #if TARGET_ABI_BITS == 64 4757 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4758 #else 4759 __put_user(set->sig[1], &sc->_unused[3]); 4760 #endif 4761 __put_user(h2g(&frame->mctx), &sc->regs); 4762 __put_user(sig, &sc->signal); 4763 4764 /* Save user regs. */ 4765 save_user_regs(env, &frame->mctx); 4766 4767 /* Construct the trampoline code on the stack. */ 4768 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 4769 4770 /* The kernel checks for the presence of a VDSO here. We don't 4771 emulate a vdso, so use a sigreturn system call. */ 4772 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4773 4774 /* Turn off all fp exceptions. */ 4775 env->fpscr = 0; 4776 4777 /* Create a stack frame for the caller of the handler. */ 4778 newsp = frame_addr - SIGNAL_FRAMESIZE; 4779 err |= put_user(env->gpr[1], newsp, target_ulong); 4780 4781 if (err) 4782 goto sigsegv; 4783 4784 /* Set up registers for signal handler. */ 4785 env->gpr[1] = newsp; 4786 env->gpr[3] = sig; 4787 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4788 4789 #if defined(TARGET_PPC64) 4790 if (get_ppc64_abi(image) < 2) { 4791 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4792 struct target_func_ptr *handler = 4793 (struct target_func_ptr *)g2h(ka->_sa_handler); 4794 env->nip = tswapl(handler->entry); 4795 env->gpr[2] = tswapl(handler->toc); 4796 } else { 4797 /* ELFv2 PPC64 function pointers are entry points, but R12 4798 * must also be set */ 4799 env->nip = tswapl((target_ulong) ka->_sa_handler); 4800 env->gpr[12] = env->nip; 4801 } 4802 #else 4803 env->nip = (target_ulong) ka->_sa_handler; 4804 #endif 4805 4806 /* Signal handlers are entered in big-endian mode. 
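MSR_LE is cleared so the handler starts big-endian; the previous endianness is brought back from the saved MSR by restore_user_regs() at sigreturn time.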
*/ 4807 env->msr &= ~(1ull << MSR_LE); 4808 4809 unlock_user_struct(frame, frame_addr, 1); 4810 return; 4811 4812 sigsegv: 4813 unlock_user_struct(frame, frame_addr, 1); 4814 force_sig(TARGET_SIGSEGV); 4815 } 4816 4817 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4818 target_siginfo_t *info, 4819 target_sigset_t *set, CPUPPCState *env) 4820 { 4821 struct target_rt_sigframe *rt_sf; 4822 uint32_t *trampptr = 0; 4823 struct target_mcontext *mctx = 0; 4824 target_ulong rt_sf_addr, newsp = 0; 4825 int i, err = 0; 4826 #if defined(TARGET_PPC64) 4827 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4828 #endif 4829 4830 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4831 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4832 goto sigsegv; 4833 4834 tswap_siginfo(&rt_sf->info, info); 4835 4836 __put_user(0, &rt_sf->uc.tuc_flags); 4837 __put_user(0, &rt_sf->uc.tuc_link); 4838 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4839 &rt_sf->uc.tuc_stack.ss_sp); 4840 __put_user(sas_ss_flags(env->gpr[1]), 4841 &rt_sf->uc.tuc_stack.ss_flags); 4842 __put_user(target_sigaltstack_used.ss_size, 4843 &rt_sf->uc.tuc_stack.ss_size); 4844 #if !defined(TARGET_PPC64) 4845 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4846 &rt_sf->uc.tuc_regs); 4847 #endif 4848 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4849 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4850 } 4851 4852 #if defined(TARGET_PPC64) 4853 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 4854 trampptr = &rt_sf->trampoline[0]; 4855 #else 4856 mctx = &rt_sf->uc.tuc_mcontext; 4857 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 4858 #endif 4859 4860 save_user_regs(env, mctx); 4861 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 4862 4863 /* The kernel checks for the presence of a VDSO here. We don't 4864 emulate a vdso, so use a sigreturn system call. */ 4865 env->lr = (target_ulong) h2g(trampptr); 4866 4867 /* Turn off all fp exceptions. */ 4868 env->fpscr = 0; 4869 4870 /* Create a stack frame for the caller of the handler. */ 4871 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4872 err |= put_user(env->gpr[1], newsp, target_ulong); 4873 4874 if (err) 4875 goto sigsegv; 4876 4877 /* Set up registers for signal handler. */ 4878 env->gpr[1] = newsp; 4879 env->gpr[3] = (target_ulong) sig; 4880 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4881 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4882 env->gpr[6] = (target_ulong) h2g(rt_sf); 4883 4884 #if defined(TARGET_PPC64) 4885 if (get_ppc64_abi(image) < 2) { 4886 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4887 struct target_func_ptr *handler = 4888 (struct target_func_ptr *)g2h(ka->_sa_handler); 4889 env->nip = tswapl(handler->entry); 4890 env->gpr[2] = tswapl(handler->toc); 4891 } else { 4892 /* ELFv2 PPC64 function pointers are entry points, but R12 4893 * must also be set */ 4894 env->nip = tswapl((target_ulong) ka->_sa_handler); 4895 env->gpr[12] = env->nip; 4896 } 4897 #else 4898 env->nip = (target_ulong) ka->_sa_handler; 4899 #endif 4900 4901 /* Signal handlers are entered in big-endian mode. 
*/ 4902 env->msr &= ~(1ull << MSR_LE); 4903 4904 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4905 return; 4906 4907 sigsegv: 4908 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4909 force_sig(TARGET_SIGSEGV); 4910 4911 } 4912 4913 long do_sigreturn(CPUPPCState *env) 4914 { 4915 struct target_sigcontext *sc = NULL; 4916 struct target_mcontext *sr = NULL; 4917 target_ulong sr_addr = 0, sc_addr; 4918 sigset_t blocked; 4919 target_sigset_t set; 4920 4921 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4922 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4923 goto sigsegv; 4924 4925 #if defined(TARGET_PPC64) 4926 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 4927 #else 4928 __get_user(set.sig[0], &sc->oldmask); 4929 __get_user(set.sig[1], &sc->_unused[3]); 4930 #endif 4931 target_to_host_sigset_internal(&blocked, &set); 4932 set_sigmask(&blocked); 4933 4934 __get_user(sr_addr, &sc->regs); 4935 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4936 goto sigsegv; 4937 restore_user_regs(env, sr, 1); 4938 4939 unlock_user_struct(sr, sr_addr, 1); 4940 unlock_user_struct(sc, sc_addr, 1); 4941 return -TARGET_QEMU_ESIGRETURN; 4942 4943 sigsegv: 4944 unlock_user_struct(sr, sr_addr, 1); 4945 unlock_user_struct(sc, sc_addr, 1); 4946 force_sig(TARGET_SIGSEGV); 4947 return 0; 4948 } 4949 4950 /* See arch/powerpc/kernel/signal_32.c. */ 4951 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 4952 { 4953 struct target_mcontext *mcp; 4954 target_ulong mcp_addr; 4955 sigset_t blocked; 4956 target_sigset_t set; 4957 4958 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 4959 sizeof (set))) 4960 return 1; 4961 4962 #if defined(TARGET_PPC64) 4963 mcp_addr = h2g(ucp) + 4964 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 4965 #else 4966 __get_user(mcp_addr, &ucp->tuc_regs); 4967 #endif 4968 4969 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 4970 return 1; 4971 4972 target_to_host_sigset_internal(&blocked, &set); 4973 set_sigmask(&blocked); 4974 restore_user_regs(env, mcp, sig); 4975 4976 unlock_user_struct(mcp, mcp_addr, 1); 4977 return 0; 4978 } 4979 4980 long do_rt_sigreturn(CPUPPCState *env) 4981 { 4982 struct target_rt_sigframe *rt_sf = NULL; 4983 target_ulong rt_sf_addr; 4984 4985 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 4986 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 4987 goto sigsegv; 4988 4989 if (do_setcontext(&rt_sf->uc, env, 1)) 4990 goto sigsegv; 4991 4992 do_sigaltstack(rt_sf_addr 4993 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 4994 0, env->gpr[1]); 4995 4996 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4997 return -TARGET_QEMU_ESIGRETURN; 4998 4999 sigsegv: 5000 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5001 force_sig(TARGET_SIGSEGV); 5002 return 0; 5003 } 5004 5005 #elif defined(TARGET_M68K) 5006 5007 struct target_sigcontext { 5008 abi_ulong sc_mask; 5009 abi_ulong sc_usp; 5010 abi_ulong sc_d0; 5011 abi_ulong sc_d1; 5012 abi_ulong sc_a0; 5013 abi_ulong sc_a1; 5014 unsigned short sc_sr; 5015 abi_ulong sc_pc; 5016 }; 5017 5018 struct target_sigframe 5019 { 5020 abi_ulong pretcode; 5021 int sig; 5022 int code; 5023 abi_ulong psc; 5024 char retcode[8]; 5025 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5026 struct target_sigcontext sc; 5027 }; 5028 5029 typedef int target_greg_t; 5030 #define TARGET_NGREG 18 5031 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5032 5033 typedef struct target_fpregset { 5034 int f_fpcntl[3]; 5035 int f_fpregs[8*3]; 5036 } target_fpregset_t; 5037 
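/* The m68k machine context: a version word, the 18 general registers (d0-d7, a0-a7, pc and sr) and the FPU control and data registers. */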
5038 struct target_mcontext { 5039 int version; 5040 target_gregset_t gregs; 5041 target_fpregset_t fpregs; 5042 }; 5043 5044 #define TARGET_MCONTEXT_VERSION 2 5045 5046 struct target_ucontext { 5047 abi_ulong tuc_flags; 5048 abi_ulong tuc_link; 5049 target_stack_t tuc_stack; 5050 struct target_mcontext tuc_mcontext; 5051 abi_long tuc_filler[80]; 5052 target_sigset_t tuc_sigmask; 5053 }; 5054 5055 struct target_rt_sigframe 5056 { 5057 abi_ulong pretcode; 5058 int sig; 5059 abi_ulong pinfo; 5060 abi_ulong puc; 5061 char retcode[8]; 5062 struct target_siginfo info; 5063 struct target_ucontext uc; 5064 }; 5065 5066 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5067 abi_ulong mask) 5068 { 5069 __put_user(mask, &sc->sc_mask); 5070 __put_user(env->aregs[7], &sc->sc_usp); 5071 __put_user(env->dregs[0], &sc->sc_d0); 5072 __put_user(env->dregs[1], &sc->sc_d1); 5073 __put_user(env->aregs[0], &sc->sc_a0); 5074 __put_user(env->aregs[1], &sc->sc_a1); 5075 __put_user(env->sr, &sc->sc_sr); 5076 __put_user(env->pc, &sc->sc_pc); 5077 } 5078 5079 static void 5080 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5081 { 5082 int temp; 5083 5084 __get_user(env->aregs[7], &sc->sc_usp); 5085 __get_user(env->dregs[0], &sc->sc_d0); 5086 __get_user(env->dregs[1], &sc->sc_d1); 5087 __get_user(env->aregs[0], &sc->sc_a0); 5088 __get_user(env->aregs[1], &sc->sc_a1); 5089 __get_user(env->pc, &sc->sc_pc); 5090 __get_user(temp, &sc->sc_sr); 5091 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5092 } 5093 5094 /* 5095 * Determine which stack to use.. 5096 */ 5097 static inline abi_ulong 5098 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5099 size_t frame_size) 5100 { 5101 unsigned long sp; 5102 5103 sp = regs->aregs[7]; 5104 5105 /* This is the X/Open sanctioned signal stack switching. */ 5106 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5107 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5108 } 5109 5110 return ((sp - frame_size) & -8UL); 5111 } 5112 5113 static void setup_frame(int sig, struct target_sigaction *ka, 5114 target_sigset_t *set, CPUM68KState *env) 5115 { 5116 struct target_sigframe *frame; 5117 abi_ulong frame_addr; 5118 abi_ulong retcode_addr; 5119 abi_ulong sc_addr; 5120 int i; 5121 5122 frame_addr = get_sigframe(ka, env, sizeof *frame); 5123 trace_user_setup_frame(env, frame_addr); 5124 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5125 goto give_sigsegv; 5126 } 5127 5128 __put_user(sig, &frame->sig); 5129 5130 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5131 __put_user(sc_addr, &frame->psc); 5132 5133 setup_sigcontext(&frame->sc, env, set->sig[0]); 5134 5135 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5136 __put_user(set->sig[i], &frame->extramask[i - 1]); 5137 } 5138 5139 /* Set up to return from userspace. 
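The handler's return address (pretcode) is pointed at the retcode buffer inside the frame, which holds a small moveq/trap trampoline that issues the sigreturn syscall.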
*/ 5140 5141 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5142 __put_user(retcode_addr, &frame->pretcode); 5143 5144 /* moveq #,d0; trap #0 */ 5145 5146 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5147 (uint32_t *)(frame->retcode)); 5148 5149 /* Set up to return from userspace */ 5150 5151 env->aregs[7] = frame_addr; 5152 env->pc = ka->_sa_handler; 5153 5154 unlock_user_struct(frame, frame_addr, 1); 5155 return; 5156 5157 give_sigsegv: 5158 force_sig(TARGET_SIGSEGV); 5159 } 5160 5161 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5162 CPUM68KState *env) 5163 { 5164 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5165 5166 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5167 __put_user(env->dregs[0], &gregs[0]); 5168 __put_user(env->dregs[1], &gregs[1]); 5169 __put_user(env->dregs[2], &gregs[2]); 5170 __put_user(env->dregs[3], &gregs[3]); 5171 __put_user(env->dregs[4], &gregs[4]); 5172 __put_user(env->dregs[5], &gregs[5]); 5173 __put_user(env->dregs[6], &gregs[6]); 5174 __put_user(env->dregs[7], &gregs[7]); 5175 __put_user(env->aregs[0], &gregs[8]); 5176 __put_user(env->aregs[1], &gregs[9]); 5177 __put_user(env->aregs[2], &gregs[10]); 5178 __put_user(env->aregs[3], &gregs[11]); 5179 __put_user(env->aregs[4], &gregs[12]); 5180 __put_user(env->aregs[5], &gregs[13]); 5181 __put_user(env->aregs[6], &gregs[14]); 5182 __put_user(env->aregs[7], &gregs[15]); 5183 __put_user(env->pc, &gregs[16]); 5184 __put_user(env->sr, &gregs[17]); 5185 5186 return 0; 5187 } 5188 5189 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5190 struct target_ucontext *uc) 5191 { 5192 int temp; 5193 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5194 5195 __get_user(temp, &uc->tuc_mcontext.version); 5196 if (temp != TARGET_MCONTEXT_VERSION) 5197 goto badframe; 5198 5199 /* restore passed registers */ 5200 __get_user(env->dregs[0], &gregs[0]); 5201 __get_user(env->dregs[1], &gregs[1]); 5202 __get_user(env->dregs[2], &gregs[2]); 5203 __get_user(env->dregs[3], &gregs[3]); 5204 __get_user(env->dregs[4], &gregs[4]); 5205 __get_user(env->dregs[5], &gregs[5]); 5206 __get_user(env->dregs[6], &gregs[6]); 5207 __get_user(env->dregs[7], &gregs[7]); 5208 __get_user(env->aregs[0], &gregs[8]); 5209 __get_user(env->aregs[1], &gregs[9]); 5210 __get_user(env->aregs[2], &gregs[10]); 5211 __get_user(env->aregs[3], &gregs[11]); 5212 __get_user(env->aregs[4], &gregs[12]); 5213 __get_user(env->aregs[5], &gregs[13]); 5214 __get_user(env->aregs[6], &gregs[14]); 5215 __get_user(env->aregs[7], &gregs[15]); 5216 __get_user(env->pc, &gregs[16]); 5217 __get_user(temp, &gregs[17]); 5218 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5219 5220 return 0; 5221 5222 badframe: 5223 return 1; 5224 } 5225 5226 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5227 target_siginfo_t *info, 5228 target_sigset_t *set, CPUM68KState *env) 5229 { 5230 struct target_rt_sigframe *frame; 5231 abi_ulong frame_addr; 5232 abi_ulong retcode_addr; 5233 abi_ulong info_addr; 5234 abi_ulong uc_addr; 5235 int err = 0; 5236 int i; 5237 5238 frame_addr = get_sigframe(ka, env, sizeof *frame); 5239 trace_user_setup_rt_frame(env, frame_addr); 5240 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5241 goto give_sigsegv; 5242 } 5243 5244 __put_user(sig, &frame->sig); 5245 5246 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5247 __put_user(info_addr, &frame->pinfo); 5248 5249 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5250 
__put_user(uc_addr, &frame->puc); 5251 5252 tswap_siginfo(&frame->info, info); 5253 5254 /* Create the ucontext */ 5255 5256 __put_user(0, &frame->uc.tuc_flags); 5257 __put_user(0, &frame->uc.tuc_link); 5258 __put_user(target_sigaltstack_used.ss_sp, 5259 &frame->uc.tuc_stack.ss_sp); 5260 __put_user(sas_ss_flags(env->aregs[7]), 5261 &frame->uc.tuc_stack.ss_flags); 5262 __put_user(target_sigaltstack_used.ss_size, 5263 &frame->uc.tuc_stack.ss_size); 5264 err |= target_rt_setup_ucontext(&frame->uc, env); 5265 5266 if (err) 5267 goto give_sigsegv; 5268 5269 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5270 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5271 } 5272 5273 /* Set up to return from userspace. */ 5274 5275 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5276 __put_user(retcode_addr, &frame->pretcode); 5277 5278 /* moveq #,d0; notb d0; trap #0 */ 5279 5280 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 5281 (uint32_t *)(frame->retcode + 0)); 5282 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); 5283 5284 if (err) 5285 goto give_sigsegv; 5286 5287 /* Set up to return from userspace */ 5288 5289 env->aregs[7] = frame_addr; 5290 env->pc = ka->_sa_handler; 5291 5292 unlock_user_struct(frame, frame_addr, 1); 5293 return; 5294 5295 give_sigsegv: 5296 unlock_user_struct(frame, frame_addr, 1); 5297 force_sig(TARGET_SIGSEGV); 5298 } 5299 5300 long do_sigreturn(CPUM68KState *env) 5301 { 5302 struct target_sigframe *frame; 5303 abi_ulong frame_addr = env->aregs[7] - 4; 5304 target_sigset_t target_set; 5305 sigset_t set; 5306 int i; 5307 5308 trace_user_do_sigreturn(env, frame_addr); 5309 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5310 goto badframe; 5311 5312 /* set blocked signals */ 5313 5314 __get_user(target_set.sig[0], &frame->sc.sc_mask); 5315 5316 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5317 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 5318 } 5319 5320 target_to_host_sigset_internal(&set, &target_set); 5321 set_sigmask(&set); 5322 5323 /* restore registers */ 5324 5325 restore_sigcontext(env, &frame->sc); 5326 5327 unlock_user_struct(frame, frame_addr, 0); 5328 return -TARGET_QEMU_ESIGRETURN; 5329 5330 badframe: 5331 force_sig(TARGET_SIGSEGV); 5332 return 0; 5333 } 5334 5335 long do_rt_sigreturn(CPUM68KState *env) 5336 { 5337 struct target_rt_sigframe *frame; 5338 abi_ulong frame_addr = env->aregs[7] - 4; 5339 target_sigset_t target_set; 5340 sigset_t set; 5341 5342 trace_user_do_rt_sigreturn(env, frame_addr); 5343 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5344 goto badframe; 5345 5346 target_to_host_sigset_internal(&set, &target_set); 5347 set_sigmask(&set); 5348 5349 /* restore registers */ 5350 5351 if (target_rt_restore_ucontext(env, &frame->uc)) 5352 goto badframe; 5353 5354 if (do_sigaltstack(frame_addr + 5355 offsetof(struct target_rt_sigframe, uc.tuc_stack), 5356 0, get_sp_from_cpustate(env)) == -EFAULT) 5357 goto badframe; 5358 5359 unlock_user_struct(frame, frame_addr, 0); 5360 return -TARGET_QEMU_ESIGRETURN; 5361 5362 badframe: 5363 unlock_user_struct(frame, frame_addr, 0); 5364 force_sig(TARGET_SIGSEGV); 5365 return 0; 5366 } 5367 5368 #elif defined(TARGET_ALPHA) 5369 5370 struct target_sigcontext { 5371 abi_long sc_onstack; 5372 abi_long sc_mask; 5373 abi_long sc_pc; 5374 abi_long sc_ps; 5375 abi_long sc_regs[32]; 5376 abi_long sc_ownedfp; 5377 abi_long sc_fpregs[32]; 5378 abi_ulong sc_fpcr; 5379 abi_ulong sc_fp_control; 5380 abi_ulong sc_reserved1; 5381 abi_ulong sc_reserved2; 5382 
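/* The remaining fields cover the signal stack description, the OSF/1 trap arguments and the FP trap state; setup_sigcontext() below only zero-fills the trap arguments (see the FIXMEs). */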
abi_ulong sc_ssize; 5383 abi_ulong sc_sbase; 5384 abi_ulong sc_traparg_a0; 5385 abi_ulong sc_traparg_a1; 5386 abi_ulong sc_traparg_a2; 5387 abi_ulong sc_fp_trap_pc; 5388 abi_ulong sc_fp_trigger_sum; 5389 abi_ulong sc_fp_trigger_inst; 5390 }; 5391 5392 struct target_ucontext { 5393 abi_ulong tuc_flags; 5394 abi_ulong tuc_link; 5395 abi_ulong tuc_osf_sigmask; 5396 target_stack_t tuc_stack; 5397 struct target_sigcontext tuc_mcontext; 5398 target_sigset_t tuc_sigmask; 5399 }; 5400 5401 struct target_sigframe { 5402 struct target_sigcontext sc; 5403 unsigned int retcode[3]; 5404 }; 5405 5406 struct target_rt_sigframe { 5407 target_siginfo_t info; 5408 struct target_ucontext uc; 5409 unsigned int retcode[3]; 5410 }; 5411 5412 #define INSN_MOV_R30_R16 0x47fe0410 5413 #define INSN_LDI_R0 0x201f0000 5414 #define INSN_CALLSYS 0x00000083 5415 5416 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5417 abi_ulong frame_addr, target_sigset_t *set) 5418 { 5419 int i; 5420 5421 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5422 __put_user(set->sig[0], &sc->sc_mask); 5423 __put_user(env->pc, &sc->sc_pc); 5424 __put_user(8, &sc->sc_ps); 5425 5426 for (i = 0; i < 31; ++i) { 5427 __put_user(env->ir[i], &sc->sc_regs[i]); 5428 } 5429 __put_user(0, &sc->sc_regs[31]); 5430 5431 for (i = 0; i < 31; ++i) { 5432 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5433 } 5434 __put_user(0, &sc->sc_fpregs[31]); 5435 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5436 5437 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5438 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5439 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5440 } 5441 5442 static void restore_sigcontext(CPUAlphaState *env, 5443 struct target_sigcontext *sc) 5444 { 5445 uint64_t fpcr; 5446 int i; 5447 5448 __get_user(env->pc, &sc->sc_pc); 5449 5450 for (i = 0; i < 31; ++i) { 5451 __get_user(env->ir[i], &sc->sc_regs[i]); 5452 } 5453 for (i = 0; i < 31; ++i) { 5454 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5455 } 5456 5457 __get_user(fpcr, &sc->sc_fpcr); 5458 cpu_alpha_store_fpcr(env, fpcr); 5459 } 5460 5461 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5462 CPUAlphaState *env, 5463 unsigned long framesize) 5464 { 5465 abi_ulong sp = env->ir[IR_SP]; 5466 5467 /* This is the X/Open sanctioned signal stack switching. 
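The alternate stack is only used when the handler was registered with SA_ONSTACK and SP is not already inside that stack.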
*/ 5468 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5469 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5470 } 5471 return (sp - framesize) & -32; 5472 } 5473 5474 static void setup_frame(int sig, struct target_sigaction *ka, 5475 target_sigset_t *set, CPUAlphaState *env) 5476 { 5477 abi_ulong frame_addr, r26; 5478 struct target_sigframe *frame; 5479 int err = 0; 5480 5481 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5482 trace_user_setup_frame(env, frame_addr); 5483 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5484 goto give_sigsegv; 5485 } 5486 5487 setup_sigcontext(&frame->sc, env, frame_addr, set); 5488 5489 if (ka->sa_restorer) { 5490 r26 = ka->sa_restorer; 5491 } else { 5492 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5493 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5494 &frame->retcode[1]); 5495 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5496 /* imb() */ 5497 r26 = frame_addr; 5498 } 5499 5500 unlock_user_struct(frame, frame_addr, 1); 5501 5502 if (err) { 5503 give_sigsegv: 5504 if (sig == TARGET_SIGSEGV) { 5505 ka->_sa_handler = TARGET_SIG_DFL; 5506 } 5507 force_sig(TARGET_SIGSEGV); 5508 } 5509 5510 env->ir[IR_RA] = r26; 5511 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5512 env->ir[IR_A0] = sig; 5513 env->ir[IR_A1] = 0; 5514 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5515 env->ir[IR_SP] = frame_addr; 5516 } 5517 5518 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5519 target_siginfo_t *info, 5520 target_sigset_t *set, CPUAlphaState *env) 5521 { 5522 abi_ulong frame_addr, r26; 5523 struct target_rt_sigframe *frame; 5524 int i, err = 0; 5525 5526 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5527 trace_user_setup_rt_frame(env, frame_addr); 5528 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5529 goto give_sigsegv; 5530 } 5531 5532 tswap_siginfo(&frame->info, info); 5533 5534 __put_user(0, &frame->uc.tuc_flags); 5535 __put_user(0, &frame->uc.tuc_link); 5536 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5537 __put_user(target_sigaltstack_used.ss_sp, 5538 &frame->uc.tuc_stack.ss_sp); 5539 __put_user(sas_ss_flags(env->ir[IR_SP]), 5540 &frame->uc.tuc_stack.ss_flags); 5541 __put_user(target_sigaltstack_used.ss_size, 5542 &frame->uc.tuc_stack.ss_size); 5543 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5544 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5545 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5546 } 5547 5548 if (ka->sa_restorer) { 5549 r26 = ka->sa_restorer; 5550 } else { 5551 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5552 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5553 &frame->retcode[1]); 5554 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5555 /* imb(); */ 5556 r26 = frame_addr; 5557 } 5558 5559 if (err) { 5560 give_sigsegv: 5561 if (sig == TARGET_SIGSEGV) { 5562 ka->_sa_handler = TARGET_SIG_DFL; 5563 } 5564 force_sig(TARGET_SIGSEGV); 5565 } 5566 5567 env->ir[IR_RA] = r26; 5568 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5569 env->ir[IR_A0] = sig; 5570 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5571 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5572 env->ir[IR_SP] = frame_addr; 5573 } 5574 5575 long do_sigreturn(CPUAlphaState *env) 5576 { 5577 struct target_sigcontext *sc; 5578 abi_ulong sc_addr = env->ir[IR_A0]; 5579 target_sigset_t target_set; 5580 sigset_t set; 5581 5582 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5583 goto 
badframe; 5584 } 5585 5586 target_sigemptyset(&target_set); 5587 __get_user(target_set.sig[0], &sc->sc_mask); 5588 5589 target_to_host_sigset_internal(&set, &target_set); 5590 set_sigmask(&set); 5591 5592 restore_sigcontext(env, sc); 5593 unlock_user_struct(sc, sc_addr, 0); 5594 return -TARGET_QEMU_ESIGRETURN; 5595 5596 badframe: 5597 force_sig(TARGET_SIGSEGV); 5598 } 5599 5600 long do_rt_sigreturn(CPUAlphaState *env) 5601 { 5602 abi_ulong frame_addr = env->ir[IR_A0]; 5603 struct target_rt_sigframe *frame; 5604 sigset_t set; 5605 5606 trace_user_do_rt_sigreturn(env, frame_addr); 5607 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5608 goto badframe; 5609 } 5610 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5611 set_sigmask(&set); 5612 5613 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5614 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5615 uc.tuc_stack), 5616 0, env->ir[IR_SP]) == -EFAULT) { 5617 goto badframe; 5618 } 5619 5620 unlock_user_struct(frame, frame_addr, 0); 5621 return -TARGET_QEMU_ESIGRETURN; 5622 5623 5624 badframe: 5625 unlock_user_struct(frame, frame_addr, 0); 5626 force_sig(TARGET_SIGSEGV); 5627 } 5628 5629 #elif defined(TARGET_TILEGX) 5630 5631 struct target_sigcontext { 5632 union { 5633 /* General-purpose registers. */ 5634 abi_ulong gregs[56]; 5635 struct { 5636 abi_ulong __gregs[53]; 5637 abi_ulong tp; /* Aliases gregs[TREG_TP]. */ 5638 abi_ulong sp; /* Aliases gregs[TREG_SP]. */ 5639 abi_ulong lr; /* Aliases gregs[TREG_LR]. */ 5640 }; 5641 }; 5642 abi_ulong pc; /* Program counter. */ 5643 abi_ulong ics; /* In Interrupt Critical Section? */ 5644 abi_ulong faultnum; /* Fault number. */ 5645 abi_ulong pad[5]; 5646 }; 5647 5648 struct target_ucontext { 5649 abi_ulong tuc_flags; 5650 abi_ulong tuc_link; 5651 target_stack_t tuc_stack; 5652 struct target_sigcontext tuc_mcontext; 5653 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 5654 }; 5655 5656 struct target_rt_sigframe { 5657 unsigned char save_area[16]; /* caller save area */ 5658 struct target_siginfo info; 5659 struct target_ucontext uc; 5660 abi_ulong retcode[2]; 5661 }; 5662 5663 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */ 5664 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */ 5665 5666 5667 static void setup_sigcontext(struct target_sigcontext *sc, 5668 CPUArchState *env, int signo) 5669 { 5670 int i; 5671 5672 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5673 __put_user(env->regs[i], &sc->gregs[i]); 5674 } 5675 5676 __put_user(env->pc, &sc->pc); 5677 __put_user(0, &sc->ics); 5678 __put_user(signo, &sc->faultnum); 5679 } 5680 5681 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc) 5682 { 5683 int i; 5684 5685 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5686 __get_user(env->regs[i], &sc->gregs[i]); 5687 } 5688 5689 __get_user(env->pc, &sc->pc); 5690 } 5691 5692 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, 5693 size_t frame_size) 5694 { 5695 unsigned long sp = env->regs[TILEGX_R_SP]; 5696 5697 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) { 5698 return -1UL; 5699 } 5700 5701 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 5702 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5703 } 5704 5705 sp -= frame_size; 5706 sp &= -16UL; 5707 return sp; 5708 } 5709 5710 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5711 target_siginfo_t *info, 5712 target_sigset_t *set, CPUArchState *env) 5713 { 5714 abi_ulong 
frame_addr; 5715 struct target_rt_sigframe *frame; 5716 unsigned long restorer; 5717 5718 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5719 trace_user_setup_rt_frame(env, frame_addr); 5720 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5721 goto give_sigsegv; 5722 } 5723 5724 /* Always write at least the signal number for the stack backtracer. */ 5725 if (ka->sa_flags & TARGET_SA_SIGINFO) { 5726 /* At sigreturn time, restore the callee-save registers too. */ 5727 tswap_siginfo(&frame->info, info); 5728 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */ 5729 } else { 5730 __put_user(info->si_signo, &frame->info.si_signo); 5731 } 5732 5733 /* Create the ucontext. */ 5734 __put_user(0, &frame->uc.tuc_flags); 5735 __put_user(0, &frame->uc.tuc_link); 5736 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 5737 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]), 5738 &frame->uc.tuc_stack.ss_flags); 5739 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 5740 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo); 5741 5742 if (ka->sa_flags & TARGET_SA_RESTORER) { 5743 restorer = (unsigned long) ka->sa_restorer; 5744 } else { 5745 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]); 5746 __put_user(INSN_SWINT1, &frame->retcode[1]); 5747 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode); 5748 } 5749 env->pc = (unsigned long) ka->_sa_handler; 5750 env->regs[TILEGX_R_SP] = (unsigned long) frame; 5751 env->regs[TILEGX_R_LR] = restorer; 5752 env->regs[0] = (unsigned long) sig; 5753 env->regs[1] = (unsigned long) &frame->info; 5754 env->regs[2] = (unsigned long) &frame->uc; 5755 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */ 5756 5757 unlock_user_struct(frame, frame_addr, 1); 5758 return; 5759 5760 give_sigsegv: 5761 if (sig == TARGET_SIGSEGV) { 5762 ka->_sa_handler = TARGET_SIG_DFL; 5763 } 5764 force_sig(TARGET_SIGSEGV /* , current */); 5765 } 5766 5767 long do_rt_sigreturn(CPUTLGState *env) 5768 { 5769 abi_ulong frame_addr = env->regs[TILEGX_R_SP]; 5770 struct target_rt_sigframe *frame; 5771 sigset_t set; 5772 5773 trace_user_do_rt_sigreturn(env, frame_addr); 5774 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5775 goto badframe; 5776 } 5777 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5778 set_sigmask(&set); 5779 5780 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5781 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5782 uc.tuc_stack), 5783 0, env->regs[TILEGX_R_SP]) == -EFAULT) { 5784 goto badframe; 5785 } 5786 5787 unlock_user_struct(frame, frame_addr, 0); 5788 return -TARGET_QEMU_ESIGRETURN; 5789 5790 5791 badframe: 5792 unlock_user_struct(frame, frame_addr, 0); 5793 force_sig(TARGET_SIGSEGV); 5794 } 5795 5796 #else 5797 5798 static void setup_frame(int sig, struct target_sigaction *ka, 5799 target_sigset_t *set, CPUArchState *env) 5800 { 5801 fprintf(stderr, "setup_frame: not implemented\n"); 5802 } 5803 5804 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5805 target_siginfo_t *info, 5806 target_sigset_t *set, CPUArchState *env) 5807 { 5808 fprintf(stderr, "setup_rt_frame: not implemented\n"); 5809 } 5810 5811 long do_sigreturn(CPUArchState *env) 5812 { 5813 fprintf(stderr, "do_sigreturn: not implemented\n"); 5814 return -TARGET_ENOSYS; 5815 } 5816 5817 long do_rt_sigreturn(CPUArchState *env) 5818 { 5819 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 5820 return -TARGET_ENOSYS; 5821 } 5822 5823 #endif 
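/* Everything below is target-independent: it picks which pending signal to deliver, applies default/ignore semantics, and calls the per-target setup_frame()/setup_rt_frame() helpers above to build the guest stack frame. */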
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            force_sig(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

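        /*
         * Check the thread's forced synchronous signal (e.g. SIGSEGV or
         * SIGBUS raised by the guest's own execution) before scanning the
         * table of queued asynchronous signals; it must be delivered even
         * if the guest has blocked or ignored it.
         */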
    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}