/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}
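/* Illustrative note (not part of the original source): on_sig_stack() relies
 * on unsigned wrap-around so that a single comparison covers both bounds.
 * For example, with ss_sp = 0x10000 and ss_size = 0x2000, sp = 0x11800 gives
 * 0x1800 < 0x2000 (on the stack), while sp = 0xf000 wraps to a huge value and
 * correctly reports "not on the signal stack".
 */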
int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for(i = 1;i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
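/* Usage sketch (illustrative, not from the original file): callers that are
 * about to modify guest signal state typically do
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *     ... update ts->signal_mask or sigact_table ...
 *
 * and rely on process_pending_signals() to unblock host signals again, as
 * do_sigprocmask() and do_sigaction() below demonstrate.
 */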
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_X86_64)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}
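/* Worked example of the si_code packing used above (illustrative only): for
 * a SIGCHLD whose host si_code is CLD_EXITED (1),
 * host_to_target_siginfo_noswap() stores deposit32(1, 16, 16, QEMU_SI_CHLD),
 * i.e. QEMU_SI_CHLD in bits [31:16] and the real code in bits [15:0];
 * tswap_siginfo() later recovers them with extract32(..., 16, 16) and
 * sextract32(..., 0, 16), so only the low 16 bits ever reach the guest.
 */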
/* XXX: we assume only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
#if !(defined(TARGET_X86_64) || defined(TARGET_UNICORE32))
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
static void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
static void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif
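/* Added commentary: force_sigsegv() is what the setup_frame()/setup_rt_frame()
 * implementations below call when they fail to write the signal frame to
 * guest memory, and dump_core_and_abort() that follows is the end of the line
 * for signals whose guest disposition is default-fatal.  For example, a guest
 * killed by an unhandled SIGSEGV should leave the QEMU process itself
 * terminated by SIGSEGV, so a parent shell reports status 128 + 11 = 139,
 * just as it would for a native binary.
 */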
/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves the signal and we wait
     * for it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
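/* Usage sketch (illustrative, not from the original file): synchronous
 * exceptions raised by the CPU emulation are reported through queue_signal()
 * from the per-target cpu_loop(), roughly as
 *
 *     info.si_signo = TARGET_SIGSEGV;
 *     info.si_errno = 0;
 *     info.si_code = TARGET_SEGV_MAPERR;
 *     info._sifields._sigfault._addr = fault_address;
 *     queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
 *
 * whereas asynchronous host signals arrive via host_signal_handler() above.
 */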
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

#if defined(TARGET_I386) && TARGET_ABI_BITS == 32

/* from the Linux kernel */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    abi_ulong element[4];
};

struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t status;
    uint16_t magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];  /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8];  /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC 0x0000

struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct sigframe
{
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};
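/* Added commentary: the two structures above mirror the i386 kernel's signal
 * frames.  For the non-RT case the guest stack ends up laid out as (lowest
 * address first):
 *
 *     pretcode    - return address the handler's "ret" jumps to
 *     sig         - signal number argument
 *     sc, fpstate - saved CPU and FPU state
 *     extramask   - the rest of the blocked-signal mask
 *     retcode     - sigreturn trampoline used when no SA_RESTORER is given
 */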
922 */ 923 924 /* XXX: save x87 state */ 925 static void setup_sigcontext(struct target_sigcontext *sc, 926 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, 927 abi_ulong fpstate_addr) 928 { 929 CPUState *cs = CPU(x86_env_get_cpu(env)); 930 uint16_t magic; 931 932 /* already locked in setup_frame() */ 933 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 934 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 935 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 936 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 937 __put_user(env->regs[R_EDI], &sc->edi); 938 __put_user(env->regs[R_ESI], &sc->esi); 939 __put_user(env->regs[R_EBP], &sc->ebp); 940 __put_user(env->regs[R_ESP], &sc->esp); 941 __put_user(env->regs[R_EBX], &sc->ebx); 942 __put_user(env->regs[R_EDX], &sc->edx); 943 __put_user(env->regs[R_ECX], &sc->ecx); 944 __put_user(env->regs[R_EAX], &sc->eax); 945 __put_user(cs->exception_index, &sc->trapno); 946 __put_user(env->error_code, &sc->err); 947 __put_user(env->eip, &sc->eip); 948 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 949 __put_user(env->eflags, &sc->eflags); 950 __put_user(env->regs[R_ESP], &sc->esp_at_signal); 951 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 952 953 cpu_x86_fsave(env, fpstate_addr, 1); 954 fpstate->status = fpstate->sw; 955 magic = 0xffff; 956 __put_user(magic, &fpstate->magic); 957 __put_user(fpstate_addr, &sc->fpstate); 958 959 /* non-iBCS2 extensions.. */ 960 __put_user(mask, &sc->oldmask); 961 __put_user(env->cr[2], &sc->cr2); 962 } 963 964 /* 965 * Determine which stack to use.. 966 */ 967 968 static inline abi_ulong 969 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) 970 { 971 unsigned long esp; 972 973 /* Default to using normal stack */ 974 esp = env->regs[R_ESP]; 975 /* This is the X/Open sanctioned signal stack switching. */ 976 if (ka->sa_flags & TARGET_SA_ONSTACK) { 977 if (sas_ss_flags(esp) == 0) { 978 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 979 } 980 } else { 981 982 /* This is the legacy signal stack switching. */ 983 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 984 !(ka->sa_flags & TARGET_SA_RESTORER) && 985 ka->sa_restorer) { 986 esp = (unsigned long) ka->sa_restorer; 987 } 988 } 989 return (esp - frame_size) & -8ul; 990 } 991 992 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 993 static void setup_frame(int sig, struct target_sigaction *ka, 994 target_sigset_t *set, CPUX86State *env) 995 { 996 abi_ulong frame_addr; 997 struct sigframe *frame; 998 int i; 999 1000 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1001 trace_user_setup_frame(env, frame_addr); 1002 1003 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1004 goto give_sigsegv; 1005 1006 __put_user(sig, &frame->sig); 1007 1008 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 1009 frame_addr + offsetof(struct sigframe, fpstate)); 1010 1011 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1012 __put_user(set->sig[i], &frame->extramask[i - 1]); 1013 } 1014 1015 /* Set up to return from userspace. If provided, use a stub 1016 already in userspace. 
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
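/* Added note on the trampoline bytes used above (illustrative): 0xb858 is
 * stored little-endian, so frame->retcode begins with 0x58 ("popl %eax")
 * followed by 0xb8 ("movl $imm32,%eax"); the 32-bit sigreturn number fills
 * the immediate, and 0x80cd supplies the final "int $0x80" bytes.  The rt
 * variant below omits the "popl %eax", since rt_sigreturn does not pop a
 * signal number off the stack first.
 */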
/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}

static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);
    env->eip = tswapl(sc->eip);

    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    //  regs->orig_eax = -1;  /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    return err;
badframe:
    return 1;
}

long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
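/* Added note: -TARGET_QEMU_ESIGRETURN is a QEMU-internal pseudo errno; the
 * syscall dispatch code treats it as "do not touch the return register",
 * which is exactly what sigreturn needs here: EAX was already restored from
 * the sigcontext above and must not be overwritten with a syscall result.
 */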
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_AARCH64)

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};
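/* Illustrative sketch (not in the original): a guest parsing the __reserved
 * area is expected to walk the records by magic/size until it reaches the
 * zero terminator, e.g.
 *
 *     struct target_aarch64_ctx *ctx = (void *)mcontext->__reserved;
 *     while (ctx->magic != 0) {
 *         if (ctx->magic == TARGET_FPSIMD_MAGIC) { ... }
 *         ctx = (void *)((char *)ctx + ctx->size);
 *     }
 *
 * QEMU only ever emits the FP/SIMD record followed by the terminator, as
 * target_setup_sigframe() below shows.
 */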
/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}

static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}
static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
{
    abi_ulong sp;

    sp = env->xregs[31];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;

    return sp;
}

static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
            offsetof(struct target_rt_sigframe, uc.tuc_stack),
            0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
#elif defined(TARGET_ARM)

struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN     (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN  (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

static const abi_ulong retcodes[4] = {
    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
    SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
};
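/* Added note on the encodings above (illustrative): the ARM trampolines are a
 * single "swi" instruction with the OABI syscall number encoded in its
 * comment field (0xef000000 | (nr + ARM_SYSCALL_BASE)); the Thumb versions
 * pack two 16-bit instructions into one word, where 0x2700 | nr is
 * "movs r7, #nr" and 0xdf00 is "svc #0".  setup_return() below picks the
 * right entry via index = thumb + (SA_SIGINFO ? 2 : 0).
 */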
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}

static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
{
    unsigned long sp = regs->regs[13];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    /*
     * ATPCS B01 mandates 8-byte alignment
     */
    return (sp - framesize) & ~7;
}
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}

static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe+1);
}
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}

/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}

static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}

static void setup_frame(int usig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *regs)
{
    if (get_osversion() >= 0x020612) {
        setup_frame_v2(usig, ka, set, regs);
    } else {
        setup_frame_v1(usig, ka, set, regs);
    }
}
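/* Added note: 0x020612 is get_osversion()'s encoding of kernel 2.6.18; QEMU
 * uses it as the cut-over point between the legacy "v1" sigframe layout and
 * the newer "v2" layout with the coprocessor register space, matching the
 * uname version the guest is told it is running on.
 */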
*/ 1872 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 1873 1874 memset(&stack, 0, sizeof(stack)); 1875 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1876 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1877 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1878 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1879 1880 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 1881 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1882 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1883 } 1884 1885 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1886 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 1887 1888 env->regs[1] = info_addr; 1889 env->regs[2] = uc_addr; 1890 1891 unlock_user_struct(frame, frame_addr, 1); 1892 return; 1893 sigsegv: 1894 force_sigsegv(usig); 1895 } 1896 1897 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 1898 target_siginfo_t *info, 1899 target_sigset_t *set, CPUARMState *env) 1900 { 1901 struct rt_sigframe_v2 *frame; 1902 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1903 abi_ulong info_addr, uc_addr; 1904 1905 trace_user_setup_rt_frame(env, frame_addr); 1906 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1907 goto sigsegv; 1908 } 1909 1910 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 1911 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 1912 tswap_siginfo(&frame->info, info); 1913 1914 setup_sigframe_v2(&frame->uc, set, env); 1915 1916 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1917 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 1918 1919 env->regs[1] = info_addr; 1920 env->regs[2] = uc_addr; 1921 1922 unlock_user_struct(frame, frame_addr, 1); 1923 return; 1924 sigsegv: 1925 force_sigsegv(usig); 1926 } 1927 1928 static void setup_rt_frame(int usig, struct target_sigaction *ka, 1929 target_siginfo_t *info, 1930 target_sigset_t *set, CPUARMState *env) 1931 { 1932 if (get_osversion() >= 0x020612) { 1933 setup_rt_frame_v2(usig, ka, info, set, env); 1934 } else { 1935 setup_rt_frame_v1(usig, ka, info, set, env); 1936 } 1937 } 1938 1939 static int 1940 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 1941 { 1942 int err = 0; 1943 uint32_t cpsr; 1944 1945 __get_user(env->regs[0], &sc->arm_r0); 1946 __get_user(env->regs[1], &sc->arm_r1); 1947 __get_user(env->regs[2], &sc->arm_r2); 1948 __get_user(env->regs[3], &sc->arm_r3); 1949 __get_user(env->regs[4], &sc->arm_r4); 1950 __get_user(env->regs[5], &sc->arm_r5); 1951 __get_user(env->regs[6], &sc->arm_r6); 1952 __get_user(env->regs[7], &sc->arm_r7); 1953 __get_user(env->regs[8], &sc->arm_r8); 1954 __get_user(env->regs[9], &sc->arm_r9); 1955 __get_user(env->regs[10], &sc->arm_r10); 1956 __get_user(env->regs[11], &sc->arm_fp); 1957 __get_user(env->regs[12], &sc->arm_ip); 1958 __get_user(env->regs[13], &sc->arm_sp); 1959 __get_user(env->regs[14], &sc->arm_lr); 1960 __get_user(env->regs[15], &sc->arm_pc); 1961 #ifdef TARGET_CONFIG_CPU_32 1962 __get_user(cpsr, &sc->arm_cpsr); 1963 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 1964 #endif 1965 1966 err |= !valid_user_regs(env); 1967 1968 return err; 1969 } 1970 1971 static long do_sigreturn_v1(CPUARMState *env) 1972 { 1973 abi_ulong frame_addr; 1974 struct sigframe_v1 *frame = NULL; 1975 target_sigset_t set; 1976 sigset_t host_set; 1977 int i; 1978 1979 /* 1980 * Since we stacked the signal on a 64-bit boundary, 1981 * then 'sp' should be word 
aligned here. If it's 1982 * not, then the user is trying to mess with us. 1983 */ 1984 frame_addr = env->regs[13]; 1985 trace_user_do_sigreturn(env, frame_addr); 1986 if (frame_addr & 7) { 1987 goto badframe; 1988 } 1989 1990 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1991 goto badframe; 1992 } 1993 1994 __get_user(set.sig[0], &frame->sc.oldmask); 1995 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1996 __get_user(set.sig[i], &frame->extramask[i - 1]); 1997 } 1998 1999 target_to_host_sigset_internal(&host_set, &set); 2000 set_sigmask(&host_set); 2001 2002 if (restore_sigcontext(env, &frame->sc)) { 2003 goto badframe; 2004 } 2005 2006 #if 0 2007 /* Send SIGTRAP if we're single-stepping */ 2008 if (ptrace_cancel_bpt(current)) 2009 send_sig(SIGTRAP, current, 1); 2010 #endif 2011 unlock_user_struct(frame, frame_addr, 0); 2012 return -TARGET_QEMU_ESIGRETURN; 2013 2014 badframe: 2015 force_sig(TARGET_SIGSEGV); 2016 return -TARGET_QEMU_ESIGRETURN; 2017 } 2018 2019 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) 2020 { 2021 int i; 2022 abi_ulong magic, sz; 2023 uint32_t fpscr, fpexc; 2024 struct target_vfp_sigframe *vfpframe; 2025 vfpframe = (struct target_vfp_sigframe *)regspace; 2026 2027 __get_user(magic, &vfpframe->magic); 2028 __get_user(sz, &vfpframe->size); 2029 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) { 2030 return 0; 2031 } 2032 for (i = 0; i < 32; i++) { 2033 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 2034 } 2035 __get_user(fpscr, &vfpframe->ufp.fpscr); 2036 vfp_set_fpscr(env, fpscr); 2037 __get_user(fpexc, &vfpframe->ufp_exc.fpexc); 2038 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid 2039 * and the exception flag is cleared 2040 */ 2041 fpexc |= (1 << 30); 2042 fpexc &= ~((1 << 31) | (1 << 28)); 2043 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc; 2044 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2045 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2046 return (abi_ulong*)(vfpframe + 1); 2047 } 2048 2049 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, 2050 abi_ulong *regspace) 2051 { 2052 int i; 2053 abi_ulong magic, sz; 2054 struct target_iwmmxt_sigframe *iwmmxtframe; 2055 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2056 2057 __get_user(magic, &iwmmxtframe->magic); 2058 __get_user(sz, &iwmmxtframe->size); 2059 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { 2060 return 0; 2061 } 2062 for (i = 0; i < 16; i++) { 2063 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2064 } 2065 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2066 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf); 2067 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2068 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2069 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2070 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2071 return (abi_ulong*)(iwmmxtframe + 1); 2072 } 2073 2074 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr, 2075 struct target_ucontext_v2 *uc) 2076 { 2077 sigset_t host_set; 2078 abi_ulong *regspace; 2079 2080 target_to_host_sigset(&host_set, &uc->tuc_sigmask); 2081 set_sigmask(&host_set); 2082 2083 if (restore_sigcontext(env, &uc->tuc_mcontext)) 2084 return 1; 2085 2086 /* Restore coprocessor signal frame */ 2087 regspace = uc->tuc_regspace; 2088 if (arm_feature(env, 
ARM_FEATURE_VFP)) { 2089 regspace = restore_sigframe_v2_vfp(env, regspace); 2090 if (!regspace) { 2091 return 1; 2092 } 2093 } 2094 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2095 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2096 if (!regspace) { 2097 return 1; 2098 } 2099 } 2100 2101 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2102 return 1; 2103 2104 #if 0 2105 /* Send SIGTRAP if we're single-stepping */ 2106 if (ptrace_cancel_bpt(current)) 2107 send_sig(SIGTRAP, current, 1); 2108 #endif 2109 2110 return 0; 2111 } 2112 2113 static long do_sigreturn_v2(CPUARMState *env) 2114 { 2115 abi_ulong frame_addr; 2116 struct sigframe_v2 *frame = NULL; 2117 2118 /* 2119 * Since we stacked the signal on a 64-bit boundary, 2120 * then 'sp' should be word aligned here. If it's 2121 * not, then the user is trying to mess with us. 2122 */ 2123 frame_addr = env->regs[13]; 2124 trace_user_do_sigreturn(env, frame_addr); 2125 if (frame_addr & 7) { 2126 goto badframe; 2127 } 2128 2129 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2130 goto badframe; 2131 } 2132 2133 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2134 goto badframe; 2135 } 2136 2137 unlock_user_struct(frame, frame_addr, 0); 2138 return -TARGET_QEMU_ESIGRETURN; 2139 2140 badframe: 2141 unlock_user_struct(frame, frame_addr, 0); 2142 force_sig(TARGET_SIGSEGV); 2143 return -TARGET_QEMU_ESIGRETURN; 2144 } 2145 2146 long do_sigreturn(CPUARMState *env) 2147 { 2148 if (get_osversion() >= 0x020612) { 2149 return do_sigreturn_v2(env); 2150 } else { 2151 return do_sigreturn_v1(env); 2152 } 2153 } 2154 2155 static long do_rt_sigreturn_v1(CPUARMState *env) 2156 { 2157 abi_ulong frame_addr; 2158 struct rt_sigframe_v1 *frame = NULL; 2159 sigset_t host_set; 2160 2161 /* 2162 * Since we stacked the signal on a 64-bit boundary, 2163 * then 'sp' should be word aligned here. If it's 2164 * not, then the user is trying to mess with us. 2165 */ 2166 frame_addr = env->regs[13]; 2167 trace_user_do_rt_sigreturn(env, frame_addr); 2168 if (frame_addr & 7) { 2169 goto badframe; 2170 } 2171 2172 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2173 goto badframe; 2174 } 2175 2176 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2177 set_sigmask(&host_set); 2178 2179 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2180 goto badframe; 2181 } 2182 2183 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2184 goto badframe; 2185 2186 #if 0 2187 /* Send SIGTRAP if we're single-stepping */ 2188 if (ptrace_cancel_bpt(current)) 2189 send_sig(SIGTRAP, current, 1); 2190 #endif 2191 unlock_user_struct(frame, frame_addr, 0); 2192 return -TARGET_QEMU_ESIGRETURN; 2193 2194 badframe: 2195 unlock_user_struct(frame, frame_addr, 0); 2196 force_sig(TARGET_SIGSEGV); 2197 return -TARGET_QEMU_ESIGRETURN; 2198 } 2199 2200 static long do_rt_sigreturn_v2(CPUARMState *env) 2201 { 2202 abi_ulong frame_addr; 2203 struct rt_sigframe_v2 *frame = NULL; 2204 2205 /* 2206 * Since we stacked the signal on a 64-bit boundary, 2207 * then 'sp' should be word aligned here. If it's 2208 * not, then the user is trying to mess with us. 
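 * (In other words the low three bits of the frame address must be
 * clear, which is what the frame_addr & 7 check below enforces.)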
2209 */ 2210 frame_addr = env->regs[13]; 2211 trace_user_do_rt_sigreturn(env, frame_addr); 2212 if (frame_addr & 7) { 2213 goto badframe; 2214 } 2215 2216 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2217 goto badframe; 2218 } 2219 2220 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2221 goto badframe; 2222 } 2223 2224 unlock_user_struct(frame, frame_addr, 0); 2225 return -TARGET_QEMU_ESIGRETURN; 2226 2227 badframe: 2228 unlock_user_struct(frame, frame_addr, 0); 2229 force_sig(TARGET_SIGSEGV); 2230 return -TARGET_QEMU_ESIGRETURN; 2231 } 2232 2233 long do_rt_sigreturn(CPUARMState *env) 2234 { 2235 if (get_osversion() >= 0x020612) { 2236 return do_rt_sigreturn_v2(env); 2237 } else { 2238 return do_rt_sigreturn_v1(env); 2239 } 2240 } 2241 2242 #elif defined(TARGET_SPARC) 2243 2244 #define __SUNOS_MAXWIN 31 2245 2246 /* This is what SunOS does, so shall I. */ 2247 struct target_sigcontext { 2248 abi_ulong sigc_onstack; /* state to restore */ 2249 2250 abi_ulong sigc_mask; /* sigmask to restore */ 2251 abi_ulong sigc_sp; /* stack pointer */ 2252 abi_ulong sigc_pc; /* program counter */ 2253 abi_ulong sigc_npc; /* next program counter */ 2254 abi_ulong sigc_psr; /* for condition codes etc */ 2255 abi_ulong sigc_g1; /* User uses these two registers */ 2256 abi_ulong sigc_o0; /* within the trampoline code. */ 2257 2258 /* Now comes information regarding the users window set 2259 * at the time of the signal. 2260 */ 2261 abi_ulong sigc_oswins; /* outstanding windows */ 2262 2263 /* stack ptrs for each regwin buf */ 2264 char *sigc_spbuf[__SUNOS_MAXWIN]; 2265 2266 /* Windows to restore after signal */ 2267 struct { 2268 abi_ulong locals[8]; 2269 abi_ulong ins[8]; 2270 } sigc_wbuf[__SUNOS_MAXWIN]; 2271 }; 2272 /* A Sparc stack frame */ 2273 struct sparc_stackf { 2274 abi_ulong locals[8]; 2275 abi_ulong ins[8]; 2276 /* It's simpler to treat fp and callers_pc as elements of ins[] 2277 * since we never need to access them ourselves. 
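 * The window (8 locals + 8 ins) sits at the base of every frame; the
 * fields that follow are the hidden struct-return pointer and the
 * outgoing argument slots of the 32-bit SPARC calling convention.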
2278 */ 2279 char *structptr; 2280 abi_ulong xargs[6]; 2281 abi_ulong xxargs[1]; 2282 }; 2283 2284 typedef struct { 2285 struct { 2286 abi_ulong psr; 2287 abi_ulong pc; 2288 abi_ulong npc; 2289 abi_ulong y; 2290 abi_ulong u_regs[16]; /* globals and ins */ 2291 } si_regs; 2292 int si_mask; 2293 } __siginfo_t; 2294 2295 typedef struct { 2296 abi_ulong si_float_regs[32]; 2297 unsigned long si_fsr; 2298 unsigned long si_fpqdepth; 2299 struct { 2300 unsigned long *insn_addr; 2301 unsigned long insn; 2302 } si_fpqueue [16]; 2303 } qemu_siginfo_fpu_t; 2304 2305 2306 struct target_signal_frame { 2307 struct sparc_stackf ss; 2308 __siginfo_t info; 2309 abi_ulong fpu_save; 2310 abi_ulong insns[2] __attribute__ ((aligned (8))); 2311 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2312 abi_ulong extra_size; /* Should be 0 */ 2313 qemu_siginfo_fpu_t fpu_state; 2314 }; 2315 struct target_rt_signal_frame { 2316 struct sparc_stackf ss; 2317 siginfo_t info; 2318 abi_ulong regs[20]; 2319 sigset_t mask; 2320 abi_ulong fpu_save; 2321 unsigned int insns[2]; 2322 stack_t stack; 2323 unsigned int extra_size; /* Should be 0 */ 2324 qemu_siginfo_fpu_t fpu_state; 2325 }; 2326 2327 #define UREG_O0 16 2328 #define UREG_O6 22 2329 #define UREG_I0 0 2330 #define UREG_I1 1 2331 #define UREG_I2 2 2332 #define UREG_I3 3 2333 #define UREG_I4 4 2334 #define UREG_I5 5 2335 #define UREG_I6 6 2336 #define UREG_I7 7 2337 #define UREG_L0 8 2338 #define UREG_FP UREG_I6 2339 #define UREG_SP UREG_O6 2340 2341 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2342 CPUSPARCState *env, 2343 unsigned long framesize) 2344 { 2345 abi_ulong sp; 2346 2347 sp = env->regwptr[UREG_FP]; 2348 2349 /* This is the X/Open sanctioned signal stack switching. */ 2350 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2351 if (!on_sig_stack(sp) 2352 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2353 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2354 } 2355 } 2356 return sp - framesize; 2357 } 2358 2359 static int 2360 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2361 { 2362 int err = 0, i; 2363 2364 __put_user(env->psr, &si->si_regs.psr); 2365 __put_user(env->pc, &si->si_regs.pc); 2366 __put_user(env->npc, &si->si_regs.npc); 2367 __put_user(env->y, &si->si_regs.y); 2368 for (i=0; i < 8; i++) { 2369 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2370 } 2371 for (i=0; i < 8; i++) { 2372 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2373 } 2374 __put_user(mask, &si->si_mask); 2375 return err; 2376 } 2377 2378 #if 0 2379 static int 2380 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2381 CPUSPARCState *env, unsigned long mask) 2382 { 2383 int err = 0; 2384 2385 __put_user(mask, &sc->sigc_mask); 2386 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2387 __put_user(env->pc, &sc->sigc_pc); 2388 __put_user(env->npc, &sc->sigc_npc); 2389 __put_user(env->psr, &sc->sigc_psr); 2390 __put_user(env->gregs[1], &sc->sigc_g1); 2391 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2392 2393 return err; 2394 } 2395 #endif 2396 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2397 2398 static void setup_frame(int sig, struct target_sigaction *ka, 2399 target_sigset_t *set, CPUSPARCState *env) 2400 { 2401 abi_ulong sf_addr; 2402 struct target_signal_frame *sf; 2403 int sigframe_size, err, i; 2404 2405 /* 1. 
Make sure everything is clean */ 2406 //synchronize_user_stack(); 2407 2408 sigframe_size = NF_ALIGNEDSZ; 2409 sf_addr = get_sigframe(ka, env, sigframe_size); 2410 trace_user_setup_frame(env, sf_addr); 2411 2412 sf = lock_user(VERIFY_WRITE, sf_addr, 2413 sizeof(struct target_signal_frame), 0); 2414 if (!sf) { 2415 goto sigsegv; 2416 } 2417 #if 0 2418 if (invalid_frame_pointer(sf, sigframe_size)) 2419 goto sigill_and_return; 2420 #endif 2421 /* 2. Save the current process state */ 2422 err = setup___siginfo(&sf->info, env, set->sig[0]); 2423 __put_user(0, &sf->extra_size); 2424 2425 //save_fpu_state(regs, &sf->fpu_state); 2426 //__put_user(&sf->fpu_state, &sf->fpu_save); 2427 2428 __put_user(set->sig[0], &sf->info.si_mask); 2429 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2430 __put_user(set->sig[i + 1], &sf->extramask[i]); 2431 } 2432 2433 for (i = 0; i < 8; i++) { 2434 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2435 } 2436 for (i = 0; i < 8; i++) { 2437 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2438 } 2439 if (err) 2440 goto sigsegv; 2441 2442 /* 3. signal handler back-trampoline and parameters */ 2443 env->regwptr[UREG_FP] = sf_addr; 2444 env->regwptr[UREG_I0] = sig; 2445 env->regwptr[UREG_I1] = sf_addr + 2446 offsetof(struct target_signal_frame, info); 2447 env->regwptr[UREG_I2] = sf_addr + 2448 offsetof(struct target_signal_frame, info); 2449 2450 /* 4. signal handler */ 2451 env->pc = ka->_sa_handler; 2452 env->npc = (env->pc + 4); 2453 /* 5. return to kernel instructions */ 2454 if (ka->sa_restorer) { 2455 env->regwptr[UREG_I7] = ka->sa_restorer; 2456 } else { 2457 uint32_t val32; 2458 2459 env->regwptr[UREG_I7] = sf_addr + 2460 offsetof(struct target_signal_frame, insns) - 2 * 4; 2461 2462 /* mov __NR_sigreturn, %g1 */ 2463 val32 = 0x821020d8; 2464 __put_user(val32, &sf->insns[0]); 2465 2466 /* t 0x10 */ 2467 val32 = 0x91d02010; 2468 __put_user(val32, &sf->insns[1]); 2469 if (err) 2470 goto sigsegv; 2471 2472 /* Flush instruction space. */ 2473 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2474 // tb_flush(env); 2475 } 2476 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2477 return; 2478 #if 0 2479 sigill_and_return: 2480 force_sig(TARGET_SIGILL); 2481 #endif 2482 sigsegv: 2483 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2484 force_sigsegv(sig); 2485 } 2486 2487 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2488 target_siginfo_t *info, 2489 target_sigset_t *set, CPUSPARCState *env) 2490 { 2491 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2492 } 2493 2494 long do_sigreturn(CPUSPARCState *env) 2495 { 2496 abi_ulong sf_addr; 2497 struct target_signal_frame *sf; 2498 uint32_t up_psr, pc, npc; 2499 target_sigset_t set; 2500 sigset_t host_set; 2501 int err=0, i; 2502 2503 sf_addr = env->regwptr[UREG_FP]; 2504 trace_user_do_sigreturn(env, sf_addr); 2505 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2506 goto segv_and_exit; 2507 } 2508 2509 /* 1. Make sure we are not getting garbage from the user */ 2510 2511 if (sf_addr & 3) 2512 goto segv_and_exit; 2513 2514 __get_user(pc, &sf->info.si_regs.pc); 2515 __get_user(npc, &sf->info.si_regs.npc); 2516 2517 if ((pc | npc) & 3) { 2518 goto segv_and_exit; 2519 } 2520 2521 /* 2. Restore the state */ 2522 __get_user(up_psr, &sf->info.si_regs.psr); 2523 2524 /* User can only change condition codes and FPU enabling in %psr. 
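 * Only the ICC field is taken from the saved value; every other PSR
 * bit keeps its current contents (the PSR_EF handling is left
 * commented out).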
*/ 2525 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2526 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2527 2528 env->pc = pc; 2529 env->npc = npc; 2530 __get_user(env->y, &sf->info.si_regs.y); 2531 for (i=0; i < 8; i++) { 2532 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2533 } 2534 for (i=0; i < 8; i++) { 2535 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2536 } 2537 2538 /* FIXME: implement FPU save/restore: 2539 * __get_user(fpu_save, &sf->fpu_save); 2540 * if (fpu_save) 2541 * err |= restore_fpu_state(env, fpu_save); 2542 */ 2543 2544 /* This is pretty much atomic, no amount locking would prevent 2545 * the races which exist anyways. 2546 */ 2547 __get_user(set.sig[0], &sf->info.si_mask); 2548 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2549 __get_user(set.sig[i], &sf->extramask[i - 1]); 2550 } 2551 2552 target_to_host_sigset_internal(&host_set, &set); 2553 set_sigmask(&host_set); 2554 2555 if (err) { 2556 goto segv_and_exit; 2557 } 2558 unlock_user_struct(sf, sf_addr, 0); 2559 return -TARGET_QEMU_ESIGRETURN; 2560 2561 segv_and_exit: 2562 unlock_user_struct(sf, sf_addr, 0); 2563 force_sig(TARGET_SIGSEGV); 2564 return -TARGET_QEMU_ESIGRETURN; 2565 } 2566 2567 long do_rt_sigreturn(CPUSPARCState *env) 2568 { 2569 trace_user_do_rt_sigreturn(env, 0); 2570 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2571 return -TARGET_ENOSYS; 2572 } 2573 2574 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2575 #define MC_TSTATE 0 2576 #define MC_PC 1 2577 #define MC_NPC 2 2578 #define MC_Y 3 2579 #define MC_G1 4 2580 #define MC_G2 5 2581 #define MC_G3 6 2582 #define MC_G4 7 2583 #define MC_G5 8 2584 #define MC_G6 9 2585 #define MC_G7 10 2586 #define MC_O0 11 2587 #define MC_O1 12 2588 #define MC_O2 13 2589 #define MC_O3 14 2590 #define MC_O4 15 2591 #define MC_O5 16 2592 #define MC_O6 17 2593 #define MC_O7 18 2594 #define MC_NGREG 19 2595 2596 typedef abi_ulong target_mc_greg_t; 2597 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2598 2599 struct target_mc_fq { 2600 abi_ulong *mcfq_addr; 2601 uint32_t mcfq_insn; 2602 }; 2603 2604 struct target_mc_fpu { 2605 union { 2606 uint32_t sregs[32]; 2607 uint64_t dregs[32]; 2608 //uint128_t qregs[16]; 2609 } mcfpu_fregs; 2610 abi_ulong mcfpu_fsr; 2611 abi_ulong mcfpu_fprs; 2612 abi_ulong mcfpu_gsr; 2613 struct target_mc_fq *mcfpu_fq; 2614 unsigned char mcfpu_qcnt; 2615 unsigned char mcfpu_qentsz; 2616 unsigned char mcfpu_enab; 2617 }; 2618 typedef struct target_mc_fpu target_mc_fpu_t; 2619 2620 typedef struct { 2621 target_mc_gregset_t mc_gregs; 2622 target_mc_greg_t mc_fp; 2623 target_mc_greg_t mc_i7; 2624 target_mc_fpu_t mc_fpregs; 2625 } target_mcontext_t; 2626 2627 struct target_ucontext { 2628 struct target_ucontext *tuc_link; 2629 abi_ulong tuc_flags; 2630 target_sigset_t tuc_sigmask; 2631 target_mcontext_t tuc_mcontext; 2632 }; 2633 2634 /* A V9 register window */ 2635 struct target_reg_window { 2636 abi_ulong locals[8]; 2637 abi_ulong ins[8]; 2638 }; 2639 2640 #define TARGET_STACK_BIAS 2047 2641 2642 /* {set, get}context() needed for 64-bit SparcLinux userland. 
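 * Both helpers take the guest ucontext pointer from the first argument
 * register.  set_context restores the signal mask only when the second
 * argument register is non-zero; get_context first steps the PC past
 * the trap instruction before saving state.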
*/ 2643 void sparc64_set_context(CPUSPARCState *env) 2644 { 2645 abi_ulong ucp_addr; 2646 struct target_ucontext *ucp; 2647 target_mc_gregset_t *grp; 2648 abi_ulong pc, npc, tstate; 2649 abi_ulong fp, i7, w_addr; 2650 unsigned int i; 2651 2652 ucp_addr = env->regwptr[UREG_I0]; 2653 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2654 goto do_sigsegv; 2655 } 2656 grp = &ucp->tuc_mcontext.mc_gregs; 2657 __get_user(pc, &((*grp)[MC_PC])); 2658 __get_user(npc, &((*grp)[MC_NPC])); 2659 if ((pc | npc) & 3) { 2660 goto do_sigsegv; 2661 } 2662 if (env->regwptr[UREG_I1]) { 2663 target_sigset_t target_set; 2664 sigset_t set; 2665 2666 if (TARGET_NSIG_WORDS == 1) { 2667 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2668 } else { 2669 abi_ulong *src, *dst; 2670 src = ucp->tuc_sigmask.sig; 2671 dst = target_set.sig; 2672 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2673 __get_user(*dst, src); 2674 } 2675 } 2676 target_to_host_sigset_internal(&set, &target_set); 2677 set_sigmask(&set); 2678 } 2679 env->pc = pc; 2680 env->npc = npc; 2681 __get_user(env->y, &((*grp)[MC_Y])); 2682 __get_user(tstate, &((*grp)[MC_TSTATE])); 2683 env->asi = (tstate >> 24) & 0xff; 2684 cpu_put_ccr(env, tstate >> 32); 2685 cpu_put_cwp64(env, tstate & 0x1f); 2686 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2687 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2688 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2689 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2690 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2691 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2692 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2693 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2694 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2695 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2696 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2697 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2698 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2699 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2700 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2701 2702 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2703 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2704 2705 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2706 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2707 abi_ulong) != 0) { 2708 goto do_sigsegv; 2709 } 2710 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2711 abi_ulong) != 0) { 2712 goto do_sigsegv; 2713 } 2714 /* FIXME this does not match how the kernel handles the FPU in 2715 * its sparc64_set_context implementation. 
In particular the FPU 2716 * is only restored if fenab is non-zero in: 2717 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2718 */ 2719 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2720 { 2721 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2722 for (i = 0; i < 64; i++, src++) { 2723 if (i & 1) { 2724 __get_user(env->fpr[i/2].l.lower, src); 2725 } else { 2726 __get_user(env->fpr[i/2].l.upper, src); 2727 } 2728 } 2729 } 2730 __get_user(env->fsr, 2731 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2732 __get_user(env->gsr, 2733 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2734 unlock_user_struct(ucp, ucp_addr, 0); 2735 return; 2736 do_sigsegv: 2737 unlock_user_struct(ucp, ucp_addr, 0); 2738 force_sig(TARGET_SIGSEGV); 2739 } 2740 2741 void sparc64_get_context(CPUSPARCState *env) 2742 { 2743 abi_ulong ucp_addr; 2744 struct target_ucontext *ucp; 2745 target_mc_gregset_t *grp; 2746 target_mcontext_t *mcp; 2747 abi_ulong fp, i7, w_addr; 2748 int err; 2749 unsigned int i; 2750 target_sigset_t target_set; 2751 sigset_t set; 2752 2753 ucp_addr = env->regwptr[UREG_I0]; 2754 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2755 goto do_sigsegv; 2756 } 2757 2758 mcp = &ucp->tuc_mcontext; 2759 grp = &mcp->mc_gregs; 2760 2761 /* Skip over the trap instruction, first. */ 2762 env->pc = env->npc; 2763 env->npc += 4; 2764 2765 /* If we're only reading the signal mask then do_sigprocmask() 2766 * is guaranteed not to fail, which is important because we don't 2767 * have any way to signal a failure or restart this operation since 2768 * this is not a normal syscall. 2769 */ 2770 err = do_sigprocmask(0, NULL, &set); 2771 assert(err == 0); 2772 host_to_target_sigset_internal(&target_set, &set); 2773 if (TARGET_NSIG_WORDS == 1) { 2774 __put_user(target_set.sig[0], 2775 (abi_ulong *)&ucp->tuc_sigmask); 2776 } else { 2777 abi_ulong *src, *dst; 2778 src = target_set.sig; 2779 dst = ucp->tuc_sigmask.sig; 2780 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2781 __put_user(*src, dst); 2782 } 2783 if (err) 2784 goto do_sigsegv; 2785 } 2786 2787 /* XXX: tstate must be saved properly */ 2788 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2789 __put_user(env->pc, &((*grp)[MC_PC])); 2790 __put_user(env->npc, &((*grp)[MC_NPC])); 2791 __put_user(env->y, &((*grp)[MC_Y])); 2792 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2793 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2794 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2795 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2796 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2797 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2798 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2799 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2800 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2801 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2802 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2803 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2804 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2805 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2806 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2807 2808 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2809 fp = i7 = 0; 2810 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2811 abi_ulong) != 0) { 2812 goto do_sigsegv; 2813 } 2814 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2815 abi_ulong) != 0) { 2816 goto do_sigsegv; 2817 } 2818 __put_user(fp, &(mcp->mc_fp)); 2819 __put_user(i7, 
&(mcp->mc_i7)); 2820 2821 { 2822 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2823 for (i = 0; i < 64; i++, dst++) { 2824 if (i & 1) { 2825 __put_user(env->fpr[i/2].l.lower, dst); 2826 } else { 2827 __put_user(env->fpr[i/2].l.upper, dst); 2828 } 2829 } 2830 } 2831 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2832 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2833 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2834 2835 if (err) 2836 goto do_sigsegv; 2837 unlock_user_struct(ucp, ucp_addr, 1); 2838 return; 2839 do_sigsegv: 2840 unlock_user_struct(ucp, ucp_addr, 1); 2841 force_sig(TARGET_SIGSEGV); 2842 } 2843 #endif 2844 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2845 2846 # if defined(TARGET_ABI_MIPSO32) 2847 struct target_sigcontext { 2848 uint32_t sc_regmask; /* Unused */ 2849 uint32_t sc_status; 2850 uint64_t sc_pc; 2851 uint64_t sc_regs[32]; 2852 uint64_t sc_fpregs[32]; 2853 uint32_t sc_ownedfp; /* Unused */ 2854 uint32_t sc_fpc_csr; 2855 uint32_t sc_fpc_eir; /* Unused */ 2856 uint32_t sc_used_math; 2857 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2858 uint32_t pad0; 2859 uint64_t sc_mdhi; 2860 uint64_t sc_mdlo; 2861 target_ulong sc_hi1; /* Was sc_cause */ 2862 target_ulong sc_lo1; /* Was sc_badvaddr */ 2863 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2864 target_ulong sc_lo2; 2865 target_ulong sc_hi3; 2866 target_ulong sc_lo3; 2867 }; 2868 # else /* N32 || N64 */ 2869 struct target_sigcontext { 2870 uint64_t sc_regs[32]; 2871 uint64_t sc_fpregs[32]; 2872 uint64_t sc_mdhi; 2873 uint64_t sc_hi1; 2874 uint64_t sc_hi2; 2875 uint64_t sc_hi3; 2876 uint64_t sc_mdlo; 2877 uint64_t sc_lo1; 2878 uint64_t sc_lo2; 2879 uint64_t sc_lo3; 2880 uint64_t sc_pc; 2881 uint32_t sc_fpc_csr; 2882 uint32_t sc_used_math; 2883 uint32_t sc_dsp; 2884 uint32_t sc_reserved; 2885 }; 2886 # endif /* O32 */ 2887 2888 struct sigframe { 2889 uint32_t sf_ass[4]; /* argument save space for o32 */ 2890 uint32_t sf_code[2]; /* signal trampoline */ 2891 struct target_sigcontext sf_sc; 2892 target_sigset_t sf_mask; 2893 }; 2894 2895 struct target_ucontext { 2896 target_ulong tuc_flags; 2897 target_ulong tuc_link; 2898 target_stack_t tuc_stack; 2899 target_ulong pad0; 2900 struct target_sigcontext tuc_mcontext; 2901 target_sigset_t tuc_sigmask; 2902 }; 2903 2904 struct target_rt_sigframe { 2905 uint32_t rs_ass[4]; /* argument save space for o32 */ 2906 uint32_t rs_code[2]; /* signal trampoline */ 2907 struct target_siginfo rs_info; 2908 struct target_ucontext rs_uc; 2909 }; 2910 2911 /* Install trampoline to jump back from signal handler */ 2912 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2913 { 2914 int err = 0; 2915 2916 /* 2917 * Set up the return code ... 2918 * 2919 * li v0, __NR__foo_sigreturn 2920 * syscall 2921 */ 2922 2923 __put_user(0x24020000 + syscall, tramp + 0); 2924 __put_user(0x0000000c , tramp + 1); 2925 return err; 2926 } 2927 2928 static inline void setup_sigcontext(CPUMIPSState *regs, 2929 struct target_sigcontext *sc) 2930 { 2931 int i; 2932 2933 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2934 regs->hflags &= ~MIPS_HFLAG_BMASK; 2935 2936 __put_user(0, &sc->sc_regs[0]); 2937 for (i = 1; i < 32; ++i) { 2938 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2939 } 2940 2941 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2942 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2943 2944 /* Rather than checking for dsp existence, always copy. The storage 2945 would just be garbage otherwise. 
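   HI[1..3]/LO[1..3] below are the extra DSP accumulator pairs, and
   sc_dsp holds the DSP control state read via cpu_rddsp().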
*/ 2946 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2947 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2948 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2949 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2950 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2951 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2952 { 2953 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2954 __put_user(dsp, &sc->sc_dsp); 2955 } 2956 2957 __put_user(1, &sc->sc_used_math); 2958 2959 for (i = 0; i < 32; ++i) { 2960 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2961 } 2962 } 2963 2964 static inline void 2965 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2966 { 2967 int i; 2968 2969 __get_user(regs->CP0_EPC, &sc->sc_pc); 2970 2971 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2972 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2973 2974 for (i = 1; i < 32; ++i) { 2975 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2976 } 2977 2978 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2979 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2980 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2981 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2982 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2983 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2984 { 2985 uint32_t dsp; 2986 __get_user(dsp, &sc->sc_dsp); 2987 cpu_wrdsp(dsp, 0x3ff, regs); 2988 } 2989 2990 for (i = 0; i < 32; ++i) { 2991 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2992 } 2993 } 2994 2995 /* 2996 * Determine which stack to use.. 2997 */ 2998 static inline abi_ulong 2999 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 3000 { 3001 unsigned long sp; 3002 3003 /* Default to using normal stack */ 3004 sp = regs->active_tc.gpr[29]; 3005 3006 /* 3007 * FPU emulator may have its own trampoline active just 3008 * above the user stack, 16-bytes before the next lowest 3009 * 16 byte boundary. Try to avoid trashing it. 3010 */ 3011 sp -= 32; 3012 3013 /* This is the X/Open sanctioned signal stack switching. */ 3014 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 3015 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3016 } 3017 3018 return (sp - frame_size) & ~7; 3019 } 3020 3021 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 3022 { 3023 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 3024 env->hflags &= ~MIPS_HFLAG_M16; 3025 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 3026 env->active_tc.PC &= ~(target_ulong) 1; 3027 } 3028 } 3029 3030 # if defined(TARGET_ABI_MIPSO32) 3031 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 3032 static void setup_frame(int sig, struct target_sigaction * ka, 3033 target_sigset_t *set, CPUMIPSState *regs) 3034 { 3035 struct sigframe *frame; 3036 abi_ulong frame_addr; 3037 int i; 3038 3039 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 3040 trace_user_setup_frame(regs, frame_addr); 3041 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3042 goto give_sigsegv; 3043 } 3044 3045 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3046 3047 setup_sigcontext(regs, &frame->sf_sc); 3048 3049 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3050 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3051 } 3052 3053 /* 3054 * Arguments to signal handler: 3055 * 3056 * a0 = signal number 3057 * a1 = 0 (should be cause) 3058 * a2 = pointer to struct sigcontext 3059 * 3060 * $25 and PC point to the signal handler, $29 points to the 3061 * struct sigframe. 
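 * gpr[25] is t9, which position-independent MIPS code expects to hold
 * the entry address of the called function, so it is loaded with the
 * handler address alongside PC below.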
3062 */ 3063 regs->active_tc.gpr[ 4] = sig; 3064 regs->active_tc.gpr[ 5] = 0; 3065 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3066 regs->active_tc.gpr[29] = frame_addr; 3067 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3068 /* The original kernel code sets CP0_EPC to the handler 3069 * since it returns to userland using eret 3070 * we cannot do this here, and we must set PC directly */ 3071 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3072 mips_set_hflags_isa_mode_from_pc(regs); 3073 unlock_user_struct(frame, frame_addr, 1); 3074 return; 3075 3076 give_sigsegv: 3077 force_sigsegv(sig); 3078 } 3079 3080 long do_sigreturn(CPUMIPSState *regs) 3081 { 3082 struct sigframe *frame; 3083 abi_ulong frame_addr; 3084 sigset_t blocked; 3085 target_sigset_t target_set; 3086 int i; 3087 3088 frame_addr = regs->active_tc.gpr[29]; 3089 trace_user_do_sigreturn(regs, frame_addr); 3090 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3091 goto badframe; 3092 3093 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3094 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3095 } 3096 3097 target_to_host_sigset_internal(&blocked, &target_set); 3098 set_sigmask(&blocked); 3099 3100 restore_sigcontext(regs, &frame->sf_sc); 3101 3102 #if 0 3103 /* 3104 * Don't let your children do this ... 3105 */ 3106 __asm__ __volatile__( 3107 "move\t$29, %0\n\t" 3108 "j\tsyscall_exit" 3109 :/* no outputs */ 3110 :"r" (®s)); 3111 /* Unreached */ 3112 #endif 3113 3114 regs->active_tc.PC = regs->CP0_EPC; 3115 mips_set_hflags_isa_mode_from_pc(regs); 3116 /* I am not sure this is right, but it seems to work 3117 * maybe a problem with nested signals ? */ 3118 regs->CP0_EPC = 0; 3119 return -TARGET_QEMU_ESIGRETURN; 3120 3121 badframe: 3122 force_sig(TARGET_SIGSEGV); 3123 return -TARGET_QEMU_ESIGRETURN; 3124 } 3125 # endif /* O32 */ 3126 3127 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3128 target_siginfo_t *info, 3129 target_sigset_t *set, CPUMIPSState *env) 3130 { 3131 struct target_rt_sigframe *frame; 3132 abi_ulong frame_addr; 3133 int i; 3134 3135 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3136 trace_user_setup_rt_frame(env, frame_addr); 3137 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3138 goto give_sigsegv; 3139 } 3140 3141 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3142 3143 tswap_siginfo(&frame->rs_info, info); 3144 3145 __put_user(0, &frame->rs_uc.tuc_flags); 3146 __put_user(0, &frame->rs_uc.tuc_link); 3147 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3148 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3149 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3150 &frame->rs_uc.tuc_stack.ss_flags); 3151 3152 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3153 3154 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3155 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3156 } 3157 3158 /* 3159 * Arguments to signal handler: 3160 * 3161 * a0 = signal number 3162 * a1 = pointer to siginfo_t 3163 * a2 = pointer to struct ucontext 3164 * 3165 * $25 and PC point to the signal handler, $29 points to the 3166 * struct sigframe. 
3167 */ 3168 env->active_tc.gpr[ 4] = sig; 3169 env->active_tc.gpr[ 5] = frame_addr 3170 + offsetof(struct target_rt_sigframe, rs_info); 3171 env->active_tc.gpr[ 6] = frame_addr 3172 + offsetof(struct target_rt_sigframe, rs_uc); 3173 env->active_tc.gpr[29] = frame_addr; 3174 env->active_tc.gpr[31] = frame_addr 3175 + offsetof(struct target_rt_sigframe, rs_code); 3176 /* The original kernel code sets CP0_EPC to the handler 3177 * since it returns to userland using eret 3178 * we cannot do this here, and we must set PC directly */ 3179 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3180 mips_set_hflags_isa_mode_from_pc(env); 3181 unlock_user_struct(frame, frame_addr, 1); 3182 return; 3183 3184 give_sigsegv: 3185 unlock_user_struct(frame, frame_addr, 1); 3186 force_sigsegv(sig); 3187 } 3188 3189 long do_rt_sigreturn(CPUMIPSState *env) 3190 { 3191 struct target_rt_sigframe *frame; 3192 abi_ulong frame_addr; 3193 sigset_t blocked; 3194 3195 frame_addr = env->active_tc.gpr[29]; 3196 trace_user_do_rt_sigreturn(env, frame_addr); 3197 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3198 goto badframe; 3199 } 3200 3201 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3202 set_sigmask(&blocked); 3203 3204 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3205 3206 if (do_sigaltstack(frame_addr + 3207 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3208 0, get_sp_from_cpustate(env)) == -EFAULT) 3209 goto badframe; 3210 3211 env->active_tc.PC = env->CP0_EPC; 3212 mips_set_hflags_isa_mode_from_pc(env); 3213 /* I am not sure this is right, but it seems to work 3214 * maybe a problem with nested signals ? */ 3215 env->CP0_EPC = 0; 3216 return -TARGET_QEMU_ESIGRETURN; 3217 3218 badframe: 3219 force_sig(TARGET_SIGSEGV); 3220 return -TARGET_QEMU_ESIGRETURN; 3221 } 3222 3223 #elif defined(TARGET_SH4) 3224 3225 /* 3226 * code and data structures from linux kernel: 3227 * include/asm-sh/sigcontext.h 3228 * arch/sh/kernel/signal.c 3229 */ 3230 3231 struct target_sigcontext { 3232 target_ulong oldmask; 3233 3234 /* CPU registers */ 3235 target_ulong sc_gregs[16]; 3236 target_ulong sc_pc; 3237 target_ulong sc_pr; 3238 target_ulong sc_sr; 3239 target_ulong sc_gbr; 3240 target_ulong sc_mach; 3241 target_ulong sc_macl; 3242 3243 /* FPU registers */ 3244 target_ulong sc_fpregs[16]; 3245 target_ulong sc_xfpregs[16]; 3246 unsigned int sc_fpscr; 3247 unsigned int sc_fpul; 3248 unsigned int sc_ownedfp; 3249 }; 3250 3251 struct target_sigframe 3252 { 3253 struct target_sigcontext sc; 3254 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3255 uint16_t retcode[3]; 3256 }; 3257 3258 3259 struct target_ucontext { 3260 target_ulong tuc_flags; 3261 struct target_ucontext *tuc_link; 3262 target_stack_t tuc_stack; 3263 struct target_sigcontext tuc_mcontext; 3264 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3265 }; 3266 3267 struct target_rt_sigframe 3268 { 3269 struct target_siginfo info; 3270 struct target_ucontext uc; 3271 uint16_t retcode[3]; 3272 }; 3273 3274 3275 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3276 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3277 3278 static abi_ulong get_sigframe(struct target_sigaction *ka, 3279 unsigned long sp, size_t frame_size) 3280 { 3281 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3282 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3283 } 3284 3285 return (sp - frame_size) & -8ul; 3286 } 3287 3288 static void setup_sigcontext(struct 
target_sigcontext *sc, 3289 CPUSH4State *regs, unsigned long mask) 3290 { 3291 int i; 3292 3293 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3294 COPY(gregs[0]); COPY(gregs[1]); 3295 COPY(gregs[2]); COPY(gregs[3]); 3296 COPY(gregs[4]); COPY(gregs[5]); 3297 COPY(gregs[6]); COPY(gregs[7]); 3298 COPY(gregs[8]); COPY(gregs[9]); 3299 COPY(gregs[10]); COPY(gregs[11]); 3300 COPY(gregs[12]); COPY(gregs[13]); 3301 COPY(gregs[14]); COPY(gregs[15]); 3302 COPY(gbr); COPY(mach); 3303 COPY(macl); COPY(pr); 3304 COPY(sr); COPY(pc); 3305 #undef COPY 3306 3307 for (i=0; i<16; i++) { 3308 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3309 } 3310 __put_user(regs->fpscr, &sc->sc_fpscr); 3311 __put_user(regs->fpul, &sc->sc_fpul); 3312 3313 /* non-iBCS2 extensions.. */ 3314 __put_user(mask, &sc->oldmask); 3315 } 3316 3317 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3318 { 3319 int i; 3320 3321 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3322 COPY(gregs[0]); COPY(gregs[1]); 3323 COPY(gregs[2]); COPY(gregs[3]); 3324 COPY(gregs[4]); COPY(gregs[5]); 3325 COPY(gregs[6]); COPY(gregs[7]); 3326 COPY(gregs[8]); COPY(gregs[9]); 3327 COPY(gregs[10]); COPY(gregs[11]); 3328 COPY(gregs[12]); COPY(gregs[13]); 3329 COPY(gregs[14]); COPY(gregs[15]); 3330 COPY(gbr); COPY(mach); 3331 COPY(macl); COPY(pr); 3332 COPY(sr); COPY(pc); 3333 #undef COPY 3334 3335 for (i=0; i<16; i++) { 3336 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3337 } 3338 __get_user(regs->fpscr, &sc->sc_fpscr); 3339 __get_user(regs->fpul, &sc->sc_fpul); 3340 3341 regs->tra = -1; /* disable syscall checks */ 3342 } 3343 3344 static void setup_frame(int sig, struct target_sigaction *ka, 3345 target_sigset_t *set, CPUSH4State *regs) 3346 { 3347 struct target_sigframe *frame; 3348 abi_ulong frame_addr; 3349 int i; 3350 3351 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3352 trace_user_setup_frame(regs, frame_addr); 3353 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3354 goto give_sigsegv; 3355 } 3356 3357 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3358 3359 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3360 __put_user(set->sig[i + 1], &frame->extramask[i]); 3361 } 3362 3363 /* Set up to return from userspace. If provided, use a stub 3364 already in userspace. 
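       Otherwise a two-instruction trampoline is generated in the frame:
       MOVW loads the syscall number stored in retcode[2] into r3 and
       TRAP_NOARG issues the trap, with PR pointed at the sequence.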
*/ 3365 if (ka->sa_flags & TARGET_SA_RESTORER) { 3366 regs->pr = (unsigned long) ka->sa_restorer; 3367 } else { 3368 /* Generate return code (system call to sigreturn) */ 3369 abi_ulong retcode_addr = frame_addr + 3370 offsetof(struct target_sigframe, retcode); 3371 __put_user(MOVW(2), &frame->retcode[0]); 3372 __put_user(TRAP_NOARG, &frame->retcode[1]); 3373 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3374 regs->pr = (unsigned long) retcode_addr; 3375 } 3376 3377 /* Set up registers for signal handler */ 3378 regs->gregs[15] = frame_addr; 3379 regs->gregs[4] = sig; /* Arg for signal handler */ 3380 regs->gregs[5] = 0; 3381 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3382 regs->pc = (unsigned long) ka->_sa_handler; 3383 3384 unlock_user_struct(frame, frame_addr, 1); 3385 return; 3386 3387 give_sigsegv: 3388 unlock_user_struct(frame, frame_addr, 1); 3389 force_sigsegv(sig); 3390 } 3391 3392 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3393 target_siginfo_t *info, 3394 target_sigset_t *set, CPUSH4State *regs) 3395 { 3396 struct target_rt_sigframe *frame; 3397 abi_ulong frame_addr; 3398 int i; 3399 3400 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3401 trace_user_setup_rt_frame(regs, frame_addr); 3402 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3403 goto give_sigsegv; 3404 } 3405 3406 tswap_siginfo(&frame->info, info); 3407 3408 /* Create the ucontext. */ 3409 __put_user(0, &frame->uc.tuc_flags); 3410 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3411 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3412 &frame->uc.tuc_stack.ss_sp); 3413 __put_user(sas_ss_flags(regs->gregs[15]), 3414 &frame->uc.tuc_stack.ss_flags); 3415 __put_user(target_sigaltstack_used.ss_size, 3416 &frame->uc.tuc_stack.ss_size); 3417 setup_sigcontext(&frame->uc.tuc_mcontext, 3418 regs, set->sig[0]); 3419 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3420 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3421 } 3422 3423 /* Set up to return from userspace. If provided, use a stub 3424 already in userspace. 
*/ 3425 if (ka->sa_flags & TARGET_SA_RESTORER) { 3426 regs->pr = (unsigned long) ka->sa_restorer; 3427 } else { 3428 /* Generate return code (system call to sigreturn) */ 3429 abi_ulong retcode_addr = frame_addr + 3430 offsetof(struct target_rt_sigframe, retcode); 3431 __put_user(MOVW(2), &frame->retcode[0]); 3432 __put_user(TRAP_NOARG, &frame->retcode[1]); 3433 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3434 regs->pr = (unsigned long) retcode_addr; 3435 } 3436 3437 /* Set up registers for signal handler */ 3438 regs->gregs[15] = frame_addr; 3439 regs->gregs[4] = sig; /* Arg for signal handler */ 3440 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3441 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3442 regs->pc = (unsigned long) ka->_sa_handler; 3443 3444 unlock_user_struct(frame, frame_addr, 1); 3445 return; 3446 3447 give_sigsegv: 3448 unlock_user_struct(frame, frame_addr, 1); 3449 force_sigsegv(sig); 3450 } 3451 3452 long do_sigreturn(CPUSH4State *regs) 3453 { 3454 struct target_sigframe *frame; 3455 abi_ulong frame_addr; 3456 sigset_t blocked; 3457 target_sigset_t target_set; 3458 int i; 3459 int err = 0; 3460 3461 frame_addr = regs->gregs[15]; 3462 trace_user_do_sigreturn(regs, frame_addr); 3463 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3464 goto badframe; 3465 } 3466 3467 __get_user(target_set.sig[0], &frame->sc.oldmask); 3468 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3469 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3470 } 3471 3472 if (err) 3473 goto badframe; 3474 3475 target_to_host_sigset_internal(&blocked, &target_set); 3476 set_sigmask(&blocked); 3477 3478 restore_sigcontext(regs, &frame->sc); 3479 3480 unlock_user_struct(frame, frame_addr, 0); 3481 return -TARGET_QEMU_ESIGRETURN; 3482 3483 badframe: 3484 unlock_user_struct(frame, frame_addr, 0); 3485 force_sig(TARGET_SIGSEGV); 3486 return -TARGET_QEMU_ESIGRETURN; 3487 } 3488 3489 long do_rt_sigreturn(CPUSH4State *regs) 3490 { 3491 struct target_rt_sigframe *frame; 3492 abi_ulong frame_addr; 3493 sigset_t blocked; 3494 3495 frame_addr = regs->gregs[15]; 3496 trace_user_do_rt_sigreturn(regs, frame_addr); 3497 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3498 goto badframe; 3499 } 3500 3501 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3502 set_sigmask(&blocked); 3503 3504 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3505 3506 if (do_sigaltstack(frame_addr + 3507 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3508 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3509 goto badframe; 3510 } 3511 3512 unlock_user_struct(frame, frame_addr, 0); 3513 return -TARGET_QEMU_ESIGRETURN; 3514 3515 badframe: 3516 unlock_user_struct(frame, frame_addr, 0); 3517 force_sig(TARGET_SIGSEGV); 3518 return -TARGET_QEMU_ESIGRETURN; 3519 } 3520 #elif defined(TARGET_MICROBLAZE) 3521 3522 struct target_sigcontext { 3523 struct target_pt_regs regs; /* needs to be first */ 3524 uint32_t oldmask; 3525 }; 3526 3527 struct target_stack_t { 3528 abi_ulong ss_sp; 3529 int ss_flags; 3530 unsigned int ss_size; 3531 }; 3532 3533 struct target_ucontext { 3534 abi_ulong tuc_flags; 3535 abi_ulong tuc_link; 3536 struct target_stack_t tuc_stack; 3537 struct target_sigcontext tuc_mcontext; 3538 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3539 }; 3540 3541 /* Signal frames. 
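 * The non-rt frame carries the ucontext, the extra sigmask words and a
 * two-word trampoline that the handler returns through; the rt frame is
 * declared below but setup_rt_frame() is not implemented for this target.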
*/ 3542 struct target_signal_frame { 3543 struct target_ucontext uc; 3544 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3545 uint32_t tramp[2]; 3546 }; 3547 3548 struct rt_signal_frame { 3549 siginfo_t info; 3550 struct ucontext uc; 3551 uint32_t tramp[2]; 3552 }; 3553 3554 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3555 { 3556 __put_user(env->regs[0], &sc->regs.r0); 3557 __put_user(env->regs[1], &sc->regs.r1); 3558 __put_user(env->regs[2], &sc->regs.r2); 3559 __put_user(env->regs[3], &sc->regs.r3); 3560 __put_user(env->regs[4], &sc->regs.r4); 3561 __put_user(env->regs[5], &sc->regs.r5); 3562 __put_user(env->regs[6], &sc->regs.r6); 3563 __put_user(env->regs[7], &sc->regs.r7); 3564 __put_user(env->regs[8], &sc->regs.r8); 3565 __put_user(env->regs[9], &sc->regs.r9); 3566 __put_user(env->regs[10], &sc->regs.r10); 3567 __put_user(env->regs[11], &sc->regs.r11); 3568 __put_user(env->regs[12], &sc->regs.r12); 3569 __put_user(env->regs[13], &sc->regs.r13); 3570 __put_user(env->regs[14], &sc->regs.r14); 3571 __put_user(env->regs[15], &sc->regs.r15); 3572 __put_user(env->regs[16], &sc->regs.r16); 3573 __put_user(env->regs[17], &sc->regs.r17); 3574 __put_user(env->regs[18], &sc->regs.r18); 3575 __put_user(env->regs[19], &sc->regs.r19); 3576 __put_user(env->regs[20], &sc->regs.r20); 3577 __put_user(env->regs[21], &sc->regs.r21); 3578 __put_user(env->regs[22], &sc->regs.r22); 3579 __put_user(env->regs[23], &sc->regs.r23); 3580 __put_user(env->regs[24], &sc->regs.r24); 3581 __put_user(env->regs[25], &sc->regs.r25); 3582 __put_user(env->regs[26], &sc->regs.r26); 3583 __put_user(env->regs[27], &sc->regs.r27); 3584 __put_user(env->regs[28], &sc->regs.r28); 3585 __put_user(env->regs[29], &sc->regs.r29); 3586 __put_user(env->regs[30], &sc->regs.r30); 3587 __put_user(env->regs[31], &sc->regs.r31); 3588 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3589 } 3590 3591 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3592 { 3593 __get_user(env->regs[0], &sc->regs.r0); 3594 __get_user(env->regs[1], &sc->regs.r1); 3595 __get_user(env->regs[2], &sc->regs.r2); 3596 __get_user(env->regs[3], &sc->regs.r3); 3597 __get_user(env->regs[4], &sc->regs.r4); 3598 __get_user(env->regs[5], &sc->regs.r5); 3599 __get_user(env->regs[6], &sc->regs.r6); 3600 __get_user(env->regs[7], &sc->regs.r7); 3601 __get_user(env->regs[8], &sc->regs.r8); 3602 __get_user(env->regs[9], &sc->regs.r9); 3603 __get_user(env->regs[10], &sc->regs.r10); 3604 __get_user(env->regs[11], &sc->regs.r11); 3605 __get_user(env->regs[12], &sc->regs.r12); 3606 __get_user(env->regs[13], &sc->regs.r13); 3607 __get_user(env->regs[14], &sc->regs.r14); 3608 __get_user(env->regs[15], &sc->regs.r15); 3609 __get_user(env->regs[16], &sc->regs.r16); 3610 __get_user(env->regs[17], &sc->regs.r17); 3611 __get_user(env->regs[18], &sc->regs.r18); 3612 __get_user(env->regs[19], &sc->regs.r19); 3613 __get_user(env->regs[20], &sc->regs.r20); 3614 __get_user(env->regs[21], &sc->regs.r21); 3615 __get_user(env->regs[22], &sc->regs.r22); 3616 __get_user(env->regs[23], &sc->regs.r23); 3617 __get_user(env->regs[24], &sc->regs.r24); 3618 __get_user(env->regs[25], &sc->regs.r25); 3619 __get_user(env->regs[26], &sc->regs.r26); 3620 __get_user(env->regs[27], &sc->regs.r27); 3621 __get_user(env->regs[28], &sc->regs.r28); 3622 __get_user(env->regs[29], &sc->regs.r29); 3623 __get_user(env->regs[30], &sc->regs.r30); 3624 __get_user(env->regs[31], &sc->regs.r31); 3625 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3626 } 3627 3628 static 
abi_ulong get_sigframe(struct target_sigaction *ka, 3629 CPUMBState *env, int frame_size) 3630 { 3631 abi_ulong sp = env->regs[1]; 3632 3633 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3634 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3635 } 3636 3637 return ((sp - frame_size) & -8UL); 3638 } 3639 3640 static void setup_frame(int sig, struct target_sigaction *ka, 3641 target_sigset_t *set, CPUMBState *env) 3642 { 3643 struct target_signal_frame *frame; 3644 abi_ulong frame_addr; 3645 int i; 3646 3647 frame_addr = get_sigframe(ka, env, sizeof *frame); 3648 trace_user_setup_frame(env, frame_addr); 3649 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3650 goto badframe; 3651 3652 /* Save the mask. */ 3653 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3654 3655 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3656 __put_user(set->sig[i], &frame->extramask[i - 1]); 3657 } 3658 3659 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3660 3661 /* Set up to return from userspace. If provided, use a stub 3662 already in userspace. */ 3663 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3664 if (ka->sa_flags & TARGET_SA_RESTORER) { 3665 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3666 } else { 3667 uint32_t t; 3668 /* Note, these encodings are _big endian_! */ 3669 /* addi r12, r0, __NR_sigreturn */ 3670 t = 0x31800000UL | TARGET_NR_sigreturn; 3671 __put_user(t, frame->tramp + 0); 3672 /* brki r14, 0x8 */ 3673 t = 0xb9cc0008UL; 3674 __put_user(t, frame->tramp + 1); 3675 3676 /* Return from sighandler will jump to the tramp. 3677 Negative 8 offset because return is rtsd r15, 8 */ 3678 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3679 - 8; 3680 } 3681 3682 /* Set up registers for signal handler */ 3683 env->regs[1] = frame_addr; 3684 /* Signal handler args: */ 3685 env->regs[5] = sig; /* Arg 0: signum */ 3686 env->regs[6] = 0; 3687 /* arg 1: sigcontext */ 3688 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3689 3690 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3691 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3692 3693 unlock_user_struct(frame, frame_addr, 1); 3694 return; 3695 badframe: 3696 force_sigsegv(sig); 3697 } 3698 3699 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3700 target_siginfo_t *info, 3701 target_sigset_t *set, CPUMBState *env) 3702 { 3703 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3704 } 3705 3706 long do_sigreturn(CPUMBState *env) 3707 { 3708 struct target_signal_frame *frame; 3709 abi_ulong frame_addr; 3710 target_sigset_t target_set; 3711 sigset_t set; 3712 int i; 3713 3714 frame_addr = env->regs[R_SP]; 3715 trace_user_do_sigreturn(env, frame_addr); 3716 /* Make sure the guest isn't playing games. */ 3717 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3718 goto badframe; 3719 3720 /* Restore blocked signals */ 3721 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3722 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3723 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3724 } 3725 target_to_host_sigset_internal(&set, &target_set); 3726 set_sigmask(&set); 3727 3728 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3729 /* We got here through a sigreturn syscall, our path back is via an 3730 rtb insn so setup r14 for that. 
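   restore_sigcontext() above has already loaded the saved PC into
   SR_PC, so copying it into r14 gives that return its target.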
*/ 3731 env->regs[14] = env->sregs[SR_PC]; 3732 3733 unlock_user_struct(frame, frame_addr, 0); 3734 return -TARGET_QEMU_ESIGRETURN; 3735 badframe: 3736 force_sig(TARGET_SIGSEGV); 3737 return -TARGET_QEMU_ESIGRETURN; 3738 } 3739 3740 long do_rt_sigreturn(CPUMBState *env) 3741 { 3742 trace_user_do_rt_sigreturn(env, 0); 3743 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3744 return -TARGET_ENOSYS; 3745 } 3746 3747 #elif defined(TARGET_CRIS) 3748 3749 struct target_sigcontext { 3750 struct target_pt_regs regs; /* needs to be first */ 3751 uint32_t oldmask; 3752 uint32_t usp; /* usp before stacking this gunk on it */ 3753 }; 3754 3755 /* Signal frames. */ 3756 struct target_signal_frame { 3757 struct target_sigcontext sc; 3758 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3759 uint16_t retcode[4]; /* Trampoline code. */ 3760 }; 3761 3762 struct rt_signal_frame { 3763 siginfo_t *pinfo; 3764 void *puc; 3765 siginfo_t info; 3766 struct ucontext uc; 3767 uint16_t retcode[4]; /* Trampoline code. */ 3768 }; 3769 3770 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3771 { 3772 __put_user(env->regs[0], &sc->regs.r0); 3773 __put_user(env->regs[1], &sc->regs.r1); 3774 __put_user(env->regs[2], &sc->regs.r2); 3775 __put_user(env->regs[3], &sc->regs.r3); 3776 __put_user(env->regs[4], &sc->regs.r4); 3777 __put_user(env->regs[5], &sc->regs.r5); 3778 __put_user(env->regs[6], &sc->regs.r6); 3779 __put_user(env->regs[7], &sc->regs.r7); 3780 __put_user(env->regs[8], &sc->regs.r8); 3781 __put_user(env->regs[9], &sc->regs.r9); 3782 __put_user(env->regs[10], &sc->regs.r10); 3783 __put_user(env->regs[11], &sc->regs.r11); 3784 __put_user(env->regs[12], &sc->regs.r12); 3785 __put_user(env->regs[13], &sc->regs.r13); 3786 __put_user(env->regs[14], &sc->usp); 3787 __put_user(env->regs[15], &sc->regs.acr); 3788 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3789 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3790 __put_user(env->pc, &sc->regs.erp); 3791 } 3792 3793 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3794 { 3795 __get_user(env->regs[0], &sc->regs.r0); 3796 __get_user(env->regs[1], &sc->regs.r1); 3797 __get_user(env->regs[2], &sc->regs.r2); 3798 __get_user(env->regs[3], &sc->regs.r3); 3799 __get_user(env->regs[4], &sc->regs.r4); 3800 __get_user(env->regs[5], &sc->regs.r5); 3801 __get_user(env->regs[6], &sc->regs.r6); 3802 __get_user(env->regs[7], &sc->regs.r7); 3803 __get_user(env->regs[8], &sc->regs.r8); 3804 __get_user(env->regs[9], &sc->regs.r9); 3805 __get_user(env->regs[10], &sc->regs.r10); 3806 __get_user(env->regs[11], &sc->regs.r11); 3807 __get_user(env->regs[12], &sc->regs.r12); 3808 __get_user(env->regs[13], &sc->regs.r13); 3809 __get_user(env->regs[14], &sc->usp); 3810 __get_user(env->regs[15], &sc->regs.acr); 3811 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3812 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3813 __get_user(env->pc, &sc->regs.erp); 3814 } 3815 3816 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3817 { 3818 abi_ulong sp; 3819 /* Align the stack downwards to 4. 
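       Note that, unlike the other targets in this file, the CRIS version
       does not honour SA_ONSTACK: the frame is always carved out of the
       current user stack.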
*/ 3820 sp = (env->regs[R_SP] & ~3); 3821 return sp - framesize; 3822 } 3823 3824 static void setup_frame(int sig, struct target_sigaction *ka, 3825 target_sigset_t *set, CPUCRISState *env) 3826 { 3827 struct target_signal_frame *frame; 3828 abi_ulong frame_addr; 3829 int i; 3830 3831 frame_addr = get_sigframe(env, sizeof *frame); 3832 trace_user_setup_frame(env, frame_addr); 3833 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3834 goto badframe; 3835 3836 /* 3837 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3838 * use this trampoline anymore but it sets it up for GDB. 3839 * In QEMU, using the trampoline simplifies things a bit so we use it. 3840 * 3841 * This is movu.w __NR_sigreturn, r9; break 13; 3842 */ 3843 __put_user(0x9c5f, frame->retcode+0); 3844 __put_user(TARGET_NR_sigreturn, 3845 frame->retcode + 1); 3846 __put_user(0xe93d, frame->retcode + 2); 3847 3848 /* Save the mask. */ 3849 __put_user(set->sig[0], &frame->sc.oldmask); 3850 3851 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3852 __put_user(set->sig[i], &frame->extramask[i - 1]); 3853 } 3854 3855 setup_sigcontext(&frame->sc, env); 3856 3857 /* Move the stack and setup the arguments for the handler. */ 3858 env->regs[R_SP] = frame_addr; 3859 env->regs[10] = sig; 3860 env->pc = (unsigned long) ka->_sa_handler; 3861 /* Link SRP so the guest returns through the trampoline. */ 3862 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3863 3864 unlock_user_struct(frame, frame_addr, 1); 3865 return; 3866 badframe: 3867 force_sigsegv(sig); 3868 } 3869 3870 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3871 target_siginfo_t *info, 3872 target_sigset_t *set, CPUCRISState *env) 3873 { 3874 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3875 } 3876 3877 long do_sigreturn(CPUCRISState *env) 3878 { 3879 struct target_signal_frame *frame; 3880 abi_ulong frame_addr; 3881 target_sigset_t target_set; 3882 sigset_t set; 3883 int i; 3884 3885 frame_addr = env->regs[R_SP]; 3886 trace_user_do_sigreturn(env, frame_addr); 3887 /* Make sure the guest isn't playing games. 
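       lock_user_struct() checks that the whole frame is addressable in
       guest memory before anything is read from it.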
*/ 3888 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3889 goto badframe; 3890 } 3891 3892 /* Restore blocked signals */ 3893 __get_user(target_set.sig[0], &frame->sc.oldmask); 3894 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3895 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3896 } 3897 target_to_host_sigset_internal(&set, &target_set); 3898 set_sigmask(&set); 3899 3900 restore_sigcontext(&frame->sc, env); 3901 unlock_user_struct(frame, frame_addr, 0); 3902 return -TARGET_QEMU_ESIGRETURN; 3903 badframe: 3904 force_sig(TARGET_SIGSEGV); 3905 return -TARGET_QEMU_ESIGRETURN; 3906 } 3907 3908 long do_rt_sigreturn(CPUCRISState *env) 3909 { 3910 trace_user_do_rt_sigreturn(env, 0); 3911 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3912 return -TARGET_ENOSYS; 3913 } 3914 3915 #elif defined(TARGET_OPENRISC) 3916 3917 struct target_sigcontext { 3918 struct target_pt_regs regs; 3919 abi_ulong oldmask; 3920 abi_ulong usp; 3921 }; 3922 3923 struct target_ucontext { 3924 abi_ulong tuc_flags; 3925 abi_ulong tuc_link; 3926 target_stack_t tuc_stack; 3927 struct target_sigcontext tuc_mcontext; 3928 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3929 }; 3930 3931 struct target_rt_sigframe { 3932 abi_ulong pinfo; 3933 uint64_t puc; 3934 struct target_siginfo info; 3935 struct target_sigcontext sc; 3936 struct target_ucontext uc; 3937 unsigned char retcode[16]; /* trampoline code */ 3938 }; 3939 3940 /* This is the asm-generic/ucontext.h version */ 3941 #if 0 3942 static int restore_sigcontext(CPUOpenRISCState *regs, 3943 struct target_sigcontext *sc) 3944 { 3945 unsigned int err = 0; 3946 unsigned long old_usp; 3947 3948 /* Alwys make any pending restarted system call return -EINTR */ 3949 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3950 3951 /* restore the regs from &sc->regs (same as sc, since regs is first) 3952 * (sc is already checked for VERIFY_READ since the sigframe was 3953 * checked in sys_sigreturn previously) 3954 */ 3955 3956 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3957 goto badframe; 3958 } 3959 3960 /* make sure the U-flag is set so user-mode cannot fool us */ 3961 3962 regs->sr &= ~SR_SM; 3963 3964 /* restore the old USP as it was before we stacked the sc etc. 3965 * (we cannot just pop the sigcontext since we aligned the sp and 3966 * stuff after pushing it) 3967 */ 3968 3969 __get_user(old_usp, &sc->usp); 3970 phx_signal("old_usp 0x%lx", old_usp); 3971 3972 __PHX__ REALLY /* ??? */ 3973 wrusp(old_usp); 3974 regs->gpr[1] = old_usp; 3975 3976 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3977 * after this completes, but we don't use that mechanism. maybe we can 3978 * use it now ? 3979 */ 3980 3981 return err; 3982 3983 badframe: 3984 return 1; 3985 } 3986 #endif 3987 3988 /* Set up a signal frame. */ 3989 3990 static void setup_sigcontext(struct target_sigcontext *sc, 3991 CPUOpenRISCState *regs, 3992 unsigned long mask) 3993 { 3994 unsigned long usp = regs->gpr[1]; 3995 3996 /* copy the regs. they are first in sc so we can use sc directly */ 3997 3998 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 3999 4000 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 4001 the signal handler. The frametype will be restored to its previous 4002 value in restore_sigcontext. 
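       (This description appears to be carried over from the CRIS port this
       code was derived from; the OpenRISC version below only records the
       old signal mask and the user stack pointer.)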
*/ 4003 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 4004 4005 /* then some other stuff */ 4006 __put_user(mask, &sc->oldmask); 4007 __put_user(usp, &sc->usp); 4008 } 4009 4010 static inline unsigned long align_sigframe(unsigned long sp) 4011 { 4012 return sp & ~3UL; 4013 } 4014 4015 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 4016 CPUOpenRISCState *regs, 4017 size_t frame_size) 4018 { 4019 unsigned long sp = regs->gpr[1]; 4020 int onsigstack = on_sig_stack(sp); 4021 4022 /* redzone */ 4023 /* This is the X/Open sanctioned signal stack switching. */ 4024 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 4025 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4026 } 4027 4028 sp = align_sigframe(sp - frame_size); 4029 4030 /* 4031 * If we are on the alternate signal stack and would overflow it, don't. 4032 * Return an always-bogus address instead so we will die with SIGSEGV. 4033 */ 4034 4035 if (onsigstack && !likely(on_sig_stack(sp))) { 4036 return -1L; 4037 } 4038 4039 return sp; 4040 } 4041 4042 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4043 target_siginfo_t *info, 4044 target_sigset_t *set, CPUOpenRISCState *env) 4045 { 4046 int err = 0; 4047 abi_ulong frame_addr; 4048 unsigned long return_ip; 4049 struct target_rt_sigframe *frame; 4050 abi_ulong info_addr, uc_addr; 4051 4052 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4053 trace_user_setup_rt_frame(env, frame_addr); 4054 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4055 goto give_sigsegv; 4056 } 4057 4058 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4059 __put_user(info_addr, &frame->pinfo); 4060 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4061 __put_user(uc_addr, &frame->puc); 4062 4063 if (ka->sa_flags & SA_SIGINFO) { 4064 tswap_siginfo(&frame->info, info); 4065 } 4066 4067 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 4068 __put_user(0, &frame->uc.tuc_flags); 4069 __put_user(0, &frame->uc.tuc_link); 4070 __put_user(target_sigaltstack_used.ss_sp, 4071 &frame->uc.tuc_stack.ss_sp); 4072 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 4073 __put_user(target_sigaltstack_used.ss_size, 4074 &frame->uc.tuc_stack.ss_size); 4075 setup_sigcontext(&frame->sc, env, set->sig[0]); 4076 4077 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4078 4079 /* trampoline - the desired return ip is the retcode itself */ 4080 return_ip = (unsigned long)&frame->retcode; 4081 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4082 __put_user(0xa960, (short *)(frame->retcode + 0)); 4083 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4084 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4085 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4086 4087 if (err) { 4088 goto give_sigsegv; 4089 } 4090 4091 /* TODO what is the current->exec_domain stuff and invmap ? 
*/ 4092 4093 /* Set up registers for signal handler */ 4094 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4095 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4096 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4097 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4098 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4099 4100 /* actually move the usp to reflect the stacked frame */ 4101 env->gpr[1] = (unsigned long)frame; 4102 4103 return; 4104 4105 give_sigsegv: 4106 unlock_user_struct(frame, frame_addr, 1); 4107 force_sigsegv(sig); 4108 } 4109 4110 long do_sigreturn(CPUOpenRISCState *env) 4111 { 4112 trace_user_do_sigreturn(env, 0); 4113 fprintf(stderr, "do_sigreturn: not implemented\n"); 4114 return -TARGET_ENOSYS; 4115 } 4116 4117 long do_rt_sigreturn(CPUOpenRISCState *env) 4118 { 4119 trace_user_do_rt_sigreturn(env, 0); 4120 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4121 return -TARGET_ENOSYS; 4122 } 4123 /* TARGET_OPENRISC */ 4124 4125 #elif defined(TARGET_S390X) 4126 4127 #define __NUM_GPRS 16 4128 #define __NUM_FPRS 16 4129 #define __NUM_ACRS 16 4130 4131 #define S390_SYSCALL_SIZE 2 4132 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4133 4134 #define _SIGCONTEXT_NSIG 64 4135 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4136 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4137 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4138 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4139 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4140 4141 typedef struct { 4142 target_psw_t psw; 4143 target_ulong gprs[__NUM_GPRS]; 4144 unsigned int acrs[__NUM_ACRS]; 4145 } target_s390_regs_common; 4146 4147 typedef struct { 4148 unsigned int fpc; 4149 double fprs[__NUM_FPRS]; 4150 } target_s390_fp_regs; 4151 4152 typedef struct { 4153 target_s390_regs_common regs; 4154 target_s390_fp_regs fpregs; 4155 } target_sigregs; 4156 4157 struct target_sigcontext { 4158 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4159 target_sigregs *sregs; 4160 }; 4161 4162 typedef struct { 4163 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4164 struct target_sigcontext sc; 4165 target_sigregs sregs; 4166 int signo; 4167 uint8_t retcode[S390_SYSCALL_SIZE]; 4168 } sigframe; 4169 4170 struct target_ucontext { 4171 target_ulong tuc_flags; 4172 struct target_ucontext *tuc_link; 4173 target_stack_t tuc_stack; 4174 target_sigregs tuc_mcontext; 4175 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4176 }; 4177 4178 typedef struct { 4179 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4180 uint8_t retcode[S390_SYSCALL_SIZE]; 4181 struct target_siginfo info; 4182 struct target_ucontext uc; 4183 } rt_sigframe; 4184 4185 static inline abi_ulong 4186 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4187 { 4188 abi_ulong sp; 4189 4190 /* Default to using normal stack */ 4191 sp = env->regs[15]; 4192 4193 /* This is the X/Open sanctioned signal stack switching. */ 4194 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4195 if (!sas_ss_flags(sp)) { 4196 sp = target_sigaltstack_used.ss_sp + 4197 target_sigaltstack_used.ss_size; 4198 } 4199 } 4200 4201 /* This is the legacy signal stack switching. 
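       The kernel takes it only when there is an sa_restorer address without
       the SA_RESTORER flag; since the !user_mode() test is stubbed out to 0
       below, the branch is never taken in user-mode emulation.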
*/ 4202 else if (/* FIXME !user_mode(regs) */ 0 && 4203 !(ka->sa_flags & TARGET_SA_RESTORER) && 4204 ka->sa_restorer) { 4205 sp = (abi_ulong) ka->sa_restorer; 4206 } 4207 4208 return (sp - frame_size) & -8ul; 4209 } 4210 4211 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4212 { 4213 int i; 4214 //save_access_regs(current->thread.acrs); FIXME 4215 4216 /* Copy a 'clean' PSW mask to the user to avoid leaking 4217 information about whether PER is currently on. */ 4218 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4219 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4220 for (i = 0; i < 16; i++) { 4221 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4222 } 4223 for (i = 0; i < 16; i++) { 4224 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4225 } 4226 /* 4227 * We have to store the fp registers to current->thread.fp_regs 4228 * to merge them with the emulated registers. 4229 */ 4230 //save_fp_regs(¤t->thread.fp_regs); FIXME 4231 for (i = 0; i < 16; i++) { 4232 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4233 } 4234 } 4235 4236 static void setup_frame(int sig, struct target_sigaction *ka, 4237 target_sigset_t *set, CPUS390XState *env) 4238 { 4239 sigframe *frame; 4240 abi_ulong frame_addr; 4241 4242 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4243 trace_user_setup_frame(env, frame_addr); 4244 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4245 goto give_sigsegv; 4246 } 4247 4248 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4249 4250 save_sigregs(env, &frame->sregs); 4251 4252 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4253 (abi_ulong *)&frame->sc.sregs); 4254 4255 /* Set up to return from userspace. If provided, use a stub 4256 already in userspace. */ 4257 if (ka->sa_flags & TARGET_SA_RESTORER) { 4258 env->regs[14] = (unsigned long) 4259 ka->sa_restorer | PSW_ADDR_AMODE; 4260 } else { 4261 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4262 | PSW_ADDR_AMODE; 4263 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4264 (uint16_t *)(frame->retcode)); 4265 } 4266 4267 /* Set up backchain. */ 4268 __put_user(env->regs[15], (abi_ulong *) frame); 4269 4270 /* Set up registers for signal handler */ 4271 env->regs[15] = frame_addr; 4272 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4273 4274 env->regs[2] = sig; //map_signal(sig); 4275 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4276 4277 /* We forgot to include these in the sigcontext. 4278 To avoid breaking binary compatibility, they are passed as args. */ 4279 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4280 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4281 4282 /* Place signal number on stack to allow backtrace from handler. */ 4283 __put_user(env->regs[2], &frame->signo); 4284 unlock_user_struct(frame, frame_addr, 1); 4285 return; 4286 4287 give_sigsegv: 4288 force_sigsegv(sig); 4289 } 4290 4291 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4292 target_siginfo_t *info, 4293 target_sigset_t *set, CPUS390XState *env) 4294 { 4295 int i; 4296 rt_sigframe *frame; 4297 abi_ulong frame_addr; 4298 4299 frame_addr = get_sigframe(ka, env, sizeof *frame); 4300 trace_user_setup_rt_frame(env, frame_addr); 4301 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4302 goto give_sigsegv; 4303 } 4304 4305 tswap_siginfo(&frame->info, info); 4306 4307 /* Create the ucontext. 
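       Flags and link are cleared, the current sigaltstack settings are
       recorded, the CPU state is captured with save_sigregs(), and the
       blocked signal mask is copied in for rt_sigreturn to restore.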
*/ 4308 __put_user(0, &frame->uc.tuc_flags); 4309 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4310 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4311 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4312 &frame->uc.tuc_stack.ss_flags); 4313 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4314 save_sigregs(env, &frame->uc.tuc_mcontext); 4315 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4316 __put_user((abi_ulong)set->sig[i], 4317 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4318 } 4319 4320 /* Set up to return from userspace. If provided, use a stub 4321 already in userspace. */ 4322 if (ka->sa_flags & TARGET_SA_RESTORER) { 4323 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4324 } else { 4325 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4326 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4327 (uint16_t *)(frame->retcode)); 4328 } 4329 4330 /* Set up backchain. */ 4331 __put_user(env->regs[15], (abi_ulong *) frame); 4332 4333 /* Set up registers for signal handler */ 4334 env->regs[15] = frame_addr; 4335 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4336 4337 env->regs[2] = sig; //map_signal(sig); 4338 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4339 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4340 return; 4341 4342 give_sigsegv: 4343 force_sigsegv(sig); 4344 } 4345 4346 static int 4347 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4348 { 4349 int err = 0; 4350 int i; 4351 4352 for (i = 0; i < 16; i++) { 4353 __get_user(env->regs[i], &sc->regs.gprs[i]); 4354 } 4355 4356 __get_user(env->psw.mask, &sc->regs.psw.mask); 4357 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4358 (unsigned long long)env->psw.addr); 4359 __get_user(env->psw.addr, &sc->regs.psw.addr); 4360 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4361 4362 for (i = 0; i < 16; i++) { 4363 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4364 } 4365 for (i = 0; i < 16; i++) { 4366 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4367 } 4368 4369 return err; 4370 } 4371 4372 long do_sigreturn(CPUS390XState *env) 4373 { 4374 sigframe *frame; 4375 abi_ulong frame_addr = env->regs[15]; 4376 target_sigset_t target_set; 4377 sigset_t set; 4378 4379 trace_user_do_sigreturn(env, frame_addr); 4380 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4381 goto badframe; 4382 } 4383 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4384 4385 target_to_host_sigset_internal(&set, &target_set); 4386 set_sigmask(&set); /* ~_BLOCKABLE? */ 4387 4388 if (restore_sigregs(env, &frame->sregs)) { 4389 goto badframe; 4390 } 4391 4392 unlock_user_struct(frame, frame_addr, 0); 4393 return -TARGET_QEMU_ESIGRETURN; 4394 4395 badframe: 4396 force_sig(TARGET_SIGSEGV); 4397 return -TARGET_QEMU_ESIGRETURN; 4398 } 4399 4400 long do_rt_sigreturn(CPUS390XState *env) 4401 { 4402 rt_sigframe *frame; 4403 abi_ulong frame_addr = env->regs[15]; 4404 sigset_t set; 4405 4406 trace_user_do_rt_sigreturn(env, frame_addr); 4407 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4408 goto badframe; 4409 } 4410 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4411 4412 set_sigmask(&set); /* ~_BLOCKABLE? 
*/ 4413 4414 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4415 goto badframe; 4416 } 4417 4418 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4419 get_sp_from_cpustate(env)) == -EFAULT) { 4420 goto badframe; 4421 } 4422 unlock_user_struct(frame, frame_addr, 0); 4423 return -TARGET_QEMU_ESIGRETURN; 4424 4425 badframe: 4426 unlock_user_struct(frame, frame_addr, 0); 4427 force_sig(TARGET_SIGSEGV); 4428 return -TARGET_QEMU_ESIGRETURN; 4429 } 4430 4431 #elif defined(TARGET_PPC) 4432 4433 /* Size of dummy stack frame allocated when calling signal handler. 4434 See arch/powerpc/include/asm/ptrace.h. */ 4435 #if defined(TARGET_PPC64) 4436 #define SIGNAL_FRAMESIZE 128 4437 #else 4438 #define SIGNAL_FRAMESIZE 64 4439 #endif 4440 4441 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4442 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4443 struct target_mcontext { 4444 target_ulong mc_gregs[48]; 4445 /* Includes fpscr. */ 4446 uint64_t mc_fregs[33]; 4447 target_ulong mc_pad[2]; 4448 /* We need to handle Altivec and SPE at the same time, which no 4449 kernel needs to do. Fortunately, the kernel defines this bit to 4450 be Altivec-register-large all the time, rather than trying to 4451 twiddle it based on the specific platform. */ 4452 union { 4453 /* SPE vector registers. One extra for SPEFSCR. */ 4454 uint32_t spe[33]; 4455 /* Altivec vector registers. The packing of VSCR and VRSAVE 4456 varies depending on whether we're PPC64 or not: PPC64 splits 4457 them apart; PPC32 stuffs them together. */ 4458 #if defined(TARGET_PPC64) 4459 #define QEMU_NVRREG 34 4460 #else 4461 #define QEMU_NVRREG 33 4462 #endif 4463 ppc_avr_t altivec[QEMU_NVRREG]; 4464 #undef QEMU_NVRREG 4465 } mc_vregs __attribute__((__aligned__(16))); 4466 }; 4467 4468 /* See arch/powerpc/include/asm/sigcontext.h. */ 4469 struct target_sigcontext { 4470 target_ulong _unused[4]; 4471 int32_t signal; 4472 #if defined(TARGET_PPC64) 4473 int32_t pad0; 4474 #endif 4475 target_ulong handler; 4476 target_ulong oldmask; 4477 target_ulong regs; /* struct pt_regs __user * */ 4478 #if defined(TARGET_PPC64) 4479 struct target_mcontext mcontext; 4480 #endif 4481 }; 4482 4483 /* Indices for target_mcontext.mc_gregs, below. 4484 See arch/powerpc/include/asm/ptrace.h for details. */ 4485 enum { 4486 TARGET_PT_R0 = 0, 4487 TARGET_PT_R1 = 1, 4488 TARGET_PT_R2 = 2, 4489 TARGET_PT_R3 = 3, 4490 TARGET_PT_R4 = 4, 4491 TARGET_PT_R5 = 5, 4492 TARGET_PT_R6 = 6, 4493 TARGET_PT_R7 = 7, 4494 TARGET_PT_R8 = 8, 4495 TARGET_PT_R9 = 9, 4496 TARGET_PT_R10 = 10, 4497 TARGET_PT_R11 = 11, 4498 TARGET_PT_R12 = 12, 4499 TARGET_PT_R13 = 13, 4500 TARGET_PT_R14 = 14, 4501 TARGET_PT_R15 = 15, 4502 TARGET_PT_R16 = 16, 4503 TARGET_PT_R17 = 17, 4504 TARGET_PT_R18 = 18, 4505 TARGET_PT_R19 = 19, 4506 TARGET_PT_R20 = 20, 4507 TARGET_PT_R21 = 21, 4508 TARGET_PT_R22 = 22, 4509 TARGET_PT_R23 = 23, 4510 TARGET_PT_R24 = 24, 4511 TARGET_PT_R25 = 25, 4512 TARGET_PT_R26 = 26, 4513 TARGET_PT_R27 = 27, 4514 TARGET_PT_R28 = 28, 4515 TARGET_PT_R29 = 29, 4516 TARGET_PT_R30 = 30, 4517 TARGET_PT_R31 = 31, 4518 TARGET_PT_NIP = 32, 4519 TARGET_PT_MSR = 33, 4520 TARGET_PT_ORIG_R3 = 34, 4521 TARGET_PT_CTR = 35, 4522 TARGET_PT_LNK = 36, 4523 TARGET_PT_XER = 37, 4524 TARGET_PT_CCR = 38, 4525 /* Yes, there are two registers with #39. One is 64-bit only. 
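       TARGET_PT_MQ is only meaningful on 32-bit PowerPC while
       TARGET_PT_SOFTE exists only on 64-bit, so the two can share
       slot 39 of mc_gregs.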
*/ 4526 TARGET_PT_MQ = 39, 4527 TARGET_PT_SOFTE = 39, 4528 TARGET_PT_TRAP = 40, 4529 TARGET_PT_DAR = 41, 4530 TARGET_PT_DSISR = 42, 4531 TARGET_PT_RESULT = 43, 4532 TARGET_PT_REGS_COUNT = 44 4533 }; 4534 4535 4536 struct target_ucontext { 4537 target_ulong tuc_flags; 4538 target_ulong tuc_link; /* struct ucontext __user * */ 4539 struct target_sigaltstack tuc_stack; 4540 #if !defined(TARGET_PPC64) 4541 int32_t tuc_pad[7]; 4542 target_ulong tuc_regs; /* struct mcontext __user * 4543 points to uc_mcontext field */ 4544 #endif 4545 target_sigset_t tuc_sigmask; 4546 #if defined(TARGET_PPC64) 4547 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4548 struct target_sigcontext tuc_sigcontext; 4549 #else 4550 int32_t tuc_maskext[30]; 4551 int32_t tuc_pad2[3]; 4552 struct target_mcontext tuc_mcontext; 4553 #endif 4554 }; 4555 4556 /* See arch/powerpc/kernel/signal_32.c. */ 4557 struct target_sigframe { 4558 struct target_sigcontext sctx; 4559 struct target_mcontext mctx; 4560 int32_t abigap[56]; 4561 }; 4562 4563 #if defined(TARGET_PPC64) 4564 4565 #define TARGET_TRAMP_SIZE 6 4566 4567 struct target_rt_sigframe { 4568 /* sys_rt_sigreturn requires the ucontext be the first field */ 4569 struct target_ucontext uc; 4570 target_ulong _unused[2]; 4571 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4572 target_ulong pinfo; /* struct siginfo __user * */ 4573 target_ulong puc; /* void __user * */ 4574 struct target_siginfo info; 4575 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4576 char abigap[288]; 4577 } __attribute__((aligned(16))); 4578 4579 #else 4580 4581 struct target_rt_sigframe { 4582 struct target_siginfo info; 4583 struct target_ucontext uc; 4584 int32_t abigap[56]; 4585 }; 4586 4587 #endif 4588 4589 #if defined(TARGET_PPC64) 4590 4591 struct target_func_ptr { 4592 target_ulong entry; 4593 target_ulong toc; 4594 }; 4595 4596 #endif 4597 4598 /* We use the mc_pad field for the signal return trampoline. */ 4599 #define tramp mc_pad 4600 4601 /* See arch/powerpc/kernel/signal.c. */ 4602 static target_ulong get_sigframe(struct target_sigaction *ka, 4603 CPUPPCState *env, 4604 int frame_size) 4605 { 4606 target_ulong oldsp; 4607 4608 oldsp = env->gpr[1]; 4609 4610 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4611 (sas_ss_flags(oldsp) == 0)) { 4612 oldsp = (target_sigaltstack_used.ss_sp 4613 + target_sigaltstack_used.ss_size); 4614 } 4615 4616 return (oldsp - frame_size) & ~0xFUL; 4617 } 4618 4619 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4620 { 4621 target_ulong msr = env->msr; 4622 int i; 4623 target_ulong ccr = 0; 4624 4625 /* In general, the kernel attempts to be intelligent about what it 4626 needs to save for Altivec/FP/SPE registers. We don't care that 4627 much, so we just go ahead and save everything. */ 4628 4629 /* Save general registers. */ 4630 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4631 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4632 } 4633 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4634 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4635 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4636 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4637 4638 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4639 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4640 } 4641 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4642 4643 /* Save Altivec registers if necessary. 
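       Each 128-bit vector register is stored as two 64-bit halves, and the
       VRSAVE SPR goes into the last word of the final vector slot.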
*/ 4644 if (env->insns_flags & PPC_ALTIVEC) { 4645 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4646 ppc_avr_t *avr = &env->avr[i]; 4647 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4648 4649 __put_user(avr->u64[0], &vreg->u64[0]); 4650 __put_user(avr->u64[1], &vreg->u64[1]); 4651 } 4652 /* Set MSR_VR in the saved MSR value to indicate that 4653 frame->mc_vregs contains valid data. */ 4654 msr |= MSR_VR; 4655 __put_user((uint32_t)env->spr[SPR_VRSAVE], 4656 &frame->mc_vregs.altivec[32].u32[3]); 4657 } 4658 4659 /* Save floating point registers. */ 4660 if (env->insns_flags & PPC_FLOAT) { 4661 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4662 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4663 } 4664 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4665 } 4666 4667 /* Save SPE registers. The kernel only saves the high half. */ 4668 if (env->insns_flags & PPC_SPE) { 4669 #if defined(TARGET_PPC64) 4670 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4671 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4672 } 4673 #else 4674 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4675 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4676 } 4677 #endif 4678 /* Set MSR_SPE in the saved MSR value to indicate that 4679 frame->mc_vregs contains valid data. */ 4680 msr |= MSR_SPE; 4681 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4682 } 4683 4684 /* Store MSR. */ 4685 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4686 } 4687 4688 static void encode_trampoline(int sigret, uint32_t *tramp) 4689 { 4690 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4691 if (sigret) { 4692 __put_user(0x38000000 | sigret, &tramp[0]); 4693 __put_user(0x44000002, &tramp[1]); 4694 } 4695 } 4696 4697 static void restore_user_regs(CPUPPCState *env, 4698 struct target_mcontext *frame, int sig) 4699 { 4700 target_ulong save_r2 = 0; 4701 target_ulong msr; 4702 target_ulong ccr; 4703 4704 int i; 4705 4706 if (!sig) { 4707 save_r2 = env->gpr[2]; 4708 } 4709 4710 /* Restore general registers. */ 4711 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4712 __get_user(env->gpr[i], &frame->mc_gregs[i]); 4713 } 4714 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4715 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4716 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4717 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4718 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4719 4720 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4721 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4722 } 4723 4724 if (!sig) { 4725 env->gpr[2] = save_r2; 4726 } 4727 /* Restore MSR. */ 4728 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4729 4730 /* If doing signal return, restore the previous little-endian mode. */ 4731 if (sig) 4732 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 4733 4734 /* Restore Altivec registers if necessary. */ 4735 if (env->insns_flags & PPC_ALTIVEC) { 4736 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4737 ppc_avr_t *avr = &env->avr[i]; 4738 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4739 4740 __get_user(avr->u64[0], &vreg->u64[0]); 4741 __get_user(avr->u64[1], &vreg->u64[1]); 4742 } 4743 /* Set MSR_VEC in the saved MSR value to indicate that 4744 frame->mc_vregs contains valid data. */ 4745 __get_user(env->spr[SPR_VRSAVE], 4746 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3])); 4747 } 4748 4749 /* Restore floating point registers. 
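       The 33rd entry of mc_fregs holds the FPSCR image; only its low
       32 bits are significant.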
*/ 4750 if (env->insns_flags & PPC_FLOAT) { 4751 uint64_t fpscr; 4752 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4753 __get_user(env->fpr[i], &frame->mc_fregs[i]); 4754 } 4755 __get_user(fpscr, &frame->mc_fregs[32]); 4756 env->fpscr = (uint32_t) fpscr; 4757 } 4758 4759 /* Save SPE registers. The kernel only saves the high half. */ 4760 if (env->insns_flags & PPC_SPE) { 4761 #if defined(TARGET_PPC64) 4762 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4763 uint32_t hi; 4764 4765 __get_user(hi, &frame->mc_vregs.spe[i]); 4766 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4767 } 4768 #else 4769 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4770 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4771 } 4772 #endif 4773 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4774 } 4775 } 4776 4777 static void setup_frame(int sig, struct target_sigaction *ka, 4778 target_sigset_t *set, CPUPPCState *env) 4779 { 4780 struct target_sigframe *frame; 4781 struct target_sigcontext *sc; 4782 target_ulong frame_addr, newsp; 4783 int err = 0; 4784 #if defined(TARGET_PPC64) 4785 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4786 #endif 4787 4788 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4789 trace_user_setup_frame(env, frame_addr); 4790 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4791 goto sigsegv; 4792 sc = &frame->sctx; 4793 4794 __put_user(ka->_sa_handler, &sc->handler); 4795 __put_user(set->sig[0], &sc->oldmask); 4796 #if TARGET_ABI_BITS == 64 4797 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4798 #else 4799 __put_user(set->sig[1], &sc->_unused[3]); 4800 #endif 4801 __put_user(h2g(&frame->mctx), &sc->regs); 4802 __put_user(sig, &sc->signal); 4803 4804 /* Save user regs. */ 4805 save_user_regs(env, &frame->mctx); 4806 4807 /* Construct the trampoline code on the stack. */ 4808 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 4809 4810 /* The kernel checks for the presence of a VDSO here. We don't 4811 emulate a vdso, so use a sigreturn system call. */ 4812 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4813 4814 /* Turn off all fp exceptions. */ 4815 env->fpscr = 0; 4816 4817 /* Create a stack frame for the caller of the handler. */ 4818 newsp = frame_addr - SIGNAL_FRAMESIZE; 4819 err |= put_user(env->gpr[1], newsp, target_ulong); 4820 4821 if (err) 4822 goto sigsegv; 4823 4824 /* Set up registers for signal handler. */ 4825 env->gpr[1] = newsp; 4826 env->gpr[3] = sig; 4827 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4828 4829 #if defined(TARGET_PPC64) 4830 if (get_ppc64_abi(image) < 2) { 4831 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4832 struct target_func_ptr *handler = 4833 (struct target_func_ptr *)g2h(ka->_sa_handler); 4834 env->nip = tswapl(handler->entry); 4835 env->gpr[2] = tswapl(handler->toc); 4836 } else { 4837 /* ELFv2 PPC64 function pointers are entry points, but R12 4838 * must also be set */ 4839 env->nip = tswapl((target_ulong) ka->_sa_handler); 4840 env->gpr[12] = env->nip; 4841 } 4842 #else 4843 env->nip = (target_ulong) ka->_sa_handler; 4844 #endif 4845 4846 /* Signal handlers are entered in big-endian mode. 
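       MSR_LE is cleared here; the guest's previous endianness is brought
       back from the saved MSR by restore_user_regs() on sigreturn.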
*/ 4847 env->msr &= ~(1ull << MSR_LE); 4848 4849 unlock_user_struct(frame, frame_addr, 1); 4850 return; 4851 4852 sigsegv: 4853 unlock_user_struct(frame, frame_addr, 1); 4854 force_sigsegv(sig); 4855 } 4856 4857 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4858 target_siginfo_t *info, 4859 target_sigset_t *set, CPUPPCState *env) 4860 { 4861 struct target_rt_sigframe *rt_sf; 4862 uint32_t *trampptr = 0; 4863 struct target_mcontext *mctx = 0; 4864 target_ulong rt_sf_addr, newsp = 0; 4865 int i, err = 0; 4866 #if defined(TARGET_PPC64) 4867 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4868 #endif 4869 4870 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4871 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4872 goto sigsegv; 4873 4874 tswap_siginfo(&rt_sf->info, info); 4875 4876 __put_user(0, &rt_sf->uc.tuc_flags); 4877 __put_user(0, &rt_sf->uc.tuc_link); 4878 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4879 &rt_sf->uc.tuc_stack.ss_sp); 4880 __put_user(sas_ss_flags(env->gpr[1]), 4881 &rt_sf->uc.tuc_stack.ss_flags); 4882 __put_user(target_sigaltstack_used.ss_size, 4883 &rt_sf->uc.tuc_stack.ss_size); 4884 #if !defined(TARGET_PPC64) 4885 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4886 &rt_sf->uc.tuc_regs); 4887 #endif 4888 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4889 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4890 } 4891 4892 #if defined(TARGET_PPC64) 4893 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 4894 trampptr = &rt_sf->trampoline[0]; 4895 #else 4896 mctx = &rt_sf->uc.tuc_mcontext; 4897 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 4898 #endif 4899 4900 save_user_regs(env, mctx); 4901 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 4902 4903 /* The kernel checks for the presence of a VDSO here. We don't 4904 emulate a vdso, so use a sigreturn system call. */ 4905 env->lr = (target_ulong) h2g(trampptr); 4906 4907 /* Turn off all fp exceptions. */ 4908 env->fpscr = 0; 4909 4910 /* Create a stack frame for the caller of the handler. */ 4911 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4912 err |= put_user(env->gpr[1], newsp, target_ulong); 4913 4914 if (err) 4915 goto sigsegv; 4916 4917 /* Set up registers for signal handler. */ 4918 env->gpr[1] = newsp; 4919 env->gpr[3] = (target_ulong) sig; 4920 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4921 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4922 env->gpr[6] = (target_ulong) h2g(rt_sf); 4923 4924 #if defined(TARGET_PPC64) 4925 if (get_ppc64_abi(image) < 2) { 4926 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4927 struct target_func_ptr *handler = 4928 (struct target_func_ptr *)g2h(ka->_sa_handler); 4929 env->nip = tswapl(handler->entry); 4930 env->gpr[2] = tswapl(handler->toc); 4931 } else { 4932 /* ELFv2 PPC64 function pointers are entry points, but R12 4933 * must also be set */ 4934 env->nip = tswapl((target_ulong) ka->_sa_handler); 4935 env->gpr[12] = env->nip; 4936 } 4937 #else 4938 env->nip = (target_ulong) ka->_sa_handler; 4939 #endif 4940 4941 /* Signal handlers are entered in big-endian mode. 
*/ 4942 env->msr &= ~(1ull << MSR_LE); 4943 4944 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4945 return; 4946 4947 sigsegv: 4948 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4949 force_sigsegv(sig); 4950 4951 } 4952 4953 long do_sigreturn(CPUPPCState *env) 4954 { 4955 struct target_sigcontext *sc = NULL; 4956 struct target_mcontext *sr = NULL; 4957 target_ulong sr_addr = 0, sc_addr; 4958 sigset_t blocked; 4959 target_sigset_t set; 4960 4961 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4962 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4963 goto sigsegv; 4964 4965 #if defined(TARGET_PPC64) 4966 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 4967 #else 4968 __get_user(set.sig[0], &sc->oldmask); 4969 __get_user(set.sig[1], &sc->_unused[3]); 4970 #endif 4971 target_to_host_sigset_internal(&blocked, &set); 4972 set_sigmask(&blocked); 4973 4974 __get_user(sr_addr, &sc->regs); 4975 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4976 goto sigsegv; 4977 restore_user_regs(env, sr, 1); 4978 4979 unlock_user_struct(sr, sr_addr, 1); 4980 unlock_user_struct(sc, sc_addr, 1); 4981 return -TARGET_QEMU_ESIGRETURN; 4982 4983 sigsegv: 4984 unlock_user_struct(sr, sr_addr, 1); 4985 unlock_user_struct(sc, sc_addr, 1); 4986 force_sig(TARGET_SIGSEGV); 4987 return -TARGET_QEMU_ESIGRETURN; 4988 } 4989 4990 /* See arch/powerpc/kernel/signal_32.c. */ 4991 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 4992 { 4993 struct target_mcontext *mcp; 4994 target_ulong mcp_addr; 4995 sigset_t blocked; 4996 target_sigset_t set; 4997 4998 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 4999 sizeof (set))) 5000 return 1; 5001 5002 #if defined(TARGET_PPC64) 5003 mcp_addr = h2g(ucp) + 5004 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 5005 #else 5006 __get_user(mcp_addr, &ucp->tuc_regs); 5007 #endif 5008 5009 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 5010 return 1; 5011 5012 target_to_host_sigset_internal(&blocked, &set); 5013 set_sigmask(&blocked); 5014 restore_user_regs(env, mcp, sig); 5015 5016 unlock_user_struct(mcp, mcp_addr, 1); 5017 return 0; 5018 } 5019 5020 long do_rt_sigreturn(CPUPPCState *env) 5021 { 5022 struct target_rt_sigframe *rt_sf = NULL; 5023 target_ulong rt_sf_addr; 5024 5025 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5026 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5027 goto sigsegv; 5028 5029 if (do_setcontext(&rt_sf->uc, env, 1)) 5030 goto sigsegv; 5031 5032 do_sigaltstack(rt_sf_addr 5033 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5034 0, env->gpr[1]); 5035 5036 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5037 return -TARGET_QEMU_ESIGRETURN; 5038 5039 sigsegv: 5040 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5041 force_sig(TARGET_SIGSEGV); 5042 return -TARGET_QEMU_ESIGRETURN; 5043 } 5044 5045 #elif defined(TARGET_M68K) 5046 5047 struct target_sigcontext { 5048 abi_ulong sc_mask; 5049 abi_ulong sc_usp; 5050 abi_ulong sc_d0; 5051 abi_ulong sc_d1; 5052 abi_ulong sc_a0; 5053 abi_ulong sc_a1; 5054 unsigned short sc_sr; 5055 abi_ulong sc_pc; 5056 }; 5057 5058 struct target_sigframe 5059 { 5060 abi_ulong pretcode; 5061 int sig; 5062 int code; 5063 abi_ulong psc; 5064 char retcode[8]; 5065 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5066 struct target_sigcontext sc; 5067 }; 5068 5069 typedef int target_greg_t; 5070 #define TARGET_NGREG 18 5071 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5072 5073 typedef struct target_fpregset { 5074 int f_fpcntl[3]; 5075 int 
f_fpregs[8*3]; 5076 } target_fpregset_t; 5077 5078 struct target_mcontext { 5079 int version; 5080 target_gregset_t gregs; 5081 target_fpregset_t fpregs; 5082 }; 5083 5084 #define TARGET_MCONTEXT_VERSION 2 5085 5086 struct target_ucontext { 5087 abi_ulong tuc_flags; 5088 abi_ulong tuc_link; 5089 target_stack_t tuc_stack; 5090 struct target_mcontext tuc_mcontext; 5091 abi_long tuc_filler[80]; 5092 target_sigset_t tuc_sigmask; 5093 }; 5094 5095 struct target_rt_sigframe 5096 { 5097 abi_ulong pretcode; 5098 int sig; 5099 abi_ulong pinfo; 5100 abi_ulong puc; 5101 char retcode[8]; 5102 struct target_siginfo info; 5103 struct target_ucontext uc; 5104 }; 5105 5106 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5107 abi_ulong mask) 5108 { 5109 __put_user(mask, &sc->sc_mask); 5110 __put_user(env->aregs[7], &sc->sc_usp); 5111 __put_user(env->dregs[0], &sc->sc_d0); 5112 __put_user(env->dregs[1], &sc->sc_d1); 5113 __put_user(env->aregs[0], &sc->sc_a0); 5114 __put_user(env->aregs[1], &sc->sc_a1); 5115 __put_user(env->sr, &sc->sc_sr); 5116 __put_user(env->pc, &sc->sc_pc); 5117 } 5118 5119 static void 5120 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5121 { 5122 int temp; 5123 5124 __get_user(env->aregs[7], &sc->sc_usp); 5125 __get_user(env->dregs[0], &sc->sc_d0); 5126 __get_user(env->dregs[1], &sc->sc_d1); 5127 __get_user(env->aregs[0], &sc->sc_a0); 5128 __get_user(env->aregs[1], &sc->sc_a1); 5129 __get_user(env->pc, &sc->sc_pc); 5130 __get_user(temp, &sc->sc_sr); 5131 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5132 } 5133 5134 /* 5135 * Determine which stack to use.. 5136 */ 5137 static inline abi_ulong 5138 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5139 size_t frame_size) 5140 { 5141 unsigned long sp; 5142 5143 sp = regs->aregs[7]; 5144 5145 /* This is the X/Open sanctioned signal stack switching. */ 5146 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5147 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5148 } 5149 5150 return ((sp - frame_size) & -8UL); 5151 } 5152 5153 static void setup_frame(int sig, struct target_sigaction *ka, 5154 target_sigset_t *set, CPUM68KState *env) 5155 { 5156 struct target_sigframe *frame; 5157 abi_ulong frame_addr; 5158 abi_ulong retcode_addr; 5159 abi_ulong sc_addr; 5160 int i; 5161 5162 frame_addr = get_sigframe(ka, env, sizeof *frame); 5163 trace_user_setup_frame(env, frame_addr); 5164 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5165 goto give_sigsegv; 5166 } 5167 5168 __put_user(sig, &frame->sig); 5169 5170 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5171 __put_user(sc_addr, &frame->psc); 5172 5173 setup_sigcontext(&frame->sc, env, set->sig[0]); 5174 5175 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5176 __put_user(set->sig[i], &frame->extramask[i - 1]); 5177 } 5178 5179 /* Set up to return from userspace. 
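       A two-instruction trampoline (moveq #__NR_sigreturn,d0; trap #0) is
       written into the frame's retcode field and its guest address is
       published through pretcode.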
*/ 5180 5181 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5182 __put_user(retcode_addr, &frame->pretcode); 5183 5184 /* moveq #,d0; trap #0 */ 5185 5186 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5187 (uint32_t *)(frame->retcode)); 5188 5189 /* Set up to return from userspace */ 5190 5191 env->aregs[7] = frame_addr; 5192 env->pc = ka->_sa_handler; 5193 5194 unlock_user_struct(frame, frame_addr, 1); 5195 return; 5196 5197 give_sigsegv: 5198 force_sigsegv(sig); 5199 } 5200 5201 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5202 CPUM68KState *env) 5203 { 5204 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5205 5206 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5207 __put_user(env->dregs[0], &gregs[0]); 5208 __put_user(env->dregs[1], &gregs[1]); 5209 __put_user(env->dregs[2], &gregs[2]); 5210 __put_user(env->dregs[3], &gregs[3]); 5211 __put_user(env->dregs[4], &gregs[4]); 5212 __put_user(env->dregs[5], &gregs[5]); 5213 __put_user(env->dregs[6], &gregs[6]); 5214 __put_user(env->dregs[7], &gregs[7]); 5215 __put_user(env->aregs[0], &gregs[8]); 5216 __put_user(env->aregs[1], &gregs[9]); 5217 __put_user(env->aregs[2], &gregs[10]); 5218 __put_user(env->aregs[3], &gregs[11]); 5219 __put_user(env->aregs[4], &gregs[12]); 5220 __put_user(env->aregs[5], &gregs[13]); 5221 __put_user(env->aregs[6], &gregs[14]); 5222 __put_user(env->aregs[7], &gregs[15]); 5223 __put_user(env->pc, &gregs[16]); 5224 __put_user(env->sr, &gregs[17]); 5225 5226 return 0; 5227 } 5228 5229 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5230 struct target_ucontext *uc) 5231 { 5232 int temp; 5233 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5234 5235 __get_user(temp, &uc->tuc_mcontext.version); 5236 if (temp != TARGET_MCONTEXT_VERSION) 5237 goto badframe; 5238 5239 /* restore passed registers */ 5240 __get_user(env->dregs[0], &gregs[0]); 5241 __get_user(env->dregs[1], &gregs[1]); 5242 __get_user(env->dregs[2], &gregs[2]); 5243 __get_user(env->dregs[3], &gregs[3]); 5244 __get_user(env->dregs[4], &gregs[4]); 5245 __get_user(env->dregs[5], &gregs[5]); 5246 __get_user(env->dregs[6], &gregs[6]); 5247 __get_user(env->dregs[7], &gregs[7]); 5248 __get_user(env->aregs[0], &gregs[8]); 5249 __get_user(env->aregs[1], &gregs[9]); 5250 __get_user(env->aregs[2], &gregs[10]); 5251 __get_user(env->aregs[3], &gregs[11]); 5252 __get_user(env->aregs[4], &gregs[12]); 5253 __get_user(env->aregs[5], &gregs[13]); 5254 __get_user(env->aregs[6], &gregs[14]); 5255 __get_user(env->aregs[7], &gregs[15]); 5256 __get_user(env->pc, &gregs[16]); 5257 __get_user(temp, &gregs[17]); 5258 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5259 5260 return 0; 5261 5262 badframe: 5263 return 1; 5264 } 5265 5266 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5267 target_siginfo_t *info, 5268 target_sigset_t *set, CPUM68KState *env) 5269 { 5270 struct target_rt_sigframe *frame; 5271 abi_ulong frame_addr; 5272 abi_ulong retcode_addr; 5273 abi_ulong info_addr; 5274 abi_ulong uc_addr; 5275 int err = 0; 5276 int i; 5277 5278 frame_addr = get_sigframe(ka, env, sizeof *frame); 5279 trace_user_setup_rt_frame(env, frame_addr); 5280 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5281 goto give_sigsegv; 5282 } 5283 5284 __put_user(sig, &frame->sig); 5285 5286 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5287 __put_user(info_addr, &frame->pinfo); 5288 5289 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5290 
__put_user(uc_addr, &frame->puc);

    tswap_siginfo(&frame->info, info);

    /* Create the ucontext */

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->aregs[7]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    err |= target_rt_setup_ucontext(&frame->uc, env);

    if (err)
        goto give_sigsegv;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace. */

    retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; notb d0; trap #0 */

    __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
               (uint32_t *)(frame->retcode + 0));
    __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));

    if (err)
        goto give_sigsegv;

    /* Set up to return from userspace */

    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}

long do_sigreturn(CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* set blocked signals */

    __get_user(target_set.sig[0], &frame->sc.sc_mask);

    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */

    restore_sigcontext(env, &frame->sc);

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_rt_sigreturn(CPUM68KState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* Restore the blocked signal mask saved in the frame's ucontext. */
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    /* restore registers */

    if (target_rt_restore_ucontext(env, &frame->uc))
        goto badframe;

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_ALPHA)

struct target_sigcontext {
    abi_long sc_onstack;
    abi_long sc_mask;
    abi_long sc_pc;
    abi_long sc_ps;
    abi_long sc_regs[32];
    abi_long sc_ownedfp;
    abi_long sc_fpregs[32];
    abi_ulong sc_fpcr;
    abi_ulong sc_fp_control;
    abi_ulong
sc_reserved1; 5421 abi_ulong sc_reserved2; 5422 abi_ulong sc_ssize; 5423 abi_ulong sc_sbase; 5424 abi_ulong sc_traparg_a0; 5425 abi_ulong sc_traparg_a1; 5426 abi_ulong sc_traparg_a2; 5427 abi_ulong sc_fp_trap_pc; 5428 abi_ulong sc_fp_trigger_sum; 5429 abi_ulong sc_fp_trigger_inst; 5430 }; 5431 5432 struct target_ucontext { 5433 abi_ulong tuc_flags; 5434 abi_ulong tuc_link; 5435 abi_ulong tuc_osf_sigmask; 5436 target_stack_t tuc_stack; 5437 struct target_sigcontext tuc_mcontext; 5438 target_sigset_t tuc_sigmask; 5439 }; 5440 5441 struct target_sigframe { 5442 struct target_sigcontext sc; 5443 unsigned int retcode[3]; 5444 }; 5445 5446 struct target_rt_sigframe { 5447 target_siginfo_t info; 5448 struct target_ucontext uc; 5449 unsigned int retcode[3]; 5450 }; 5451 5452 #define INSN_MOV_R30_R16 0x47fe0410 5453 #define INSN_LDI_R0 0x201f0000 5454 #define INSN_CALLSYS 0x00000083 5455 5456 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5457 abi_ulong frame_addr, target_sigset_t *set) 5458 { 5459 int i; 5460 5461 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5462 __put_user(set->sig[0], &sc->sc_mask); 5463 __put_user(env->pc, &sc->sc_pc); 5464 __put_user(8, &sc->sc_ps); 5465 5466 for (i = 0; i < 31; ++i) { 5467 __put_user(env->ir[i], &sc->sc_regs[i]); 5468 } 5469 __put_user(0, &sc->sc_regs[31]); 5470 5471 for (i = 0; i < 31; ++i) { 5472 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5473 } 5474 __put_user(0, &sc->sc_fpregs[31]); 5475 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5476 5477 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5478 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5479 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5480 } 5481 5482 static void restore_sigcontext(CPUAlphaState *env, 5483 struct target_sigcontext *sc) 5484 { 5485 uint64_t fpcr; 5486 int i; 5487 5488 __get_user(env->pc, &sc->sc_pc); 5489 5490 for (i = 0; i < 31; ++i) { 5491 __get_user(env->ir[i], &sc->sc_regs[i]); 5492 } 5493 for (i = 0; i < 31; ++i) { 5494 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5495 } 5496 5497 __get_user(fpcr, &sc->sc_fpcr); 5498 cpu_alpha_store_fpcr(env, fpcr); 5499 } 5500 5501 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5502 CPUAlphaState *env, 5503 unsigned long framesize) 5504 { 5505 abi_ulong sp = env->ir[IR_SP]; 5506 5507 /* This is the X/Open sanctioned signal stack switching. 
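       Switch to the alternate stack only when SA_ONSTACK was requested and
       we are not already running on it; the frame is then aligned down to
       32 bytes.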
*/ 5508 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5509 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5510 } 5511 return (sp - framesize) & -32; 5512 } 5513 5514 static void setup_frame(int sig, struct target_sigaction *ka, 5515 target_sigset_t *set, CPUAlphaState *env) 5516 { 5517 abi_ulong frame_addr, r26; 5518 struct target_sigframe *frame; 5519 int err = 0; 5520 5521 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5522 trace_user_setup_frame(env, frame_addr); 5523 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5524 goto give_sigsegv; 5525 } 5526 5527 setup_sigcontext(&frame->sc, env, frame_addr, set); 5528 5529 if (ka->sa_restorer) { 5530 r26 = ka->sa_restorer; 5531 } else { 5532 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5533 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5534 &frame->retcode[1]); 5535 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5536 /* imb() */ 5537 r26 = frame_addr; 5538 } 5539 5540 unlock_user_struct(frame, frame_addr, 1); 5541 5542 if (err) { 5543 give_sigsegv: 5544 force_sigsegv(sig); 5545 return; 5546 } 5547 5548 env->ir[IR_RA] = r26; 5549 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5550 env->ir[IR_A0] = sig; 5551 env->ir[IR_A1] = 0; 5552 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5553 env->ir[IR_SP] = frame_addr; 5554 } 5555 5556 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5557 target_siginfo_t *info, 5558 target_sigset_t *set, CPUAlphaState *env) 5559 { 5560 abi_ulong frame_addr, r26; 5561 struct target_rt_sigframe *frame; 5562 int i, err = 0; 5563 5564 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5565 trace_user_setup_rt_frame(env, frame_addr); 5566 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5567 goto give_sigsegv; 5568 } 5569 5570 tswap_siginfo(&frame->info, info); 5571 5572 __put_user(0, &frame->uc.tuc_flags); 5573 __put_user(0, &frame->uc.tuc_link); 5574 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5575 __put_user(target_sigaltstack_used.ss_sp, 5576 &frame->uc.tuc_stack.ss_sp); 5577 __put_user(sas_ss_flags(env->ir[IR_SP]), 5578 &frame->uc.tuc_stack.ss_flags); 5579 __put_user(target_sigaltstack_used.ss_size, 5580 &frame->uc.tuc_stack.ss_size); 5581 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5582 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5583 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5584 } 5585 5586 if (ka->sa_restorer) { 5587 r26 = ka->sa_restorer; 5588 } else { 5589 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5590 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5591 &frame->retcode[1]); 5592 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5593 /* imb(); */ 5594 r26 = frame_addr; 5595 } 5596 5597 if (err) { 5598 give_sigsegv: 5599 force_sigsegv(sig); 5600 return; 5601 } 5602 5603 env->ir[IR_RA] = r26; 5604 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5605 env->ir[IR_A0] = sig; 5606 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5607 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5608 env->ir[IR_SP] = frame_addr; 5609 } 5610 5611 long do_sigreturn(CPUAlphaState *env) 5612 { 5613 struct target_sigcontext *sc; 5614 abi_ulong sc_addr = env->ir[IR_A0]; 5615 target_sigset_t target_set; 5616 sigset_t set; 5617 5618 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5619 goto badframe; 5620 } 5621 5622 target_sigemptyset(&target_set); 5623 __get_user(target_set.sig[0], &sc->sc_mask); 5624 5625 
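    /* Only the first word of the mask is stored in sc_mask; the remaining
       words were cleared by target_sigemptyset() above. */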
long do_sigreturn(CPUAlphaState *env)
{
    struct target_sigcontext *sc;
    abi_ulong sc_addr = env->ir[IR_A0];
    target_sigset_t target_set;
    sigset_t set;

    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
        goto badframe;
    }

    target_sigemptyset(&target_set);
    __get_user(target_set.sig[0], &sc->sc_mask);

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    restore_sigcontext(env, sc);
    unlock_user_struct(sc, sc_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_rt_sigreturn(CPUAlphaState *env)
{
    abi_ulong frame_addr = env->ir[IR_A0];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->ir[IR_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_TILEGX)

struct target_sigcontext {
    union {
        /* General-purpose registers. */
        abi_ulong gregs[56];
        struct {
            abi_ulong __gregs[53];
            abi_ulong tp;        /* Aliases gregs[TREG_TP]. */
            abi_ulong sp;        /* Aliases gregs[TREG_SP]. */
            abi_ulong lr;        /* Aliases gregs[TREG_LR]. */
        };
    };
    abi_ulong pc;        /* Program counter. */
    abi_ulong ics;       /* In Interrupt Critical Section? */
    abi_ulong faultnum;  /* Fault number. */
    abi_ulong pad[5];
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

struct target_rt_sigframe {
    unsigned char save_area[16];   /* caller save area */
    struct target_siginfo info;
    struct target_ucontext uc;
    abi_ulong retcode[2];
};

#define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
#define INSN_SWINT1         0x286b180051485000ULL /* { swint1 } */

static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUArchState *env, int signo)
{
    int i;

    for (i = 0; i < TILEGX_R_COUNT; ++i) {
        __put_user(env->regs[i], &sc->gregs[i]);
    }

    __put_user(env->pc, &sc->pc);
    __put_user(0, &sc->ics);
    __put_user(signo, &sc->faultnum);
}

static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
{
    int i;

    for (i = 0; i < TILEGX_R_COUNT; ++i) {
        __get_user(env->regs[i], &sc->gregs[i]);
    }

    __get_user(env->pc, &sc->pc);
}

static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
                              size_t frame_size)
{
    unsigned long sp = env->regs[TILEGX_R_SP];

    if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
        return -1UL;
    }

    if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp -= frame_size;
    sp &= -16UL;
    return sp;
}

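/*
 * setup_rt_frame() below builds the TileGX signal frame defined above: a
 * 16-byte caller save area, the siginfo, the ucontext, and a two-bundle
 * return trampoline that is used when no SA_RESTORER was supplied.  Going
 * by the INSN_* constants and their comments, the trampoline is roughly:
 *
 *     { moveli r10, 139 }   # r10 = syscall number (rt_sigreturn)
 *     { swint1 }            # software interrupt: enter the syscall path
 */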
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    abi_ulong frame_addr;
    struct target_rt_sigframe *frame;
    unsigned long restorer;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Always write at least the signal number for the stack backtracer. */
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        /* At sigreturn time, restore the callee-save registers too. */
        tswap_siginfo(&frame->info, info);
        /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
    } else {
        __put_user(info->si_signo, &frame->info.si_signo);
    }

    /* Create the ucontext. */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        restorer = (unsigned long) ka->sa_restorer;
    } else {
        __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
        __put_user(INSN_SWINT1, &frame->retcode[1]);
        restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    }
    env->pc = (unsigned long) ka->_sa_handler;
    env->regs[TILEGX_R_SP] = (unsigned long) frame;
    env->regs[TILEGX_R_LR] = restorer;
    env->regs[0] = (unsigned long) sig;
    env->regs[1] = (unsigned long) &frame->info;
    env->regs[2] = (unsigned long) &frame->uc;
    /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_rt_sigreturn(CPUTLGState *env)
{
    abi_ulong frame_addr = env->regs[TILEGX_R_SP];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->regs[TILEGX_R_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#else

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}

long do_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

long do_rt_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

#endif

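/*
 * Deliver one guest signal: dequeue it, give gdb a chance to intercept it,
 * then either apply the default action (stop, ignore, or dump core), ignore
 * it, or build the target-specific signal frame via setup_frame() /
 * setup_rt_frame() and update the blocked-signal mask for the duration of
 * the handler.
 */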
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job
           control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

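/*
 * Top-level guest signal scan.  While signal_pending is set, all host
 * signals are blocked and the pending tables are walked: the forced
 * synchronous signal (see the comment below) is serviced first, then every
 * queued asynchronous signal that the guest has not blocked; the scan
 * restarts whenever delivery raises a new synchronous signal.  Before
 * rechecking, signals are unblocked again, except that SIGSEGV and SIGBUS
 * always stay unblocked on the host so that QEMU keeps catching guest
 * faults.
 */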
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU
             * correspond to force_sig_info() calls in Linux (some are
             * send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask,
                            target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal
                 * (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}