1 /* 2 * Emulation of Linux signals 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/bitops.h" 21 #include <sys/ucontext.h> 22 #include <sys/resource.h> 23 24 #include "qemu.h" 25 #include "qemu-common.h" 26 #include "target_signal.h" 27 #include "trace.h" 28 29 static struct target_sigaltstack target_sigaltstack_used = { 30 .ss_sp = 0, 31 .ss_size = 0, 32 .ss_flags = TARGET_SS_DISABLE, 33 }; 34 35 static struct target_sigaction sigact_table[TARGET_NSIG]; 36 37 static void host_signal_handler(int host_signum, siginfo_t *info, 38 void *puc); 39 40 static uint8_t host_to_target_signal_table[_NSIG] = { 41 [SIGHUP] = TARGET_SIGHUP, 42 [SIGINT] = TARGET_SIGINT, 43 [SIGQUIT] = TARGET_SIGQUIT, 44 [SIGILL] = TARGET_SIGILL, 45 [SIGTRAP] = TARGET_SIGTRAP, 46 [SIGABRT] = TARGET_SIGABRT, 47 /* [SIGIOT] = TARGET_SIGIOT,*/ 48 [SIGBUS] = TARGET_SIGBUS, 49 [SIGFPE] = TARGET_SIGFPE, 50 [SIGKILL] = TARGET_SIGKILL, 51 [SIGUSR1] = TARGET_SIGUSR1, 52 [SIGSEGV] = TARGET_SIGSEGV, 53 [SIGUSR2] = TARGET_SIGUSR2, 54 [SIGPIPE] = TARGET_SIGPIPE, 55 [SIGALRM] = TARGET_SIGALRM, 56 [SIGTERM] = TARGET_SIGTERM, 57 #ifdef SIGSTKFLT 58 [SIGSTKFLT] = TARGET_SIGSTKFLT, 59 #endif 60 [SIGCHLD] = TARGET_SIGCHLD, 61 [SIGCONT] = TARGET_SIGCONT, 62 [SIGSTOP] = TARGET_SIGSTOP, 63 [SIGTSTP] = TARGET_SIGTSTP, 64 [SIGTTIN] = TARGET_SIGTTIN, 65 [SIGTTOU] = TARGET_SIGTTOU, 66 [SIGURG] = TARGET_SIGURG, 67 [SIGXCPU] = TARGET_SIGXCPU, 68 [SIGXFSZ] = TARGET_SIGXFSZ, 69 [SIGVTALRM] = TARGET_SIGVTALRM, 70 [SIGPROF] = TARGET_SIGPROF, 71 [SIGWINCH] = TARGET_SIGWINCH, 72 [SIGIO] = TARGET_SIGIO, 73 [SIGPWR] = TARGET_SIGPWR, 74 [SIGSYS] = TARGET_SIGSYS, 75 /* next signals stay the same */ 76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with 77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/ 78 To fix this properly we need to do manual signal delivery multiplexed 79 over a single host signal. */ 80 [__SIGRTMIN] = __SIGRTMAX, 81 [__SIGRTMAX] = __SIGRTMIN, 82 }; 83 static uint8_t target_to_host_signal_table[_NSIG]; 84 85 static inline int on_sig_stack(unsigned long sp) 86 { 87 return (sp - target_sigaltstack_used.ss_sp 88 < target_sigaltstack_used.ss_size); 89 } 90 91 static inline int sas_ss_flags(unsigned long sp) 92 { 93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE 94 : on_sig_stack(sp) ? 
SS_ONSTACK : 0); 95 } 96 97 int host_to_target_signal(int sig) 98 { 99 if (sig < 0 || sig >= _NSIG) 100 return sig; 101 return host_to_target_signal_table[sig]; 102 } 103 104 int target_to_host_signal(int sig) 105 { 106 if (sig < 0 || sig >= _NSIG) 107 return sig; 108 return target_to_host_signal_table[sig]; 109 } 110 111 static inline void target_sigemptyset(target_sigset_t *set) 112 { 113 memset(set, 0, sizeof(*set)); 114 } 115 116 static inline void target_sigaddset(target_sigset_t *set, int signum) 117 { 118 signum--; 119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 120 set->sig[signum / TARGET_NSIG_BPW] |= mask; 121 } 122 123 static inline int target_sigismember(const target_sigset_t *set, int signum) 124 { 125 signum--; 126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0); 128 } 129 130 static void host_to_target_sigset_internal(target_sigset_t *d, 131 const sigset_t *s) 132 { 133 int i; 134 target_sigemptyset(d); 135 for (i = 1; i <= TARGET_NSIG; i++) { 136 if (sigismember(s, i)) { 137 target_sigaddset(d, host_to_target_signal(i)); 138 } 139 } 140 } 141 142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) 143 { 144 target_sigset_t d1; 145 int i; 146 147 host_to_target_sigset_internal(&d1, s); 148 for(i = 0;i < TARGET_NSIG_WORDS; i++) 149 d->sig[i] = tswapal(d1.sig[i]); 150 } 151 152 static void target_to_host_sigset_internal(sigset_t *d, 153 const target_sigset_t *s) 154 { 155 int i; 156 sigemptyset(d); 157 for (i = 1; i <= TARGET_NSIG; i++) { 158 if (target_sigismember(s, i)) { 159 sigaddset(d, target_to_host_signal(i)); 160 } 161 } 162 } 163 164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) 165 { 166 target_sigset_t s1; 167 int i; 168 169 for(i = 0;i < TARGET_NSIG_WORDS; i++) 170 s1.sig[i] = tswapal(s->sig[i]); 171 target_to_host_sigset_internal(d, &s1); 172 } 173 174 void host_to_target_old_sigset(abi_ulong *old_sigset, 175 const sigset_t *sigset) 176 { 177 target_sigset_t d; 178 host_to_target_sigset(&d, sigset); 179 *old_sigset = d.sig[0]; 180 } 181 182 void target_to_host_old_sigset(sigset_t *sigset, 183 const abi_ulong *old_sigset) 184 { 185 target_sigset_t d; 186 int i; 187 188 d.sig[0] = *old_sigset; 189 for(i = 1;i < TARGET_NSIG_WORDS; i++) 190 d.sig[i] = 0; 191 target_to_host_sigset(sigset, &d); 192 } 193 194 int block_signals(void) 195 { 196 TaskState *ts = (TaskState *)thread_cpu->opaque; 197 sigset_t set; 198 199 /* It's OK to block everything including SIGSEGV, because we won't 200 * run any further guest code before unblocking signals in 201 * process_pending_signals(). 202 */ 203 sigfillset(&set); 204 sigprocmask(SIG_SETMASK, &set, 0); 205 206 return atomic_xchg(&ts->signal_pending, 1); 207 } 208 209 /* Wrapper for sigprocmask function 210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset 211 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if 212 * a signal was already pending and the syscall must be restarted, or 213 * 0 on success. 214 * If set is NULL, this is guaranteed not to fail. 
215 */ 216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) 217 { 218 TaskState *ts = (TaskState *)thread_cpu->opaque; 219 220 if (oldset) { 221 *oldset = ts->signal_mask; 222 } 223 224 if (set) { 225 int i; 226 227 if (block_signals()) { 228 return -TARGET_ERESTARTSYS; 229 } 230 231 switch (how) { 232 case SIG_BLOCK: 233 sigorset(&ts->signal_mask, &ts->signal_mask, set); 234 break; 235 case SIG_UNBLOCK: 236 for (i = 1; i <= NSIG; ++i) { 237 if (sigismember(set, i)) { 238 sigdelset(&ts->signal_mask, i); 239 } 240 } 241 break; 242 case SIG_SETMASK: 243 ts->signal_mask = *set; 244 break; 245 default: 246 g_assert_not_reached(); 247 } 248 249 /* Silently ignore attempts to change blocking status of KILL or STOP */ 250 sigdelset(&ts->signal_mask, SIGKILL); 251 sigdelset(&ts->signal_mask, SIGSTOP); 252 } 253 return 0; 254 } 255 256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \ 257 !defined(TARGET_X86_64) 258 /* Just set the guest's signal mask to the specified value; the 259 * caller is assumed to have called block_signals() already. 260 */ 261 static void set_sigmask(const sigset_t *set) 262 { 263 TaskState *ts = (TaskState *)thread_cpu->opaque; 264 265 ts->signal_mask = *set; 266 } 267 #endif 268 269 /* siginfo conversion */ 270 271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, 272 const siginfo_t *info) 273 { 274 int sig = host_to_target_signal(info->si_signo); 275 int si_code = info->si_code; 276 int si_type; 277 tinfo->si_signo = sig; 278 tinfo->si_errno = 0; 279 tinfo->si_code = info->si_code; 280 281 /* This memset serves two purposes: 282 * (1) ensure we don't leak random junk to the guest later 283 * (2) placate false positives from gcc about fields 284 * being used uninitialized if it chooses to inline both this 285 * function and tswap_siginfo() into host_to_target_siginfo(). 286 */ 287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad)); 288 289 /* This is awkward, because we have to use a combination of 290 * the si_code and si_signo to figure out which of the union's 291 * members are valid. (Within the host kernel it is always possible 292 * to tell, but the kernel carefully avoids giving userspace the 293 * high 16 bits of si_code, so we don't have the information to 294 * do this the easy way...) We therefore make our best guess, 295 * bearing in mind that a guest can spoof most of the si_codes 296 * via rt_sigqueueinfo() if it likes. 297 * 298 * Once we have made our guess, we record it in the top 16 bits of 299 * the si_code, so that tswap_siginfo() later can use it. 300 * tswap_siginfo() will strip these top bits out before writing 301 * si_code to the guest (sign-extending the lower bits). 302 */ 303 304 switch (si_code) { 305 case SI_USER: 306 case SI_TKILL: 307 case SI_KERNEL: 308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel. 309 * These are the only unspoofable si_code values. 310 */ 311 tinfo->_sifields._kill._pid = info->si_pid; 312 tinfo->_sifields._kill._uid = info->si_uid; 313 si_type = QEMU_SI_KILL; 314 break; 315 default: 316 /* Everything else is spoofable. 
Make best guess based on signal */ 317 switch (sig) { 318 case TARGET_SIGCHLD: 319 tinfo->_sifields._sigchld._pid = info->si_pid; 320 tinfo->_sifields._sigchld._uid = info->si_uid; 321 tinfo->_sifields._sigchld._status 322 = host_to_target_waitstatus(info->si_status); 323 tinfo->_sifields._sigchld._utime = info->si_utime; 324 tinfo->_sifields._sigchld._stime = info->si_stime; 325 si_type = QEMU_SI_CHLD; 326 break; 327 case TARGET_SIGIO: 328 tinfo->_sifields._sigpoll._band = info->si_band; 329 tinfo->_sifields._sigpoll._fd = info->si_fd; 330 si_type = QEMU_SI_POLL; 331 break; 332 default: 333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */ 334 tinfo->_sifields._rt._pid = info->si_pid; 335 tinfo->_sifields._rt._uid = info->si_uid; 336 /* XXX: potential problem if 64 bit */ 337 tinfo->_sifields._rt._sigval.sival_ptr 338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr; 339 si_type = QEMU_SI_RT; 340 break; 341 } 342 break; 343 } 344 345 tinfo->si_code = deposit32(si_code, 16, 16, si_type); 346 } 347 348 static void tswap_siginfo(target_siginfo_t *tinfo, 349 const target_siginfo_t *info) 350 { 351 int si_type = extract32(info->si_code, 16, 16); 352 int si_code = sextract32(info->si_code, 0, 16); 353 354 __put_user(info->si_signo, &tinfo->si_signo); 355 __put_user(info->si_errno, &tinfo->si_errno); 356 __put_user(si_code, &tinfo->si_code); 357 358 /* We can use our internal marker of which fields in the structure 359 * are valid, rather than duplicating the guesswork of 360 * host_to_target_siginfo_noswap() here. 361 */ 362 switch (si_type) { 363 case QEMU_SI_KILL: 364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid); 365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid); 366 break; 367 case QEMU_SI_TIMER: 368 __put_user(info->_sifields._timer._timer1, 369 &tinfo->_sifields._timer._timer1); 370 __put_user(info->_sifields._timer._timer2, 371 &tinfo->_sifields._timer._timer2); 372 break; 373 case QEMU_SI_POLL: 374 __put_user(info->_sifields._sigpoll._band, 375 &tinfo->_sifields._sigpoll._band); 376 __put_user(info->_sifields._sigpoll._fd, 377 &tinfo->_sifields._sigpoll._fd); 378 break; 379 case QEMU_SI_FAULT: 380 __put_user(info->_sifields._sigfault._addr, 381 &tinfo->_sifields._sigfault._addr); 382 break; 383 case QEMU_SI_CHLD: 384 __put_user(info->_sifields._sigchld._pid, 385 &tinfo->_sifields._sigchld._pid); 386 __put_user(info->_sifields._sigchld._uid, 387 &tinfo->_sifields._sigchld._uid); 388 __put_user(info->_sifields._sigchld._status, 389 &tinfo->_sifields._sigchld._status); 390 __put_user(info->_sifields._sigchld._utime, 391 &tinfo->_sifields._sigchld._utime); 392 __put_user(info->_sifields._sigchld._stime, 393 &tinfo->_sifields._sigchld._stime); 394 break; 395 case QEMU_SI_RT: 396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid); 397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid); 398 __put_user(info->_sifields._rt._sigval.sival_ptr, 399 &tinfo->_sifields._rt._sigval.sival_ptr); 400 break; 401 default: 402 g_assert_not_reached(); 403 } 404 } 405 406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) 407 { 408 target_siginfo_t tgt_tmp; 409 host_to_target_siginfo_noswap(&tgt_tmp, info); 410 tswap_siginfo(tinfo, &tgt_tmp); 411 } 412 413 /* XXX: we support only POSIX RT signals are used. 
*/ 414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */ 415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo) 416 { 417 /* This conversion is used only for the rt_sigqueueinfo syscall, 418 * and so we know that the _rt fields are the valid ones. 419 */ 420 abi_ulong sival_ptr; 421 422 __get_user(info->si_signo, &tinfo->si_signo); 423 __get_user(info->si_errno, &tinfo->si_errno); 424 __get_user(info->si_code, &tinfo->si_code); 425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid); 426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid); 427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr); 428 info->si_value.sival_ptr = (void *)(long)sival_ptr; 429 } 430 431 static int fatal_signal (int sig) 432 { 433 switch (sig) { 434 case TARGET_SIGCHLD: 435 case TARGET_SIGURG: 436 case TARGET_SIGWINCH: 437 /* Ignored by default. */ 438 return 0; 439 case TARGET_SIGCONT: 440 case TARGET_SIGSTOP: 441 case TARGET_SIGTSTP: 442 case TARGET_SIGTTIN: 443 case TARGET_SIGTTOU: 444 /* Job control signals. */ 445 return 0; 446 default: 447 return 1; 448 } 449 } 450 451 /* returns 1 if given signal should dump core if not handled */ 452 static int core_dump_signal(int sig) 453 { 454 switch (sig) { 455 case TARGET_SIGABRT: 456 case TARGET_SIGFPE: 457 case TARGET_SIGILL: 458 case TARGET_SIGQUIT: 459 case TARGET_SIGSEGV: 460 case TARGET_SIGTRAP: 461 case TARGET_SIGBUS: 462 return (1); 463 default: 464 return (0); 465 } 466 } 467 468 void signal_init(void) 469 { 470 TaskState *ts = (TaskState *)thread_cpu->opaque; 471 struct sigaction act; 472 struct sigaction oact; 473 int i, j; 474 int host_sig; 475 476 /* generate signal conversion tables */ 477 for(i = 1; i < _NSIG; i++) { 478 if (host_to_target_signal_table[i] == 0) 479 host_to_target_signal_table[i] = i; 480 } 481 for(i = 1; i < _NSIG; i++) { 482 j = host_to_target_signal_table[i]; 483 target_to_host_signal_table[j] = i; 484 } 485 486 /* Set the signal mask from the host mask. */ 487 sigprocmask(0, 0, &ts->signal_mask); 488 489 /* set all host signal handlers. ALL signals are blocked during 490 the handlers to serialize them. */ 491 memset(sigact_table, 0, sizeof(sigact_table)); 492 493 sigfillset(&act.sa_mask); 494 act.sa_flags = SA_SIGINFO; 495 act.sa_sigaction = host_signal_handler; 496 for(i = 1; i <= TARGET_NSIG; i++) { 497 host_sig = target_to_host_signal(i); 498 sigaction(host_sig, NULL, &oact); 499 if (oact.sa_sigaction == (void *)SIG_IGN) { 500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; 501 } else if (oact.sa_sigaction == (void *)SIG_DFL) { 502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL; 503 } 504 /* If there's already a handler installed then something has 505 gone horribly wrong, so don't even try to handle that case. */ 506 /* Install some handlers for our own use. We need at least 507 SIGSEGV and SIGBUS, to detect exceptions. We can not just 508 trap all signals because it affects syscall interrupt 509 behavior. But do trap all default-fatal signals. */ 510 if (fatal_signal (i)) 511 sigaction(host_sig, &act, NULL); 512 } 513 } 514 515 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \ 516 !defined(TARGET_X86_64) 517 /* Force a synchronously taken signal. The kernel force_sig() function 518 * also forces the signal to "not blocked, not ignored", but for QEMU 519 * that work is done in process_pending_signals(). 
520 */ 521 static void force_sig(int sig) 522 { 523 CPUState *cpu = thread_cpu; 524 CPUArchState *env = cpu->env_ptr; 525 target_siginfo_t info; 526 527 info.si_signo = sig; 528 info.si_errno = 0; 529 info.si_code = TARGET_SI_KERNEL; 530 info._sifields._kill._pid = 0; 531 info._sifields._kill._uid = 0; 532 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); 533 } 534 #endif 535 536 #if !(defined(TARGET_X86_64) || defined(TARGET_UNICORE32)) 537 538 /* Force a SIGSEGV if we couldn't write to memory trying to set 539 * up the signal frame. oldsig is the signal we were trying to handle 540 * at the point of failure. 541 */ 542 static void force_sigsegv(int oldsig) 543 { 544 CPUState *cpu = thread_cpu; 545 CPUArchState *env = cpu->env_ptr; 546 target_siginfo_t info; 547 548 if (oldsig == SIGSEGV) { 549 /* Make sure we don't try to deliver the signal again; this will 550 * end up with handle_pending_signal() calling dump_core_and_abort(). 551 */ 552 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL; 553 } 554 info.si_signo = TARGET_SIGSEGV; 555 info.si_errno = 0; 556 info.si_code = TARGET_SI_KERNEL; 557 info._sifields._kill._pid = 0; 558 info._sifields._kill._uid = 0; 559 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); 560 } 561 #endif 562 563 /* abort execution with signal */ 564 static void QEMU_NORETURN dump_core_and_abort(int target_sig) 565 { 566 CPUState *cpu = thread_cpu; 567 CPUArchState *env = cpu->env_ptr; 568 TaskState *ts = (TaskState *)cpu->opaque; 569 int host_sig, core_dumped = 0; 570 struct sigaction act; 571 572 host_sig = target_to_host_signal(target_sig); 573 trace_user_force_sig(env, target_sig, host_sig); 574 gdb_signalled(env, target_sig); 575 576 /* dump core if supported by target binary format */ 577 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) { 578 stop_all_tasks(); 579 core_dumped = 580 ((*ts->bprm->core_dump)(target_sig, env) == 0); 581 } 582 if (core_dumped) { 583 /* we already dumped the core of target process, we don't want 584 * a coredump of qemu itself */ 585 struct rlimit nodump; 586 getrlimit(RLIMIT_CORE, &nodump); 587 nodump.rlim_cur=0; 588 setrlimit(RLIMIT_CORE, &nodump); 589 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n", 590 target_sig, strsignal(host_sig), "core dumped" ); 591 } 592 593 /* The proper exit code for dying from an uncaught signal is 594 * -<signal>. The kernel doesn't allow exit() or _exit() to pass 595 * a negative value. To get the proper exit code we need to 596 * actually die from an uncaught signal. Here the default signal 597 * handler is installed, we send ourself a signal and we wait for 598 * it to arrive. */ 599 sigfillset(&act.sa_mask); 600 act.sa_handler = SIG_DFL; 601 act.sa_flags = 0; 602 sigaction(host_sig, &act, NULL); 603 604 /* For some reason raise(host_sig) doesn't send the signal when 605 * statically linked on x86-64. 
*/ 606 kill(getpid(), host_sig); 607 608 /* Make sure the signal isn't masked (just reuse the mask inside 609 of act) */ 610 sigdelset(&act.sa_mask, host_sig); 611 sigsuspend(&act.sa_mask); 612 613 /* unreachable */ 614 abort(); 615 } 616 617 /* queue a signal so that it will be send to the virtual CPU as soon 618 as possible */ 619 int queue_signal(CPUArchState *env, int sig, int si_type, 620 target_siginfo_t *info) 621 { 622 CPUState *cpu = ENV_GET_CPU(env); 623 TaskState *ts = cpu->opaque; 624 625 trace_user_queue_signal(env, sig); 626 627 info->si_code = deposit32(info->si_code, 16, 16, si_type); 628 629 ts->sync_signal.info = *info; 630 ts->sync_signal.pending = sig; 631 /* signal that a new signal is pending */ 632 atomic_set(&ts->signal_pending, 1); 633 return 1; /* indicates that the signal was queued */ 634 } 635 636 #ifndef HAVE_SAFE_SYSCALL 637 static inline void rewind_if_in_safe_syscall(void *puc) 638 { 639 /* Default version: never rewind */ 640 } 641 #endif 642 643 static void host_signal_handler(int host_signum, siginfo_t *info, 644 void *puc) 645 { 646 CPUArchState *env = thread_cpu->env_ptr; 647 CPUState *cpu = ENV_GET_CPU(env); 648 TaskState *ts = cpu->opaque; 649 650 int sig; 651 target_siginfo_t tinfo; 652 ucontext_t *uc = puc; 653 struct emulated_sigtable *k; 654 655 /* the CPU emulator uses some host signals to detect exceptions, 656 we forward to it some signals */ 657 if ((host_signum == SIGSEGV || host_signum == SIGBUS) 658 && info->si_code > 0) { 659 if (cpu_signal_handler(host_signum, info, puc)) 660 return; 661 } 662 663 /* get target signal number */ 664 sig = host_to_target_signal(host_signum); 665 if (sig < 1 || sig > TARGET_NSIG) 666 return; 667 trace_user_host_signal(env, host_signum, sig); 668 669 rewind_if_in_safe_syscall(puc); 670 671 host_to_target_siginfo_noswap(&tinfo, info); 672 k = &ts->sigtab[sig - 1]; 673 k->info = tinfo; 674 k->pending = sig; 675 ts->signal_pending = 1; 676 677 /* Block host signals until target signal handler entered. We 678 * can't block SIGSEGV or SIGBUS while we're executing guest 679 * code in case the guest code provokes one in the window between 680 * now and it getting out to the main loop. Signals will be 681 * unblocked again in process_pending_signals(). 682 * 683 * WARNING: we cannot use sigfillset() here because the uc_sigmask 684 * field is a kernel sigset_t, which is much smaller than the 685 * libc sigset_t which sigfillset() operates on. Using sigfillset() 686 * would write 0xff bytes off the end of the structure and trash 687 * data on the struct. 688 * We can't use sizeof(uc->uc_sigmask) either, because the libc 689 * headers define the struct field with the wrong (too large) type. 690 */ 691 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE); 692 sigdelset(&uc->uc_sigmask, SIGSEGV); 693 sigdelset(&uc->uc_sigmask, SIGBUS); 694 695 /* interrupt the virtual CPU as soon as possible */ 696 cpu_exit(thread_cpu); 697 } 698 699 /* do_sigaltstack() returns target values and errnos. 
*/ 700 /* compare linux/kernel/signal.c:do_sigaltstack() */ 701 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) 702 { 703 int ret; 704 struct target_sigaltstack oss; 705 706 /* XXX: test errors */ 707 if(uoss_addr) 708 { 709 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp); 710 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size); 711 __put_user(sas_ss_flags(sp), &oss.ss_flags); 712 } 713 714 if(uss_addr) 715 { 716 struct target_sigaltstack *uss; 717 struct target_sigaltstack ss; 718 size_t minstacksize = TARGET_MINSIGSTKSZ; 719 720 #if defined(TARGET_PPC64) 721 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */ 722 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 723 if (get_ppc64_abi(image) > 1) { 724 minstacksize = 4096; 725 } 726 #endif 727 728 ret = -TARGET_EFAULT; 729 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) { 730 goto out; 731 } 732 __get_user(ss.ss_sp, &uss->ss_sp); 733 __get_user(ss.ss_size, &uss->ss_size); 734 __get_user(ss.ss_flags, &uss->ss_flags); 735 unlock_user_struct(uss, uss_addr, 0); 736 737 ret = -TARGET_EPERM; 738 if (on_sig_stack(sp)) 739 goto out; 740 741 ret = -TARGET_EINVAL; 742 if (ss.ss_flags != TARGET_SS_DISABLE 743 && ss.ss_flags != TARGET_SS_ONSTACK 744 && ss.ss_flags != 0) 745 goto out; 746 747 if (ss.ss_flags == TARGET_SS_DISABLE) { 748 ss.ss_size = 0; 749 ss.ss_sp = 0; 750 } else { 751 ret = -TARGET_ENOMEM; 752 if (ss.ss_size < minstacksize) { 753 goto out; 754 } 755 } 756 757 target_sigaltstack_used.ss_sp = ss.ss_sp; 758 target_sigaltstack_used.ss_size = ss.ss_size; 759 } 760 761 if (uoss_addr) { 762 ret = -TARGET_EFAULT; 763 if (copy_to_user(uoss_addr, &oss, sizeof(oss))) 764 goto out; 765 } 766 767 ret = 0; 768 out: 769 return ret; 770 } 771 772 /* do_sigaction() return target values and host errnos */ 773 int do_sigaction(int sig, const struct target_sigaction *act, 774 struct target_sigaction *oact) 775 { 776 struct target_sigaction *k; 777 struct sigaction act1; 778 int host_sig; 779 int ret = 0; 780 781 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) { 782 return -TARGET_EINVAL; 783 } 784 785 if (block_signals()) { 786 return -TARGET_ERESTARTSYS; 787 } 788 789 k = &sigact_table[sig - 1]; 790 if (oact) { 791 __put_user(k->_sa_handler, &oact->_sa_handler); 792 __put_user(k->sa_flags, &oact->sa_flags); 793 #if !defined(TARGET_MIPS) 794 __put_user(k->sa_restorer, &oact->sa_restorer); 795 #endif 796 /* Not swapped. */ 797 oact->sa_mask = k->sa_mask; 798 } 799 if (act) { 800 /* FIXME: This is not threadsafe. */ 801 __get_user(k->_sa_handler, &act->_sa_handler); 802 __get_user(k->sa_flags, &act->sa_flags); 803 #if !defined(TARGET_MIPS) 804 __get_user(k->sa_restorer, &act->sa_restorer); 805 #endif 806 /* To be swapped in target_to_host_sigset. 
*/ 807 k->sa_mask = act->sa_mask; 808 809 /* we update the host linux signal state */ 810 host_sig = target_to_host_signal(sig); 811 if (host_sig != SIGSEGV && host_sig != SIGBUS) { 812 sigfillset(&act1.sa_mask); 813 act1.sa_flags = SA_SIGINFO; 814 if (k->sa_flags & TARGET_SA_RESTART) 815 act1.sa_flags |= SA_RESTART; 816 /* NOTE: it is important to update the host kernel signal 817 ignore state to avoid getting unexpected interrupted 818 syscalls */ 819 if (k->_sa_handler == TARGET_SIG_IGN) { 820 act1.sa_sigaction = (void *)SIG_IGN; 821 } else if (k->_sa_handler == TARGET_SIG_DFL) { 822 if (fatal_signal (sig)) 823 act1.sa_sigaction = host_signal_handler; 824 else 825 act1.sa_sigaction = (void *)SIG_DFL; 826 } else { 827 act1.sa_sigaction = host_signal_handler; 828 } 829 ret = sigaction(host_sig, &act1, NULL); 830 } 831 } 832 return ret; 833 } 834 835 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32 836 837 /* from the Linux kernel */ 838 839 struct target_fpreg { 840 uint16_t significand[4]; 841 uint16_t exponent; 842 }; 843 844 struct target_fpxreg { 845 uint16_t significand[4]; 846 uint16_t exponent; 847 uint16_t padding[3]; 848 }; 849 850 struct target_xmmreg { 851 abi_ulong element[4]; 852 }; 853 854 struct target_fpstate { 855 /* Regular FPU environment */ 856 abi_ulong cw; 857 abi_ulong sw; 858 abi_ulong tag; 859 abi_ulong ipoff; 860 abi_ulong cssel; 861 abi_ulong dataoff; 862 abi_ulong datasel; 863 struct target_fpreg _st[8]; 864 uint16_t status; 865 uint16_t magic; /* 0xffff = regular FPU data only */ 866 867 /* FXSR FPU environment */ 868 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */ 869 abi_ulong mxcsr; 870 abi_ulong reserved; 871 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 872 struct target_xmmreg _xmm[8]; 873 abi_ulong padding[56]; 874 }; 875 876 #define X86_FXSR_MAGIC 0x0000 877 878 struct target_sigcontext { 879 uint16_t gs, __gsh; 880 uint16_t fs, __fsh; 881 uint16_t es, __esh; 882 uint16_t ds, __dsh; 883 abi_ulong edi; 884 abi_ulong esi; 885 abi_ulong ebp; 886 abi_ulong esp; 887 abi_ulong ebx; 888 abi_ulong edx; 889 abi_ulong ecx; 890 abi_ulong eax; 891 abi_ulong trapno; 892 abi_ulong err; 893 abi_ulong eip; 894 uint16_t cs, __csh; 895 abi_ulong eflags; 896 abi_ulong esp_at_signal; 897 uint16_t ss, __ssh; 898 abi_ulong fpstate; /* pointer */ 899 abi_ulong oldmask; 900 abi_ulong cr2; 901 }; 902 903 struct target_ucontext { 904 abi_ulong tuc_flags; 905 abi_ulong tuc_link; 906 target_stack_t tuc_stack; 907 struct target_sigcontext tuc_mcontext; 908 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 909 }; 910 911 struct sigframe 912 { 913 abi_ulong pretcode; 914 int sig; 915 struct target_sigcontext sc; 916 struct target_fpstate fpstate; 917 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 918 char retcode[8]; 919 }; 920 921 struct rt_sigframe 922 { 923 abi_ulong pretcode; 924 int sig; 925 abi_ulong pinfo; 926 abi_ulong puc; 927 struct target_siginfo info; 928 struct target_ucontext uc; 929 struct target_fpstate fpstate; 930 char retcode[8]; 931 }; 932 933 /* 934 * Set up a signal frame. 
935 */ 936 937 /* XXX: save x87 state */ 938 static void setup_sigcontext(struct target_sigcontext *sc, 939 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, 940 abi_ulong fpstate_addr) 941 { 942 CPUState *cs = CPU(x86_env_get_cpu(env)); 943 uint16_t magic; 944 945 /* already locked in setup_frame() */ 946 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 947 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 948 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 949 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 950 __put_user(env->regs[R_EDI], &sc->edi); 951 __put_user(env->regs[R_ESI], &sc->esi); 952 __put_user(env->regs[R_EBP], &sc->ebp); 953 __put_user(env->regs[R_ESP], &sc->esp); 954 __put_user(env->regs[R_EBX], &sc->ebx); 955 __put_user(env->regs[R_EDX], &sc->edx); 956 __put_user(env->regs[R_ECX], &sc->ecx); 957 __put_user(env->regs[R_EAX], &sc->eax); 958 __put_user(cs->exception_index, &sc->trapno); 959 __put_user(env->error_code, &sc->err); 960 __put_user(env->eip, &sc->eip); 961 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 962 __put_user(env->eflags, &sc->eflags); 963 __put_user(env->regs[R_ESP], &sc->esp_at_signal); 964 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 965 966 cpu_x86_fsave(env, fpstate_addr, 1); 967 fpstate->status = fpstate->sw; 968 magic = 0xffff; 969 __put_user(magic, &fpstate->magic); 970 __put_user(fpstate_addr, &sc->fpstate); 971 972 /* non-iBCS2 extensions.. */ 973 __put_user(mask, &sc->oldmask); 974 __put_user(env->cr[2], &sc->cr2); 975 } 976 977 /* 978 * Determine which stack to use.. 979 */ 980 981 static inline abi_ulong 982 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) 983 { 984 unsigned long esp; 985 986 /* Default to using normal stack */ 987 esp = env->regs[R_ESP]; 988 /* This is the X/Open sanctioned signal stack switching. */ 989 if (ka->sa_flags & TARGET_SA_ONSTACK) { 990 if (sas_ss_flags(esp) == 0) { 991 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 992 } 993 } else { 994 995 /* This is the legacy signal stack switching. */ 996 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 997 !(ka->sa_flags & TARGET_SA_RESTORER) && 998 ka->sa_restorer) { 999 esp = (unsigned long) ka->sa_restorer; 1000 } 1001 } 1002 return (esp - frame_size) & -8ul; 1003 } 1004 1005 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 1006 static void setup_frame(int sig, struct target_sigaction *ka, 1007 target_sigset_t *set, CPUX86State *env) 1008 { 1009 abi_ulong frame_addr; 1010 struct sigframe *frame; 1011 int i; 1012 1013 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1014 trace_user_setup_frame(env, frame_addr); 1015 1016 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1017 goto give_sigsegv; 1018 1019 __put_user(sig, &frame->sig); 1020 1021 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 1022 frame_addr + offsetof(struct sigframe, fpstate)); 1023 1024 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1025 __put_user(set->sig[i], &frame->extramask[i - 1]); 1026 } 1027 1028 /* Set up to return from userspace. If provided, use a stub 1029 already in userspace. 
*/ 1030 if (ka->sa_flags & TARGET_SA_RESTORER) { 1031 __put_user(ka->sa_restorer, &frame->pretcode); 1032 } else { 1033 uint16_t val16; 1034 abi_ulong retcode_addr; 1035 retcode_addr = frame_addr + offsetof(struct sigframe, retcode); 1036 __put_user(retcode_addr, &frame->pretcode); 1037 /* This is popl %eax ; movl $,%eax ; int $0x80 */ 1038 val16 = 0xb858; 1039 __put_user(val16, (uint16_t *)(frame->retcode+0)); 1040 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); 1041 val16 = 0x80cd; 1042 __put_user(val16, (uint16_t *)(frame->retcode+6)); 1043 } 1044 1045 1046 /* Set up registers for signal handler */ 1047 env->regs[R_ESP] = frame_addr; 1048 env->eip = ka->_sa_handler; 1049 1050 cpu_x86_load_seg(env, R_DS, __USER_DS); 1051 cpu_x86_load_seg(env, R_ES, __USER_DS); 1052 cpu_x86_load_seg(env, R_SS, __USER_DS); 1053 cpu_x86_load_seg(env, R_CS, __USER_CS); 1054 env->eflags &= ~TF_MASK; 1055 1056 unlock_user_struct(frame, frame_addr, 1); 1057 1058 return; 1059 1060 give_sigsegv: 1061 force_sigsegv(sig); 1062 } 1063 1064 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */ 1065 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1066 target_siginfo_t *info, 1067 target_sigset_t *set, CPUX86State *env) 1068 { 1069 abi_ulong frame_addr, addr; 1070 struct rt_sigframe *frame; 1071 int i; 1072 1073 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1074 trace_user_setup_rt_frame(env, frame_addr); 1075 1076 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1077 goto give_sigsegv; 1078 1079 __put_user(sig, &frame->sig); 1080 addr = frame_addr + offsetof(struct rt_sigframe, info); 1081 __put_user(addr, &frame->pinfo); 1082 addr = frame_addr + offsetof(struct rt_sigframe, uc); 1083 __put_user(addr, &frame->puc); 1084 tswap_siginfo(&frame->info, info); 1085 1086 /* Create the ucontext. */ 1087 __put_user(0, &frame->uc.tuc_flags); 1088 __put_user(0, &frame->uc.tuc_link); 1089 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 1090 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 1091 &frame->uc.tuc_stack.ss_flags); 1092 __put_user(target_sigaltstack_used.ss_size, 1093 &frame->uc.tuc_stack.ss_size); 1094 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env, 1095 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate)); 1096 1097 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1098 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1099 } 1100 1101 /* Set up to return from userspace. If provided, use a stub 1102 already in userspace. 
*/ 1103 if (ka->sa_flags & TARGET_SA_RESTORER) { 1104 __put_user(ka->sa_restorer, &frame->pretcode); 1105 } else { 1106 uint16_t val16; 1107 addr = frame_addr + offsetof(struct rt_sigframe, retcode); 1108 __put_user(addr, &frame->pretcode); 1109 /* This is movl $,%eax ; int $0x80 */ 1110 __put_user(0xb8, (char *)(frame->retcode+0)); 1111 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); 1112 val16 = 0x80cd; 1113 __put_user(val16, (uint16_t *)(frame->retcode+5)); 1114 } 1115 1116 /* Set up registers for signal handler */ 1117 env->regs[R_ESP] = frame_addr; 1118 env->eip = ka->_sa_handler; 1119 1120 cpu_x86_load_seg(env, R_DS, __USER_DS); 1121 cpu_x86_load_seg(env, R_ES, __USER_DS); 1122 cpu_x86_load_seg(env, R_SS, __USER_DS); 1123 cpu_x86_load_seg(env, R_CS, __USER_CS); 1124 env->eflags &= ~TF_MASK; 1125 1126 unlock_user_struct(frame, frame_addr, 1); 1127 1128 return; 1129 1130 give_sigsegv: 1131 force_sigsegv(sig); 1132 } 1133 1134 static int 1135 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) 1136 { 1137 unsigned int err = 0; 1138 abi_ulong fpstate_addr; 1139 unsigned int tmpflags; 1140 1141 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); 1142 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); 1143 cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); 1144 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds)); 1145 1146 env->regs[R_EDI] = tswapl(sc->edi); 1147 env->regs[R_ESI] = tswapl(sc->esi); 1148 env->regs[R_EBP] = tswapl(sc->ebp); 1149 env->regs[R_ESP] = tswapl(sc->esp); 1150 env->regs[R_EBX] = tswapl(sc->ebx); 1151 env->regs[R_EDX] = tswapl(sc->edx); 1152 env->regs[R_ECX] = tswapl(sc->ecx); 1153 env->regs[R_EAX] = tswapl(sc->eax); 1154 env->eip = tswapl(sc->eip); 1155 1156 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); 1157 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); 1158 1159 tmpflags = tswapl(sc->eflags); 1160 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 1161 // regs->orig_eax = -1; /* disable syscall checks */ 1162 1163 fpstate_addr = tswapl(sc->fpstate); 1164 if (fpstate_addr != 0) { 1165 if (!access_ok(VERIFY_READ, fpstate_addr, 1166 sizeof(struct target_fpstate))) 1167 goto badframe; 1168 cpu_x86_frstor(env, fpstate_addr, 1); 1169 } 1170 1171 return err; 1172 badframe: 1173 return 1; 1174 } 1175 1176 long do_sigreturn(CPUX86State *env) 1177 { 1178 struct sigframe *frame; 1179 abi_ulong frame_addr = env->regs[R_ESP] - 8; 1180 target_sigset_t target_set; 1181 sigset_t set; 1182 int i; 1183 1184 trace_user_do_sigreturn(env, frame_addr); 1185 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1186 goto badframe; 1187 /* set blocked signals */ 1188 __get_user(target_set.sig[0], &frame->sc.oldmask); 1189 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1190 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 1191 } 1192 1193 target_to_host_sigset_internal(&set, &target_set); 1194 set_sigmask(&set); 1195 1196 /* restore registers */ 1197 if (restore_sigcontext(env, &frame->sc)) 1198 goto badframe; 1199 unlock_user_struct(frame, frame_addr, 0); 1200 return -TARGET_QEMU_ESIGRETURN; 1201 1202 badframe: 1203 unlock_user_struct(frame, frame_addr, 0); 1204 force_sig(TARGET_SIGSEGV); 1205 return -TARGET_QEMU_ESIGRETURN; 1206 } 1207 1208 long do_rt_sigreturn(CPUX86State *env) 1209 { 1210 abi_ulong frame_addr; 1211 struct rt_sigframe *frame; 1212 sigset_t set; 1213 1214 frame_addr = env->regs[R_ESP] - 4; 1215 trace_user_do_rt_sigreturn(env, frame_addr); 1216 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1217 goto badframe; 1218 
target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 1219 set_sigmask(&set); 1220 1221 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 1222 goto badframe; 1223 } 1224 1225 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, 1226 get_sp_from_cpustate(env)) == -EFAULT) { 1227 goto badframe; 1228 } 1229 1230 unlock_user_struct(frame, frame_addr, 0); 1231 return -TARGET_QEMU_ESIGRETURN; 1232 1233 badframe: 1234 unlock_user_struct(frame, frame_addr, 0); 1235 force_sig(TARGET_SIGSEGV); 1236 return -TARGET_QEMU_ESIGRETURN; 1237 } 1238 1239 #elif defined(TARGET_AARCH64) 1240 1241 struct target_sigcontext { 1242 uint64_t fault_address; 1243 /* AArch64 registers */ 1244 uint64_t regs[31]; 1245 uint64_t sp; 1246 uint64_t pc; 1247 uint64_t pstate; 1248 /* 4K reserved for FP/SIMD state and future expansion */ 1249 char __reserved[4096] __attribute__((__aligned__(16))); 1250 }; 1251 1252 struct target_ucontext { 1253 abi_ulong tuc_flags; 1254 abi_ulong tuc_link; 1255 target_stack_t tuc_stack; 1256 target_sigset_t tuc_sigmask; 1257 /* glibc uses a 1024-bit sigset_t */ 1258 char __unused[1024 / 8 - sizeof(target_sigset_t)]; 1259 /* last for future expansion */ 1260 struct target_sigcontext tuc_mcontext; 1261 }; 1262 1263 /* 1264 * Header to be used at the beginning of structures extending the user 1265 * context. Such structures must be placed after the rt_sigframe on the stack 1266 * and be 16-byte aligned. The last structure must be a dummy one with the 1267 * magic and size set to 0. 1268 */ 1269 struct target_aarch64_ctx { 1270 uint32_t magic; 1271 uint32_t size; 1272 }; 1273 1274 #define TARGET_FPSIMD_MAGIC 0x46508001 1275 1276 struct target_fpsimd_context { 1277 struct target_aarch64_ctx head; 1278 uint32_t fpsr; 1279 uint32_t fpcr; 1280 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */ 1281 }; 1282 1283 /* 1284 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to 1285 * user space as it will change with the addition of new context. User space 1286 * should check the magic/size information. 
1287 */ 1288 struct target_aux_context { 1289 struct target_fpsimd_context fpsimd; 1290 /* additional context to be added before "end" */ 1291 struct target_aarch64_ctx end; 1292 }; 1293 1294 struct target_rt_sigframe { 1295 struct target_siginfo info; 1296 struct target_ucontext uc; 1297 uint64_t fp; 1298 uint64_t lr; 1299 uint32_t tramp[2]; 1300 }; 1301 1302 static int target_setup_sigframe(struct target_rt_sigframe *sf, 1303 CPUARMState *env, target_sigset_t *set) 1304 { 1305 int i; 1306 struct target_aux_context *aux = 1307 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1308 1309 /* set up the stack frame for unwinding */ 1310 __put_user(env->xregs[29], &sf->fp); 1311 __put_user(env->xregs[30], &sf->lr); 1312 1313 for (i = 0; i < 31; i++) { 1314 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1315 } 1316 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1317 __put_user(env->pc, &sf->uc.tuc_mcontext.pc); 1318 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate); 1319 1320 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address); 1321 1322 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 1323 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]); 1324 } 1325 1326 for (i = 0; i < 32; i++) { 1327 #ifdef TARGET_WORDS_BIGENDIAN 1328 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1329 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1330 #else 1331 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1332 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1333 #endif 1334 } 1335 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr); 1336 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr); 1337 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic); 1338 __put_user(sizeof(struct target_fpsimd_context), 1339 &aux->fpsimd.head.size); 1340 1341 /* set the "end" magic */ 1342 __put_user(0, &aux->end.magic); 1343 __put_user(0, &aux->end.size); 1344 1345 return 0; 1346 } 1347 1348 static int target_restore_sigframe(CPUARMState *env, 1349 struct target_rt_sigframe *sf) 1350 { 1351 sigset_t set; 1352 int i; 1353 struct target_aux_context *aux = 1354 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1355 uint32_t magic, size, fpsr, fpcr; 1356 uint64_t pstate; 1357 1358 target_to_host_sigset(&set, &sf->uc.tuc_sigmask); 1359 set_sigmask(&set); 1360 1361 for (i = 0; i < 31; i++) { 1362 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1363 } 1364 1365 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1366 __get_user(env->pc, &sf->uc.tuc_mcontext.pc); 1367 __get_user(pstate, &sf->uc.tuc_mcontext.pstate); 1368 pstate_write(env, pstate); 1369 1370 __get_user(magic, &aux->fpsimd.head.magic); 1371 __get_user(size, &aux->fpsimd.head.size); 1372 1373 if (magic != TARGET_FPSIMD_MAGIC 1374 || size != sizeof(struct target_fpsimd_context)) { 1375 return 1; 1376 } 1377 1378 for (i = 0; i < 32; i++) { 1379 #ifdef TARGET_WORDS_BIGENDIAN 1380 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1381 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1382 #else 1383 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1384 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1385 #endif 1386 } 1387 __get_user(fpsr, &aux->fpsimd.fpsr); 1388 vfp_set_fpsr(env, fpsr); 1389 __get_user(fpcr, &aux->fpsimd.fpcr); 1390 vfp_set_fpcr(env, fpcr); 1391 1392 return 0; 1393 } 1394 1395 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env) 1396 { 1397 
abi_ulong sp; 1398 1399 sp = env->xregs[31]; 1400 1401 /* 1402 * This is the X/Open sanctioned signal stack switching. 1403 */ 1404 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1405 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1406 } 1407 1408 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15; 1409 1410 return sp; 1411 } 1412 1413 static void target_setup_frame(int usig, struct target_sigaction *ka, 1414 target_siginfo_t *info, target_sigset_t *set, 1415 CPUARMState *env) 1416 { 1417 struct target_rt_sigframe *frame; 1418 abi_ulong frame_addr, return_addr; 1419 1420 frame_addr = get_sigframe(ka, env); 1421 trace_user_setup_frame(env, frame_addr); 1422 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1423 goto give_sigsegv; 1424 } 1425 1426 __put_user(0, &frame->uc.tuc_flags); 1427 __put_user(0, &frame->uc.tuc_link); 1428 1429 __put_user(target_sigaltstack_used.ss_sp, 1430 &frame->uc.tuc_stack.ss_sp); 1431 __put_user(sas_ss_flags(env->xregs[31]), 1432 &frame->uc.tuc_stack.ss_flags); 1433 __put_user(target_sigaltstack_used.ss_size, 1434 &frame->uc.tuc_stack.ss_size); 1435 target_setup_sigframe(frame, env, set); 1436 if (ka->sa_flags & TARGET_SA_RESTORER) { 1437 return_addr = ka->sa_restorer; 1438 } else { 1439 /* mov x8,#__NR_rt_sigreturn; svc #0 */ 1440 __put_user(0xd2801168, &frame->tramp[0]); 1441 __put_user(0xd4000001, &frame->tramp[1]); 1442 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp); 1443 } 1444 env->xregs[0] = usig; 1445 env->xregs[31] = frame_addr; 1446 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp); 1447 env->pc = ka->_sa_handler; 1448 env->xregs[30] = return_addr; 1449 if (info) { 1450 tswap_siginfo(&frame->info, info); 1451 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); 1452 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 1453 } 1454 1455 unlock_user_struct(frame, frame_addr, 1); 1456 return; 1457 1458 give_sigsegv: 1459 unlock_user_struct(frame, frame_addr, 1); 1460 force_sigsegv(usig); 1461 } 1462 1463 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1464 target_siginfo_t *info, target_sigset_t *set, 1465 CPUARMState *env) 1466 { 1467 target_setup_frame(sig, ka, info, set, env); 1468 } 1469 1470 static void setup_frame(int sig, struct target_sigaction *ka, 1471 target_sigset_t *set, CPUARMState *env) 1472 { 1473 target_setup_frame(sig, ka, 0, set, env); 1474 } 1475 1476 long do_rt_sigreturn(CPUARMState *env) 1477 { 1478 struct target_rt_sigframe *frame = NULL; 1479 abi_ulong frame_addr = env->xregs[31]; 1480 1481 trace_user_do_rt_sigreturn(env, frame_addr); 1482 if (frame_addr & 15) { 1483 goto badframe; 1484 } 1485 1486 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1487 goto badframe; 1488 } 1489 1490 if (target_restore_sigframe(env, frame)) { 1491 goto badframe; 1492 } 1493 1494 if (do_sigaltstack(frame_addr + 1495 offsetof(struct target_rt_sigframe, uc.tuc_stack), 1496 0, get_sp_from_cpustate(env)) == -EFAULT) { 1497 goto badframe; 1498 } 1499 1500 unlock_user_struct(frame, frame_addr, 0); 1501 return -TARGET_QEMU_ESIGRETURN; 1502 1503 badframe: 1504 unlock_user_struct(frame, frame_addr, 0); 1505 force_sig(TARGET_SIGSEGV); 1506 return -TARGET_QEMU_ESIGRETURN; 1507 } 1508 1509 long do_sigreturn(CPUARMState *env) 1510 { 1511 return do_rt_sigreturn(env); 1512 } 1513 1514 #elif defined(TARGET_ARM) 1515 1516 struct target_sigcontext { 1517 abi_ulong trap_no; 1518 abi_ulong error_code; 1519 
abi_ulong oldmask; 1520 abi_ulong arm_r0; 1521 abi_ulong arm_r1; 1522 abi_ulong arm_r2; 1523 abi_ulong arm_r3; 1524 abi_ulong arm_r4; 1525 abi_ulong arm_r5; 1526 abi_ulong arm_r6; 1527 abi_ulong arm_r7; 1528 abi_ulong arm_r8; 1529 abi_ulong arm_r9; 1530 abi_ulong arm_r10; 1531 abi_ulong arm_fp; 1532 abi_ulong arm_ip; 1533 abi_ulong arm_sp; 1534 abi_ulong arm_lr; 1535 abi_ulong arm_pc; 1536 abi_ulong arm_cpsr; 1537 abi_ulong fault_address; 1538 }; 1539 1540 struct target_ucontext_v1 { 1541 abi_ulong tuc_flags; 1542 abi_ulong tuc_link; 1543 target_stack_t tuc_stack; 1544 struct target_sigcontext tuc_mcontext; 1545 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1546 }; 1547 1548 struct target_ucontext_v2 { 1549 abi_ulong tuc_flags; 1550 abi_ulong tuc_link; 1551 target_stack_t tuc_stack; 1552 struct target_sigcontext tuc_mcontext; 1553 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1554 char __unused[128 - sizeof(target_sigset_t)]; 1555 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8))); 1556 }; 1557 1558 struct target_user_vfp { 1559 uint64_t fpregs[32]; 1560 abi_ulong fpscr; 1561 }; 1562 1563 struct target_user_vfp_exc { 1564 abi_ulong fpexc; 1565 abi_ulong fpinst; 1566 abi_ulong fpinst2; 1567 }; 1568 1569 struct target_vfp_sigframe { 1570 abi_ulong magic; 1571 abi_ulong size; 1572 struct target_user_vfp ufp; 1573 struct target_user_vfp_exc ufp_exc; 1574 } __attribute__((__aligned__(8))); 1575 1576 struct target_iwmmxt_sigframe { 1577 abi_ulong magic; 1578 abi_ulong size; 1579 uint64_t regs[16]; 1580 /* Note that not all the coprocessor control registers are stored here */ 1581 uint32_t wcssf; 1582 uint32_t wcasf; 1583 uint32_t wcgr0; 1584 uint32_t wcgr1; 1585 uint32_t wcgr2; 1586 uint32_t wcgr3; 1587 } __attribute__((__aligned__(8))); 1588 1589 #define TARGET_VFP_MAGIC 0x56465001 1590 #define TARGET_IWMMXT_MAGIC 0x12ef842a 1591 1592 struct sigframe_v1 1593 { 1594 struct target_sigcontext sc; 1595 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 1596 abi_ulong retcode; 1597 }; 1598 1599 struct sigframe_v2 1600 { 1601 struct target_ucontext_v2 uc; 1602 abi_ulong retcode; 1603 }; 1604 1605 struct rt_sigframe_v1 1606 { 1607 abi_ulong pinfo; 1608 abi_ulong puc; 1609 struct target_siginfo info; 1610 struct target_ucontext_v1 uc; 1611 abi_ulong retcode; 1612 }; 1613 1614 struct rt_sigframe_v2 1615 { 1616 struct target_siginfo info; 1617 struct target_ucontext_v2 uc; 1618 abi_ulong retcode; 1619 }; 1620 1621 #define TARGET_CONFIG_CPU_32 1 1622 1623 /* 1624 * For ARM syscalls, we encode the syscall number into the instruction. 1625 */ 1626 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) 1627 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) 1628 1629 /* 1630 * For Thumb syscalls, we pass the syscall number via r7. We therefore 1631 * need two 16-bit instructions. 
1632 */ 1633 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 1634 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 1635 1636 static const abi_ulong retcodes[4] = { 1637 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 1638 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 1639 }; 1640 1641 1642 static inline int valid_user_regs(CPUARMState *regs) 1643 { 1644 return 1; 1645 } 1646 1647 static void 1648 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1649 CPUARMState *env, abi_ulong mask) 1650 { 1651 __put_user(env->regs[0], &sc->arm_r0); 1652 __put_user(env->regs[1], &sc->arm_r1); 1653 __put_user(env->regs[2], &sc->arm_r2); 1654 __put_user(env->regs[3], &sc->arm_r3); 1655 __put_user(env->regs[4], &sc->arm_r4); 1656 __put_user(env->regs[5], &sc->arm_r5); 1657 __put_user(env->regs[6], &sc->arm_r6); 1658 __put_user(env->regs[7], &sc->arm_r7); 1659 __put_user(env->regs[8], &sc->arm_r8); 1660 __put_user(env->regs[9], &sc->arm_r9); 1661 __put_user(env->regs[10], &sc->arm_r10); 1662 __put_user(env->regs[11], &sc->arm_fp); 1663 __put_user(env->regs[12], &sc->arm_ip); 1664 __put_user(env->regs[13], &sc->arm_sp); 1665 __put_user(env->regs[14], &sc->arm_lr); 1666 __put_user(env->regs[15], &sc->arm_pc); 1667 #ifdef TARGET_CONFIG_CPU_32 1668 __put_user(cpsr_read(env), &sc->arm_cpsr); 1669 #endif 1670 1671 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no); 1672 __put_user(/* current->thread.error_code */ 0, &sc->error_code); 1673 __put_user(/* current->thread.address */ 0, &sc->fault_address); 1674 __put_user(mask, &sc->oldmask); 1675 } 1676 1677 static inline abi_ulong 1678 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) 1679 { 1680 unsigned long sp = regs->regs[13]; 1681 1682 /* 1683 * This is the X/Open sanctioned signal stack switching. 1684 */ 1685 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1686 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1687 } 1688 /* 1689 * ATPCS B01 mandates 8-byte alignment 1690 */ 1691 return (sp - framesize) & ~7; 1692 } 1693 1694 static void 1695 setup_return(CPUARMState *env, struct target_sigaction *ka, 1696 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) 1697 { 1698 abi_ulong handler = ka->_sa_handler; 1699 abi_ulong retcode; 1700 int thumb = handler & 1; 1701 uint32_t cpsr = cpsr_read(env); 1702 1703 cpsr &= ~CPSR_IT; 1704 if (thumb) { 1705 cpsr |= CPSR_T; 1706 } else { 1707 cpsr &= ~CPSR_T; 1708 } 1709 1710 if (ka->sa_flags & TARGET_SA_RESTORER) { 1711 retcode = ka->sa_restorer; 1712 } else { 1713 unsigned int idx = thumb; 1714 1715 if (ka->sa_flags & TARGET_SA_SIGINFO) { 1716 idx += 2; 1717 } 1718 1719 __put_user(retcodes[idx], rc); 1720 1721 retcode = rc_addr + thumb; 1722 } 1723 1724 env->regs[0] = usig; 1725 env->regs[13] = frame_addr; 1726 env->regs[14] = retcode; 1727 env->regs[15] = handler & (thumb ? 
~1 : ~3); 1728 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr); 1729 } 1730 1731 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) 1732 { 1733 int i; 1734 struct target_vfp_sigframe *vfpframe; 1735 vfpframe = (struct target_vfp_sigframe *)regspace; 1736 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic); 1737 __put_user(sizeof(*vfpframe), &vfpframe->size); 1738 for (i = 0; i < 32; i++) { 1739 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 1740 } 1741 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr); 1742 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc); 1743 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 1744 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 1745 return (abi_ulong*)(vfpframe+1); 1746 } 1747 1748 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, 1749 CPUARMState *env) 1750 { 1751 int i; 1752 struct target_iwmmxt_sigframe *iwmmxtframe; 1753 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 1754 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic); 1755 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size); 1756 for (i = 0; i < 16; i++) { 1757 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 1758 } 1759 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 1760 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); 1761 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 1762 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 1763 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 1764 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 1765 return (abi_ulong*)(iwmmxtframe+1); 1766 } 1767 1768 static void setup_sigframe_v2(struct target_ucontext_v2 *uc, 1769 target_sigset_t *set, CPUARMState *env) 1770 { 1771 struct target_sigaltstack stack; 1772 int i; 1773 abi_ulong *regspace; 1774 1775 /* Clear all the bits of the ucontext we don't use. */ 1776 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext)); 1777 1778 memset(&stack, 0, sizeof(stack)); 1779 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1780 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1781 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1782 memcpy(&uc->tuc_stack, &stack, sizeof(stack)); 1783 1784 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]); 1785 /* Save coprocessor signal frame. 
*/ 1786 regspace = uc->tuc_regspace; 1787 if (arm_feature(env, ARM_FEATURE_VFP)) { 1788 regspace = setup_sigframe_v2_vfp(regspace, env); 1789 } 1790 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 1791 regspace = setup_sigframe_v2_iwmmxt(regspace, env); 1792 } 1793 1794 /* Write terminating magic word */ 1795 __put_user(0, regspace); 1796 1797 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1798 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]); 1799 } 1800 } 1801 1802 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 1803 static void setup_frame_v1(int usig, struct target_sigaction *ka, 1804 target_sigset_t *set, CPUARMState *regs) 1805 { 1806 struct sigframe_v1 *frame; 1807 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1808 int i; 1809 1810 trace_user_setup_frame(regs, frame_addr); 1811 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1812 goto sigsegv; 1813 } 1814 1815 setup_sigcontext(&frame->sc, regs, set->sig[0]); 1816 1817 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1818 __put_user(set->sig[i], &frame->extramask[i - 1]); 1819 } 1820 1821 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1822 frame_addr + offsetof(struct sigframe_v1, retcode)); 1823 1824 unlock_user_struct(frame, frame_addr, 1); 1825 return; 1826 sigsegv: 1827 force_sigsegv(usig); 1828 } 1829 1830 static void setup_frame_v2(int usig, struct target_sigaction *ka, 1831 target_sigset_t *set, CPUARMState *regs) 1832 { 1833 struct sigframe_v2 *frame; 1834 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1835 1836 trace_user_setup_frame(regs, frame_addr); 1837 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1838 goto sigsegv; 1839 } 1840 1841 setup_sigframe_v2(&frame->uc, set, regs); 1842 1843 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1844 frame_addr + offsetof(struct sigframe_v2, retcode)); 1845 1846 unlock_user_struct(frame, frame_addr, 1); 1847 return; 1848 sigsegv: 1849 force_sigsegv(usig); 1850 } 1851 1852 static void setup_frame(int usig, struct target_sigaction *ka, 1853 target_sigset_t *set, CPUARMState *regs) 1854 { 1855 if (get_osversion() >= 0x020612) { 1856 setup_frame_v2(usig, ka, set, regs); 1857 } else { 1858 setup_frame_v1(usig, ka, set, regs); 1859 } 1860 } 1861 1862 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 1863 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, 1864 target_siginfo_t *info, 1865 target_sigset_t *set, CPUARMState *env) 1866 { 1867 struct rt_sigframe_v1 *frame; 1868 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1869 struct target_sigaltstack stack; 1870 int i; 1871 abi_ulong info_addr, uc_addr; 1872 1873 trace_user_setup_rt_frame(env, frame_addr); 1874 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1875 goto sigsegv; 1876 } 1877 1878 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); 1879 __put_user(info_addr, &frame->pinfo); 1880 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); 1881 __put_user(uc_addr, &frame->puc); 1882 tswap_siginfo(&frame->info, info); 1883 1884 /* Clear all the bits of the ucontext we don't use. 
*/ 1885 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 1886 1887 memset(&stack, 0, sizeof(stack)); 1888 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1889 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1890 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1891 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1892 1893 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 1894 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1895 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1896 } 1897 1898 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1899 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 1900 1901 env->regs[1] = info_addr; 1902 env->regs[2] = uc_addr; 1903 1904 unlock_user_struct(frame, frame_addr, 1); 1905 return; 1906 sigsegv: 1907 force_sigsegv(usig); 1908 } 1909 1910 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 1911 target_siginfo_t *info, 1912 target_sigset_t *set, CPUARMState *env) 1913 { 1914 struct rt_sigframe_v2 *frame; 1915 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1916 abi_ulong info_addr, uc_addr; 1917 1918 trace_user_setup_rt_frame(env, frame_addr); 1919 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1920 goto sigsegv; 1921 } 1922 1923 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 1924 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 1925 tswap_siginfo(&frame->info, info); 1926 1927 setup_sigframe_v2(&frame->uc, set, env); 1928 1929 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1930 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 1931 1932 env->regs[1] = info_addr; 1933 env->regs[2] = uc_addr; 1934 1935 unlock_user_struct(frame, frame_addr, 1); 1936 return; 1937 sigsegv: 1938 force_sigsegv(usig); 1939 } 1940 1941 static void setup_rt_frame(int usig, struct target_sigaction *ka, 1942 target_siginfo_t *info, 1943 target_sigset_t *set, CPUARMState *env) 1944 { 1945 if (get_osversion() >= 0x020612) { 1946 setup_rt_frame_v2(usig, ka, info, set, env); 1947 } else { 1948 setup_rt_frame_v1(usig, ka, info, set, env); 1949 } 1950 } 1951 1952 static int 1953 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 1954 { 1955 int err = 0; 1956 uint32_t cpsr; 1957 1958 __get_user(env->regs[0], &sc->arm_r0); 1959 __get_user(env->regs[1], &sc->arm_r1); 1960 __get_user(env->regs[2], &sc->arm_r2); 1961 __get_user(env->regs[3], &sc->arm_r3); 1962 __get_user(env->regs[4], &sc->arm_r4); 1963 __get_user(env->regs[5], &sc->arm_r5); 1964 __get_user(env->regs[6], &sc->arm_r6); 1965 __get_user(env->regs[7], &sc->arm_r7); 1966 __get_user(env->regs[8], &sc->arm_r8); 1967 __get_user(env->regs[9], &sc->arm_r9); 1968 __get_user(env->regs[10], &sc->arm_r10); 1969 __get_user(env->regs[11], &sc->arm_fp); 1970 __get_user(env->regs[12], &sc->arm_ip); 1971 __get_user(env->regs[13], &sc->arm_sp); 1972 __get_user(env->regs[14], &sc->arm_lr); 1973 __get_user(env->regs[15], &sc->arm_pc); 1974 #ifdef TARGET_CONFIG_CPU_32 1975 __get_user(cpsr, &sc->arm_cpsr); 1976 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 1977 #endif 1978 1979 err |= !valid_user_regs(env); 1980 1981 return err; 1982 } 1983 1984 static long do_sigreturn_v1(CPUARMState *env) 1985 { 1986 abi_ulong frame_addr; 1987 struct sigframe_v1 *frame = NULL; 1988 target_sigset_t set; 1989 sigset_t host_set; 1990 int i; 1991 1992 /* 1993 * Since we stacked the signal on a 64-bit boundary, 1994 * then 'sp' should be word 
aligned here. If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    __get_user(set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->sc)) {
        goto badframe;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}

static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
                                             abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;

    __get_user(magic, &iwmmxtframe->magic);
    __get_user(sz, &iwmmxtframe->size);
    if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
        return 0;
    }
    for (i = 0; i < 16; i++) {
        __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe + 1);
}

static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env,
ARM_FEATURE_VFP)) { 2102 regspace = restore_sigframe_v2_vfp(env, regspace); 2103 if (!regspace) { 2104 return 1; 2105 } 2106 } 2107 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2108 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2109 if (!regspace) { 2110 return 1; 2111 } 2112 } 2113 2114 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2115 return 1; 2116 2117 #if 0 2118 /* Send SIGTRAP if we're single-stepping */ 2119 if (ptrace_cancel_bpt(current)) 2120 send_sig(SIGTRAP, current, 1); 2121 #endif 2122 2123 return 0; 2124 } 2125 2126 static long do_sigreturn_v2(CPUARMState *env) 2127 { 2128 abi_ulong frame_addr; 2129 struct sigframe_v2 *frame = NULL; 2130 2131 /* 2132 * Since we stacked the signal on a 64-bit boundary, 2133 * then 'sp' should be word aligned here. If it's 2134 * not, then the user is trying to mess with us. 2135 */ 2136 frame_addr = env->regs[13]; 2137 trace_user_do_sigreturn(env, frame_addr); 2138 if (frame_addr & 7) { 2139 goto badframe; 2140 } 2141 2142 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2143 goto badframe; 2144 } 2145 2146 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2147 goto badframe; 2148 } 2149 2150 unlock_user_struct(frame, frame_addr, 0); 2151 return -TARGET_QEMU_ESIGRETURN; 2152 2153 badframe: 2154 unlock_user_struct(frame, frame_addr, 0); 2155 force_sig(TARGET_SIGSEGV); 2156 return -TARGET_QEMU_ESIGRETURN; 2157 } 2158 2159 long do_sigreturn(CPUARMState *env) 2160 { 2161 if (get_osversion() >= 0x020612) { 2162 return do_sigreturn_v2(env); 2163 } else { 2164 return do_sigreturn_v1(env); 2165 } 2166 } 2167 2168 static long do_rt_sigreturn_v1(CPUARMState *env) 2169 { 2170 abi_ulong frame_addr; 2171 struct rt_sigframe_v1 *frame = NULL; 2172 sigset_t host_set; 2173 2174 /* 2175 * Since we stacked the signal on a 64-bit boundary, 2176 * then 'sp' should be word aligned here. If it's 2177 * not, then the user is trying to mess with us. 2178 */ 2179 frame_addr = env->regs[13]; 2180 trace_user_do_rt_sigreturn(env, frame_addr); 2181 if (frame_addr & 7) { 2182 goto badframe; 2183 } 2184 2185 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2186 goto badframe; 2187 } 2188 2189 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2190 set_sigmask(&host_set); 2191 2192 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2193 goto badframe; 2194 } 2195 2196 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2197 goto badframe; 2198 2199 #if 0 2200 /* Send SIGTRAP if we're single-stepping */ 2201 if (ptrace_cancel_bpt(current)) 2202 send_sig(SIGTRAP, current, 1); 2203 #endif 2204 unlock_user_struct(frame, frame_addr, 0); 2205 return -TARGET_QEMU_ESIGRETURN; 2206 2207 badframe: 2208 unlock_user_struct(frame, frame_addr, 0); 2209 force_sig(TARGET_SIGSEGV); 2210 return -TARGET_QEMU_ESIGRETURN; 2211 } 2212 2213 static long do_rt_sigreturn_v2(CPUARMState *env) 2214 { 2215 abi_ulong frame_addr; 2216 struct rt_sigframe_v2 *frame = NULL; 2217 2218 /* 2219 * Since we stacked the signal on a 64-bit boundary, 2220 * then 'sp' should be word aligned here. If it's 2221 * not, then the user is trying to mess with us. 
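     * A misaligned frame pointer is treated as a bad frame below, so the
     * guest gets SIGSEGV rather than a partially restored context.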
2222 */ 2223 frame_addr = env->regs[13]; 2224 trace_user_do_rt_sigreturn(env, frame_addr); 2225 if (frame_addr & 7) { 2226 goto badframe; 2227 } 2228 2229 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2230 goto badframe; 2231 } 2232 2233 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2234 goto badframe; 2235 } 2236 2237 unlock_user_struct(frame, frame_addr, 0); 2238 return -TARGET_QEMU_ESIGRETURN; 2239 2240 badframe: 2241 unlock_user_struct(frame, frame_addr, 0); 2242 force_sig(TARGET_SIGSEGV); 2243 return -TARGET_QEMU_ESIGRETURN; 2244 } 2245 2246 long do_rt_sigreturn(CPUARMState *env) 2247 { 2248 if (get_osversion() >= 0x020612) { 2249 return do_rt_sigreturn_v2(env); 2250 } else { 2251 return do_rt_sigreturn_v1(env); 2252 } 2253 } 2254 2255 #elif defined(TARGET_SPARC) 2256 2257 #define __SUNOS_MAXWIN 31 2258 2259 /* This is what SunOS does, so shall I. */ 2260 struct target_sigcontext { 2261 abi_ulong sigc_onstack; /* state to restore */ 2262 2263 abi_ulong sigc_mask; /* sigmask to restore */ 2264 abi_ulong sigc_sp; /* stack pointer */ 2265 abi_ulong sigc_pc; /* program counter */ 2266 abi_ulong sigc_npc; /* next program counter */ 2267 abi_ulong sigc_psr; /* for condition codes etc */ 2268 abi_ulong sigc_g1; /* User uses these two registers */ 2269 abi_ulong sigc_o0; /* within the trampoline code. */ 2270 2271 /* Now comes information regarding the users window set 2272 * at the time of the signal. 2273 */ 2274 abi_ulong sigc_oswins; /* outstanding windows */ 2275 2276 /* stack ptrs for each regwin buf */ 2277 char *sigc_spbuf[__SUNOS_MAXWIN]; 2278 2279 /* Windows to restore after signal */ 2280 struct { 2281 abi_ulong locals[8]; 2282 abi_ulong ins[8]; 2283 } sigc_wbuf[__SUNOS_MAXWIN]; 2284 }; 2285 /* A Sparc stack frame */ 2286 struct sparc_stackf { 2287 abi_ulong locals[8]; 2288 abi_ulong ins[8]; 2289 /* It's simpler to treat fp and callers_pc as elements of ins[] 2290 * since we never need to access them ourselves. 
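     * In the usual SPARC window layout ins[6] and ins[7] are where the
     * saved frame pointer and return address live.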
2291 */ 2292 char *structptr; 2293 abi_ulong xargs[6]; 2294 abi_ulong xxargs[1]; 2295 }; 2296 2297 typedef struct { 2298 struct { 2299 abi_ulong psr; 2300 abi_ulong pc; 2301 abi_ulong npc; 2302 abi_ulong y; 2303 abi_ulong u_regs[16]; /* globals and ins */ 2304 } si_regs; 2305 int si_mask; 2306 } __siginfo_t; 2307 2308 typedef struct { 2309 abi_ulong si_float_regs[32]; 2310 unsigned long si_fsr; 2311 unsigned long si_fpqdepth; 2312 struct { 2313 unsigned long *insn_addr; 2314 unsigned long insn; 2315 } si_fpqueue [16]; 2316 } qemu_siginfo_fpu_t; 2317 2318 2319 struct target_signal_frame { 2320 struct sparc_stackf ss; 2321 __siginfo_t info; 2322 abi_ulong fpu_save; 2323 abi_ulong insns[2] __attribute__ ((aligned (8))); 2324 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2325 abi_ulong extra_size; /* Should be 0 */ 2326 qemu_siginfo_fpu_t fpu_state; 2327 }; 2328 struct target_rt_signal_frame { 2329 struct sparc_stackf ss; 2330 siginfo_t info; 2331 abi_ulong regs[20]; 2332 sigset_t mask; 2333 abi_ulong fpu_save; 2334 unsigned int insns[2]; 2335 stack_t stack; 2336 unsigned int extra_size; /* Should be 0 */ 2337 qemu_siginfo_fpu_t fpu_state; 2338 }; 2339 2340 #define UREG_O0 16 2341 #define UREG_O6 22 2342 #define UREG_I0 0 2343 #define UREG_I1 1 2344 #define UREG_I2 2 2345 #define UREG_I3 3 2346 #define UREG_I4 4 2347 #define UREG_I5 5 2348 #define UREG_I6 6 2349 #define UREG_I7 7 2350 #define UREG_L0 8 2351 #define UREG_FP UREG_I6 2352 #define UREG_SP UREG_O6 2353 2354 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2355 CPUSPARCState *env, 2356 unsigned long framesize) 2357 { 2358 abi_ulong sp; 2359 2360 sp = env->regwptr[UREG_FP]; 2361 2362 /* This is the X/Open sanctioned signal stack switching. */ 2363 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2364 if (!on_sig_stack(sp) 2365 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2366 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2367 } 2368 } 2369 return sp - framesize; 2370 } 2371 2372 static int 2373 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2374 { 2375 int err = 0, i; 2376 2377 __put_user(env->psr, &si->si_regs.psr); 2378 __put_user(env->pc, &si->si_regs.pc); 2379 __put_user(env->npc, &si->si_regs.npc); 2380 __put_user(env->y, &si->si_regs.y); 2381 for (i=0; i < 8; i++) { 2382 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2383 } 2384 for (i=0; i < 8; i++) { 2385 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2386 } 2387 __put_user(mask, &si->si_mask); 2388 return err; 2389 } 2390 2391 #if 0 2392 static int 2393 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2394 CPUSPARCState *env, unsigned long mask) 2395 { 2396 int err = 0; 2397 2398 __put_user(mask, &sc->sigc_mask); 2399 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2400 __put_user(env->pc, &sc->sigc_pc); 2401 __put_user(env->npc, &sc->sigc_npc); 2402 __put_user(env->psr, &sc->sigc_psr); 2403 __put_user(env->gregs[1], &sc->sigc_g1); 2404 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2405 2406 return err; 2407 } 2408 #endif 2409 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2410 2411 static void setup_frame(int sig, struct target_sigaction *ka, 2412 target_sigset_t *set, CPUSPARCState *env) 2413 { 2414 abi_ulong sf_addr; 2415 struct target_signal_frame *sf; 2416 int sigframe_size, err, i; 2417 2418 /* 1. 
Make sure everything is clean */ 2419 //synchronize_user_stack(); 2420 2421 sigframe_size = NF_ALIGNEDSZ; 2422 sf_addr = get_sigframe(ka, env, sigframe_size); 2423 trace_user_setup_frame(env, sf_addr); 2424 2425 sf = lock_user(VERIFY_WRITE, sf_addr, 2426 sizeof(struct target_signal_frame), 0); 2427 if (!sf) { 2428 goto sigsegv; 2429 } 2430 #if 0 2431 if (invalid_frame_pointer(sf, sigframe_size)) 2432 goto sigill_and_return; 2433 #endif 2434 /* 2. Save the current process state */ 2435 err = setup___siginfo(&sf->info, env, set->sig[0]); 2436 __put_user(0, &sf->extra_size); 2437 2438 //save_fpu_state(regs, &sf->fpu_state); 2439 //__put_user(&sf->fpu_state, &sf->fpu_save); 2440 2441 __put_user(set->sig[0], &sf->info.si_mask); 2442 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2443 __put_user(set->sig[i + 1], &sf->extramask[i]); 2444 } 2445 2446 for (i = 0; i < 8; i++) { 2447 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2448 } 2449 for (i = 0; i < 8; i++) { 2450 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2451 } 2452 if (err) 2453 goto sigsegv; 2454 2455 /* 3. signal handler back-trampoline and parameters */ 2456 env->regwptr[UREG_FP] = sf_addr; 2457 env->regwptr[UREG_I0] = sig; 2458 env->regwptr[UREG_I1] = sf_addr + 2459 offsetof(struct target_signal_frame, info); 2460 env->regwptr[UREG_I2] = sf_addr + 2461 offsetof(struct target_signal_frame, info); 2462 2463 /* 4. signal handler */ 2464 env->pc = ka->_sa_handler; 2465 env->npc = (env->pc + 4); 2466 /* 5. return to kernel instructions */ 2467 if (ka->sa_restorer) { 2468 env->regwptr[UREG_I7] = ka->sa_restorer; 2469 } else { 2470 uint32_t val32; 2471 2472 env->regwptr[UREG_I7] = sf_addr + 2473 offsetof(struct target_signal_frame, insns) - 2 * 4; 2474 2475 /* mov __NR_sigreturn, %g1 */ 2476 val32 = 0x821020d8; 2477 __put_user(val32, &sf->insns[0]); 2478 2479 /* t 0x10 */ 2480 val32 = 0x91d02010; 2481 __put_user(val32, &sf->insns[1]); 2482 if (err) 2483 goto sigsegv; 2484 2485 /* Flush instruction space. */ 2486 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2487 // tb_flush(env); 2488 } 2489 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2490 return; 2491 #if 0 2492 sigill_and_return: 2493 force_sig(TARGET_SIGILL); 2494 #endif 2495 sigsegv: 2496 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2497 force_sigsegv(sig); 2498 } 2499 2500 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2501 target_siginfo_t *info, 2502 target_sigset_t *set, CPUSPARCState *env) 2503 { 2504 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2505 } 2506 2507 long do_sigreturn(CPUSPARCState *env) 2508 { 2509 abi_ulong sf_addr; 2510 struct target_signal_frame *sf; 2511 uint32_t up_psr, pc, npc; 2512 target_sigset_t set; 2513 sigset_t host_set; 2514 int err=0, i; 2515 2516 sf_addr = env->regwptr[UREG_FP]; 2517 trace_user_do_sigreturn(env, sf_addr); 2518 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2519 goto segv_and_exit; 2520 } 2521 2522 /* 1. Make sure we are not getting garbage from the user */ 2523 2524 if (sf_addr & 3) 2525 goto segv_and_exit; 2526 2527 __get_user(pc, &sf->info.si_regs.pc); 2528 __get_user(npc, &sf->info.si_regs.npc); 2529 2530 if ((pc | npc) & 3) { 2531 goto segv_and_exit; 2532 } 2533 2534 /* 2. Restore the state */ 2535 __get_user(up_psr, &sf->info.si_regs.psr); 2536 2537 /* User can only change condition codes and FPU enabling in %psr. 
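     * Everything else in %psr is kept from the current value; the masking
     * below enforces that.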
*/ 2538 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2539 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2540 2541 env->pc = pc; 2542 env->npc = npc; 2543 __get_user(env->y, &sf->info.si_regs.y); 2544 for (i=0; i < 8; i++) { 2545 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2546 } 2547 for (i=0; i < 8; i++) { 2548 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2549 } 2550 2551 /* FIXME: implement FPU save/restore: 2552 * __get_user(fpu_save, &sf->fpu_save); 2553 * if (fpu_save) 2554 * err |= restore_fpu_state(env, fpu_save); 2555 */ 2556 2557 /* This is pretty much atomic, no amount locking would prevent 2558 * the races which exist anyways. 2559 */ 2560 __get_user(set.sig[0], &sf->info.si_mask); 2561 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2562 __get_user(set.sig[i], &sf->extramask[i - 1]); 2563 } 2564 2565 target_to_host_sigset_internal(&host_set, &set); 2566 set_sigmask(&host_set); 2567 2568 if (err) { 2569 goto segv_and_exit; 2570 } 2571 unlock_user_struct(sf, sf_addr, 0); 2572 return -TARGET_QEMU_ESIGRETURN; 2573 2574 segv_and_exit: 2575 unlock_user_struct(sf, sf_addr, 0); 2576 force_sig(TARGET_SIGSEGV); 2577 return -TARGET_QEMU_ESIGRETURN; 2578 } 2579 2580 long do_rt_sigreturn(CPUSPARCState *env) 2581 { 2582 trace_user_do_rt_sigreturn(env, 0); 2583 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2584 return -TARGET_ENOSYS; 2585 } 2586 2587 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2588 #define MC_TSTATE 0 2589 #define MC_PC 1 2590 #define MC_NPC 2 2591 #define MC_Y 3 2592 #define MC_G1 4 2593 #define MC_G2 5 2594 #define MC_G3 6 2595 #define MC_G4 7 2596 #define MC_G5 8 2597 #define MC_G6 9 2598 #define MC_G7 10 2599 #define MC_O0 11 2600 #define MC_O1 12 2601 #define MC_O2 13 2602 #define MC_O3 14 2603 #define MC_O4 15 2604 #define MC_O5 16 2605 #define MC_O6 17 2606 #define MC_O7 18 2607 #define MC_NGREG 19 2608 2609 typedef abi_ulong target_mc_greg_t; 2610 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2611 2612 struct target_mc_fq { 2613 abi_ulong *mcfq_addr; 2614 uint32_t mcfq_insn; 2615 }; 2616 2617 struct target_mc_fpu { 2618 union { 2619 uint32_t sregs[32]; 2620 uint64_t dregs[32]; 2621 //uint128_t qregs[16]; 2622 } mcfpu_fregs; 2623 abi_ulong mcfpu_fsr; 2624 abi_ulong mcfpu_fprs; 2625 abi_ulong mcfpu_gsr; 2626 struct target_mc_fq *mcfpu_fq; 2627 unsigned char mcfpu_qcnt; 2628 unsigned char mcfpu_qentsz; 2629 unsigned char mcfpu_enab; 2630 }; 2631 typedef struct target_mc_fpu target_mc_fpu_t; 2632 2633 typedef struct { 2634 target_mc_gregset_t mc_gregs; 2635 target_mc_greg_t mc_fp; 2636 target_mc_greg_t mc_i7; 2637 target_mc_fpu_t mc_fpregs; 2638 } target_mcontext_t; 2639 2640 struct target_ucontext { 2641 struct target_ucontext *tuc_link; 2642 abi_ulong tuc_flags; 2643 target_sigset_t tuc_sigmask; 2644 target_mcontext_t tuc_mcontext; 2645 }; 2646 2647 /* A V9 register window */ 2648 struct target_reg_window { 2649 abi_ulong locals[8]; 2650 abi_ulong ins[8]; 2651 }; 2652 2653 #define TARGET_STACK_BIAS 2047 2654 2655 /* {set, get}context() needed for 64-bit SparcLinux userland. 
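 * Unlike the sigreturn paths above these are not entered through a signal
 * frame: the guest passes a ucontext pointer in its first argument
 * register and the machine state is loaded or stored directly from it.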
*/ 2656 void sparc64_set_context(CPUSPARCState *env) 2657 { 2658 abi_ulong ucp_addr; 2659 struct target_ucontext *ucp; 2660 target_mc_gregset_t *grp; 2661 abi_ulong pc, npc, tstate; 2662 abi_ulong fp, i7, w_addr; 2663 unsigned int i; 2664 2665 ucp_addr = env->regwptr[UREG_I0]; 2666 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2667 goto do_sigsegv; 2668 } 2669 grp = &ucp->tuc_mcontext.mc_gregs; 2670 __get_user(pc, &((*grp)[MC_PC])); 2671 __get_user(npc, &((*grp)[MC_NPC])); 2672 if ((pc | npc) & 3) { 2673 goto do_sigsegv; 2674 } 2675 if (env->regwptr[UREG_I1]) { 2676 target_sigset_t target_set; 2677 sigset_t set; 2678 2679 if (TARGET_NSIG_WORDS == 1) { 2680 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2681 } else { 2682 abi_ulong *src, *dst; 2683 src = ucp->tuc_sigmask.sig; 2684 dst = target_set.sig; 2685 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2686 __get_user(*dst, src); 2687 } 2688 } 2689 target_to_host_sigset_internal(&set, &target_set); 2690 set_sigmask(&set); 2691 } 2692 env->pc = pc; 2693 env->npc = npc; 2694 __get_user(env->y, &((*grp)[MC_Y])); 2695 __get_user(tstate, &((*grp)[MC_TSTATE])); 2696 env->asi = (tstate >> 24) & 0xff; 2697 cpu_put_ccr(env, tstate >> 32); 2698 cpu_put_cwp64(env, tstate & 0x1f); 2699 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2700 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2701 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2702 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2703 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2704 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2705 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2706 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2707 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2708 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2709 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2710 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2711 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2712 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2713 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2714 2715 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2716 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2717 2718 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2719 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2720 abi_ulong) != 0) { 2721 goto do_sigsegv; 2722 } 2723 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2724 abi_ulong) != 0) { 2725 goto do_sigsegv; 2726 } 2727 /* FIXME this does not match how the kernel handles the FPU in 2728 * its sparc64_set_context implementation. 
In particular the FPU 2729 * is only restored if fenab is non-zero in: 2730 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2731 */ 2732 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2733 { 2734 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2735 for (i = 0; i < 64; i++, src++) { 2736 if (i & 1) { 2737 __get_user(env->fpr[i/2].l.lower, src); 2738 } else { 2739 __get_user(env->fpr[i/2].l.upper, src); 2740 } 2741 } 2742 } 2743 __get_user(env->fsr, 2744 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2745 __get_user(env->gsr, 2746 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2747 unlock_user_struct(ucp, ucp_addr, 0); 2748 return; 2749 do_sigsegv: 2750 unlock_user_struct(ucp, ucp_addr, 0); 2751 force_sig(TARGET_SIGSEGV); 2752 } 2753 2754 void sparc64_get_context(CPUSPARCState *env) 2755 { 2756 abi_ulong ucp_addr; 2757 struct target_ucontext *ucp; 2758 target_mc_gregset_t *grp; 2759 target_mcontext_t *mcp; 2760 abi_ulong fp, i7, w_addr; 2761 int err; 2762 unsigned int i; 2763 target_sigset_t target_set; 2764 sigset_t set; 2765 2766 ucp_addr = env->regwptr[UREG_I0]; 2767 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2768 goto do_sigsegv; 2769 } 2770 2771 mcp = &ucp->tuc_mcontext; 2772 grp = &mcp->mc_gregs; 2773 2774 /* Skip over the trap instruction, first. */ 2775 env->pc = env->npc; 2776 env->npc += 4; 2777 2778 /* If we're only reading the signal mask then do_sigprocmask() 2779 * is guaranteed not to fail, which is important because we don't 2780 * have any way to signal a failure or restart this operation since 2781 * this is not a normal syscall. 2782 */ 2783 err = do_sigprocmask(0, NULL, &set); 2784 assert(err == 0); 2785 host_to_target_sigset_internal(&target_set, &set); 2786 if (TARGET_NSIG_WORDS == 1) { 2787 __put_user(target_set.sig[0], 2788 (abi_ulong *)&ucp->tuc_sigmask); 2789 } else { 2790 abi_ulong *src, *dst; 2791 src = target_set.sig; 2792 dst = ucp->tuc_sigmask.sig; 2793 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2794 __put_user(*src, dst); 2795 } 2796 if (err) 2797 goto do_sigsegv; 2798 } 2799 2800 /* XXX: tstate must be saved properly */ 2801 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2802 __put_user(env->pc, &((*grp)[MC_PC])); 2803 __put_user(env->npc, &((*grp)[MC_NPC])); 2804 __put_user(env->y, &((*grp)[MC_Y])); 2805 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2806 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2807 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2808 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2809 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2810 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2811 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2812 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2813 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2814 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2815 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2816 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2817 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2818 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2819 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2820 2821 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2822 fp = i7 = 0; 2823 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2824 abi_ulong) != 0) { 2825 goto do_sigsegv; 2826 } 2827 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2828 abi_ulong) != 0) { 2829 goto do_sigsegv; 2830 } 2831 __put_user(fp, &(mcp->mc_fp)); 2832 __put_user(i7, 
&(mcp->mc_i7)); 2833 2834 { 2835 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2836 for (i = 0; i < 64; i++, dst++) { 2837 if (i & 1) { 2838 __put_user(env->fpr[i/2].l.lower, dst); 2839 } else { 2840 __put_user(env->fpr[i/2].l.upper, dst); 2841 } 2842 } 2843 } 2844 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2845 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2846 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2847 2848 if (err) 2849 goto do_sigsegv; 2850 unlock_user_struct(ucp, ucp_addr, 1); 2851 return; 2852 do_sigsegv: 2853 unlock_user_struct(ucp, ucp_addr, 1); 2854 force_sig(TARGET_SIGSEGV); 2855 } 2856 #endif 2857 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2858 2859 # if defined(TARGET_ABI_MIPSO32) 2860 struct target_sigcontext { 2861 uint32_t sc_regmask; /* Unused */ 2862 uint32_t sc_status; 2863 uint64_t sc_pc; 2864 uint64_t sc_regs[32]; 2865 uint64_t sc_fpregs[32]; 2866 uint32_t sc_ownedfp; /* Unused */ 2867 uint32_t sc_fpc_csr; 2868 uint32_t sc_fpc_eir; /* Unused */ 2869 uint32_t sc_used_math; 2870 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2871 uint32_t pad0; 2872 uint64_t sc_mdhi; 2873 uint64_t sc_mdlo; 2874 target_ulong sc_hi1; /* Was sc_cause */ 2875 target_ulong sc_lo1; /* Was sc_badvaddr */ 2876 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2877 target_ulong sc_lo2; 2878 target_ulong sc_hi3; 2879 target_ulong sc_lo3; 2880 }; 2881 # else /* N32 || N64 */ 2882 struct target_sigcontext { 2883 uint64_t sc_regs[32]; 2884 uint64_t sc_fpregs[32]; 2885 uint64_t sc_mdhi; 2886 uint64_t sc_hi1; 2887 uint64_t sc_hi2; 2888 uint64_t sc_hi3; 2889 uint64_t sc_mdlo; 2890 uint64_t sc_lo1; 2891 uint64_t sc_lo2; 2892 uint64_t sc_lo3; 2893 uint64_t sc_pc; 2894 uint32_t sc_fpc_csr; 2895 uint32_t sc_used_math; 2896 uint32_t sc_dsp; 2897 uint32_t sc_reserved; 2898 }; 2899 # endif /* O32 */ 2900 2901 struct sigframe { 2902 uint32_t sf_ass[4]; /* argument save space for o32 */ 2903 uint32_t sf_code[2]; /* signal trampoline */ 2904 struct target_sigcontext sf_sc; 2905 target_sigset_t sf_mask; 2906 }; 2907 2908 struct target_ucontext { 2909 target_ulong tuc_flags; 2910 target_ulong tuc_link; 2911 target_stack_t tuc_stack; 2912 target_ulong pad0; 2913 struct target_sigcontext tuc_mcontext; 2914 target_sigset_t tuc_sigmask; 2915 }; 2916 2917 struct target_rt_sigframe { 2918 uint32_t rs_ass[4]; /* argument save space for o32 */ 2919 uint32_t rs_code[2]; /* signal trampoline */ 2920 struct target_siginfo rs_info; 2921 struct target_ucontext rs_uc; 2922 }; 2923 2924 /* Install trampoline to jump back from signal handler */ 2925 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2926 { 2927 int err = 0; 2928 2929 /* 2930 * Set up the return code ... 2931 * 2932 * li v0, __NR__foo_sigreturn 2933 * syscall 2934 */ 2935 2936 __put_user(0x24020000 + syscall, tramp + 0); 2937 __put_user(0x0000000c , tramp + 1); 2938 return err; 2939 } 2940 2941 static inline void setup_sigcontext(CPUMIPSState *regs, 2942 struct target_sigcontext *sc) 2943 { 2944 int i; 2945 2946 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2947 regs->hflags &= ~MIPS_HFLAG_BMASK; 2948 2949 __put_user(0, &sc->sc_regs[0]); 2950 for (i = 1; i < 32; ++i) { 2951 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2952 } 2953 2954 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2955 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2956 2957 /* Rather than checking for dsp existence, always copy. The storage 2958 would just be garbage otherwise. 
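       The extra HI/LO pairs and the DSP control word are therefore written
       unconditionally, and restore_sigcontext below reads them back the
       same way.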
*/ 2959 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2960 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2961 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2962 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2963 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2964 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2965 { 2966 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2967 __put_user(dsp, &sc->sc_dsp); 2968 } 2969 2970 __put_user(1, &sc->sc_used_math); 2971 2972 for (i = 0; i < 32; ++i) { 2973 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2974 } 2975 } 2976 2977 static inline void 2978 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2979 { 2980 int i; 2981 2982 __get_user(regs->CP0_EPC, &sc->sc_pc); 2983 2984 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2985 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2986 2987 for (i = 1; i < 32; ++i) { 2988 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2989 } 2990 2991 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2992 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2993 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2994 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2995 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2996 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2997 { 2998 uint32_t dsp; 2999 __get_user(dsp, &sc->sc_dsp); 3000 cpu_wrdsp(dsp, 0x3ff, regs); 3001 } 3002 3003 for (i = 0; i < 32; ++i) { 3004 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 3005 } 3006 } 3007 3008 /* 3009 * Determine which stack to use.. 3010 */ 3011 static inline abi_ulong 3012 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 3013 { 3014 unsigned long sp; 3015 3016 /* Default to using normal stack */ 3017 sp = regs->active_tc.gpr[29]; 3018 3019 /* 3020 * FPU emulator may have its own trampoline active just 3021 * above the user stack, 16-bytes before the next lowest 3022 * 16 byte boundary. Try to avoid trashing it. 3023 */ 3024 sp -= 32; 3025 3026 /* This is the X/Open sanctioned signal stack switching. */ 3027 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 3028 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3029 } 3030 3031 return (sp - frame_size) & ~7; 3032 } 3033 3034 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 3035 { 3036 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 3037 env->hflags &= ~MIPS_HFLAG_M16; 3038 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 3039 env->active_tc.PC &= ~(target_ulong) 1; 3040 } 3041 } 3042 3043 # if defined(TARGET_ABI_MIPSO32) 3044 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 3045 static void setup_frame(int sig, struct target_sigaction * ka, 3046 target_sigset_t *set, CPUMIPSState *regs) 3047 { 3048 struct sigframe *frame; 3049 abi_ulong frame_addr; 3050 int i; 3051 3052 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 3053 trace_user_setup_frame(regs, frame_addr); 3054 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3055 goto give_sigsegv; 3056 } 3057 3058 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3059 3060 setup_sigcontext(regs, &frame->sf_sc); 3061 3062 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3063 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3064 } 3065 3066 /* 3067 * Arguments to signal handler: 3068 * 3069 * a0 = signal number 3070 * a1 = 0 (should be cause) 3071 * a2 = pointer to struct sigcontext 3072 * 3073 * $25 and PC point to the signal handler, $29 points to the 3074 * struct sigframe. 
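     * $31 is pointed at the sf_code trampoline so that returning from the
     * handler issues the sigreturn syscall.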
     */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_sigreturn(CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

    frame_addr = regs->active_tc.gpr[29];
    trace_user_do_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    set_sigmask(&blocked);

    restore_sigcontext(regs, &frame->sf_sc);

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
        "move\t$29, %0\n\t"
        "j\tsyscall_exit"
        :/* no outputs */
        :"r" (&regs));
    /* Unreached */
#endif

    regs->active_tc.PC = regs->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(regs);
    /* I am not sure this is right, but it seems to work
     * maybe a problem with nested signals ? */
    regs->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
# endif /* O32 */

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = pointer to siginfo_t
     *   a2 = pointer to struct ucontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
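     * $31 is pointed at the rs_code trampoline, which issues the
     * rt_sigreturn syscall when the handler returns.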
3180 */ 3181 env->active_tc.gpr[ 4] = sig; 3182 env->active_tc.gpr[ 5] = frame_addr 3183 + offsetof(struct target_rt_sigframe, rs_info); 3184 env->active_tc.gpr[ 6] = frame_addr 3185 + offsetof(struct target_rt_sigframe, rs_uc); 3186 env->active_tc.gpr[29] = frame_addr; 3187 env->active_tc.gpr[31] = frame_addr 3188 + offsetof(struct target_rt_sigframe, rs_code); 3189 /* The original kernel code sets CP0_EPC to the handler 3190 * since it returns to userland using eret 3191 * we cannot do this here, and we must set PC directly */ 3192 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3193 mips_set_hflags_isa_mode_from_pc(env); 3194 unlock_user_struct(frame, frame_addr, 1); 3195 return; 3196 3197 give_sigsegv: 3198 unlock_user_struct(frame, frame_addr, 1); 3199 force_sigsegv(sig); 3200 } 3201 3202 long do_rt_sigreturn(CPUMIPSState *env) 3203 { 3204 struct target_rt_sigframe *frame; 3205 abi_ulong frame_addr; 3206 sigset_t blocked; 3207 3208 frame_addr = env->active_tc.gpr[29]; 3209 trace_user_do_rt_sigreturn(env, frame_addr); 3210 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3211 goto badframe; 3212 } 3213 3214 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3215 set_sigmask(&blocked); 3216 3217 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3218 3219 if (do_sigaltstack(frame_addr + 3220 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3221 0, get_sp_from_cpustate(env)) == -EFAULT) 3222 goto badframe; 3223 3224 env->active_tc.PC = env->CP0_EPC; 3225 mips_set_hflags_isa_mode_from_pc(env); 3226 /* I am not sure this is right, but it seems to work 3227 * maybe a problem with nested signals ? */ 3228 env->CP0_EPC = 0; 3229 return -TARGET_QEMU_ESIGRETURN; 3230 3231 badframe: 3232 force_sig(TARGET_SIGSEGV); 3233 return -TARGET_QEMU_ESIGRETURN; 3234 } 3235 3236 #elif defined(TARGET_SH4) 3237 3238 /* 3239 * code and data structures from linux kernel: 3240 * include/asm-sh/sigcontext.h 3241 * arch/sh/kernel/signal.c 3242 */ 3243 3244 struct target_sigcontext { 3245 target_ulong oldmask; 3246 3247 /* CPU registers */ 3248 target_ulong sc_gregs[16]; 3249 target_ulong sc_pc; 3250 target_ulong sc_pr; 3251 target_ulong sc_sr; 3252 target_ulong sc_gbr; 3253 target_ulong sc_mach; 3254 target_ulong sc_macl; 3255 3256 /* FPU registers */ 3257 target_ulong sc_fpregs[16]; 3258 target_ulong sc_xfpregs[16]; 3259 unsigned int sc_fpscr; 3260 unsigned int sc_fpul; 3261 unsigned int sc_ownedfp; 3262 }; 3263 3264 struct target_sigframe 3265 { 3266 struct target_sigcontext sc; 3267 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3268 uint16_t retcode[3]; 3269 }; 3270 3271 3272 struct target_ucontext { 3273 target_ulong tuc_flags; 3274 struct target_ucontext *tuc_link; 3275 target_stack_t tuc_stack; 3276 struct target_sigcontext tuc_mcontext; 3277 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3278 }; 3279 3280 struct target_rt_sigframe 3281 { 3282 struct target_siginfo info; 3283 struct target_ucontext uc; 3284 uint16_t retcode[3]; 3285 }; 3286 3287 3288 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3289 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3290 3291 static abi_ulong get_sigframe(struct target_sigaction *ka, 3292 unsigned long sp, size_t frame_size) 3293 { 3294 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3295 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3296 } 3297 3298 return (sp - frame_size) & -8ul; 3299 } 3300 3301 static void setup_sigcontext(struct 
target_sigcontext *sc, 3302 CPUSH4State *regs, unsigned long mask) 3303 { 3304 int i; 3305 3306 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3307 COPY(gregs[0]); COPY(gregs[1]); 3308 COPY(gregs[2]); COPY(gregs[3]); 3309 COPY(gregs[4]); COPY(gregs[5]); 3310 COPY(gregs[6]); COPY(gregs[7]); 3311 COPY(gregs[8]); COPY(gregs[9]); 3312 COPY(gregs[10]); COPY(gregs[11]); 3313 COPY(gregs[12]); COPY(gregs[13]); 3314 COPY(gregs[14]); COPY(gregs[15]); 3315 COPY(gbr); COPY(mach); 3316 COPY(macl); COPY(pr); 3317 COPY(sr); COPY(pc); 3318 #undef COPY 3319 3320 for (i=0; i<16; i++) { 3321 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3322 } 3323 __put_user(regs->fpscr, &sc->sc_fpscr); 3324 __put_user(regs->fpul, &sc->sc_fpul); 3325 3326 /* non-iBCS2 extensions.. */ 3327 __put_user(mask, &sc->oldmask); 3328 } 3329 3330 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3331 { 3332 int i; 3333 3334 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3335 COPY(gregs[0]); COPY(gregs[1]); 3336 COPY(gregs[2]); COPY(gregs[3]); 3337 COPY(gregs[4]); COPY(gregs[5]); 3338 COPY(gregs[6]); COPY(gregs[7]); 3339 COPY(gregs[8]); COPY(gregs[9]); 3340 COPY(gregs[10]); COPY(gregs[11]); 3341 COPY(gregs[12]); COPY(gregs[13]); 3342 COPY(gregs[14]); COPY(gregs[15]); 3343 COPY(gbr); COPY(mach); 3344 COPY(macl); COPY(pr); 3345 COPY(sr); COPY(pc); 3346 #undef COPY 3347 3348 for (i=0; i<16; i++) { 3349 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3350 } 3351 __get_user(regs->fpscr, &sc->sc_fpscr); 3352 __get_user(regs->fpul, &sc->sc_fpul); 3353 3354 regs->tra = -1; /* disable syscall checks */ 3355 } 3356 3357 static void setup_frame(int sig, struct target_sigaction *ka, 3358 target_sigset_t *set, CPUSH4State *regs) 3359 { 3360 struct target_sigframe *frame; 3361 abi_ulong frame_addr; 3362 int i; 3363 3364 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3365 trace_user_setup_frame(regs, frame_addr); 3366 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3367 goto give_sigsegv; 3368 } 3369 3370 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3371 3372 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3373 __put_user(set->sig[i + 1], &frame->extramask[i]); 3374 } 3375 3376 /* Set up to return from userspace. If provided, use a stub 3377 already in userspace. 
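       Otherwise pr is pointed at the retcode trampoline generated below,
       which loads the sigreturn number into r3 and issues a trapa.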
*/ 3378 if (ka->sa_flags & TARGET_SA_RESTORER) { 3379 regs->pr = (unsigned long) ka->sa_restorer; 3380 } else { 3381 /* Generate return code (system call to sigreturn) */ 3382 abi_ulong retcode_addr = frame_addr + 3383 offsetof(struct target_sigframe, retcode); 3384 __put_user(MOVW(2), &frame->retcode[0]); 3385 __put_user(TRAP_NOARG, &frame->retcode[1]); 3386 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3387 regs->pr = (unsigned long) retcode_addr; 3388 } 3389 3390 /* Set up registers for signal handler */ 3391 regs->gregs[15] = frame_addr; 3392 regs->gregs[4] = sig; /* Arg for signal handler */ 3393 regs->gregs[5] = 0; 3394 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3395 regs->pc = (unsigned long) ka->_sa_handler; 3396 3397 unlock_user_struct(frame, frame_addr, 1); 3398 return; 3399 3400 give_sigsegv: 3401 unlock_user_struct(frame, frame_addr, 1); 3402 force_sigsegv(sig); 3403 } 3404 3405 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3406 target_siginfo_t *info, 3407 target_sigset_t *set, CPUSH4State *regs) 3408 { 3409 struct target_rt_sigframe *frame; 3410 abi_ulong frame_addr; 3411 int i; 3412 3413 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3414 trace_user_setup_rt_frame(regs, frame_addr); 3415 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3416 goto give_sigsegv; 3417 } 3418 3419 tswap_siginfo(&frame->info, info); 3420 3421 /* Create the ucontext. */ 3422 __put_user(0, &frame->uc.tuc_flags); 3423 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3424 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3425 &frame->uc.tuc_stack.ss_sp); 3426 __put_user(sas_ss_flags(regs->gregs[15]), 3427 &frame->uc.tuc_stack.ss_flags); 3428 __put_user(target_sigaltstack_used.ss_size, 3429 &frame->uc.tuc_stack.ss_size); 3430 setup_sigcontext(&frame->uc.tuc_mcontext, 3431 regs, set->sig[0]); 3432 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3433 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3434 } 3435 3436 /* Set up to return from userspace. If provided, use a stub 3437 already in userspace. 
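       The trampoline is the same as in setup_frame above, except that it
       loads TARGET_NR_rt_sigreturn into r3.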
*/ 3438 if (ka->sa_flags & TARGET_SA_RESTORER) { 3439 regs->pr = (unsigned long) ka->sa_restorer; 3440 } else { 3441 /* Generate return code (system call to sigreturn) */ 3442 abi_ulong retcode_addr = frame_addr + 3443 offsetof(struct target_rt_sigframe, retcode); 3444 __put_user(MOVW(2), &frame->retcode[0]); 3445 __put_user(TRAP_NOARG, &frame->retcode[1]); 3446 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3447 regs->pr = (unsigned long) retcode_addr; 3448 } 3449 3450 /* Set up registers for signal handler */ 3451 regs->gregs[15] = frame_addr; 3452 regs->gregs[4] = sig; /* Arg for signal handler */ 3453 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3454 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3455 regs->pc = (unsigned long) ka->_sa_handler; 3456 3457 unlock_user_struct(frame, frame_addr, 1); 3458 return; 3459 3460 give_sigsegv: 3461 unlock_user_struct(frame, frame_addr, 1); 3462 force_sigsegv(sig); 3463 } 3464 3465 long do_sigreturn(CPUSH4State *regs) 3466 { 3467 struct target_sigframe *frame; 3468 abi_ulong frame_addr; 3469 sigset_t blocked; 3470 target_sigset_t target_set; 3471 int i; 3472 int err = 0; 3473 3474 frame_addr = regs->gregs[15]; 3475 trace_user_do_sigreturn(regs, frame_addr); 3476 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3477 goto badframe; 3478 } 3479 3480 __get_user(target_set.sig[0], &frame->sc.oldmask); 3481 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3482 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3483 } 3484 3485 if (err) 3486 goto badframe; 3487 3488 target_to_host_sigset_internal(&blocked, &target_set); 3489 set_sigmask(&blocked); 3490 3491 restore_sigcontext(regs, &frame->sc); 3492 3493 unlock_user_struct(frame, frame_addr, 0); 3494 return -TARGET_QEMU_ESIGRETURN; 3495 3496 badframe: 3497 unlock_user_struct(frame, frame_addr, 0); 3498 force_sig(TARGET_SIGSEGV); 3499 return -TARGET_QEMU_ESIGRETURN; 3500 } 3501 3502 long do_rt_sigreturn(CPUSH4State *regs) 3503 { 3504 struct target_rt_sigframe *frame; 3505 abi_ulong frame_addr; 3506 sigset_t blocked; 3507 3508 frame_addr = regs->gregs[15]; 3509 trace_user_do_rt_sigreturn(regs, frame_addr); 3510 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3511 goto badframe; 3512 } 3513 3514 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3515 set_sigmask(&blocked); 3516 3517 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3518 3519 if (do_sigaltstack(frame_addr + 3520 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3521 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3522 goto badframe; 3523 } 3524 3525 unlock_user_struct(frame, frame_addr, 0); 3526 return -TARGET_QEMU_ESIGRETURN; 3527 3528 badframe: 3529 unlock_user_struct(frame, frame_addr, 0); 3530 force_sig(TARGET_SIGSEGV); 3531 return -TARGET_QEMU_ESIGRETURN; 3532 } 3533 #elif defined(TARGET_MICROBLAZE) 3534 3535 struct target_sigcontext { 3536 struct target_pt_regs regs; /* needs to be first */ 3537 uint32_t oldmask; 3538 }; 3539 3540 struct target_stack_t { 3541 abi_ulong ss_sp; 3542 int ss_flags; 3543 unsigned int ss_size; 3544 }; 3545 3546 struct target_ucontext { 3547 abi_ulong tuc_flags; 3548 abi_ulong tuc_link; 3549 struct target_stack_t tuc_stack; 3550 struct target_sigcontext tuc_mcontext; 3551 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3552 }; 3553 3554 /* Signal frames. 
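 * target_signal_frame is the non-RT layout: the ucontext, the extra
 * sigmask words, and a two-instruction return trampoline. An RT layout
 * is declared below, but setup_rt_frame is not implemented for this
 * target.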
*/ 3555 struct target_signal_frame { 3556 struct target_ucontext uc; 3557 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3558 uint32_t tramp[2]; 3559 }; 3560 3561 struct rt_signal_frame { 3562 siginfo_t info; 3563 struct ucontext uc; 3564 uint32_t tramp[2]; 3565 }; 3566 3567 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3568 { 3569 __put_user(env->regs[0], &sc->regs.r0); 3570 __put_user(env->regs[1], &sc->regs.r1); 3571 __put_user(env->regs[2], &sc->regs.r2); 3572 __put_user(env->regs[3], &sc->regs.r3); 3573 __put_user(env->regs[4], &sc->regs.r4); 3574 __put_user(env->regs[5], &sc->regs.r5); 3575 __put_user(env->regs[6], &sc->regs.r6); 3576 __put_user(env->regs[7], &sc->regs.r7); 3577 __put_user(env->regs[8], &sc->regs.r8); 3578 __put_user(env->regs[9], &sc->regs.r9); 3579 __put_user(env->regs[10], &sc->regs.r10); 3580 __put_user(env->regs[11], &sc->regs.r11); 3581 __put_user(env->regs[12], &sc->regs.r12); 3582 __put_user(env->regs[13], &sc->regs.r13); 3583 __put_user(env->regs[14], &sc->regs.r14); 3584 __put_user(env->regs[15], &sc->regs.r15); 3585 __put_user(env->regs[16], &sc->regs.r16); 3586 __put_user(env->regs[17], &sc->regs.r17); 3587 __put_user(env->regs[18], &sc->regs.r18); 3588 __put_user(env->regs[19], &sc->regs.r19); 3589 __put_user(env->regs[20], &sc->regs.r20); 3590 __put_user(env->regs[21], &sc->regs.r21); 3591 __put_user(env->regs[22], &sc->regs.r22); 3592 __put_user(env->regs[23], &sc->regs.r23); 3593 __put_user(env->regs[24], &sc->regs.r24); 3594 __put_user(env->regs[25], &sc->regs.r25); 3595 __put_user(env->regs[26], &sc->regs.r26); 3596 __put_user(env->regs[27], &sc->regs.r27); 3597 __put_user(env->regs[28], &sc->regs.r28); 3598 __put_user(env->regs[29], &sc->regs.r29); 3599 __put_user(env->regs[30], &sc->regs.r30); 3600 __put_user(env->regs[31], &sc->regs.r31); 3601 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3602 } 3603 3604 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3605 { 3606 __get_user(env->regs[0], &sc->regs.r0); 3607 __get_user(env->regs[1], &sc->regs.r1); 3608 __get_user(env->regs[2], &sc->regs.r2); 3609 __get_user(env->regs[3], &sc->regs.r3); 3610 __get_user(env->regs[4], &sc->regs.r4); 3611 __get_user(env->regs[5], &sc->regs.r5); 3612 __get_user(env->regs[6], &sc->regs.r6); 3613 __get_user(env->regs[7], &sc->regs.r7); 3614 __get_user(env->regs[8], &sc->regs.r8); 3615 __get_user(env->regs[9], &sc->regs.r9); 3616 __get_user(env->regs[10], &sc->regs.r10); 3617 __get_user(env->regs[11], &sc->regs.r11); 3618 __get_user(env->regs[12], &sc->regs.r12); 3619 __get_user(env->regs[13], &sc->regs.r13); 3620 __get_user(env->regs[14], &sc->regs.r14); 3621 __get_user(env->regs[15], &sc->regs.r15); 3622 __get_user(env->regs[16], &sc->regs.r16); 3623 __get_user(env->regs[17], &sc->regs.r17); 3624 __get_user(env->regs[18], &sc->regs.r18); 3625 __get_user(env->regs[19], &sc->regs.r19); 3626 __get_user(env->regs[20], &sc->regs.r20); 3627 __get_user(env->regs[21], &sc->regs.r21); 3628 __get_user(env->regs[22], &sc->regs.r22); 3629 __get_user(env->regs[23], &sc->regs.r23); 3630 __get_user(env->regs[24], &sc->regs.r24); 3631 __get_user(env->regs[25], &sc->regs.r25); 3632 __get_user(env->regs[26], &sc->regs.r26); 3633 __get_user(env->regs[27], &sc->regs.r27); 3634 __get_user(env->regs[28], &sc->regs.r28); 3635 __get_user(env->regs[29], &sc->regs.r29); 3636 __get_user(env->regs[30], &sc->regs.r30); 3637 __get_user(env->regs[31], &sc->regs.r31); 3638 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3639 } 3640 3641 static 
abi_ulong get_sigframe(struct target_sigaction *ka, 3642 CPUMBState *env, int frame_size) 3643 { 3644 abi_ulong sp = env->regs[1]; 3645 3646 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3647 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3648 } 3649 3650 return ((sp - frame_size) & -8UL); 3651 } 3652 3653 static void setup_frame(int sig, struct target_sigaction *ka, 3654 target_sigset_t *set, CPUMBState *env) 3655 { 3656 struct target_signal_frame *frame; 3657 abi_ulong frame_addr; 3658 int i; 3659 3660 frame_addr = get_sigframe(ka, env, sizeof *frame); 3661 trace_user_setup_frame(env, frame_addr); 3662 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3663 goto badframe; 3664 3665 /* Save the mask. */ 3666 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3667 3668 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3669 __put_user(set->sig[i], &frame->extramask[i - 1]); 3670 } 3671 3672 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3673 3674 /* Set up to return from userspace. If provided, use a stub 3675 already in userspace. */ 3676 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3677 if (ka->sa_flags & TARGET_SA_RESTORER) { 3678 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3679 } else { 3680 uint32_t t; 3681 /* Note, these encodings are _big endian_! */ 3682 /* addi r12, r0, __NR_sigreturn */ 3683 t = 0x31800000UL | TARGET_NR_sigreturn; 3684 __put_user(t, frame->tramp + 0); 3685 /* brki r14, 0x8 */ 3686 t = 0xb9cc0008UL; 3687 __put_user(t, frame->tramp + 1); 3688 3689 /* Return from sighandler will jump to the tramp. 3690 Negative 8 offset because return is rtsd r15, 8 */ 3691 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3692 - 8; 3693 } 3694 3695 /* Set up registers for signal handler */ 3696 env->regs[1] = frame_addr; 3697 /* Signal handler args: */ 3698 env->regs[5] = sig; /* Arg 0: signum */ 3699 env->regs[6] = 0; 3700 /* arg 1: sigcontext */ 3701 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3702 3703 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3704 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3705 3706 unlock_user_struct(frame, frame_addr, 1); 3707 return; 3708 badframe: 3709 force_sigsegv(sig); 3710 } 3711 3712 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3713 target_siginfo_t *info, 3714 target_sigset_t *set, CPUMBState *env) 3715 { 3716 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3717 } 3718 3719 long do_sigreturn(CPUMBState *env) 3720 { 3721 struct target_signal_frame *frame; 3722 abi_ulong frame_addr; 3723 target_sigset_t target_set; 3724 sigset_t set; 3725 int i; 3726 3727 frame_addr = env->regs[R_SP]; 3728 trace_user_do_sigreturn(env, frame_addr); 3729 /* Make sure the guest isn't playing games. */ 3730 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3731 goto badframe; 3732 3733 /* Restore blocked signals */ 3734 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3735 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3736 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3737 } 3738 target_to_host_sigset_internal(&set, &target_set); 3739 set_sigmask(&set); 3740 3741 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3742 /* We got here through a sigreturn syscall, our path back is via an 3743 rtb insn so setup r14 for that. 
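       r14 is loaded with the PC that restore_sigcontext just pulled out of
       the frame, so the return lands back on the interrupted instruction.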
*/ 3744 env->regs[14] = env->sregs[SR_PC]; 3745 3746 unlock_user_struct(frame, frame_addr, 0); 3747 return -TARGET_QEMU_ESIGRETURN; 3748 badframe: 3749 force_sig(TARGET_SIGSEGV); 3750 return -TARGET_QEMU_ESIGRETURN; 3751 } 3752 3753 long do_rt_sigreturn(CPUMBState *env) 3754 { 3755 trace_user_do_rt_sigreturn(env, 0); 3756 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3757 return -TARGET_ENOSYS; 3758 } 3759 3760 #elif defined(TARGET_CRIS) 3761 3762 struct target_sigcontext { 3763 struct target_pt_regs regs; /* needs to be first */ 3764 uint32_t oldmask; 3765 uint32_t usp; /* usp before stacking this gunk on it */ 3766 }; 3767 3768 /* Signal frames. */ 3769 struct target_signal_frame { 3770 struct target_sigcontext sc; 3771 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3772 uint16_t retcode[4]; /* Trampoline code. */ 3773 }; 3774 3775 struct rt_signal_frame { 3776 siginfo_t *pinfo; 3777 void *puc; 3778 siginfo_t info; 3779 struct ucontext uc; 3780 uint16_t retcode[4]; /* Trampoline code. */ 3781 }; 3782 3783 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3784 { 3785 __put_user(env->regs[0], &sc->regs.r0); 3786 __put_user(env->regs[1], &sc->regs.r1); 3787 __put_user(env->regs[2], &sc->regs.r2); 3788 __put_user(env->regs[3], &sc->regs.r3); 3789 __put_user(env->regs[4], &sc->regs.r4); 3790 __put_user(env->regs[5], &sc->regs.r5); 3791 __put_user(env->regs[6], &sc->regs.r6); 3792 __put_user(env->regs[7], &sc->regs.r7); 3793 __put_user(env->regs[8], &sc->regs.r8); 3794 __put_user(env->regs[9], &sc->regs.r9); 3795 __put_user(env->regs[10], &sc->regs.r10); 3796 __put_user(env->regs[11], &sc->regs.r11); 3797 __put_user(env->regs[12], &sc->regs.r12); 3798 __put_user(env->regs[13], &sc->regs.r13); 3799 __put_user(env->regs[14], &sc->usp); 3800 __put_user(env->regs[15], &sc->regs.acr); 3801 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3802 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3803 __put_user(env->pc, &sc->regs.erp); 3804 } 3805 3806 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3807 { 3808 __get_user(env->regs[0], &sc->regs.r0); 3809 __get_user(env->regs[1], &sc->regs.r1); 3810 __get_user(env->regs[2], &sc->regs.r2); 3811 __get_user(env->regs[3], &sc->regs.r3); 3812 __get_user(env->regs[4], &sc->regs.r4); 3813 __get_user(env->regs[5], &sc->regs.r5); 3814 __get_user(env->regs[6], &sc->regs.r6); 3815 __get_user(env->regs[7], &sc->regs.r7); 3816 __get_user(env->regs[8], &sc->regs.r8); 3817 __get_user(env->regs[9], &sc->regs.r9); 3818 __get_user(env->regs[10], &sc->regs.r10); 3819 __get_user(env->regs[11], &sc->regs.r11); 3820 __get_user(env->regs[12], &sc->regs.r12); 3821 __get_user(env->regs[13], &sc->regs.r13); 3822 __get_user(env->regs[14], &sc->usp); 3823 __get_user(env->regs[15], &sc->regs.acr); 3824 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3825 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3826 __get_user(env->pc, &sc->regs.erp); 3827 } 3828 3829 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3830 { 3831 abi_ulong sp; 3832 /* Align the stack downwards to 4. 
*/ 3833 sp = (env->regs[R_SP] & ~3); 3834 return sp - framesize; 3835 } 3836 3837 static void setup_frame(int sig, struct target_sigaction *ka, 3838 target_sigset_t *set, CPUCRISState *env) 3839 { 3840 struct target_signal_frame *frame; 3841 abi_ulong frame_addr; 3842 int i; 3843 3844 frame_addr = get_sigframe(env, sizeof *frame); 3845 trace_user_setup_frame(env, frame_addr); 3846 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3847 goto badframe; 3848 3849 /* 3850 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3851 * use this trampoline anymore but it sets it up for GDB. 3852 * In QEMU, using the trampoline simplifies things a bit so we use it. 3853 * 3854 * This is movu.w __NR_sigreturn, r9; break 13; 3855 */ 3856 __put_user(0x9c5f, frame->retcode+0); 3857 __put_user(TARGET_NR_sigreturn, 3858 frame->retcode + 1); 3859 __put_user(0xe93d, frame->retcode + 2); 3860 3861 /* Save the mask. */ 3862 __put_user(set->sig[0], &frame->sc.oldmask); 3863 3864 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3865 __put_user(set->sig[i], &frame->extramask[i - 1]); 3866 } 3867 3868 setup_sigcontext(&frame->sc, env); 3869 3870 /* Move the stack and setup the arguments for the handler. */ 3871 env->regs[R_SP] = frame_addr; 3872 env->regs[10] = sig; 3873 env->pc = (unsigned long) ka->_sa_handler; 3874 /* Link SRP so the guest returns through the trampoline. */ 3875 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3876 3877 unlock_user_struct(frame, frame_addr, 1); 3878 return; 3879 badframe: 3880 force_sigsegv(sig); 3881 } 3882 3883 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3884 target_siginfo_t *info, 3885 target_sigset_t *set, CPUCRISState *env) 3886 { 3887 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3888 } 3889 3890 long do_sigreturn(CPUCRISState *env) 3891 { 3892 struct target_signal_frame *frame; 3893 abi_ulong frame_addr; 3894 target_sigset_t target_set; 3895 sigset_t set; 3896 int i; 3897 3898 frame_addr = env->regs[R_SP]; 3899 trace_user_do_sigreturn(env, frame_addr); 3900 /* Make sure the guest isn't playing games. 
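   lock_user_struct() fails (returns NULL) when the frame address does not map to
   accessible guest memory, in which case we raise SIGSEGV below.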
*/ 3901 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3902 goto badframe; 3903 } 3904 3905 /* Restore blocked signals */ 3906 __get_user(target_set.sig[0], &frame->sc.oldmask); 3907 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3908 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3909 } 3910 target_to_host_sigset_internal(&set, &target_set); 3911 set_sigmask(&set); 3912 3913 restore_sigcontext(&frame->sc, env); 3914 unlock_user_struct(frame, frame_addr, 0); 3915 return -TARGET_QEMU_ESIGRETURN; 3916 badframe: 3917 force_sig(TARGET_SIGSEGV); 3918 return -TARGET_QEMU_ESIGRETURN; 3919 } 3920 3921 long do_rt_sigreturn(CPUCRISState *env) 3922 { 3923 trace_user_do_rt_sigreturn(env, 0); 3924 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3925 return -TARGET_ENOSYS; 3926 } 3927 3928 #elif defined(TARGET_OPENRISC) 3929 3930 struct target_sigcontext { 3931 struct target_pt_regs regs; 3932 abi_ulong oldmask; 3933 abi_ulong usp; 3934 }; 3935 3936 struct target_ucontext { 3937 abi_ulong tuc_flags; 3938 abi_ulong tuc_link; 3939 target_stack_t tuc_stack; 3940 struct target_sigcontext tuc_mcontext; 3941 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3942 }; 3943 3944 struct target_rt_sigframe { 3945 abi_ulong pinfo; 3946 uint64_t puc; 3947 struct target_siginfo info; 3948 struct target_sigcontext sc; 3949 struct target_ucontext uc; 3950 unsigned char retcode[16]; /* trampoline code */ 3951 }; 3952 3953 /* This is the asm-generic/ucontext.h version */ 3954 #if 0 3955 static int restore_sigcontext(CPUOpenRISCState *regs, 3956 struct target_sigcontext *sc) 3957 { 3958 unsigned int err = 0; 3959 unsigned long old_usp; 3960 3961 /* Alwys make any pending restarted system call return -EINTR */ 3962 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3963 3964 /* restore the regs from &sc->regs (same as sc, since regs is first) 3965 * (sc is already checked for VERIFY_READ since the sigframe was 3966 * checked in sys_sigreturn previously) 3967 */ 3968 3969 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3970 goto badframe; 3971 } 3972 3973 /* make sure the U-flag is set so user-mode cannot fool us */ 3974 3975 regs->sr &= ~SR_SM; 3976 3977 /* restore the old USP as it was before we stacked the sc etc. 3978 * (we cannot just pop the sigcontext since we aligned the sp and 3979 * stuff after pushing it) 3980 */ 3981 3982 __get_user(old_usp, &sc->usp); 3983 phx_signal("old_usp 0x%lx", old_usp); 3984 3985 __PHX__ REALLY /* ??? */ 3986 wrusp(old_usp); 3987 regs->gpr[1] = old_usp; 3988 3989 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3990 * after this completes, but we don't use that mechanism. maybe we can 3991 * use it now ? 3992 */ 3993 3994 return err; 3995 3996 badframe: 3997 return 1; 3998 } 3999 #endif 4000 4001 /* Set up a signal frame. */ 4002 4003 static void setup_sigcontext(struct target_sigcontext *sc, 4004 CPUOpenRISCState *regs, 4005 unsigned long mask) 4006 { 4007 unsigned long usp = regs->gpr[1]; 4008 4009 /* copy the regs. they are first in sc so we can use sc directly */ 4010 4011 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 4012 4013 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 4014 the signal handler. The frametype will be restored to its previous 4015 value in restore_sigcontext. 
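   (The CRIS_FRAME_NORMAL wording appears to be carried over from the CRIS port;
   OpenRISC has no such frametype, so the assignment below remains commented out.)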
*/ 4016 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 4017 4018 /* then some other stuff */ 4019 __put_user(mask, &sc->oldmask); 4020 __put_user(usp, &sc->usp); 4021 } 4022 4023 static inline unsigned long align_sigframe(unsigned long sp) 4024 { 4025 return sp & ~3UL; 4026 } 4027 4028 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 4029 CPUOpenRISCState *regs, 4030 size_t frame_size) 4031 { 4032 unsigned long sp = regs->gpr[1]; 4033 int onsigstack = on_sig_stack(sp); 4034 4035 /* redzone */ 4036 /* This is the X/Open sanctioned signal stack switching. */ 4037 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 4038 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4039 } 4040 4041 sp = align_sigframe(sp - frame_size); 4042 4043 /* 4044 * If we are on the alternate signal stack and would overflow it, don't. 4045 * Return an always-bogus address instead so we will die with SIGSEGV. 4046 */ 4047 4048 if (onsigstack && !likely(on_sig_stack(sp))) { 4049 return -1L; 4050 } 4051 4052 return sp; 4053 } 4054 4055 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4056 target_siginfo_t *info, 4057 target_sigset_t *set, CPUOpenRISCState *env) 4058 { 4059 int err = 0; 4060 abi_ulong frame_addr; 4061 unsigned long return_ip; 4062 struct target_rt_sigframe *frame; 4063 abi_ulong info_addr, uc_addr; 4064 4065 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4066 trace_user_setup_rt_frame(env, frame_addr); 4067 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4068 goto give_sigsegv; 4069 } 4070 4071 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4072 __put_user(info_addr, &frame->pinfo); 4073 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4074 __put_user(uc_addr, &frame->puc); 4075 4076 if (ka->sa_flags & SA_SIGINFO) { 4077 tswap_siginfo(&frame->info, info); 4078 } 4079 4080 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 4081 __put_user(0, &frame->uc.tuc_flags); 4082 __put_user(0, &frame->uc.tuc_link); 4083 __put_user(target_sigaltstack_used.ss_sp, 4084 &frame->uc.tuc_stack.ss_sp); 4085 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 4086 __put_user(target_sigaltstack_used.ss_size, 4087 &frame->uc.tuc_stack.ss_size); 4088 setup_sigcontext(&frame->sc, env, set->sig[0]); 4089 4090 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4091 4092 /* trampoline - the desired return ip is the retcode itself */ 4093 return_ip = (unsigned long)&frame->retcode; 4094 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4095 __put_user(0xa960, (short *)(frame->retcode + 0)); 4096 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4097 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4098 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4099 4100 if (err) { 4101 goto give_sigsegv; 4102 } 4103 4104 /* TODO what is the current->exec_domain stuff and invmap ? 
*/ 4105 4106 /* Set up registers for signal handler */ 4107 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4108 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4109 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4110 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4111 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4112 4113 /* actually move the usp to reflect the stacked frame */ 4114 env->gpr[1] = (unsigned long)frame; 4115 4116 return; 4117 4118 give_sigsegv: 4119 unlock_user_struct(frame, frame_addr, 1); 4120 force_sigsegv(sig); 4121 } 4122 4123 long do_sigreturn(CPUOpenRISCState *env) 4124 { 4125 trace_user_do_sigreturn(env, 0); 4126 fprintf(stderr, "do_sigreturn: not implemented\n"); 4127 return -TARGET_ENOSYS; 4128 } 4129 4130 long do_rt_sigreturn(CPUOpenRISCState *env) 4131 { 4132 trace_user_do_rt_sigreturn(env, 0); 4133 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4134 return -TARGET_ENOSYS; 4135 } 4136 /* TARGET_OPENRISC */ 4137 4138 #elif defined(TARGET_S390X) 4139 4140 #define __NUM_GPRS 16 4141 #define __NUM_FPRS 16 4142 #define __NUM_ACRS 16 4143 4144 #define S390_SYSCALL_SIZE 2 4145 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4146 4147 #define _SIGCONTEXT_NSIG 64 4148 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4149 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4150 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4151 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4152 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4153 4154 typedef struct { 4155 target_psw_t psw; 4156 target_ulong gprs[__NUM_GPRS]; 4157 unsigned int acrs[__NUM_ACRS]; 4158 } target_s390_regs_common; 4159 4160 typedef struct { 4161 unsigned int fpc; 4162 double fprs[__NUM_FPRS]; 4163 } target_s390_fp_regs; 4164 4165 typedef struct { 4166 target_s390_regs_common regs; 4167 target_s390_fp_regs fpregs; 4168 } target_sigregs; 4169 4170 struct target_sigcontext { 4171 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4172 target_sigregs *sregs; 4173 }; 4174 4175 typedef struct { 4176 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4177 struct target_sigcontext sc; 4178 target_sigregs sregs; 4179 int signo; 4180 uint8_t retcode[S390_SYSCALL_SIZE]; 4181 } sigframe; 4182 4183 struct target_ucontext { 4184 target_ulong tuc_flags; 4185 struct target_ucontext *tuc_link; 4186 target_stack_t tuc_stack; 4187 target_sigregs tuc_mcontext; 4188 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4189 }; 4190 4191 typedef struct { 4192 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4193 uint8_t retcode[S390_SYSCALL_SIZE]; 4194 struct target_siginfo info; 4195 struct target_ucontext uc; 4196 } rt_sigframe; 4197 4198 static inline abi_ulong 4199 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4200 { 4201 abi_ulong sp; 4202 4203 /* Default to using normal stack */ 4204 sp = env->regs[15]; 4205 4206 /* This is the X/Open sanctioned signal stack switching. */ 4207 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4208 if (!sas_ss_flags(sp)) { 4209 sp = target_sigaltstack_used.ss_sp + 4210 target_sigaltstack_used.ss_size; 4211 } 4212 } 4213 4214 /* This is the legacy signal stack switching. 
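   Under QEMU user-mode emulation the guest is always in user mode, so the
   !user_mode(regs) test below is stubbed out to 0 and this branch is never taken.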
*/ 4215 else if (/* FIXME !user_mode(regs) */ 0 && 4216 !(ka->sa_flags & TARGET_SA_RESTORER) && 4217 ka->sa_restorer) { 4218 sp = (abi_ulong) ka->sa_restorer; 4219 } 4220 4221 return (sp - frame_size) & -8ul; 4222 } 4223 4224 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4225 { 4226 int i; 4227 //save_access_regs(current->thread.acrs); FIXME 4228 4229 /* Copy a 'clean' PSW mask to the user to avoid leaking 4230 information about whether PER is currently on. */ 4231 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4232 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4233 for (i = 0; i < 16; i++) { 4234 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4235 } 4236 for (i = 0; i < 16; i++) { 4237 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4238 } 4239 /* 4240 * We have to store the fp registers to current->thread.fp_regs 4241 * to merge them with the emulated registers. 4242 */ 4243 //save_fp_regs(¤t->thread.fp_regs); FIXME 4244 for (i = 0; i < 16; i++) { 4245 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4246 } 4247 } 4248 4249 static void setup_frame(int sig, struct target_sigaction *ka, 4250 target_sigset_t *set, CPUS390XState *env) 4251 { 4252 sigframe *frame; 4253 abi_ulong frame_addr; 4254 4255 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4256 trace_user_setup_frame(env, frame_addr); 4257 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4258 goto give_sigsegv; 4259 } 4260 4261 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4262 4263 save_sigregs(env, &frame->sregs); 4264 4265 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4266 (abi_ulong *)&frame->sc.sregs); 4267 4268 /* Set up to return from userspace. If provided, use a stub 4269 already in userspace. */ 4270 if (ka->sa_flags & TARGET_SA_RESTORER) { 4271 env->regs[14] = (unsigned long) 4272 ka->sa_restorer | PSW_ADDR_AMODE; 4273 } else { 4274 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4275 | PSW_ADDR_AMODE; 4276 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4277 (uint16_t *)(frame->retcode)); 4278 } 4279 4280 /* Set up backchain. */ 4281 __put_user(env->regs[15], (abi_ulong *) frame); 4282 4283 /* Set up registers for signal handler */ 4284 env->regs[15] = frame_addr; 4285 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4286 4287 env->regs[2] = sig; //map_signal(sig); 4288 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4289 4290 /* We forgot to include these in the sigcontext. 4291 To avoid breaking binary compatibility, they are passed as args. */ 4292 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4293 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4294 4295 /* Place signal number on stack to allow backtrace from handler. */ 4296 __put_user(env->regs[2], &frame->signo); 4297 unlock_user_struct(frame, frame_addr, 1); 4298 return; 4299 4300 give_sigsegv: 4301 force_sigsegv(sig); 4302 } 4303 4304 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4305 target_siginfo_t *info, 4306 target_sigset_t *set, CPUS390XState *env) 4307 { 4308 int i; 4309 rt_sigframe *frame; 4310 abi_ulong frame_addr; 4311 4312 frame_addr = get_sigframe(ka, env, sizeof *frame); 4313 trace_user_setup_rt_frame(env, frame_addr); 4314 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4315 goto give_sigsegv; 4316 } 4317 4318 tswap_siginfo(&frame->info, info); 4319 4320 /* Create the ucontext. 
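   The sigaltstack description and the full signal mask are recorded in it so that
   rt_sigreturn can restore them.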
*/ 4321 __put_user(0, &frame->uc.tuc_flags); 4322 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4323 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4324 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4325 &frame->uc.tuc_stack.ss_flags); 4326 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4327 save_sigregs(env, &frame->uc.tuc_mcontext); 4328 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4329 __put_user((abi_ulong)set->sig[i], 4330 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4331 } 4332 4333 /* Set up to return from userspace. If provided, use a stub 4334 already in userspace. */ 4335 if (ka->sa_flags & TARGET_SA_RESTORER) { 4336 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4337 } else { 4338 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4339 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4340 (uint16_t *)(frame->retcode)); 4341 } 4342 4343 /* Set up backchain. */ 4344 __put_user(env->regs[15], (abi_ulong *) frame); 4345 4346 /* Set up registers for signal handler */ 4347 env->regs[15] = frame_addr; 4348 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4349 4350 env->regs[2] = sig; //map_signal(sig); 4351 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4352 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4353 return; 4354 4355 give_sigsegv: 4356 force_sigsegv(sig); 4357 } 4358 4359 static int 4360 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4361 { 4362 int err = 0; 4363 int i; 4364 4365 for (i = 0; i < 16; i++) { 4366 __get_user(env->regs[i], &sc->regs.gprs[i]); 4367 } 4368 4369 __get_user(env->psw.mask, &sc->regs.psw.mask); 4370 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4371 (unsigned long long)env->psw.addr); 4372 __get_user(env->psw.addr, &sc->regs.psw.addr); 4373 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4374 4375 for (i = 0; i < 16; i++) { 4376 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4377 } 4378 for (i = 0; i < 16; i++) { 4379 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4380 } 4381 4382 return err; 4383 } 4384 4385 long do_sigreturn(CPUS390XState *env) 4386 { 4387 sigframe *frame; 4388 abi_ulong frame_addr = env->regs[15]; 4389 target_sigset_t target_set; 4390 sigset_t set; 4391 4392 trace_user_do_sigreturn(env, frame_addr); 4393 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4394 goto badframe; 4395 } 4396 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4397 4398 target_to_host_sigset_internal(&set, &target_set); 4399 set_sigmask(&set); /* ~_BLOCKABLE? */ 4400 4401 if (restore_sigregs(env, &frame->sregs)) { 4402 goto badframe; 4403 } 4404 4405 unlock_user_struct(frame, frame_addr, 0); 4406 return -TARGET_QEMU_ESIGRETURN; 4407 4408 badframe: 4409 force_sig(TARGET_SIGSEGV); 4410 return -TARGET_QEMU_ESIGRETURN; 4411 } 4412 4413 long do_rt_sigreturn(CPUS390XState *env) 4414 { 4415 rt_sigframe *frame; 4416 abi_ulong frame_addr = env->regs[15]; 4417 sigset_t set; 4418 4419 trace_user_do_rt_sigreturn(env, frame_addr); 4420 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4421 goto badframe; 4422 } 4423 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4424 4425 set_sigmask(&set); /* ~_BLOCKABLE? 
*/ 4426 4427 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4428 goto badframe; 4429 } 4430 4431 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4432 get_sp_from_cpustate(env)) == -EFAULT) { 4433 goto badframe; 4434 } 4435 unlock_user_struct(frame, frame_addr, 0); 4436 return -TARGET_QEMU_ESIGRETURN; 4437 4438 badframe: 4439 unlock_user_struct(frame, frame_addr, 0); 4440 force_sig(TARGET_SIGSEGV); 4441 return -TARGET_QEMU_ESIGRETURN; 4442 } 4443 4444 #elif defined(TARGET_PPC) 4445 4446 /* Size of dummy stack frame allocated when calling signal handler. 4447 See arch/powerpc/include/asm/ptrace.h. */ 4448 #if defined(TARGET_PPC64) 4449 #define SIGNAL_FRAMESIZE 128 4450 #else 4451 #define SIGNAL_FRAMESIZE 64 4452 #endif 4453 4454 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4455 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4456 struct target_mcontext { 4457 target_ulong mc_gregs[48]; 4458 /* Includes fpscr. */ 4459 uint64_t mc_fregs[33]; 4460 target_ulong mc_pad[2]; 4461 /* We need to handle Altivec and SPE at the same time, which no 4462 kernel needs to do. Fortunately, the kernel defines this bit to 4463 be Altivec-register-large all the time, rather than trying to 4464 twiddle it based on the specific platform. */ 4465 union { 4466 /* SPE vector registers. One extra for SPEFSCR. */ 4467 uint32_t spe[33]; 4468 /* Altivec vector registers. The packing of VSCR and VRSAVE 4469 varies depending on whether we're PPC64 or not: PPC64 splits 4470 them apart; PPC32 stuffs them together. */ 4471 #if defined(TARGET_PPC64) 4472 #define QEMU_NVRREG 34 4473 #else 4474 #define QEMU_NVRREG 33 4475 #endif 4476 ppc_avr_t altivec[QEMU_NVRREG]; 4477 #undef QEMU_NVRREG 4478 } mc_vregs __attribute__((__aligned__(16))); 4479 }; 4480 4481 /* See arch/powerpc/include/asm/sigcontext.h. */ 4482 struct target_sigcontext { 4483 target_ulong _unused[4]; 4484 int32_t signal; 4485 #if defined(TARGET_PPC64) 4486 int32_t pad0; 4487 #endif 4488 target_ulong handler; 4489 target_ulong oldmask; 4490 target_ulong regs; /* struct pt_regs __user * */ 4491 #if defined(TARGET_PPC64) 4492 struct target_mcontext mcontext; 4493 #endif 4494 }; 4495 4496 /* Indices for target_mcontext.mc_gregs, below. 4497 See arch/powerpc/include/asm/ptrace.h for details. */ 4498 enum { 4499 TARGET_PT_R0 = 0, 4500 TARGET_PT_R1 = 1, 4501 TARGET_PT_R2 = 2, 4502 TARGET_PT_R3 = 3, 4503 TARGET_PT_R4 = 4, 4504 TARGET_PT_R5 = 5, 4505 TARGET_PT_R6 = 6, 4506 TARGET_PT_R7 = 7, 4507 TARGET_PT_R8 = 8, 4508 TARGET_PT_R9 = 9, 4509 TARGET_PT_R10 = 10, 4510 TARGET_PT_R11 = 11, 4511 TARGET_PT_R12 = 12, 4512 TARGET_PT_R13 = 13, 4513 TARGET_PT_R14 = 14, 4514 TARGET_PT_R15 = 15, 4515 TARGET_PT_R16 = 16, 4516 TARGET_PT_R17 = 17, 4517 TARGET_PT_R18 = 18, 4518 TARGET_PT_R19 = 19, 4519 TARGET_PT_R20 = 20, 4520 TARGET_PT_R21 = 21, 4521 TARGET_PT_R22 = 22, 4522 TARGET_PT_R23 = 23, 4523 TARGET_PT_R24 = 24, 4524 TARGET_PT_R25 = 25, 4525 TARGET_PT_R26 = 26, 4526 TARGET_PT_R27 = 27, 4527 TARGET_PT_R28 = 28, 4528 TARGET_PT_R29 = 29, 4529 TARGET_PT_R30 = 30, 4530 TARGET_PT_R31 = 31, 4531 TARGET_PT_NIP = 32, 4532 TARGET_PT_MSR = 33, 4533 TARGET_PT_ORIG_R3 = 34, 4534 TARGET_PT_CTR = 35, 4535 TARGET_PT_LNK = 36, 4536 TARGET_PT_XER = 37, 4537 TARGET_PT_CCR = 38, 4538 /* Yes, there are two registers with #39. One is 64-bit only. 
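   TARGET_PT_SOFTE is the 64-bit-only one; TARGET_PT_MQ is only used on 32-bit.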
*/ 4539 TARGET_PT_MQ = 39, 4540 TARGET_PT_SOFTE = 39, 4541 TARGET_PT_TRAP = 40, 4542 TARGET_PT_DAR = 41, 4543 TARGET_PT_DSISR = 42, 4544 TARGET_PT_RESULT = 43, 4545 TARGET_PT_REGS_COUNT = 44 4546 }; 4547 4548 4549 struct target_ucontext { 4550 target_ulong tuc_flags; 4551 target_ulong tuc_link; /* struct ucontext __user * */ 4552 struct target_sigaltstack tuc_stack; 4553 #if !defined(TARGET_PPC64) 4554 int32_t tuc_pad[7]; 4555 target_ulong tuc_regs; /* struct mcontext __user * 4556 points to uc_mcontext field */ 4557 #endif 4558 target_sigset_t tuc_sigmask; 4559 #if defined(TARGET_PPC64) 4560 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4561 struct target_sigcontext tuc_sigcontext; 4562 #else 4563 int32_t tuc_maskext[30]; 4564 int32_t tuc_pad2[3]; 4565 struct target_mcontext tuc_mcontext; 4566 #endif 4567 }; 4568 4569 /* See arch/powerpc/kernel/signal_32.c. */ 4570 struct target_sigframe { 4571 struct target_sigcontext sctx; 4572 struct target_mcontext mctx; 4573 int32_t abigap[56]; 4574 }; 4575 4576 #if defined(TARGET_PPC64) 4577 4578 #define TARGET_TRAMP_SIZE 6 4579 4580 struct target_rt_sigframe { 4581 /* sys_rt_sigreturn requires the ucontext be the first field */ 4582 struct target_ucontext uc; 4583 target_ulong _unused[2]; 4584 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4585 target_ulong pinfo; /* struct siginfo __user * */ 4586 target_ulong puc; /* void __user * */ 4587 struct target_siginfo info; 4588 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4589 char abigap[288]; 4590 } __attribute__((aligned(16))); 4591 4592 #else 4593 4594 struct target_rt_sigframe { 4595 struct target_siginfo info; 4596 struct target_ucontext uc; 4597 int32_t abigap[56]; 4598 }; 4599 4600 #endif 4601 4602 #if defined(TARGET_PPC64) 4603 4604 struct target_func_ptr { 4605 target_ulong entry; 4606 target_ulong toc; 4607 }; 4608 4609 #endif 4610 4611 /* We use the mc_pad field for the signal return trampoline. */ 4612 #define tramp mc_pad 4613 4614 /* See arch/powerpc/kernel/signal.c. */ 4615 static target_ulong get_sigframe(struct target_sigaction *ka, 4616 CPUPPCState *env, 4617 int frame_size) 4618 { 4619 target_ulong oldsp; 4620 4621 oldsp = env->gpr[1]; 4622 4623 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4624 (sas_ss_flags(oldsp) == 0)) { 4625 oldsp = (target_sigaltstack_used.ss_sp 4626 + target_sigaltstack_used.ss_size); 4627 } 4628 4629 return (oldsp - frame_size) & ~0xFUL; 4630 } 4631 4632 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4633 { 4634 target_ulong msr = env->msr; 4635 int i; 4636 target_ulong ccr = 0; 4637 4638 /* In general, the kernel attempts to be intelligent about what it 4639 needs to save for Altivec/FP/SPE registers. We don't care that 4640 much, so we just go ahead and save everything. */ 4641 4642 /* Save general registers. */ 4643 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4644 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4645 } 4646 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4647 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4648 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4649 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4650 4651 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4652 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4653 } 4654 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4655 4656 /* Save Altivec registers if necessary. 
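   The VRSAVE SPR is stashed in the last 32-bit word of mc_vregs.altivec[32] below.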
*/ 4657 if (env->insns_flags & PPC_ALTIVEC) { 4658 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4659 ppc_avr_t *avr = &env->avr[i]; 4660 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4661 4662 __put_user(avr->u64[0], &vreg->u64[0]); 4663 __put_user(avr->u64[1], &vreg->u64[1]); 4664 } 4665 /* Set MSR_VR in the saved MSR value to indicate that 4666 frame->mc_vregs contains valid data. */ 4667 msr |= MSR_VR; 4668 __put_user((uint32_t)env->spr[SPR_VRSAVE], 4669 &frame->mc_vregs.altivec[32].u32[3]); 4670 } 4671 4672 /* Save floating point registers. */ 4673 if (env->insns_flags & PPC_FLOAT) { 4674 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4675 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4676 } 4677 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4678 } 4679 4680 /* Save SPE registers. The kernel only saves the high half. */ 4681 if (env->insns_flags & PPC_SPE) { 4682 #if defined(TARGET_PPC64) 4683 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4684 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4685 } 4686 #else 4687 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4688 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4689 } 4690 #endif 4691 /* Set MSR_SPE in the saved MSR value to indicate that 4692 frame->mc_vregs contains valid data. */ 4693 msr |= MSR_SPE; 4694 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4695 } 4696 4697 /* Store MSR. */ 4698 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4699 } 4700 4701 static void encode_trampoline(int sigret, uint32_t *tramp) 4702 { 4703 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4704 if (sigret) { 4705 __put_user(0x38000000 | sigret, &tramp[0]); 4706 __put_user(0x44000002, &tramp[1]); 4707 } 4708 } 4709 4710 static void restore_user_regs(CPUPPCState *env, 4711 struct target_mcontext *frame, int sig) 4712 { 4713 target_ulong save_r2 = 0; 4714 target_ulong msr; 4715 target_ulong ccr; 4716 4717 int i; 4718 4719 if (!sig) { 4720 save_r2 = env->gpr[2]; 4721 } 4722 4723 /* Restore general registers. */ 4724 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4725 __get_user(env->gpr[i], &frame->mc_gregs[i]); 4726 } 4727 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4728 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4729 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4730 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4731 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4732 4733 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4734 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4735 } 4736 4737 if (!sig) { 4738 env->gpr[2] = save_r2; 4739 } 4740 /* Restore MSR. */ 4741 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4742 4743 /* If doing signal return, restore the previous little-endian mode. */ 4744 if (sig) 4745 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 4746 4747 /* Restore Altivec registers if necessary. */ 4748 if (env->insns_flags & PPC_ALTIVEC) { 4749 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4750 ppc_avr_t *avr = &env->avr[i]; 4751 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4752 4753 __get_user(avr->u64[0], &vreg->u64[0]); 4754 __get_user(avr->u64[1], &vreg->u64[1]); 4755 } 4756 /* Set MSR_VEC in the saved MSR value to indicate that 4757 frame->mc_vregs contains valid data. */ 4758 __get_user(env->spr[SPR_VRSAVE], 4759 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3])); 4760 } 4761 4762 /* Restore floating point registers. 
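   mc_fregs[32] carries the saved FPSCR alongside the 32 FP registers.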
*/ 4763 if (env->insns_flags & PPC_FLOAT) { 4764 uint64_t fpscr; 4765 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4766 __get_user(env->fpr[i], &frame->mc_fregs[i]); 4767 } 4768 __get_user(fpscr, &frame->mc_fregs[32]); 4769 env->fpscr = (uint32_t) fpscr; 4770 } 4771 4772 /* Save SPE registers. The kernel only saves the high half. */ 4773 if (env->insns_flags & PPC_SPE) { 4774 #if defined(TARGET_PPC64) 4775 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4776 uint32_t hi; 4777 4778 __get_user(hi, &frame->mc_vregs.spe[i]); 4779 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4780 } 4781 #else 4782 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4783 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4784 } 4785 #endif 4786 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4787 } 4788 } 4789 4790 static void setup_frame(int sig, struct target_sigaction *ka, 4791 target_sigset_t *set, CPUPPCState *env) 4792 { 4793 struct target_sigframe *frame; 4794 struct target_sigcontext *sc; 4795 target_ulong frame_addr, newsp; 4796 int err = 0; 4797 #if defined(TARGET_PPC64) 4798 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4799 #endif 4800 4801 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4802 trace_user_setup_frame(env, frame_addr); 4803 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4804 goto sigsegv; 4805 sc = &frame->sctx; 4806 4807 __put_user(ka->_sa_handler, &sc->handler); 4808 __put_user(set->sig[0], &sc->oldmask); 4809 #if TARGET_ABI_BITS == 64 4810 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4811 #else 4812 __put_user(set->sig[1], &sc->_unused[3]); 4813 #endif 4814 __put_user(h2g(&frame->mctx), &sc->regs); 4815 __put_user(sig, &sc->signal); 4816 4817 /* Save user regs. */ 4818 save_user_regs(env, &frame->mctx); 4819 4820 /* Construct the trampoline code on the stack. */ 4821 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 4822 4823 /* The kernel checks for the presence of a VDSO here. We don't 4824 emulate a vdso, so use a sigreturn system call. */ 4825 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4826 4827 /* Turn off all fp exceptions. */ 4828 env->fpscr = 0; 4829 4830 /* Create a stack frame for the caller of the handler. */ 4831 newsp = frame_addr - SIGNAL_FRAMESIZE; 4832 err |= put_user(env->gpr[1], newsp, target_ulong); 4833 4834 if (err) 4835 goto sigsegv; 4836 4837 /* Set up registers for signal handler. */ 4838 env->gpr[1] = newsp; 4839 env->gpr[3] = sig; 4840 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4841 4842 #if defined(TARGET_PPC64) 4843 if (get_ppc64_abi(image) < 2) { 4844 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4845 struct target_func_ptr *handler = 4846 (struct target_func_ptr *)g2h(ka->_sa_handler); 4847 env->nip = tswapl(handler->entry); 4848 env->gpr[2] = tswapl(handler->toc); 4849 } else { 4850 /* ELFv2 PPC64 function pointers are entry points, but R12 4851 * must also be set */ 4852 env->nip = tswapl((target_ulong) ka->_sa_handler); 4853 env->gpr[12] = env->nip; 4854 } 4855 #else 4856 env->nip = (target_ulong) ka->_sa_handler; 4857 #endif 4858 4859 /* Signal handlers are entered in big-endian mode. 
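   MSR_LE is cleared below; the guest's original endianness is put back from the
   saved MSR by restore_user_regs() at sigreturn time.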
*/ 4860 env->msr &= ~(1ull << MSR_LE); 4861 4862 unlock_user_struct(frame, frame_addr, 1); 4863 return; 4864 4865 sigsegv: 4866 unlock_user_struct(frame, frame_addr, 1); 4867 force_sigsegv(sig); 4868 } 4869 4870 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4871 target_siginfo_t *info, 4872 target_sigset_t *set, CPUPPCState *env) 4873 { 4874 struct target_rt_sigframe *rt_sf; 4875 uint32_t *trampptr = 0; 4876 struct target_mcontext *mctx = 0; 4877 target_ulong rt_sf_addr, newsp = 0; 4878 int i, err = 0; 4879 #if defined(TARGET_PPC64) 4880 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4881 #endif 4882 4883 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4884 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4885 goto sigsegv; 4886 4887 tswap_siginfo(&rt_sf->info, info); 4888 4889 __put_user(0, &rt_sf->uc.tuc_flags); 4890 __put_user(0, &rt_sf->uc.tuc_link); 4891 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4892 &rt_sf->uc.tuc_stack.ss_sp); 4893 __put_user(sas_ss_flags(env->gpr[1]), 4894 &rt_sf->uc.tuc_stack.ss_flags); 4895 __put_user(target_sigaltstack_used.ss_size, 4896 &rt_sf->uc.tuc_stack.ss_size); 4897 #if !defined(TARGET_PPC64) 4898 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4899 &rt_sf->uc.tuc_regs); 4900 #endif 4901 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4902 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4903 } 4904 4905 #if defined(TARGET_PPC64) 4906 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 4907 trampptr = &rt_sf->trampoline[0]; 4908 #else 4909 mctx = &rt_sf->uc.tuc_mcontext; 4910 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 4911 #endif 4912 4913 save_user_regs(env, mctx); 4914 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 4915 4916 /* The kernel checks for the presence of a VDSO here. We don't 4917 emulate a vdso, so use a sigreturn system call. */ 4918 env->lr = (target_ulong) h2g(trampptr); 4919 4920 /* Turn off all fp exceptions. */ 4921 env->fpscr = 0; 4922 4923 /* Create a stack frame for the caller of the handler. */ 4924 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4925 err |= put_user(env->gpr[1], newsp, target_ulong); 4926 4927 if (err) 4928 goto sigsegv; 4929 4930 /* Set up registers for signal handler. */ 4931 env->gpr[1] = newsp; 4932 env->gpr[3] = (target_ulong) sig; 4933 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4934 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4935 env->gpr[6] = (target_ulong) h2g(rt_sf); 4936 4937 #if defined(TARGET_PPC64) 4938 if (get_ppc64_abi(image) < 2) { 4939 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4940 struct target_func_ptr *handler = 4941 (struct target_func_ptr *)g2h(ka->_sa_handler); 4942 env->nip = tswapl(handler->entry); 4943 env->gpr[2] = tswapl(handler->toc); 4944 } else { 4945 /* ELFv2 PPC64 function pointers are entry points, but R12 4946 * must also be set */ 4947 env->nip = tswapl((target_ulong) ka->_sa_handler); 4948 env->gpr[12] = env->nip; 4949 } 4950 #else 4951 env->nip = (target_ulong) ka->_sa_handler; 4952 #endif 4953 4954 /* Signal handlers are entered in big-endian mode. 
*/ 4955 env->msr &= ~(1ull << MSR_LE); 4956 4957 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4958 return; 4959 4960 sigsegv: 4961 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4962 force_sigsegv(sig); 4963 4964 } 4965 4966 long do_sigreturn(CPUPPCState *env) 4967 { 4968 struct target_sigcontext *sc = NULL; 4969 struct target_mcontext *sr = NULL; 4970 target_ulong sr_addr = 0, sc_addr; 4971 sigset_t blocked; 4972 target_sigset_t set; 4973 4974 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4975 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4976 goto sigsegv; 4977 4978 #if defined(TARGET_PPC64) 4979 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 4980 #else 4981 __get_user(set.sig[0], &sc->oldmask); 4982 __get_user(set.sig[1], &sc->_unused[3]); 4983 #endif 4984 target_to_host_sigset_internal(&blocked, &set); 4985 set_sigmask(&blocked); 4986 4987 __get_user(sr_addr, &sc->regs); 4988 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4989 goto sigsegv; 4990 restore_user_regs(env, sr, 1); 4991 4992 unlock_user_struct(sr, sr_addr, 1); 4993 unlock_user_struct(sc, sc_addr, 1); 4994 return -TARGET_QEMU_ESIGRETURN; 4995 4996 sigsegv: 4997 unlock_user_struct(sr, sr_addr, 1); 4998 unlock_user_struct(sc, sc_addr, 1); 4999 force_sig(TARGET_SIGSEGV); 5000 return -TARGET_QEMU_ESIGRETURN; 5001 } 5002 5003 /* See arch/powerpc/kernel/signal_32.c. */ 5004 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 5005 { 5006 struct target_mcontext *mcp; 5007 target_ulong mcp_addr; 5008 sigset_t blocked; 5009 target_sigset_t set; 5010 5011 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 5012 sizeof (set))) 5013 return 1; 5014 5015 #if defined(TARGET_PPC64) 5016 mcp_addr = h2g(ucp) + 5017 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 5018 #else 5019 __get_user(mcp_addr, &ucp->tuc_regs); 5020 #endif 5021 5022 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 5023 return 1; 5024 5025 target_to_host_sigset_internal(&blocked, &set); 5026 set_sigmask(&blocked); 5027 restore_user_regs(env, mcp, sig); 5028 5029 unlock_user_struct(mcp, mcp_addr, 1); 5030 return 0; 5031 } 5032 5033 long do_rt_sigreturn(CPUPPCState *env) 5034 { 5035 struct target_rt_sigframe *rt_sf = NULL; 5036 target_ulong rt_sf_addr; 5037 5038 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5039 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5040 goto sigsegv; 5041 5042 if (do_setcontext(&rt_sf->uc, env, 1)) 5043 goto sigsegv; 5044 5045 do_sigaltstack(rt_sf_addr 5046 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5047 0, env->gpr[1]); 5048 5049 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5050 return -TARGET_QEMU_ESIGRETURN; 5051 5052 sigsegv: 5053 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5054 force_sig(TARGET_SIGSEGV); 5055 return -TARGET_QEMU_ESIGRETURN; 5056 } 5057 5058 #elif defined(TARGET_M68K) 5059 5060 struct target_sigcontext { 5061 abi_ulong sc_mask; 5062 abi_ulong sc_usp; 5063 abi_ulong sc_d0; 5064 abi_ulong sc_d1; 5065 abi_ulong sc_a0; 5066 abi_ulong sc_a1; 5067 unsigned short sc_sr; 5068 abi_ulong sc_pc; 5069 }; 5070 5071 struct target_sigframe 5072 { 5073 abi_ulong pretcode; 5074 int sig; 5075 int code; 5076 abi_ulong psc; 5077 char retcode[8]; 5078 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5079 struct target_sigcontext sc; 5080 }; 5081 5082 typedef int target_greg_t; 5083 #define TARGET_NGREG 18 5084 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5085 5086 typedef struct target_fpregset { 5087 int f_fpcntl[3]; 5088 int 
f_fpregs[8*3]; 5089 } target_fpregset_t; 5090 5091 struct target_mcontext { 5092 int version; 5093 target_gregset_t gregs; 5094 target_fpregset_t fpregs; 5095 }; 5096 5097 #define TARGET_MCONTEXT_VERSION 2 5098 5099 struct target_ucontext { 5100 abi_ulong tuc_flags; 5101 abi_ulong tuc_link; 5102 target_stack_t tuc_stack; 5103 struct target_mcontext tuc_mcontext; 5104 abi_long tuc_filler[80]; 5105 target_sigset_t tuc_sigmask; 5106 }; 5107 5108 struct target_rt_sigframe 5109 { 5110 abi_ulong pretcode; 5111 int sig; 5112 abi_ulong pinfo; 5113 abi_ulong puc; 5114 char retcode[8]; 5115 struct target_siginfo info; 5116 struct target_ucontext uc; 5117 }; 5118 5119 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5120 abi_ulong mask) 5121 { 5122 __put_user(mask, &sc->sc_mask); 5123 __put_user(env->aregs[7], &sc->sc_usp); 5124 __put_user(env->dregs[0], &sc->sc_d0); 5125 __put_user(env->dregs[1], &sc->sc_d1); 5126 __put_user(env->aregs[0], &sc->sc_a0); 5127 __put_user(env->aregs[1], &sc->sc_a1); 5128 __put_user(env->sr, &sc->sc_sr); 5129 __put_user(env->pc, &sc->sc_pc); 5130 } 5131 5132 static void 5133 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5134 { 5135 int temp; 5136 5137 __get_user(env->aregs[7], &sc->sc_usp); 5138 __get_user(env->dregs[0], &sc->sc_d0); 5139 __get_user(env->dregs[1], &sc->sc_d1); 5140 __get_user(env->aregs[0], &sc->sc_a0); 5141 __get_user(env->aregs[1], &sc->sc_a1); 5142 __get_user(env->pc, &sc->sc_pc); 5143 __get_user(temp, &sc->sc_sr); 5144 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5145 } 5146 5147 /* 5148 * Determine which stack to use.. 5149 */ 5150 static inline abi_ulong 5151 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5152 size_t frame_size) 5153 { 5154 unsigned long sp; 5155 5156 sp = regs->aregs[7]; 5157 5158 /* This is the X/Open sanctioned signal stack switching. */ 5159 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5160 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5161 } 5162 5163 return ((sp - frame_size) & -8UL); 5164 } 5165 5166 static void setup_frame(int sig, struct target_sigaction *ka, 5167 target_sigset_t *set, CPUM68KState *env) 5168 { 5169 struct target_sigframe *frame; 5170 abi_ulong frame_addr; 5171 abi_ulong retcode_addr; 5172 abi_ulong sc_addr; 5173 int i; 5174 5175 frame_addr = get_sigframe(ka, env, sizeof *frame); 5176 trace_user_setup_frame(env, frame_addr); 5177 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5178 goto give_sigsegv; 5179 } 5180 5181 __put_user(sig, &frame->sig); 5182 5183 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5184 __put_user(sc_addr, &frame->psc); 5185 5186 setup_sigcontext(&frame->sc, env, set->sig[0]); 5187 5188 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5189 __put_user(set->sig[i], &frame->extramask[i - 1]); 5190 } 5191 5192 /* Set up to return from userspace. 
*/ 5193 5194 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5195 __put_user(retcode_addr, &frame->pretcode); 5196 5197 /* moveq #,d0; trap #0 */ 5198 5199 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5200 (uint32_t *)(frame->retcode)); 5201 5202 /* Set up to return from userspace */ 5203 5204 env->aregs[7] = frame_addr; 5205 env->pc = ka->_sa_handler; 5206 5207 unlock_user_struct(frame, frame_addr, 1); 5208 return; 5209 5210 give_sigsegv: 5211 force_sigsegv(sig); 5212 } 5213 5214 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5215 CPUM68KState *env) 5216 { 5217 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5218 5219 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5220 __put_user(env->dregs[0], &gregs[0]); 5221 __put_user(env->dregs[1], &gregs[1]); 5222 __put_user(env->dregs[2], &gregs[2]); 5223 __put_user(env->dregs[3], &gregs[3]); 5224 __put_user(env->dregs[4], &gregs[4]); 5225 __put_user(env->dregs[5], &gregs[5]); 5226 __put_user(env->dregs[6], &gregs[6]); 5227 __put_user(env->dregs[7], &gregs[7]); 5228 __put_user(env->aregs[0], &gregs[8]); 5229 __put_user(env->aregs[1], &gregs[9]); 5230 __put_user(env->aregs[2], &gregs[10]); 5231 __put_user(env->aregs[3], &gregs[11]); 5232 __put_user(env->aregs[4], &gregs[12]); 5233 __put_user(env->aregs[5], &gregs[13]); 5234 __put_user(env->aregs[6], &gregs[14]); 5235 __put_user(env->aregs[7], &gregs[15]); 5236 __put_user(env->pc, &gregs[16]); 5237 __put_user(env->sr, &gregs[17]); 5238 5239 return 0; 5240 } 5241 5242 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5243 struct target_ucontext *uc) 5244 { 5245 int temp; 5246 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5247 5248 __get_user(temp, &uc->tuc_mcontext.version); 5249 if (temp != TARGET_MCONTEXT_VERSION) 5250 goto badframe; 5251 5252 /* restore passed registers */ 5253 __get_user(env->dregs[0], &gregs[0]); 5254 __get_user(env->dregs[1], &gregs[1]); 5255 __get_user(env->dregs[2], &gregs[2]); 5256 __get_user(env->dregs[3], &gregs[3]); 5257 __get_user(env->dregs[4], &gregs[4]); 5258 __get_user(env->dregs[5], &gregs[5]); 5259 __get_user(env->dregs[6], &gregs[6]); 5260 __get_user(env->dregs[7], &gregs[7]); 5261 __get_user(env->aregs[0], &gregs[8]); 5262 __get_user(env->aregs[1], &gregs[9]); 5263 __get_user(env->aregs[2], &gregs[10]); 5264 __get_user(env->aregs[3], &gregs[11]); 5265 __get_user(env->aregs[4], &gregs[12]); 5266 __get_user(env->aregs[5], &gregs[13]); 5267 __get_user(env->aregs[6], &gregs[14]); 5268 __get_user(env->aregs[7], &gregs[15]); 5269 __get_user(env->pc, &gregs[16]); 5270 __get_user(temp, &gregs[17]); 5271 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5272 5273 return 0; 5274 5275 badframe: 5276 return 1; 5277 } 5278 5279 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5280 target_siginfo_t *info, 5281 target_sigset_t *set, CPUM68KState *env) 5282 { 5283 struct target_rt_sigframe *frame; 5284 abi_ulong frame_addr; 5285 abi_ulong retcode_addr; 5286 abi_ulong info_addr; 5287 abi_ulong uc_addr; 5288 int err = 0; 5289 int i; 5290 5291 frame_addr = get_sigframe(ka, env, sizeof *frame); 5292 trace_user_setup_rt_frame(env, frame_addr); 5293 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5294 goto give_sigsegv; 5295 } 5296 5297 __put_user(sig, &frame->sig); 5298 5299 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5300 __put_user(info_addr, &frame->pinfo); 5301 5302 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5303 
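/* puc holds the guest address of the ucontext embedded in this same frame. */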
__put_user(uc_addr, &frame->puc); 5304 5305 tswap_siginfo(&frame->info, info); 5306 5307 /* Create the ucontext */ 5308 5309 __put_user(0, &frame->uc.tuc_flags); 5310 __put_user(0, &frame->uc.tuc_link); 5311 __put_user(target_sigaltstack_used.ss_sp, 5312 &frame->uc.tuc_stack.ss_sp); 5313 __put_user(sas_ss_flags(env->aregs[7]), 5314 &frame->uc.tuc_stack.ss_flags); 5315 __put_user(target_sigaltstack_used.ss_size, 5316 &frame->uc.tuc_stack.ss_size); 5317 err |= target_rt_setup_ucontext(&frame->uc, env); 5318 5319 if (err) 5320 goto give_sigsegv; 5321 5322 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5323 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5324 } 5325 5326 /* Set up to return from userspace. */ 5327 5328 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5329 __put_user(retcode_addr, &frame->pretcode); 5330 5331 /* moveq #,d0; notb d0; trap #0 */ 5332 5333 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 5334 (uint32_t *)(frame->retcode + 0)); 5335 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); 5336 5337 if (err) 5338 goto give_sigsegv; 5339 5340 /* Set up to return from userspace */ 5341 5342 env->aregs[7] = frame_addr; 5343 env->pc = ka->_sa_handler; 5344 5345 unlock_user_struct(frame, frame_addr, 1); 5346 return; 5347 5348 give_sigsegv: 5349 unlock_user_struct(frame, frame_addr, 1); 5350 force_sigsegv(sig); 5351 } 5352 5353 long do_sigreturn(CPUM68KState *env) 5354 { 5355 struct target_sigframe *frame; 5356 abi_ulong frame_addr = env->aregs[7] - 4; 5357 target_sigset_t target_set; 5358 sigset_t set; 5359 int i; 5360 5361 trace_user_do_sigreturn(env, frame_addr); 5362 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5363 goto badframe; 5364 5365 /* set blocked signals */ 5366 5367 __get_user(target_set.sig[0], &frame->sc.sc_mask); 5368 5369 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5370 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 5371 } 5372 5373 target_to_host_sigset_internal(&set, &target_set); 5374 set_sigmask(&set); 5375 5376 /* restore registers */ 5377 5378 restore_sigcontext(env, &frame->sc); 5379 5380 unlock_user_struct(frame, frame_addr, 0); 5381 return -TARGET_QEMU_ESIGRETURN; 5382 5383 badframe: 5384 force_sig(TARGET_SIGSEGV); 5385 return -TARGET_QEMU_ESIGRETURN; 5386 } 5387 5388 long do_rt_sigreturn(CPUM68KState *env) 5389 { 5390 struct target_rt_sigframe *frame; 5391 abi_ulong frame_addr = env->aregs[7] - 4; 5392 target_sigset_t target_set; 5393 sigset_t set; 5394 5395 trace_user_do_rt_sigreturn(env, frame_addr); 5396 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5397 goto badframe; 5398 5399 target_to_host_sigset_internal(&set, &target_set); 5400 set_sigmask(&set); 5401 5402 /* restore registers */ 5403 5404 if (target_rt_restore_ucontext(env, &frame->uc)) 5405 goto badframe; 5406 5407 if (do_sigaltstack(frame_addr + 5408 offsetof(struct target_rt_sigframe, uc.tuc_stack), 5409 0, get_sp_from_cpustate(env)) == -EFAULT) 5410 goto badframe; 5411 5412 unlock_user_struct(frame, frame_addr, 0); 5413 return -TARGET_QEMU_ESIGRETURN; 5414 5415 badframe: 5416 unlock_user_struct(frame, frame_addr, 0); 5417 force_sig(TARGET_SIGSEGV); 5418 return -TARGET_QEMU_ESIGRETURN; 5419 } 5420 5421 #elif defined(TARGET_ALPHA) 5422 5423 struct target_sigcontext { 5424 abi_long sc_onstack; 5425 abi_long sc_mask; 5426 abi_long sc_pc; 5427 abi_long sc_ps; 5428 abi_long sc_regs[32]; 5429 abi_long sc_ownedfp; 5430 abi_long sc_fpregs[32]; 5431 abi_ulong sc_fpcr; 5432 abi_ulong sc_fp_control; 5433 abi_ulong 
sc_reserved1; 5434 abi_ulong sc_reserved2; 5435 abi_ulong sc_ssize; 5436 abi_ulong sc_sbase; 5437 abi_ulong sc_traparg_a0; 5438 abi_ulong sc_traparg_a1; 5439 abi_ulong sc_traparg_a2; 5440 abi_ulong sc_fp_trap_pc; 5441 abi_ulong sc_fp_trigger_sum; 5442 abi_ulong sc_fp_trigger_inst; 5443 }; 5444 5445 struct target_ucontext { 5446 abi_ulong tuc_flags; 5447 abi_ulong tuc_link; 5448 abi_ulong tuc_osf_sigmask; 5449 target_stack_t tuc_stack; 5450 struct target_sigcontext tuc_mcontext; 5451 target_sigset_t tuc_sigmask; 5452 }; 5453 5454 struct target_sigframe { 5455 struct target_sigcontext sc; 5456 unsigned int retcode[3]; 5457 }; 5458 5459 struct target_rt_sigframe { 5460 target_siginfo_t info; 5461 struct target_ucontext uc; 5462 unsigned int retcode[3]; 5463 }; 5464 5465 #define INSN_MOV_R30_R16 0x47fe0410 5466 #define INSN_LDI_R0 0x201f0000 5467 #define INSN_CALLSYS 0x00000083 5468 5469 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5470 abi_ulong frame_addr, target_sigset_t *set) 5471 { 5472 int i; 5473 5474 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5475 __put_user(set->sig[0], &sc->sc_mask); 5476 __put_user(env->pc, &sc->sc_pc); 5477 __put_user(8, &sc->sc_ps); 5478 5479 for (i = 0; i < 31; ++i) { 5480 __put_user(env->ir[i], &sc->sc_regs[i]); 5481 } 5482 __put_user(0, &sc->sc_regs[31]); 5483 5484 for (i = 0; i < 31; ++i) { 5485 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5486 } 5487 __put_user(0, &sc->sc_fpregs[31]); 5488 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5489 5490 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5491 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5492 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5493 } 5494 5495 static void restore_sigcontext(CPUAlphaState *env, 5496 struct target_sigcontext *sc) 5497 { 5498 uint64_t fpcr; 5499 int i; 5500 5501 __get_user(env->pc, &sc->sc_pc); 5502 5503 for (i = 0; i < 31; ++i) { 5504 __get_user(env->ir[i], &sc->sc_regs[i]); 5505 } 5506 for (i = 0; i < 31; ++i) { 5507 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5508 } 5509 5510 __get_user(fpcr, &sc->sc_fpcr); 5511 cpu_alpha_store_fpcr(env, fpcr); 5512 } 5513 5514 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5515 CPUAlphaState *env, 5516 unsigned long framesize) 5517 { 5518 abi_ulong sp = env->ir[IR_SP]; 5519 5520 /* This is the X/Open sanctioned signal stack switching. 
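   The alternate stack is used only when SA_ONSTACK is requested, an altstack has
   actually been registered, and the thread is not already running on it.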
*/ 5521 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5522 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5523 } 5524 return (sp - framesize) & -32; 5525 } 5526 5527 static void setup_frame(int sig, struct target_sigaction *ka, 5528 target_sigset_t *set, CPUAlphaState *env) 5529 { 5530 abi_ulong frame_addr, r26; 5531 struct target_sigframe *frame; 5532 int err = 0; 5533 5534 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5535 trace_user_setup_frame(env, frame_addr); 5536 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5537 goto give_sigsegv; 5538 } 5539 5540 setup_sigcontext(&frame->sc, env, frame_addr, set); 5541 5542 if (ka->sa_restorer) { 5543 r26 = ka->sa_restorer; 5544 } else { 5545 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5546 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5547 &frame->retcode[1]); 5548 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5549 /* imb() */ 5550 r26 = frame_addr; 5551 } 5552 5553 unlock_user_struct(frame, frame_addr, 1); 5554 5555 if (err) { 5556 give_sigsegv: 5557 force_sigsegv(sig); 5558 return; 5559 } 5560 5561 env->ir[IR_RA] = r26; 5562 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5563 env->ir[IR_A0] = sig; 5564 env->ir[IR_A1] = 0; 5565 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5566 env->ir[IR_SP] = frame_addr; 5567 } 5568 5569 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5570 target_siginfo_t *info, 5571 target_sigset_t *set, CPUAlphaState *env) 5572 { 5573 abi_ulong frame_addr, r26; 5574 struct target_rt_sigframe *frame; 5575 int i, err = 0; 5576 5577 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5578 trace_user_setup_rt_frame(env, frame_addr); 5579 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5580 goto give_sigsegv; 5581 } 5582 5583 tswap_siginfo(&frame->info, info); 5584 5585 __put_user(0, &frame->uc.tuc_flags); 5586 __put_user(0, &frame->uc.tuc_link); 5587 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5588 __put_user(target_sigaltstack_used.ss_sp, 5589 &frame->uc.tuc_stack.ss_sp); 5590 __put_user(sas_ss_flags(env->ir[IR_SP]), 5591 &frame->uc.tuc_stack.ss_flags); 5592 __put_user(target_sigaltstack_used.ss_size, 5593 &frame->uc.tuc_stack.ss_size); 5594 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5595 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5596 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5597 } 5598 5599 if (ka->sa_restorer) { 5600 r26 = ka->sa_restorer; 5601 } else { 5602 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5603 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5604 &frame->retcode[1]); 5605 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5606 /* imb(); */ 5607 r26 = frame_addr; 5608 } 5609 5610 if (err) { 5611 give_sigsegv: 5612 force_sigsegv(sig); 5613 return; 5614 } 5615 5616 env->ir[IR_RA] = r26; 5617 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5618 env->ir[IR_A0] = sig; 5619 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5620 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5621 env->ir[IR_SP] = frame_addr; 5622 } 5623 5624 long do_sigreturn(CPUAlphaState *env) 5625 { 5626 struct target_sigcontext *sc; 5627 abi_ulong sc_addr = env->ir[IR_A0]; 5628 target_sigset_t target_set; 5629 sigset_t set; 5630 5631 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5632 goto badframe; 5633 } 5634 5635 target_sigemptyset(&target_set); 5636 __get_user(target_set.sig[0], &sc->sc_mask); 5637 5638 
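/* Convert the saved guest signal mask to a host sigset and install it. */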
long do_sigreturn(CPUAlphaState *env)
{
    struct target_sigcontext *sc;
    abi_ulong sc_addr = env->ir[IR_A0];
    target_sigset_t target_set;
    sigset_t set;

    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
        goto badframe;
    }

    target_sigemptyset(&target_set);
    __get_user(target_set.sig[0], &sc->sc_mask);

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    restore_sigcontext(env, sc);
    unlock_user_struct(sc, sc_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_rt_sigreturn(CPUAlphaState *env)
{
    abi_ulong frame_addr = env->ir[IR_A0];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->ir[IR_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_TILEGX)

struct target_sigcontext {
    union {
        /* General-purpose registers.  */
        abi_ulong gregs[56];
        struct {
            abi_ulong __gregs[53];
            abi_ulong tp;        /* Aliases gregs[TREG_TP].  */
            abi_ulong sp;        /* Aliases gregs[TREG_SP].  */
            abi_ulong lr;        /* Aliases gregs[TREG_LR].  */
        };
    };
    abi_ulong pc;        /* Program counter.  */
    abi_ulong ics;       /* In Interrupt Critical Section?  */
    abi_ulong faultnum;  /* Fault number.  */
    abi_ulong pad[5];
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

struct target_rt_sigframe {
    unsigned char save_area[16];   /* caller save area */
    struct target_siginfo info;
    struct target_ucontext uc;
    abi_ulong retcode[2];
};

#define INSN_MOVELI_R10_139    0x00045fe551483000ULL /* { moveli r10, 139 } */
#define INSN_SWINT1            0x286b180051485000ULL /* { swint1 } */


static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUArchState *env, int signo)
{
    int i;

    for (i = 0; i < TILEGX_R_COUNT; ++i) {
        __put_user(env->regs[i], &sc->gregs[i]);
    }

    __put_user(env->pc, &sc->pc);
    __put_user(0, &sc->ics);
    __put_user(signo, &sc->faultnum);
}

static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
{
    int i;

    for (i = 0; i < TILEGX_R_COUNT; ++i) {
        __get_user(env->regs[i], &sc->gregs[i]);
    }

    __get_user(env->pc, &sc->pc);
}

static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
                              size_t frame_size)
{
    unsigned long sp = env->regs[TILEGX_R_SP];

    if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
        return -1UL;
    }

    if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp -= frame_size;
    sp &= -16UL;
    return sp;
}
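/*
 * Rough sketch of the guest-stack frame that setup_rt_frame() below builds
 * (lowest address first, mirroring struct target_rt_sigframe above):
 *
 *   sp -> save_area[16]   caller save area
 *         info            full siginfo for SA_SIGINFO handlers, otherwise
 *                         only si_signo is filled in for the backtracer
 *         uc              ucontext: altstack state, registers, signal mask
 *         retcode[2]      { moveli r10, 139 } { swint1 } trampoline, used
 *                         as the return address when there is no restorer
 */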
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    abi_ulong frame_addr;
    struct target_rt_sigframe *frame;
    unsigned long restorer;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Always write at least the signal number for the stack backtracer.  */
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        /* At sigreturn time, restore the callee-save registers too.  */
        tswap_siginfo(&frame->info, info);
        /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
    } else {
        __put_user(info->si_signo, &frame->info.si_signo);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        restorer = (unsigned long) ka->sa_restorer;
    } else {
        __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
        __put_user(INSN_SWINT1, &frame->retcode[1]);
        restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    }
    env->pc = (unsigned long) ka->_sa_handler;
    env->regs[TILEGX_R_SP] = (unsigned long) frame;
    env->regs[TILEGX_R_LR] = restorer;
    env->regs[0] = (unsigned long) sig;
    env->regs[1] = (unsigned long) &frame->info;
    env->regs[2] = (unsigned long) &frame->uc;
    /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_rt_sigreturn(CPUTLGState *env)
{
    abi_ulong frame_addr = env->regs[TILEGX_R_SP];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->regs[TILEGX_R_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#else

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}

long do_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

long do_rt_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

#endif
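/*
 * Common delivery path shared by all targets: dequeue one pending guest
 * signal, give the gdbstub a chance to intercept it, apply the default
 * SIG_DFL/SIG_IGN semantics, and for a real handler compute the signal mask
 * to hold while it runs before handing off to the per-target
 * setup_frame()/setup_rt_frame() code above.
 */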
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control (stop the process) or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
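/*
 * Drain every pending guest signal.  All host signals are blocked while the
 * pending table is scanned so that host_signal_handler() cannot race with
 * the scan; the synchronous (sync_signal) slot is always serviced before
 * the ordinary per-signal queue.
 */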
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
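/*
 * Note: in linux-user mode the per-target cpu_loop() implementations call
 * process_pending_signals() each time cpu_exec() returns to them, so guest
 * signal frames are only built in between bursts of translated code, never
 * while guest instructions are executing.
 */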