/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}
int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for(i = 1;i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;
    int pending;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    pending = atomic_xchg(&ts->signal_pending, 1);

    return pending;
}
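/* Illustrative sketch (added commentary, not part of the original): callers
 * that need to update guest signal state pair block_signals() with the
 * -TARGET_ERESTARTSYS convention, roughly:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;   // a signal is pending; retry later
 *     }
 *     // ... safely modify ts->signal_mask / sigact_table ...
 *
 * do_sigprocmask() and do_sigaction() below follow exactly this pattern.
 */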
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_X86_64)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        /* Should never come here, but who knows. The information for
           the target is irrelevant. */
        tinfo->_sifields._sigfault._addr = 0;
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band = info->si_band;
        tinfo->_sifields._sigpoll._fd = info->si_fd;
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid = info->si_pid;
        tinfo->_sifields._sigchld._uid = info->si_uid;
        tinfo->_sifields._sigchld._status
            = host_to_target_waitstatus(info->si_status);
        tinfo->_sifields._sigchld._utime = info->si_utime;
        tinfo->_sifields._sigchld._stime = info->si_stime;
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = info->si_pid;
        tinfo->_sifields._rt._uid = info->si_uid;
        /* XXX: potential problem if 64 bit */
        tinfo->_sifields._rt._sigval.sival_ptr
            = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
    }
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int sig = info->si_signo;
    tinfo->si_signo = tswap32(sig);
    tinfo->si_errno = tswap32(info->si_errno);
    tinfo->si_code = tswap32(info->si_code);

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        tinfo->_sifields._sigfault._addr
            = tswapal(info->_sifields._sigfault._addr);
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band
            = tswap32(info->_sifields._sigpoll._band);
        tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid
            = tswap32(info->_sifields._sigchld._pid);
        tinfo->_sifields._sigchld._uid
            = tswap32(info->_sifields._sigchld._uid);
        tinfo->_sifields._sigchld._status
            = tswap32(info->_sifields._sigchld._status);
        tinfo->_sifields._sigchld._utime
            = tswapal(info->_sifields._sigchld._utime);
        tinfo->_sifields._sigchld._stime
            = tswapal(info->_sifields._sigchld._stime);
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
        tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
        tinfo->_sifields._rt._sigval.sival_ptr
            = tswapal(info->_sifields._rt._sigval.sival_ptr);
    }
}


void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    tswap_siginfo(tinfo, tinfo);
}

/* XXX: we assume that only POSIX RT signals are used here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    info->si_signo = tswap32(tinfo->si_signo);
    info->si_errno = tswap32(tinfo->si_errno);
    info->si_code = tswap32(tinfo->si_code);
    info->si_pid = tswap32(tinfo->_sifields._rt._pid);
    info->si_uid = tswap32(tinfo->_sifields._rt._uid);
    info->si_value.sival_ptr =
        (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
}

static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}


/* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
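/* Added commentary (not from the original): when HAVE_SAFE_SYSCALL is
 * defined, a host-specific rewind_if_in_safe_syscall() is expected to look
 * at the interrupted PC in the ucontext and, if it lies inside the
 * safe-syscall assembly fragment but before the syscall instruction itself,
 * wind it back to the start of the fragment so the pending-signal check is
 * redone and the guest syscall reports -TARGET_ERESTARTSYS instead of being
 * lost.  A rough sketch:
 *
 *     ucontext_t *uc = puc;
 *     if (pc_in_safe_syscall_window(uc)) {   // hypothetical helper
 *         set_pc_to_safe_syscall_start(uc);  // hypothetical helper
 *     }
 *
 * The helper names above are illustrative only.
 */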
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

#if defined(TARGET_I386) && TARGET_ABI_BITS == 32

/* from the Linux kernel */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    abi_ulong element[4];
};

struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t status;
    uint16_t magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];  /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8];  /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC 0x0000

struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct sigframe
{
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};
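/* Added commentary (not from the original): when a handler is invoked, the
 * guest stack grows down to hold one of the frames above, so on entry ESP
 * points at the start of the frame and the layout seen by the handler is
 * roughly (lowest address first):
 *
 *     pretcode  -> return address, normally the retcode trampoline below
 *     sig       -> first (cdecl) argument of the handler
 *     (rt frames add the pinfo/puc pointers, then siginfo and ucontext)
 *     sc/uc, fpstate and extramask/retcode follow at higher addresses
 *
 * This mirrors the layout used by the 32-bit Linux kernel, which is why the
 * structures are copied from it verbatim.
 */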
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}

/*
 * Determine which stack to use..
 */

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {

        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
    }
    return (esp - frame_size) & -8ul;
}

/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
                     frame_addr + offsetof(struct sigframe, fpstate));

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }


    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}

/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
                     set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}

static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);
    env->eip = tswapl(sc->eip);

    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    /* regs->orig_eax = -1; */ /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    return err;
badframe:
    return 1;
}

long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
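/* Added commentary (not from the original): -TARGET_QEMU_ESIGRETURN is a
 * QEMU-internal pseudo errno.  Returning it from do_sigreturn() and
 * do_rt_sigreturn() tells the main syscall dispatch loop that the guest
 * register file has already been rewritten from the signal frame, so it must
 * not write a syscall return value back into EAX and clobber the restored
 * state.
 */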
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}

#elif defined(TARGET_AARCH64)

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}

static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}
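/* Added commentary (not from the original): the __reserved area follows the
 * kernel's extension-record protocol described above: a chain of
 * target_aarch64_ctx headers, each carrying a magic and a size, terminated
 * by a header whose magic and size are both zero.  Only the FP/SIMD record
 * is ever written here, so target_restore_sigframe() can simply insist on
 * finding TARGET_FPSIMD_MAGIC first and reject anything else; a more general
 * reader would walk the chain, e.g.
 *
 *     struct target_aarch64_ctx *ctx = (void *)sf->uc.tuc_mcontext.__reserved;
 *     while (ctx->magic != 0) {       // illustrative sketch only
 *         ...handle one record, then advance by ctx->size...
 *     }
 */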
static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
{
    abi_ulong sp;

    sp = env->xregs[31];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;

    return sp;
}

static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}

#elif defined(TARGET_ARM)

struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1
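/* Added commentary (not from the original): the *_v1 and *_v2 frame layouts
 * mirror the change the ARM Linux kernel made in 2.6.18, when the plain
 * sigcontext-based frames grew a full ucontext plus a coprocessor register
 * area (tuc_regspace).  setup_frame()/do_sigreturn() below select between
 * the two with get_osversion() >= 0x020612, i.e. the kernel version the
 * guest claims to be running on.
 */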
1509 */ 1510 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 1511 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 1512 1513 static const abi_ulong retcodes[4] = { 1514 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 1515 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 1516 }; 1517 1518 1519 static inline int valid_user_regs(CPUARMState *regs) 1520 { 1521 return 1; 1522 } 1523 1524 static void 1525 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1526 CPUARMState *env, abi_ulong mask) 1527 { 1528 __put_user(env->regs[0], &sc->arm_r0); 1529 __put_user(env->regs[1], &sc->arm_r1); 1530 __put_user(env->regs[2], &sc->arm_r2); 1531 __put_user(env->regs[3], &sc->arm_r3); 1532 __put_user(env->regs[4], &sc->arm_r4); 1533 __put_user(env->regs[5], &sc->arm_r5); 1534 __put_user(env->regs[6], &sc->arm_r6); 1535 __put_user(env->regs[7], &sc->arm_r7); 1536 __put_user(env->regs[8], &sc->arm_r8); 1537 __put_user(env->regs[9], &sc->arm_r9); 1538 __put_user(env->regs[10], &sc->arm_r10); 1539 __put_user(env->regs[11], &sc->arm_fp); 1540 __put_user(env->regs[12], &sc->arm_ip); 1541 __put_user(env->regs[13], &sc->arm_sp); 1542 __put_user(env->regs[14], &sc->arm_lr); 1543 __put_user(env->regs[15], &sc->arm_pc); 1544 #ifdef TARGET_CONFIG_CPU_32 1545 __put_user(cpsr_read(env), &sc->arm_cpsr); 1546 #endif 1547 1548 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no); 1549 __put_user(/* current->thread.error_code */ 0, &sc->error_code); 1550 __put_user(/* current->thread.address */ 0, &sc->fault_address); 1551 __put_user(mask, &sc->oldmask); 1552 } 1553 1554 static inline abi_ulong 1555 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) 1556 { 1557 unsigned long sp = regs->regs[13]; 1558 1559 /* 1560 * This is the X/Open sanctioned signal stack switching. 1561 */ 1562 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1563 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1564 } 1565 /* 1566 * ATPCS B01 mandates 8-byte alignment 1567 */ 1568 return (sp - framesize) & ~7; 1569 } 1570 1571 static void 1572 setup_return(CPUARMState *env, struct target_sigaction *ka, 1573 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) 1574 { 1575 abi_ulong handler = ka->_sa_handler; 1576 abi_ulong retcode; 1577 int thumb = handler & 1; 1578 uint32_t cpsr = cpsr_read(env); 1579 1580 cpsr &= ~CPSR_IT; 1581 if (thumb) { 1582 cpsr |= CPSR_T; 1583 } else { 1584 cpsr &= ~CPSR_T; 1585 } 1586 1587 if (ka->sa_flags & TARGET_SA_RESTORER) { 1588 retcode = ka->sa_restorer; 1589 } else { 1590 unsigned int idx = thumb; 1591 1592 if (ka->sa_flags & TARGET_SA_SIGINFO) { 1593 idx += 2; 1594 } 1595 1596 __put_user(retcodes[idx], rc); 1597 1598 retcode = rc_addr + thumb; 1599 } 1600 1601 env->regs[0] = usig; 1602 env->regs[13] = frame_addr; 1603 env->regs[14] = retcode; 1604 env->regs[15] = handler & (thumb ? 
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}

static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe+1);
}
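/* Added commentary (not from the original): both helpers above append a
 * record to the ucontext's tuc_regspace area.  Each record starts with a
 * magic word (TARGET_VFP_MAGIC / TARGET_IWMMXT_MAGIC) and its total size, so
 * a reader can recognise and skip records; setup_sigframe_v2() terminates
 * the list with a single zero word, and the restore path walks the records
 * back in the same order.
 */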
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}

/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_frame(int usig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *regs)
{
    if (get_osversion() >= 0x020612) {
        setup_frame_v2(usig, ka, set, regs);
    } else {
        setup_frame_v1(usig, ka, set, regs);
    }
}

/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}

static void setup_rt_frame(int usig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUARMState *env)
{
    if (get_osversion() >= 0x020612) {
        setup_rt_frame_v2(usig, ka, info, set, env);
    } else {
        setup_rt_frame_v1(usig, ka, info, set, env);
    }
}

static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
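/* Added commentary (not from the original): the CPSR is written back with
 * the CPSR_USER | CPSR_EXEC mask, so only the user-visible flag bits and the
 * Thumb/IT execution state can be taken from the frame; the processor mode
 * and interrupt-mask bits of the emulated CPU are left untouched, which
 * keeps a forged frame from doing more than the guest could on real
 * hardware.
 */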
static long do_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v1 *frame = NULL;
    target_sigset_t set;
    sigset_t host_set;
    int i;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    __get_user(set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->sc)) {
        goto badframe;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}

static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}

static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
                                             abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;

    __get_user(magic, &iwmmxtframe->magic);
    __get_user(sz, &iwmmxtframe->size);
    if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
        return 0;
    }
    for (i = 0; i < 16; i++) {
        __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe + 1);
}

static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
(!regspace) { 1969 return 1; 1970 } 1971 } 1972 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 1973 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 1974 if (!regspace) { 1975 return 1; 1976 } 1977 } 1978 1979 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 1980 return 1; 1981 1982 #if 0 1983 /* Send SIGTRAP if we're single-stepping */ 1984 if (ptrace_cancel_bpt(current)) 1985 send_sig(SIGTRAP, current, 1); 1986 #endif 1987 1988 return 0; 1989 } 1990 1991 static long do_sigreturn_v2(CPUARMState *env) 1992 { 1993 abi_ulong frame_addr; 1994 struct sigframe_v2 *frame = NULL; 1995 1996 /* 1997 * Since we stacked the signal on a 64-bit boundary, 1998 * then 'sp' should be word aligned here. If it's 1999 * not, then the user is trying to mess with us. 2000 */ 2001 frame_addr = env->regs[13]; 2002 trace_user_do_sigreturn(env, frame_addr); 2003 if (frame_addr & 7) { 2004 goto badframe; 2005 } 2006 2007 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2008 goto badframe; 2009 } 2010 2011 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2012 goto badframe; 2013 } 2014 2015 unlock_user_struct(frame, frame_addr, 0); 2016 return -TARGET_QEMU_ESIGRETURN; 2017 2018 badframe: 2019 unlock_user_struct(frame, frame_addr, 0); 2020 force_sig(TARGET_SIGSEGV /* , current */); 2021 return 0; 2022 } 2023 2024 long do_sigreturn(CPUARMState *env) 2025 { 2026 if (get_osversion() >= 0x020612) { 2027 return do_sigreturn_v2(env); 2028 } else { 2029 return do_sigreturn_v1(env); 2030 } 2031 } 2032 2033 static long do_rt_sigreturn_v1(CPUARMState *env) 2034 { 2035 abi_ulong frame_addr; 2036 struct rt_sigframe_v1 *frame = NULL; 2037 sigset_t host_set; 2038 2039 /* 2040 * Since we stacked the signal on a 64-bit boundary, 2041 * then 'sp' should be word aligned here. If it's 2042 * not, then the user is trying to mess with us. 2043 */ 2044 frame_addr = env->regs[13]; 2045 trace_user_do_rt_sigreturn(env, frame_addr); 2046 if (frame_addr & 7) { 2047 goto badframe; 2048 } 2049 2050 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2051 goto badframe; 2052 } 2053 2054 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2055 set_sigmask(&host_set); 2056 2057 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2058 goto badframe; 2059 } 2060 2061 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2062 goto badframe; 2063 2064 #if 0 2065 /* Send SIGTRAP if we're single-stepping */ 2066 if (ptrace_cancel_bpt(current)) 2067 send_sig(SIGTRAP, current, 1); 2068 #endif 2069 unlock_user_struct(frame, frame_addr, 0); 2070 return -TARGET_QEMU_ESIGRETURN; 2071 2072 badframe: 2073 unlock_user_struct(frame, frame_addr, 0); 2074 force_sig(TARGET_SIGSEGV /* , current */); 2075 return 0; 2076 } 2077 2078 static long do_rt_sigreturn_v2(CPUARMState *env) 2079 { 2080 abi_ulong frame_addr; 2081 struct rt_sigframe_v2 *frame = NULL; 2082 2083 /* 2084 * Since we stacked the signal on a 64-bit boundary, 2085 * then 'sp' should be word aligned here. If it's 2086 * not, then the user is trying to mess with us. 
2087 */ 2088 frame_addr = env->regs[13]; 2089 trace_user_do_rt_sigreturn(env, frame_addr); 2090 if (frame_addr & 7) { 2091 goto badframe; 2092 } 2093 2094 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2095 goto badframe; 2096 } 2097 2098 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) { 2099 goto badframe; 2100 } 2101 2102 unlock_user_struct(frame, frame_addr, 0); 2103 return -TARGET_QEMU_ESIGRETURN; 2104 2105 badframe: 2106 unlock_user_struct(frame, frame_addr, 0); 2107 force_sig(TARGET_SIGSEGV /* , current */); 2108 return 0; 2109 } 2110 2111 long do_rt_sigreturn(CPUARMState *env) 2112 { 2113 if (get_osversion() >= 0x020612) { 2114 return do_rt_sigreturn_v2(env); 2115 } else { 2116 return do_rt_sigreturn_v1(env); 2117 } 2118 } 2119 2120 #elif defined(TARGET_SPARC) 2121 2122 #define __SUNOS_MAXWIN 31 2123 2124 /* This is what SunOS does, so shall I. */ 2125 struct target_sigcontext { 2126 abi_ulong sigc_onstack; /* state to restore */ 2127 2128 abi_ulong sigc_mask; /* sigmask to restore */ 2129 abi_ulong sigc_sp; /* stack pointer */ 2130 abi_ulong sigc_pc; /* program counter */ 2131 abi_ulong sigc_npc; /* next program counter */ 2132 abi_ulong sigc_psr; /* for condition codes etc */ 2133 abi_ulong sigc_g1; /* User uses these two registers */ 2134 abi_ulong sigc_o0; /* within the trampoline code. */ 2135 2136 /* Now comes information regarding the users window set 2137 * at the time of the signal. 2138 */ 2139 abi_ulong sigc_oswins; /* outstanding windows */ 2140 2141 /* stack ptrs for each regwin buf */ 2142 char *sigc_spbuf[__SUNOS_MAXWIN]; 2143 2144 /* Windows to restore after signal */ 2145 struct { 2146 abi_ulong locals[8]; 2147 abi_ulong ins[8]; 2148 } sigc_wbuf[__SUNOS_MAXWIN]; 2149 }; 2150 /* A Sparc stack frame */ 2151 struct sparc_stackf { 2152 abi_ulong locals[8]; 2153 abi_ulong ins[8]; 2154 /* It's simpler to treat fp and callers_pc as elements of ins[] 2155 * since we never need to access them ourselves. 
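 * (In a SPARC register window, %i6 is the frame pointer and %i7 the caller's
 * return address, so ins[6]/ins[7] already hold fp and callers_pc.)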
2156 */ 2157 char *structptr; 2158 abi_ulong xargs[6]; 2159 abi_ulong xxargs[1]; 2160 }; 2161 2162 typedef struct { 2163 struct { 2164 abi_ulong psr; 2165 abi_ulong pc; 2166 abi_ulong npc; 2167 abi_ulong y; 2168 abi_ulong u_regs[16]; /* globals and ins */ 2169 } si_regs; 2170 int si_mask; 2171 } __siginfo_t; 2172 2173 typedef struct { 2174 abi_ulong si_float_regs[32]; 2175 unsigned long si_fsr; 2176 unsigned long si_fpqdepth; 2177 struct { 2178 unsigned long *insn_addr; 2179 unsigned long insn; 2180 } si_fpqueue [16]; 2181 } qemu_siginfo_fpu_t; 2182 2183 2184 struct target_signal_frame { 2185 struct sparc_stackf ss; 2186 __siginfo_t info; 2187 abi_ulong fpu_save; 2188 abi_ulong insns[2] __attribute__ ((aligned (8))); 2189 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2190 abi_ulong extra_size; /* Should be 0 */ 2191 qemu_siginfo_fpu_t fpu_state; 2192 }; 2193 struct target_rt_signal_frame { 2194 struct sparc_stackf ss; 2195 siginfo_t info; 2196 abi_ulong regs[20]; 2197 sigset_t mask; 2198 abi_ulong fpu_save; 2199 unsigned int insns[2]; 2200 stack_t stack; 2201 unsigned int extra_size; /* Should be 0 */ 2202 qemu_siginfo_fpu_t fpu_state; 2203 }; 2204 2205 #define UREG_O0 16 2206 #define UREG_O6 22 2207 #define UREG_I0 0 2208 #define UREG_I1 1 2209 #define UREG_I2 2 2210 #define UREG_I3 3 2211 #define UREG_I4 4 2212 #define UREG_I5 5 2213 #define UREG_I6 6 2214 #define UREG_I7 7 2215 #define UREG_L0 8 2216 #define UREG_FP UREG_I6 2217 #define UREG_SP UREG_O6 2218 2219 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2220 CPUSPARCState *env, 2221 unsigned long framesize) 2222 { 2223 abi_ulong sp; 2224 2225 sp = env->regwptr[UREG_FP]; 2226 2227 /* This is the X/Open sanctioned signal stack switching. */ 2228 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2229 if (!on_sig_stack(sp) 2230 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2231 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2232 } 2233 } 2234 return sp - framesize; 2235 } 2236 2237 static int 2238 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2239 { 2240 int err = 0, i; 2241 2242 __put_user(env->psr, &si->si_regs.psr); 2243 __put_user(env->pc, &si->si_regs.pc); 2244 __put_user(env->npc, &si->si_regs.npc); 2245 __put_user(env->y, &si->si_regs.y); 2246 for (i=0; i < 8; i++) { 2247 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2248 } 2249 for (i=0; i < 8; i++) { 2250 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2251 } 2252 __put_user(mask, &si->si_mask); 2253 return err; 2254 } 2255 2256 #if 0 2257 static int 2258 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2259 CPUSPARCState *env, unsigned long mask) 2260 { 2261 int err = 0; 2262 2263 __put_user(mask, &sc->sigc_mask); 2264 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2265 __put_user(env->pc, &sc->sigc_pc); 2266 __put_user(env->npc, &sc->sigc_npc); 2267 __put_user(env->psr, &sc->sigc_psr); 2268 __put_user(env->gregs[1], &sc->sigc_g1); 2269 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2270 2271 return err; 2272 } 2273 #endif 2274 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2275 2276 static void setup_frame(int sig, struct target_sigaction *ka, 2277 target_sigset_t *set, CPUSPARCState *env) 2278 { 2279 abi_ulong sf_addr; 2280 struct target_signal_frame *sf; 2281 int sigframe_size, err, i; 2282 2283 /* 1. 
Make sure everything is clean */ 2284 //synchronize_user_stack(); 2285 2286 sigframe_size = NF_ALIGNEDSZ; 2287 sf_addr = get_sigframe(ka, env, sigframe_size); 2288 trace_user_setup_frame(env, sf_addr); 2289 2290 sf = lock_user(VERIFY_WRITE, sf_addr, 2291 sizeof(struct target_signal_frame), 0); 2292 if (!sf) { 2293 goto sigsegv; 2294 } 2295 #if 0 2296 if (invalid_frame_pointer(sf, sigframe_size)) 2297 goto sigill_and_return; 2298 #endif 2299 /* 2. Save the current process state */ 2300 err = setup___siginfo(&sf->info, env, set->sig[0]); 2301 __put_user(0, &sf->extra_size); 2302 2303 //save_fpu_state(regs, &sf->fpu_state); 2304 //__put_user(&sf->fpu_state, &sf->fpu_save); 2305 2306 __put_user(set->sig[0], &sf->info.si_mask); 2307 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2308 __put_user(set->sig[i + 1], &sf->extramask[i]); 2309 } 2310 2311 for (i = 0; i < 8; i++) { 2312 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2313 } 2314 for (i = 0; i < 8; i++) { 2315 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2316 } 2317 if (err) 2318 goto sigsegv; 2319 2320 /* 3. signal handler back-trampoline and parameters */ 2321 env->regwptr[UREG_FP] = sf_addr; 2322 env->regwptr[UREG_I0] = sig; 2323 env->regwptr[UREG_I1] = sf_addr + 2324 offsetof(struct target_signal_frame, info); 2325 env->regwptr[UREG_I2] = sf_addr + 2326 offsetof(struct target_signal_frame, info); 2327 2328 /* 4. signal handler */ 2329 env->pc = ka->_sa_handler; 2330 env->npc = (env->pc + 4); 2331 /* 5. return to kernel instructions */ 2332 if (ka->sa_restorer) { 2333 env->regwptr[UREG_I7] = ka->sa_restorer; 2334 } else { 2335 uint32_t val32; 2336 2337 env->regwptr[UREG_I7] = sf_addr + 2338 offsetof(struct target_signal_frame, insns) - 2 * 4; 2339 2340 /* mov __NR_sigreturn, %g1 */ 2341 val32 = 0x821020d8; 2342 __put_user(val32, &sf->insns[0]); 2343 2344 /* t 0x10 */ 2345 val32 = 0x91d02010; 2346 __put_user(val32, &sf->insns[1]); 2347 if (err) 2348 goto sigsegv; 2349 2350 /* Flush instruction space. */ 2351 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2352 // tb_flush(env); 2353 } 2354 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2355 return; 2356 #if 0 2357 sigill_and_return: 2358 force_sig(TARGET_SIGILL); 2359 #endif 2360 sigsegv: 2361 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2362 force_sig(TARGET_SIGSEGV); 2363 } 2364 2365 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2366 target_siginfo_t *info, 2367 target_sigset_t *set, CPUSPARCState *env) 2368 { 2369 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2370 } 2371 2372 long do_sigreturn(CPUSPARCState *env) 2373 { 2374 abi_ulong sf_addr; 2375 struct target_signal_frame *sf; 2376 uint32_t up_psr, pc, npc; 2377 target_sigset_t set; 2378 sigset_t host_set; 2379 int err=0, i; 2380 2381 sf_addr = env->regwptr[UREG_FP]; 2382 trace_user_do_sigreturn(env, sf_addr); 2383 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2384 goto segv_and_exit; 2385 } 2386 2387 /* 1. Make sure we are not getting garbage from the user */ 2388 2389 if (sf_addr & 3) 2390 goto segv_and_exit; 2391 2392 __get_user(pc, &sf->info.si_regs.pc); 2393 __get_user(npc, &sf->info.si_regs.npc); 2394 2395 if ((pc | npc) & 3) { 2396 goto segv_and_exit; 2397 } 2398 2399 /* 2. Restore the state */ 2400 __get_user(up_psr, &sf->info.si_regs.psr); 2401 2402 /* User can only change condition codes and FPU enabling in %psr. 
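 * (PSR_ICC is the integer condition-code field of %psr; PSR_EF, the FPU
 * enable bit, stays commented out here until FPU save/restore is
 * implemented -- see the FIXME below.)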
*/ 2403 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2404 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2405 2406 env->pc = pc; 2407 env->npc = npc; 2408 __get_user(env->y, &sf->info.si_regs.y); 2409 for (i=0; i < 8; i++) { 2410 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2411 } 2412 for (i=0; i < 8; i++) { 2413 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2414 } 2415 2416 /* FIXME: implement FPU save/restore: 2417 * __get_user(fpu_save, &sf->fpu_save); 2418 * if (fpu_save) 2419 * err |= restore_fpu_state(env, fpu_save); 2420 */ 2421 2422 /* This is pretty much atomic, no amount locking would prevent 2423 * the races which exist anyways. 2424 */ 2425 __get_user(set.sig[0], &sf->info.si_mask); 2426 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2427 __get_user(set.sig[i], &sf->extramask[i - 1]); 2428 } 2429 2430 target_to_host_sigset_internal(&host_set, &set); 2431 set_sigmask(&host_set); 2432 2433 if (err) { 2434 goto segv_and_exit; 2435 } 2436 unlock_user_struct(sf, sf_addr, 0); 2437 return -TARGET_QEMU_ESIGRETURN; 2438 2439 segv_and_exit: 2440 unlock_user_struct(sf, sf_addr, 0); 2441 force_sig(TARGET_SIGSEGV); 2442 } 2443 2444 long do_rt_sigreturn(CPUSPARCState *env) 2445 { 2446 trace_user_do_rt_sigreturn(env, 0); 2447 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2448 return -TARGET_ENOSYS; 2449 } 2450 2451 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2452 #define MC_TSTATE 0 2453 #define MC_PC 1 2454 #define MC_NPC 2 2455 #define MC_Y 3 2456 #define MC_G1 4 2457 #define MC_G2 5 2458 #define MC_G3 6 2459 #define MC_G4 7 2460 #define MC_G5 8 2461 #define MC_G6 9 2462 #define MC_G7 10 2463 #define MC_O0 11 2464 #define MC_O1 12 2465 #define MC_O2 13 2466 #define MC_O3 14 2467 #define MC_O4 15 2468 #define MC_O5 16 2469 #define MC_O6 17 2470 #define MC_O7 18 2471 #define MC_NGREG 19 2472 2473 typedef abi_ulong target_mc_greg_t; 2474 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2475 2476 struct target_mc_fq { 2477 abi_ulong *mcfq_addr; 2478 uint32_t mcfq_insn; 2479 }; 2480 2481 struct target_mc_fpu { 2482 union { 2483 uint32_t sregs[32]; 2484 uint64_t dregs[32]; 2485 //uint128_t qregs[16]; 2486 } mcfpu_fregs; 2487 abi_ulong mcfpu_fsr; 2488 abi_ulong mcfpu_fprs; 2489 abi_ulong mcfpu_gsr; 2490 struct target_mc_fq *mcfpu_fq; 2491 unsigned char mcfpu_qcnt; 2492 unsigned char mcfpu_qentsz; 2493 unsigned char mcfpu_enab; 2494 }; 2495 typedef struct target_mc_fpu target_mc_fpu_t; 2496 2497 typedef struct { 2498 target_mc_gregset_t mc_gregs; 2499 target_mc_greg_t mc_fp; 2500 target_mc_greg_t mc_i7; 2501 target_mc_fpu_t mc_fpregs; 2502 } target_mcontext_t; 2503 2504 struct target_ucontext { 2505 struct target_ucontext *tuc_link; 2506 abi_ulong tuc_flags; 2507 target_sigset_t tuc_sigmask; 2508 target_mcontext_t tuc_mcontext; 2509 }; 2510 2511 /* A V9 register window */ 2512 struct target_reg_window { 2513 abi_ulong locals[8]; 2514 abi_ulong ins[8]; 2515 }; 2516 2517 #define TARGET_STACK_BIAS 2047 2518 2519 /* {set, get}context() needed for 64-bit SparcLinux userland. 
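 * sparc64_set_context() reloads PC/NPC, the globals and outs, the condition
 * codes and window pointer from tstate, and rewrites the saved fp/%i7 slots
 * of the register window on the user stack; sparc64_get_context() does the
 * reverse for getcontext().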
*/ 2520 void sparc64_set_context(CPUSPARCState *env) 2521 { 2522 abi_ulong ucp_addr; 2523 struct target_ucontext *ucp; 2524 target_mc_gregset_t *grp; 2525 abi_ulong pc, npc, tstate; 2526 abi_ulong fp, i7, w_addr; 2527 unsigned int i; 2528 2529 ucp_addr = env->regwptr[UREG_I0]; 2530 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2531 goto do_sigsegv; 2532 } 2533 grp = &ucp->tuc_mcontext.mc_gregs; 2534 __get_user(pc, &((*grp)[MC_PC])); 2535 __get_user(npc, &((*grp)[MC_NPC])); 2536 if ((pc | npc) & 3) { 2537 goto do_sigsegv; 2538 } 2539 if (env->regwptr[UREG_I1]) { 2540 target_sigset_t target_set; 2541 sigset_t set; 2542 2543 if (TARGET_NSIG_WORDS == 1) { 2544 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2545 } else { 2546 abi_ulong *src, *dst; 2547 src = ucp->tuc_sigmask.sig; 2548 dst = target_set.sig; 2549 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2550 __get_user(*dst, src); 2551 } 2552 } 2553 target_to_host_sigset_internal(&set, &target_set); 2554 set_sigmask(&set); 2555 } 2556 env->pc = pc; 2557 env->npc = npc; 2558 __get_user(env->y, &((*grp)[MC_Y])); 2559 __get_user(tstate, &((*grp)[MC_TSTATE])); 2560 env->asi = (tstate >> 24) & 0xff; 2561 cpu_put_ccr(env, tstate >> 32); 2562 cpu_put_cwp64(env, tstate & 0x1f); 2563 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2564 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2565 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2566 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2567 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2568 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2569 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2570 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2571 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2572 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2573 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2574 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2575 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2576 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2577 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2578 2579 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2580 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2581 2582 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2583 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2584 abi_ulong) != 0) { 2585 goto do_sigsegv; 2586 } 2587 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2588 abi_ulong) != 0) { 2589 goto do_sigsegv; 2590 } 2591 /* FIXME this does not match how the kernel handles the FPU in 2592 * its sparc64_set_context implementation. 
In particular the FPU 2593 * is only restored if fenab is non-zero in: 2594 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2595 */ 2596 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2597 { 2598 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2599 for (i = 0; i < 64; i++, src++) { 2600 if (i & 1) { 2601 __get_user(env->fpr[i/2].l.lower, src); 2602 } else { 2603 __get_user(env->fpr[i/2].l.upper, src); 2604 } 2605 } 2606 } 2607 __get_user(env->fsr, 2608 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2609 __get_user(env->gsr, 2610 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2611 unlock_user_struct(ucp, ucp_addr, 0); 2612 return; 2613 do_sigsegv: 2614 unlock_user_struct(ucp, ucp_addr, 0); 2615 force_sig(TARGET_SIGSEGV); 2616 } 2617 2618 void sparc64_get_context(CPUSPARCState *env) 2619 { 2620 abi_ulong ucp_addr; 2621 struct target_ucontext *ucp; 2622 target_mc_gregset_t *grp; 2623 target_mcontext_t *mcp; 2624 abi_ulong fp, i7, w_addr; 2625 int err; 2626 unsigned int i; 2627 target_sigset_t target_set; 2628 sigset_t set; 2629 2630 ucp_addr = env->regwptr[UREG_I0]; 2631 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2632 goto do_sigsegv; 2633 } 2634 2635 mcp = &ucp->tuc_mcontext; 2636 grp = &mcp->mc_gregs; 2637 2638 /* Skip over the trap instruction, first. */ 2639 env->pc = env->npc; 2640 env->npc += 4; 2641 2642 /* If we're only reading the signal mask then do_sigprocmask() 2643 * is guaranteed not to fail, which is important because we don't 2644 * have any way to signal a failure or restart this operation since 2645 * this is not a normal syscall. 2646 */ 2647 err = do_sigprocmask(0, NULL, &set); 2648 assert(err == 0); 2649 host_to_target_sigset_internal(&target_set, &set); 2650 if (TARGET_NSIG_WORDS == 1) { 2651 __put_user(target_set.sig[0], 2652 (abi_ulong *)&ucp->tuc_sigmask); 2653 } else { 2654 abi_ulong *src, *dst; 2655 src = target_set.sig; 2656 dst = ucp->tuc_sigmask.sig; 2657 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2658 __put_user(*src, dst); 2659 } 2660 if (err) 2661 goto do_sigsegv; 2662 } 2663 2664 /* XXX: tstate must be saved properly */ 2665 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2666 __put_user(env->pc, &((*grp)[MC_PC])); 2667 __put_user(env->npc, &((*grp)[MC_NPC])); 2668 __put_user(env->y, &((*grp)[MC_Y])); 2669 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2670 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2671 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2672 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2673 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2674 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2675 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2676 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2677 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2678 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2679 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2680 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2681 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2682 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2683 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2684 2685 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2686 fp = i7 = 0; 2687 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2688 abi_ulong) != 0) { 2689 goto do_sigsegv; 2690 } 2691 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2692 abi_ulong) != 0) { 2693 goto do_sigsegv; 2694 } 2695 __put_user(fp, &(mcp->mc_fp)); 2696 __put_user(i7, 
&(mcp->mc_i7)); 2697 2698 { 2699 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2700 for (i = 0; i < 64; i++, dst++) { 2701 if (i & 1) { 2702 __put_user(env->fpr[i/2].l.lower, dst); 2703 } else { 2704 __put_user(env->fpr[i/2].l.upper, dst); 2705 } 2706 } 2707 } 2708 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2709 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2710 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2711 2712 if (err) 2713 goto do_sigsegv; 2714 unlock_user_struct(ucp, ucp_addr, 1); 2715 return; 2716 do_sigsegv: 2717 unlock_user_struct(ucp, ucp_addr, 1); 2718 force_sig(TARGET_SIGSEGV); 2719 } 2720 #endif 2721 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2722 2723 # if defined(TARGET_ABI_MIPSO32) 2724 struct target_sigcontext { 2725 uint32_t sc_regmask; /* Unused */ 2726 uint32_t sc_status; 2727 uint64_t sc_pc; 2728 uint64_t sc_regs[32]; 2729 uint64_t sc_fpregs[32]; 2730 uint32_t sc_ownedfp; /* Unused */ 2731 uint32_t sc_fpc_csr; 2732 uint32_t sc_fpc_eir; /* Unused */ 2733 uint32_t sc_used_math; 2734 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2735 uint32_t pad0; 2736 uint64_t sc_mdhi; 2737 uint64_t sc_mdlo; 2738 target_ulong sc_hi1; /* Was sc_cause */ 2739 target_ulong sc_lo1; /* Was sc_badvaddr */ 2740 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2741 target_ulong sc_lo2; 2742 target_ulong sc_hi3; 2743 target_ulong sc_lo3; 2744 }; 2745 # else /* N32 || N64 */ 2746 struct target_sigcontext { 2747 uint64_t sc_regs[32]; 2748 uint64_t sc_fpregs[32]; 2749 uint64_t sc_mdhi; 2750 uint64_t sc_hi1; 2751 uint64_t sc_hi2; 2752 uint64_t sc_hi3; 2753 uint64_t sc_mdlo; 2754 uint64_t sc_lo1; 2755 uint64_t sc_lo2; 2756 uint64_t sc_lo3; 2757 uint64_t sc_pc; 2758 uint32_t sc_fpc_csr; 2759 uint32_t sc_used_math; 2760 uint32_t sc_dsp; 2761 uint32_t sc_reserved; 2762 }; 2763 # endif /* O32 */ 2764 2765 struct sigframe { 2766 uint32_t sf_ass[4]; /* argument save space for o32 */ 2767 uint32_t sf_code[2]; /* signal trampoline */ 2768 struct target_sigcontext sf_sc; 2769 target_sigset_t sf_mask; 2770 }; 2771 2772 struct target_ucontext { 2773 target_ulong tuc_flags; 2774 target_ulong tuc_link; 2775 target_stack_t tuc_stack; 2776 target_ulong pad0; 2777 struct target_sigcontext tuc_mcontext; 2778 target_sigset_t tuc_sigmask; 2779 }; 2780 2781 struct target_rt_sigframe { 2782 uint32_t rs_ass[4]; /* argument save space for o32 */ 2783 uint32_t rs_code[2]; /* signal trampoline */ 2784 struct target_siginfo rs_info; 2785 struct target_ucontext rs_uc; 2786 }; 2787 2788 /* Install trampoline to jump back from signal handler */ 2789 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2790 { 2791 int err = 0; 2792 2793 /* 2794 * Set up the return code ... 2795 * 2796 * li v0, __NR__foo_sigreturn 2797 * syscall 2798 */ 2799 2800 __put_user(0x24020000 + syscall, tramp + 0); 2801 __put_user(0x0000000c , tramp + 1); 2802 return err; 2803 } 2804 2805 static inline void setup_sigcontext(CPUMIPSState *regs, 2806 struct target_sigcontext *sc) 2807 { 2808 int i; 2809 2810 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2811 regs->hflags &= ~MIPS_HFLAG_BMASK; 2812 2813 __put_user(0, &sc->sc_regs[0]); 2814 for (i = 1; i < 32; ++i) { 2815 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2816 } 2817 2818 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2819 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2820 2821 /* Rather than checking for dsp existence, always copy. The storage 2822 would just be garbage otherwise. 
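   (HI[1..3]/LO[1..3] below are the extra accumulator pairs of the MIPS DSP
   ASE; cpu_rddsp()/cpu_wrdsp() with mask 0x3ff transfer the full DSPControl
   state.)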
*/ 2823 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2824 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2825 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2826 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2827 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2828 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2829 { 2830 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2831 __put_user(dsp, &sc->sc_dsp); 2832 } 2833 2834 __put_user(1, &sc->sc_used_math); 2835 2836 for (i = 0; i < 32; ++i) { 2837 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2838 } 2839 } 2840 2841 static inline void 2842 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2843 { 2844 int i; 2845 2846 __get_user(regs->CP0_EPC, &sc->sc_pc); 2847 2848 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2849 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2850 2851 for (i = 1; i < 32; ++i) { 2852 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2853 } 2854 2855 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2856 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2857 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2858 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2859 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2860 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2861 { 2862 uint32_t dsp; 2863 __get_user(dsp, &sc->sc_dsp); 2864 cpu_wrdsp(dsp, 0x3ff, regs); 2865 } 2866 2867 for (i = 0; i < 32; ++i) { 2868 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2869 } 2870 } 2871 2872 /* 2873 * Determine which stack to use.. 2874 */ 2875 static inline abi_ulong 2876 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 2877 { 2878 unsigned long sp; 2879 2880 /* Default to using normal stack */ 2881 sp = regs->active_tc.gpr[29]; 2882 2883 /* 2884 * FPU emulator may have its own trampoline active just 2885 * above the user stack, 16-bytes before the next lowest 2886 * 16 byte boundary. Try to avoid trashing it. 2887 */ 2888 sp -= 32; 2889 2890 /* This is the X/Open sanctioned signal stack switching. */ 2891 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 2892 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2893 } 2894 2895 return (sp - frame_size) & ~7; 2896 } 2897 2898 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 2899 { 2900 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 2901 env->hflags &= ~MIPS_HFLAG_M16; 2902 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 2903 env->active_tc.PC &= ~(target_ulong) 1; 2904 } 2905 } 2906 2907 # if defined(TARGET_ABI_MIPSO32) 2908 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 2909 static void setup_frame(int sig, struct target_sigaction * ka, 2910 target_sigset_t *set, CPUMIPSState *regs) 2911 { 2912 struct sigframe *frame; 2913 abi_ulong frame_addr; 2914 int i; 2915 2916 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 2917 trace_user_setup_frame(regs, frame_addr); 2918 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2919 goto give_sigsegv; 2920 } 2921 2922 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 2923 2924 setup_sigcontext(regs, &frame->sf_sc); 2925 2926 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 2927 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 2928 } 2929 2930 /* 2931 * Arguments to signal handler: 2932 * 2933 * a0 = signal number 2934 * a1 = 0 (should be cause) 2935 * a2 = pointer to struct sigcontext 2936 * 2937 * $25 and PC point to the signal handler, $29 points to the 2938 * struct sigframe. 
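 * ($25/t9 must hold the handler's address because MIPS PIC code computes
 * its GP from t9 at function entry.)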
2939 */ 2940 regs->active_tc.gpr[ 4] = sig; 2941 regs->active_tc.gpr[ 5] = 0; 2942 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 2943 regs->active_tc.gpr[29] = frame_addr; 2944 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 2945 /* The original kernel code sets CP0_EPC to the handler 2946 * since it returns to userland using eret 2947 * we cannot do this here, and we must set PC directly */ 2948 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 2949 mips_set_hflags_isa_mode_from_pc(regs); 2950 unlock_user_struct(frame, frame_addr, 1); 2951 return; 2952 2953 give_sigsegv: 2954 force_sig(TARGET_SIGSEGV/*, current*/); 2955 } 2956 2957 long do_sigreturn(CPUMIPSState *regs) 2958 { 2959 struct sigframe *frame; 2960 abi_ulong frame_addr; 2961 sigset_t blocked; 2962 target_sigset_t target_set; 2963 int i; 2964 2965 frame_addr = regs->active_tc.gpr[29]; 2966 trace_user_do_sigreturn(regs, frame_addr); 2967 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 2968 goto badframe; 2969 2970 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 2971 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 2972 } 2973 2974 target_to_host_sigset_internal(&blocked, &target_set); 2975 set_sigmask(&blocked); 2976 2977 restore_sigcontext(regs, &frame->sf_sc); 2978 2979 #if 0 2980 /* 2981 * Don't let your children do this ... 2982 */ 2983 __asm__ __volatile__( 2984 "move\t$29, %0\n\t" 2985 "j\tsyscall_exit" 2986 :/* no outputs */ 2987 :"r" (®s)); 2988 /* Unreached */ 2989 #endif 2990 2991 regs->active_tc.PC = regs->CP0_EPC; 2992 mips_set_hflags_isa_mode_from_pc(regs); 2993 /* I am not sure this is right, but it seems to work 2994 * maybe a problem with nested signals ? */ 2995 regs->CP0_EPC = 0; 2996 return -TARGET_QEMU_ESIGRETURN; 2997 2998 badframe: 2999 force_sig(TARGET_SIGSEGV/*, current*/); 3000 return 0; 3001 } 3002 # endif /* O32 */ 3003 3004 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3005 target_siginfo_t *info, 3006 target_sigset_t *set, CPUMIPSState *env) 3007 { 3008 struct target_rt_sigframe *frame; 3009 abi_ulong frame_addr; 3010 int i; 3011 3012 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3013 trace_user_setup_rt_frame(env, frame_addr); 3014 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3015 goto give_sigsegv; 3016 } 3017 3018 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3019 3020 tswap_siginfo(&frame->rs_info, info); 3021 3022 __put_user(0, &frame->rs_uc.tuc_flags); 3023 __put_user(0, &frame->rs_uc.tuc_link); 3024 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3025 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3026 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3027 &frame->rs_uc.tuc_stack.ss_flags); 3028 3029 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3030 3031 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3032 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3033 } 3034 3035 /* 3036 * Arguments to signal handler: 3037 * 3038 * a0 = signal number 3039 * a1 = pointer to siginfo_t 3040 * a2 = pointer to struct ucontext 3041 * 3042 * $25 and PC point to the signal handler, $29 points to the 3043 * struct sigframe. 
3044 */ 3045 env->active_tc.gpr[ 4] = sig; 3046 env->active_tc.gpr[ 5] = frame_addr 3047 + offsetof(struct target_rt_sigframe, rs_info); 3048 env->active_tc.gpr[ 6] = frame_addr 3049 + offsetof(struct target_rt_sigframe, rs_uc); 3050 env->active_tc.gpr[29] = frame_addr; 3051 env->active_tc.gpr[31] = frame_addr 3052 + offsetof(struct target_rt_sigframe, rs_code); 3053 /* The original kernel code sets CP0_EPC to the handler 3054 * since it returns to userland using eret 3055 * we cannot do this here, and we must set PC directly */ 3056 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3057 mips_set_hflags_isa_mode_from_pc(env); 3058 unlock_user_struct(frame, frame_addr, 1); 3059 return; 3060 3061 give_sigsegv: 3062 unlock_user_struct(frame, frame_addr, 1); 3063 force_sig(TARGET_SIGSEGV/*, current*/); 3064 } 3065 3066 long do_rt_sigreturn(CPUMIPSState *env) 3067 { 3068 struct target_rt_sigframe *frame; 3069 abi_ulong frame_addr; 3070 sigset_t blocked; 3071 3072 frame_addr = env->active_tc.gpr[29]; 3073 trace_user_do_rt_sigreturn(env, frame_addr); 3074 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3075 goto badframe; 3076 } 3077 3078 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3079 set_sigmask(&blocked); 3080 3081 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3082 3083 if (do_sigaltstack(frame_addr + 3084 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3085 0, get_sp_from_cpustate(env)) == -EFAULT) 3086 goto badframe; 3087 3088 env->active_tc.PC = env->CP0_EPC; 3089 mips_set_hflags_isa_mode_from_pc(env); 3090 /* I am not sure this is right, but it seems to work 3091 * maybe a problem with nested signals ? */ 3092 env->CP0_EPC = 0; 3093 return -TARGET_QEMU_ESIGRETURN; 3094 3095 badframe: 3096 force_sig(TARGET_SIGSEGV/*, current*/); 3097 return 0; 3098 } 3099 3100 #elif defined(TARGET_SH4) 3101 3102 /* 3103 * code and data structures from linux kernel: 3104 * include/asm-sh/sigcontext.h 3105 * arch/sh/kernel/signal.c 3106 */ 3107 3108 struct target_sigcontext { 3109 target_ulong oldmask; 3110 3111 /* CPU registers */ 3112 target_ulong sc_gregs[16]; 3113 target_ulong sc_pc; 3114 target_ulong sc_pr; 3115 target_ulong sc_sr; 3116 target_ulong sc_gbr; 3117 target_ulong sc_mach; 3118 target_ulong sc_macl; 3119 3120 /* FPU registers */ 3121 target_ulong sc_fpregs[16]; 3122 target_ulong sc_xfpregs[16]; 3123 unsigned int sc_fpscr; 3124 unsigned int sc_fpul; 3125 unsigned int sc_ownedfp; 3126 }; 3127 3128 struct target_sigframe 3129 { 3130 struct target_sigcontext sc; 3131 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3132 uint16_t retcode[3]; 3133 }; 3134 3135 3136 struct target_ucontext { 3137 target_ulong tuc_flags; 3138 struct target_ucontext *tuc_link; 3139 target_stack_t tuc_stack; 3140 struct target_sigcontext tuc_mcontext; 3141 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3142 }; 3143 3144 struct target_rt_sigframe 3145 { 3146 struct target_siginfo info; 3147 struct target_ucontext uc; 3148 uint16_t retcode[3]; 3149 }; 3150 3151 3152 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3153 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3154 3155 static abi_ulong get_sigframe(struct target_sigaction *ka, 3156 unsigned long sp, size_t frame_size) 3157 { 3158 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3159 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3160 } 3161 3162 return (sp - frame_size) & -8ul; 3163 } 3164 3165 static void 
setup_sigcontext(struct target_sigcontext *sc, 3166 CPUSH4State *regs, unsigned long mask) 3167 { 3168 int i; 3169 3170 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3171 COPY(gregs[0]); COPY(gregs[1]); 3172 COPY(gregs[2]); COPY(gregs[3]); 3173 COPY(gregs[4]); COPY(gregs[5]); 3174 COPY(gregs[6]); COPY(gregs[7]); 3175 COPY(gregs[8]); COPY(gregs[9]); 3176 COPY(gregs[10]); COPY(gregs[11]); 3177 COPY(gregs[12]); COPY(gregs[13]); 3178 COPY(gregs[14]); COPY(gregs[15]); 3179 COPY(gbr); COPY(mach); 3180 COPY(macl); COPY(pr); 3181 COPY(sr); COPY(pc); 3182 #undef COPY 3183 3184 for (i=0; i<16; i++) { 3185 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3186 } 3187 __put_user(regs->fpscr, &sc->sc_fpscr); 3188 __put_user(regs->fpul, &sc->sc_fpul); 3189 3190 /* non-iBCS2 extensions.. */ 3191 __put_user(mask, &sc->oldmask); 3192 } 3193 3194 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3195 { 3196 int i; 3197 3198 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3199 COPY(gregs[0]); COPY(gregs[1]); 3200 COPY(gregs[2]); COPY(gregs[3]); 3201 COPY(gregs[4]); COPY(gregs[5]); 3202 COPY(gregs[6]); COPY(gregs[7]); 3203 COPY(gregs[8]); COPY(gregs[9]); 3204 COPY(gregs[10]); COPY(gregs[11]); 3205 COPY(gregs[12]); COPY(gregs[13]); 3206 COPY(gregs[14]); COPY(gregs[15]); 3207 COPY(gbr); COPY(mach); 3208 COPY(macl); COPY(pr); 3209 COPY(sr); COPY(pc); 3210 #undef COPY 3211 3212 for (i=0; i<16; i++) { 3213 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3214 } 3215 __get_user(regs->fpscr, &sc->sc_fpscr); 3216 __get_user(regs->fpul, &sc->sc_fpul); 3217 3218 regs->tra = -1; /* disable syscall checks */ 3219 } 3220 3221 static void setup_frame(int sig, struct target_sigaction *ka, 3222 target_sigset_t *set, CPUSH4State *regs) 3223 { 3224 struct target_sigframe *frame; 3225 abi_ulong frame_addr; 3226 int i; 3227 3228 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3229 trace_user_setup_frame(regs, frame_addr); 3230 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3231 goto give_sigsegv; 3232 } 3233 3234 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3235 3236 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3237 __put_user(set->sig[i + 1], &frame->extramask[i]); 3238 } 3239 3240 /* Set up to return from userspace. If provided, use a stub 3241 already in userspace. 
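   Otherwise the two-instruction trampoline written below is used: MOVW(2)
   is a PC-relative load of the syscall number stored in retcode[2] into r3,
   and TRAP_NOARG traps into the kernel to perform sigreturn.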
*/ 3242 if (ka->sa_flags & TARGET_SA_RESTORER) { 3243 regs->pr = (unsigned long) ka->sa_restorer; 3244 } else { 3245 /* Generate return code (system call to sigreturn) */ 3246 abi_ulong retcode_addr = frame_addr + 3247 offsetof(struct target_sigframe, retcode); 3248 __put_user(MOVW(2), &frame->retcode[0]); 3249 __put_user(TRAP_NOARG, &frame->retcode[1]); 3250 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3251 regs->pr = (unsigned long) retcode_addr; 3252 } 3253 3254 /* Set up registers for signal handler */ 3255 regs->gregs[15] = frame_addr; 3256 regs->gregs[4] = sig; /* Arg for signal handler */ 3257 regs->gregs[5] = 0; 3258 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3259 regs->pc = (unsigned long) ka->_sa_handler; 3260 3261 unlock_user_struct(frame, frame_addr, 1); 3262 return; 3263 3264 give_sigsegv: 3265 unlock_user_struct(frame, frame_addr, 1); 3266 force_sig(TARGET_SIGSEGV); 3267 } 3268 3269 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3270 target_siginfo_t *info, 3271 target_sigset_t *set, CPUSH4State *regs) 3272 { 3273 struct target_rt_sigframe *frame; 3274 abi_ulong frame_addr; 3275 int i; 3276 3277 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3278 trace_user_setup_rt_frame(regs, frame_addr); 3279 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3280 goto give_sigsegv; 3281 } 3282 3283 tswap_siginfo(&frame->info, info); 3284 3285 /* Create the ucontext. */ 3286 __put_user(0, &frame->uc.tuc_flags); 3287 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3288 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3289 &frame->uc.tuc_stack.ss_sp); 3290 __put_user(sas_ss_flags(regs->gregs[15]), 3291 &frame->uc.tuc_stack.ss_flags); 3292 __put_user(target_sigaltstack_used.ss_size, 3293 &frame->uc.tuc_stack.ss_size); 3294 setup_sigcontext(&frame->uc.tuc_mcontext, 3295 regs, set->sig[0]); 3296 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3297 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3298 } 3299 3300 /* Set up to return from userspace. If provided, use a stub 3301 already in userspace. 
*/ 3302 if (ka->sa_flags & TARGET_SA_RESTORER) { 3303 regs->pr = (unsigned long) ka->sa_restorer; 3304 } else { 3305 /* Generate return code (system call to sigreturn) */ 3306 abi_ulong retcode_addr = frame_addr + 3307 offsetof(struct target_rt_sigframe, retcode); 3308 __put_user(MOVW(2), &frame->retcode[0]); 3309 __put_user(TRAP_NOARG, &frame->retcode[1]); 3310 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3311 regs->pr = (unsigned long) retcode_addr; 3312 } 3313 3314 /* Set up registers for signal handler */ 3315 regs->gregs[15] = frame_addr; 3316 regs->gregs[4] = sig; /* Arg for signal handler */ 3317 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3318 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3319 regs->pc = (unsigned long) ka->_sa_handler; 3320 3321 unlock_user_struct(frame, frame_addr, 1); 3322 return; 3323 3324 give_sigsegv: 3325 unlock_user_struct(frame, frame_addr, 1); 3326 force_sig(TARGET_SIGSEGV); 3327 } 3328 3329 long do_sigreturn(CPUSH4State *regs) 3330 { 3331 struct target_sigframe *frame; 3332 abi_ulong frame_addr; 3333 sigset_t blocked; 3334 target_sigset_t target_set; 3335 int i; 3336 int err = 0; 3337 3338 frame_addr = regs->gregs[15]; 3339 trace_user_do_sigreturn(regs, frame_addr); 3340 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3341 goto badframe; 3342 } 3343 3344 __get_user(target_set.sig[0], &frame->sc.oldmask); 3345 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3346 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3347 } 3348 3349 if (err) 3350 goto badframe; 3351 3352 target_to_host_sigset_internal(&blocked, &target_set); 3353 set_sigmask(&blocked); 3354 3355 restore_sigcontext(regs, &frame->sc); 3356 3357 unlock_user_struct(frame, frame_addr, 0); 3358 return -TARGET_QEMU_ESIGRETURN; 3359 3360 badframe: 3361 unlock_user_struct(frame, frame_addr, 0); 3362 force_sig(TARGET_SIGSEGV); 3363 return 0; 3364 } 3365 3366 long do_rt_sigreturn(CPUSH4State *regs) 3367 { 3368 struct target_rt_sigframe *frame; 3369 abi_ulong frame_addr; 3370 sigset_t blocked; 3371 3372 frame_addr = regs->gregs[15]; 3373 trace_user_do_rt_sigreturn(regs, frame_addr); 3374 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3375 goto badframe; 3376 } 3377 3378 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3379 set_sigmask(&blocked); 3380 3381 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3382 3383 if (do_sigaltstack(frame_addr + 3384 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3385 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3386 goto badframe; 3387 } 3388 3389 unlock_user_struct(frame, frame_addr, 0); 3390 return -TARGET_QEMU_ESIGRETURN; 3391 3392 badframe: 3393 unlock_user_struct(frame, frame_addr, 0); 3394 force_sig(TARGET_SIGSEGV); 3395 return 0; 3396 } 3397 #elif defined(TARGET_MICROBLAZE) 3398 3399 struct target_sigcontext { 3400 struct target_pt_regs regs; /* needs to be first */ 3401 uint32_t oldmask; 3402 }; 3403 3404 struct target_stack_t { 3405 abi_ulong ss_sp; 3406 int ss_flags; 3407 unsigned int ss_size; 3408 }; 3409 3410 struct target_ucontext { 3411 abi_ulong tuc_flags; 3412 abi_ulong tuc_link; 3413 struct target_stack_t tuc_stack; 3414 struct target_sigcontext tuc_mcontext; 3415 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3416 }; 3417 3418 /* Signal frames. 
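   (tramp[] holds the two-instruction sigreturn trampoline written by
   setup_frame() below; extramask[] carries the remaining words of the
   blocked-signal mask.)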
*/ 3419 struct target_signal_frame { 3420 struct target_ucontext uc; 3421 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3422 uint32_t tramp[2]; 3423 }; 3424 3425 struct rt_signal_frame { 3426 siginfo_t info; 3427 struct ucontext uc; 3428 uint32_t tramp[2]; 3429 }; 3430 3431 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3432 { 3433 __put_user(env->regs[0], &sc->regs.r0); 3434 __put_user(env->regs[1], &sc->regs.r1); 3435 __put_user(env->regs[2], &sc->regs.r2); 3436 __put_user(env->regs[3], &sc->regs.r3); 3437 __put_user(env->regs[4], &sc->regs.r4); 3438 __put_user(env->regs[5], &sc->regs.r5); 3439 __put_user(env->regs[6], &sc->regs.r6); 3440 __put_user(env->regs[7], &sc->regs.r7); 3441 __put_user(env->regs[8], &sc->regs.r8); 3442 __put_user(env->regs[9], &sc->regs.r9); 3443 __put_user(env->regs[10], &sc->regs.r10); 3444 __put_user(env->regs[11], &sc->regs.r11); 3445 __put_user(env->regs[12], &sc->regs.r12); 3446 __put_user(env->regs[13], &sc->regs.r13); 3447 __put_user(env->regs[14], &sc->regs.r14); 3448 __put_user(env->regs[15], &sc->regs.r15); 3449 __put_user(env->regs[16], &sc->regs.r16); 3450 __put_user(env->regs[17], &sc->regs.r17); 3451 __put_user(env->regs[18], &sc->regs.r18); 3452 __put_user(env->regs[19], &sc->regs.r19); 3453 __put_user(env->regs[20], &sc->regs.r20); 3454 __put_user(env->regs[21], &sc->regs.r21); 3455 __put_user(env->regs[22], &sc->regs.r22); 3456 __put_user(env->regs[23], &sc->regs.r23); 3457 __put_user(env->regs[24], &sc->regs.r24); 3458 __put_user(env->regs[25], &sc->regs.r25); 3459 __put_user(env->regs[26], &sc->regs.r26); 3460 __put_user(env->regs[27], &sc->regs.r27); 3461 __put_user(env->regs[28], &sc->regs.r28); 3462 __put_user(env->regs[29], &sc->regs.r29); 3463 __put_user(env->regs[30], &sc->regs.r30); 3464 __put_user(env->regs[31], &sc->regs.r31); 3465 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3466 } 3467 3468 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3469 { 3470 __get_user(env->regs[0], &sc->regs.r0); 3471 __get_user(env->regs[1], &sc->regs.r1); 3472 __get_user(env->regs[2], &sc->regs.r2); 3473 __get_user(env->regs[3], &sc->regs.r3); 3474 __get_user(env->regs[4], &sc->regs.r4); 3475 __get_user(env->regs[5], &sc->regs.r5); 3476 __get_user(env->regs[6], &sc->regs.r6); 3477 __get_user(env->regs[7], &sc->regs.r7); 3478 __get_user(env->regs[8], &sc->regs.r8); 3479 __get_user(env->regs[9], &sc->regs.r9); 3480 __get_user(env->regs[10], &sc->regs.r10); 3481 __get_user(env->regs[11], &sc->regs.r11); 3482 __get_user(env->regs[12], &sc->regs.r12); 3483 __get_user(env->regs[13], &sc->regs.r13); 3484 __get_user(env->regs[14], &sc->regs.r14); 3485 __get_user(env->regs[15], &sc->regs.r15); 3486 __get_user(env->regs[16], &sc->regs.r16); 3487 __get_user(env->regs[17], &sc->regs.r17); 3488 __get_user(env->regs[18], &sc->regs.r18); 3489 __get_user(env->regs[19], &sc->regs.r19); 3490 __get_user(env->regs[20], &sc->regs.r20); 3491 __get_user(env->regs[21], &sc->regs.r21); 3492 __get_user(env->regs[22], &sc->regs.r22); 3493 __get_user(env->regs[23], &sc->regs.r23); 3494 __get_user(env->regs[24], &sc->regs.r24); 3495 __get_user(env->regs[25], &sc->regs.r25); 3496 __get_user(env->regs[26], &sc->regs.r26); 3497 __get_user(env->regs[27], &sc->regs.r27); 3498 __get_user(env->regs[28], &sc->regs.r28); 3499 __get_user(env->regs[29], &sc->regs.r29); 3500 __get_user(env->regs[30], &sc->regs.r30); 3501 __get_user(env->regs[31], &sc->regs.r31); 3502 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3503 } 3504 3505 static 
abi_ulong get_sigframe(struct target_sigaction *ka, 3506 CPUMBState *env, int frame_size) 3507 { 3508 abi_ulong sp = env->regs[1]; 3509 3510 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3511 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3512 } 3513 3514 return ((sp - frame_size) & -8UL); 3515 } 3516 3517 static void setup_frame(int sig, struct target_sigaction *ka, 3518 target_sigset_t *set, CPUMBState *env) 3519 { 3520 struct target_signal_frame *frame; 3521 abi_ulong frame_addr; 3522 int i; 3523 3524 frame_addr = get_sigframe(ka, env, sizeof *frame); 3525 trace_user_setup_frame(env, frame_addr); 3526 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3527 goto badframe; 3528 3529 /* Save the mask. */ 3530 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3531 3532 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3533 __put_user(set->sig[i], &frame->extramask[i - 1]); 3534 } 3535 3536 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3537 3538 /* Set up to return from userspace. If provided, use a stub 3539 already in userspace. */ 3540 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3541 if (ka->sa_flags & TARGET_SA_RESTORER) { 3542 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3543 } else { 3544 uint32_t t; 3545 /* Note, these encodings are _big endian_! */ 3546 /* addi r12, r0, __NR_sigreturn */ 3547 t = 0x31800000UL | TARGET_NR_sigreturn; 3548 __put_user(t, frame->tramp + 0); 3549 /* brki r14, 0x8 */ 3550 t = 0xb9cc0008UL; 3551 __put_user(t, frame->tramp + 1); 3552 3553 /* Return from sighandler will jump to the tramp. 3554 Negative 8 offset because return is rtsd r15, 8 */ 3555 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3556 - 8; 3557 } 3558 3559 /* Set up registers for signal handler */ 3560 env->regs[1] = frame_addr; 3561 /* Signal handler args: */ 3562 env->regs[5] = sig; /* Arg 0: signum */ 3563 env->regs[6] = 0; 3564 /* arg 1: sigcontext */ 3565 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3566 3567 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3568 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3569 3570 unlock_user_struct(frame, frame_addr, 1); 3571 return; 3572 badframe: 3573 force_sig(TARGET_SIGSEGV); 3574 } 3575 3576 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3577 target_siginfo_t *info, 3578 target_sigset_t *set, CPUMBState *env) 3579 { 3580 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3581 } 3582 3583 long do_sigreturn(CPUMBState *env) 3584 { 3585 struct target_signal_frame *frame; 3586 abi_ulong frame_addr; 3587 target_sigset_t target_set; 3588 sigset_t set; 3589 int i; 3590 3591 frame_addr = env->regs[R_SP]; 3592 trace_user_do_sigreturn(env, frame_addr); 3593 /* Make sure the guest isn't playing games. */ 3594 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3595 goto badframe; 3596 3597 /* Restore blocked signals */ 3598 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3599 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3600 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3601 } 3602 target_to_host_sigset_internal(&set, &target_set); 3603 set_sigmask(&set); 3604 3605 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3606 /* We got here through a sigreturn syscall, our path back is via an 3607 rtb insn so setup r14 for that. 
*/ 3608 env->regs[14] = env->sregs[SR_PC]; 3609 3610 unlock_user_struct(frame, frame_addr, 0); 3611 return -TARGET_QEMU_ESIGRETURN; 3612 badframe: 3613 force_sig(TARGET_SIGSEGV); 3614 } 3615 3616 long do_rt_sigreturn(CPUMBState *env) 3617 { 3618 trace_user_do_rt_sigreturn(env, 0); 3619 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3620 return -TARGET_ENOSYS; 3621 } 3622 3623 #elif defined(TARGET_CRIS) 3624 3625 struct target_sigcontext { 3626 struct target_pt_regs regs; /* needs to be first */ 3627 uint32_t oldmask; 3628 uint32_t usp; /* usp before stacking this gunk on it */ 3629 }; 3630 3631 /* Signal frames. */ 3632 struct target_signal_frame { 3633 struct target_sigcontext sc; 3634 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3635 uint16_t retcode[4]; /* Trampoline code. */ 3636 }; 3637 3638 struct rt_signal_frame { 3639 siginfo_t *pinfo; 3640 void *puc; 3641 siginfo_t info; 3642 struct ucontext uc; 3643 uint16_t retcode[4]; /* Trampoline code. */ 3644 }; 3645 3646 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3647 { 3648 __put_user(env->regs[0], &sc->regs.r0); 3649 __put_user(env->regs[1], &sc->regs.r1); 3650 __put_user(env->regs[2], &sc->regs.r2); 3651 __put_user(env->regs[3], &sc->regs.r3); 3652 __put_user(env->regs[4], &sc->regs.r4); 3653 __put_user(env->regs[5], &sc->regs.r5); 3654 __put_user(env->regs[6], &sc->regs.r6); 3655 __put_user(env->regs[7], &sc->regs.r7); 3656 __put_user(env->regs[8], &sc->regs.r8); 3657 __put_user(env->regs[9], &sc->regs.r9); 3658 __put_user(env->regs[10], &sc->regs.r10); 3659 __put_user(env->regs[11], &sc->regs.r11); 3660 __put_user(env->regs[12], &sc->regs.r12); 3661 __put_user(env->regs[13], &sc->regs.r13); 3662 __put_user(env->regs[14], &sc->usp); 3663 __put_user(env->regs[15], &sc->regs.acr); 3664 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3665 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3666 __put_user(env->pc, &sc->regs.erp); 3667 } 3668 3669 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3670 { 3671 __get_user(env->regs[0], &sc->regs.r0); 3672 __get_user(env->regs[1], &sc->regs.r1); 3673 __get_user(env->regs[2], &sc->regs.r2); 3674 __get_user(env->regs[3], &sc->regs.r3); 3675 __get_user(env->regs[4], &sc->regs.r4); 3676 __get_user(env->regs[5], &sc->regs.r5); 3677 __get_user(env->regs[6], &sc->regs.r6); 3678 __get_user(env->regs[7], &sc->regs.r7); 3679 __get_user(env->regs[8], &sc->regs.r8); 3680 __get_user(env->regs[9], &sc->regs.r9); 3681 __get_user(env->regs[10], &sc->regs.r10); 3682 __get_user(env->regs[11], &sc->regs.r11); 3683 __get_user(env->regs[12], &sc->regs.r12); 3684 __get_user(env->regs[13], &sc->regs.r13); 3685 __get_user(env->regs[14], &sc->usp); 3686 __get_user(env->regs[15], &sc->regs.acr); 3687 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3688 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3689 __get_user(env->pc, &sc->regs.erp); 3690 } 3691 3692 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3693 { 3694 abi_ulong sp; 3695 /* Align the stack downwards to 4. 
*/ 3696 sp = (env->regs[R_SP] & ~3); 3697 return sp - framesize; 3698 } 3699 3700 static void setup_frame(int sig, struct target_sigaction *ka, 3701 target_sigset_t *set, CPUCRISState *env) 3702 { 3703 struct target_signal_frame *frame; 3704 abi_ulong frame_addr; 3705 int i; 3706 3707 frame_addr = get_sigframe(env, sizeof *frame); 3708 trace_user_setup_frame(env, frame_addr); 3709 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3710 goto badframe; 3711 3712 /* 3713 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3714 * use this trampoline anymore but it sets it up for GDB. 3715 * In QEMU, using the trampoline simplifies things a bit so we use it. 3716 * 3717 * This is movu.w __NR_sigreturn, r9; break 13; 3718 */ 3719 __put_user(0x9c5f, frame->retcode+0); 3720 __put_user(TARGET_NR_sigreturn, 3721 frame->retcode + 1); 3722 __put_user(0xe93d, frame->retcode + 2); 3723 3724 /* Save the mask. */ 3725 __put_user(set->sig[0], &frame->sc.oldmask); 3726 3727 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3728 __put_user(set->sig[i], &frame->extramask[i - 1]); 3729 } 3730 3731 setup_sigcontext(&frame->sc, env); 3732 3733 /* Move the stack and setup the arguments for the handler. */ 3734 env->regs[R_SP] = frame_addr; 3735 env->regs[10] = sig; 3736 env->pc = (unsigned long) ka->_sa_handler; 3737 /* Link SRP so the guest returns through the trampoline. */ 3738 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3739 3740 unlock_user_struct(frame, frame_addr, 1); 3741 return; 3742 badframe: 3743 force_sig(TARGET_SIGSEGV); 3744 } 3745 3746 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3747 target_siginfo_t *info, 3748 target_sigset_t *set, CPUCRISState *env) 3749 { 3750 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3751 } 3752 3753 long do_sigreturn(CPUCRISState *env) 3754 { 3755 struct target_signal_frame *frame; 3756 abi_ulong frame_addr; 3757 target_sigset_t target_set; 3758 sigset_t set; 3759 int i; 3760 3761 frame_addr = env->regs[R_SP]; 3762 trace_user_do_sigreturn(env, frame_addr); 3763 /* Make sure the guest isn't playing games. 
*/ 3764 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3765 goto badframe; 3766 } 3767 3768 /* Restore blocked signals */ 3769 __get_user(target_set.sig[0], &frame->sc.oldmask); 3770 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3771 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3772 } 3773 target_to_host_sigset_internal(&set, &target_set); 3774 set_sigmask(&set); 3775 3776 restore_sigcontext(&frame->sc, env); 3777 unlock_user_struct(frame, frame_addr, 0); 3778 return -TARGET_QEMU_ESIGRETURN; 3779 badframe: 3780 force_sig(TARGET_SIGSEGV); 3781 } 3782 3783 long do_rt_sigreturn(CPUCRISState *env) 3784 { 3785 trace_user_do_rt_sigreturn(env, 0); 3786 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3787 return -TARGET_ENOSYS; 3788 } 3789 3790 #elif defined(TARGET_OPENRISC) 3791 3792 struct target_sigcontext { 3793 struct target_pt_regs regs; 3794 abi_ulong oldmask; 3795 abi_ulong usp; 3796 }; 3797 3798 struct target_ucontext { 3799 abi_ulong tuc_flags; 3800 abi_ulong tuc_link; 3801 target_stack_t tuc_stack; 3802 struct target_sigcontext tuc_mcontext; 3803 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3804 }; 3805 3806 struct target_rt_sigframe { 3807 abi_ulong pinfo; 3808 uint64_t puc; 3809 struct target_siginfo info; 3810 struct target_sigcontext sc; 3811 struct target_ucontext uc; 3812 unsigned char retcode[16]; /* trampoline code */ 3813 }; 3814 3815 /* This is the asm-generic/ucontext.h version */ 3816 #if 0 3817 static int restore_sigcontext(CPUOpenRISCState *regs, 3818 struct target_sigcontext *sc) 3819 { 3820 unsigned int err = 0; 3821 unsigned long old_usp; 3822 3823 /* Alwys make any pending restarted system call return -EINTR */ 3824 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3825 3826 /* restore the regs from &sc->regs (same as sc, since regs is first) 3827 * (sc is already checked for VERIFY_READ since the sigframe was 3828 * checked in sys_sigreturn previously) 3829 */ 3830 3831 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3832 goto badframe; 3833 } 3834 3835 /* make sure the U-flag is set so user-mode cannot fool us */ 3836 3837 regs->sr &= ~SR_SM; 3838 3839 /* restore the old USP as it was before we stacked the sc etc. 3840 * (we cannot just pop the sigcontext since we aligned the sp and 3841 * stuff after pushing it) 3842 */ 3843 3844 __get_user(old_usp, &sc->usp); 3845 phx_signal("old_usp 0x%lx", old_usp); 3846 3847 __PHX__ REALLY /* ??? */ 3848 wrusp(old_usp); 3849 regs->gpr[1] = old_usp; 3850 3851 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3852 * after this completes, but we don't use that mechanism. maybe we can 3853 * use it now ? 3854 */ 3855 3856 return err; 3857 3858 badframe: 3859 return 1; 3860 } 3861 #endif 3862 3863 /* Set up a signal frame. */ 3864 3865 static void setup_sigcontext(struct target_sigcontext *sc, 3866 CPUOpenRISCState *regs, 3867 unsigned long mask) 3868 { 3869 unsigned long usp = regs->gpr[1]; 3870 3871 /* copy the regs. they are first in sc so we can use sc directly */ 3872 3873 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 3874 3875 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 3876 the signal handler. The frametype will be restored to its previous 3877 value in restore_sigcontext. 
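   (The CRIS_FRAME_NORMAL note appears to be carried over from the CRIS port
   this code was derived from; on OpenRISC only oldmask and usp are actually
   stored here.)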
*/ 3878 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 3879 3880 /* then some other stuff */ 3881 __put_user(mask, &sc->oldmask); 3882 __put_user(usp, &sc->usp); 3883 } 3884 3885 static inline unsigned long align_sigframe(unsigned long sp) 3886 { 3887 unsigned long i; 3888 i = sp & ~3UL; 3889 return i; 3890 } 3891 3892 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 3893 CPUOpenRISCState *regs, 3894 size_t frame_size) 3895 { 3896 unsigned long sp = regs->gpr[1]; 3897 int onsigstack = on_sig_stack(sp); 3898 3899 /* redzone */ 3900 /* This is the X/Open sanctioned signal stack switching. */ 3901 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 3902 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3903 } 3904 3905 sp = align_sigframe(sp - frame_size); 3906 3907 /* 3908 * If we are on the alternate signal stack and would overflow it, don't. 3909 * Return an always-bogus address instead so we will die with SIGSEGV. 3910 */ 3911 3912 if (onsigstack && !likely(on_sig_stack(sp))) { 3913 return -1L; 3914 } 3915 3916 return sp; 3917 } 3918 3919 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3920 target_siginfo_t *info, 3921 target_sigset_t *set, CPUOpenRISCState *env) 3922 { 3923 int err = 0; 3924 abi_ulong frame_addr; 3925 unsigned long return_ip; 3926 struct target_rt_sigframe *frame; 3927 abi_ulong info_addr, uc_addr; 3928 3929 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3930 trace_user_setup_rt_frame(env, frame_addr); 3931 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3932 goto give_sigsegv; 3933 } 3934 3935 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 3936 __put_user(info_addr, &frame->pinfo); 3937 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 3938 __put_user(uc_addr, &frame->puc); 3939 3940 if (ka->sa_flags & SA_SIGINFO) { 3941 tswap_siginfo(&frame->info, info); 3942 } 3943 3944 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 3945 __put_user(0, &frame->uc.tuc_flags); 3946 __put_user(0, &frame->uc.tuc_link); 3947 __put_user(target_sigaltstack_used.ss_sp, 3948 &frame->uc.tuc_stack.ss_sp); 3949 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 3950 __put_user(target_sigaltstack_used.ss_size, 3951 &frame->uc.tuc_stack.ss_size); 3952 setup_sigcontext(&frame->sc, env, set->sig[0]); 3953 3954 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 3955 3956 /* trampoline - the desired return ip is the retcode itself */ 3957 return_ip = (unsigned long)&frame->retcode; 3958 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 3959 __put_user(0xa960, (short *)(frame->retcode + 0)); 3960 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 3961 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 3962 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 3963 3964 if (err) { 3965 goto give_sigsegv; 3966 } 3967 3968 /* TODO what is the current->exec_domain stuff and invmap ? 
*/ 3969 3970 /* Set up registers for signal handler */ 3971 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 3972 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 3973 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 3974 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 3975 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 3976 3977 /* actually move the usp to reflect the stacked frame */ 3978 env->gpr[1] = (unsigned long)frame; 3979 3980 return; 3981 3982 give_sigsegv: 3983 unlock_user_struct(frame, frame_addr, 1); 3984 if (sig == TARGET_SIGSEGV) { 3985 ka->_sa_handler = TARGET_SIG_DFL; 3986 } 3987 force_sig(TARGET_SIGSEGV); 3988 } 3989 3990 long do_sigreturn(CPUOpenRISCState *env) 3991 { 3992 trace_user_do_sigreturn(env, 0); 3993 fprintf(stderr, "do_sigreturn: not implemented\n"); 3994 return -TARGET_ENOSYS; 3995 } 3996 3997 long do_rt_sigreturn(CPUOpenRISCState *env) 3998 { 3999 trace_user_do_rt_sigreturn(env, 0); 4000 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4001 return -TARGET_ENOSYS; 4002 } 4003 /* TARGET_OPENRISC */ 4004 4005 #elif defined(TARGET_S390X) 4006 4007 #define __NUM_GPRS 16 4008 #define __NUM_FPRS 16 4009 #define __NUM_ACRS 16 4010 4011 #define S390_SYSCALL_SIZE 2 4012 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4013 4014 #define _SIGCONTEXT_NSIG 64 4015 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4016 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4017 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4018 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4019 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4020 4021 typedef struct { 4022 target_psw_t psw; 4023 target_ulong gprs[__NUM_GPRS]; 4024 unsigned int acrs[__NUM_ACRS]; 4025 } target_s390_regs_common; 4026 4027 typedef struct { 4028 unsigned int fpc; 4029 double fprs[__NUM_FPRS]; 4030 } target_s390_fp_regs; 4031 4032 typedef struct { 4033 target_s390_regs_common regs; 4034 target_s390_fp_regs fpregs; 4035 } target_sigregs; 4036 4037 struct target_sigcontext { 4038 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4039 target_sigregs *sregs; 4040 }; 4041 4042 typedef struct { 4043 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4044 struct target_sigcontext sc; 4045 target_sigregs sregs; 4046 int signo; 4047 uint8_t retcode[S390_SYSCALL_SIZE]; 4048 } sigframe; 4049 4050 struct target_ucontext { 4051 target_ulong tuc_flags; 4052 struct target_ucontext *tuc_link; 4053 target_stack_t tuc_stack; 4054 target_sigregs tuc_mcontext; 4055 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4056 }; 4057 4058 typedef struct { 4059 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4060 uint8_t retcode[S390_SYSCALL_SIZE]; 4061 struct target_siginfo info; 4062 struct target_ucontext uc; 4063 } rt_sigframe; 4064 4065 static inline abi_ulong 4066 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4067 { 4068 abi_ulong sp; 4069 4070 /* Default to using normal stack */ 4071 sp = env->regs[15]; 4072 4073 /* This is the X/Open sanctioned signal stack switching. */ 4074 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4075 if (!sas_ss_flags(sp)) { 4076 sp = target_sigaltstack_used.ss_sp + 4077 target_sigaltstack_used.ss_size; 4078 } 4079 } 4080 4081 /* This is the legacy signal stack switching. 
*/ 4082 else if (/* FIXME !user_mode(regs) */ 0 &&
4083 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4084 ka->sa_restorer) {
4085 sp = (abi_ulong) ka->sa_restorer;
4086 }
4087
4088 return (sp - frame_size) & -8ul;
4089 }
4090
4091 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4092 {
4093 int i;
4094 //save_access_regs(current->thread.acrs); FIXME
4095
4096 /* Copy a 'clean' PSW mask to the user to avoid leaking
4097 information about whether PER is currently on. */
4098 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4099 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4100 for (i = 0; i < 16; i++) {
4101 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4102 }
4103 for (i = 0; i < 16; i++) {
4104 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4105 }
4106 /*
4107 * We have to store the fp registers to current->thread.fp_regs
4108 * to merge them with the emulated registers.
4109 */
4110 //save_fp_regs(&current->thread.fp_regs); FIXME
4111 for (i = 0; i < 16; i++) {
4112 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4113 }
4114 }
4115
4116 static void setup_frame(int sig, struct target_sigaction *ka,
4117 target_sigset_t *set, CPUS390XState *env)
4118 {
4119 sigframe *frame;
4120 abi_ulong frame_addr;
4121
4122 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4123 trace_user_setup_frame(env, frame_addr);
4124 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4125 goto give_sigsegv;
4126 }
4127
4128 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4129
4130 save_sigregs(env, &frame->sregs);
4131
4132 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4133 (abi_ulong *)&frame->sc.sregs);
4134
4135 /* Set up to return from userspace. If provided, use a stub
4136 already in userspace. */
4137 if (ka->sa_flags & TARGET_SA_RESTORER) {
4138 env->regs[14] = (unsigned long)
4139 ka->sa_restorer | PSW_ADDR_AMODE;
4140 } else {
4141 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4142 | PSW_ADDR_AMODE;
4143 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4144 (uint16_t *)(frame->retcode));
4145 }
4146
4147 /* Set up backchain. */
4148 __put_user(env->regs[15], (abi_ulong *) frame);
4149
4150 /* Set up registers for signal handler */
4151 env->regs[15] = frame_addr;
4152 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4153
4154 env->regs[2] = sig; //map_signal(sig);
4155 env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);
4156
4157 /* We forgot to include these in the sigcontext.
4158 To avoid breaking binary compatibility, they are passed as args. */
4159 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4160 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4161
4162 /* Place signal number on stack to allow backtrace from handler. */
4163 __put_user(env->regs[2], (int *) &frame->signo);
4164 unlock_user_struct(frame, frame_addr, 1);
4165 return;
4166
4167 give_sigsegv:
4168 force_sig(TARGET_SIGSEGV);
4169 }
4170
4171 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4172 target_siginfo_t *info,
4173 target_sigset_t *set, CPUS390XState *env)
4174 {
4175 int i;
4176 rt_sigframe *frame;
4177 abi_ulong frame_addr;
4178
4179 frame_addr = get_sigframe(ka, env, sizeof *frame);
4180 trace_user_setup_rt_frame(env, frame_addr);
4181 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4182 goto give_sigsegv;
4183 }
4184
4185 tswap_siginfo(&frame->info, info);
4186
4187 /* Create the ucontext.
*/ 4188 __put_user(0, &frame->uc.tuc_flags); 4189 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4190 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4191 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4192 &frame->uc.tuc_stack.ss_flags); 4193 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4194 save_sigregs(env, &frame->uc.tuc_mcontext); 4195 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4196 __put_user((abi_ulong)set->sig[i], 4197 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4198 } 4199 4200 /* Set up to return from userspace. If provided, use a stub 4201 already in userspace. */ 4202 if (ka->sa_flags & TARGET_SA_RESTORER) { 4203 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4204 } else { 4205 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4206 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4207 (uint16_t *)(frame->retcode)); 4208 } 4209 4210 /* Set up backchain. */ 4211 __put_user(env->regs[15], (abi_ulong *) frame); 4212 4213 /* Set up registers for signal handler */ 4214 env->regs[15] = frame_addr; 4215 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4216 4217 env->regs[2] = sig; //map_signal(sig); 4218 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4219 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4220 return; 4221 4222 give_sigsegv: 4223 force_sig(TARGET_SIGSEGV); 4224 } 4225 4226 static int 4227 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4228 { 4229 int err = 0; 4230 int i; 4231 4232 for (i = 0; i < 16; i++) { 4233 __get_user(env->regs[i], &sc->regs.gprs[i]); 4234 } 4235 4236 __get_user(env->psw.mask, &sc->regs.psw.mask); 4237 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4238 (unsigned long long)env->psw.addr); 4239 __get_user(env->psw.addr, &sc->regs.psw.addr); 4240 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4241 4242 for (i = 0; i < 16; i++) { 4243 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4244 } 4245 for (i = 0; i < 16; i++) { 4246 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4247 } 4248 4249 return err; 4250 } 4251 4252 long do_sigreturn(CPUS390XState *env) 4253 { 4254 sigframe *frame; 4255 abi_ulong frame_addr = env->regs[15]; 4256 target_sigset_t target_set; 4257 sigset_t set; 4258 4259 trace_user_do_sigreturn(env, frame_addr); 4260 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4261 goto badframe; 4262 } 4263 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4264 4265 target_to_host_sigset_internal(&set, &target_set); 4266 set_sigmask(&set); /* ~_BLOCKABLE? */ 4267 4268 if (restore_sigregs(env, &frame->sregs)) { 4269 goto badframe; 4270 } 4271 4272 unlock_user_struct(frame, frame_addr, 0); 4273 return -TARGET_QEMU_ESIGRETURN; 4274 4275 badframe: 4276 force_sig(TARGET_SIGSEGV); 4277 return 0; 4278 } 4279 4280 long do_rt_sigreturn(CPUS390XState *env) 4281 { 4282 rt_sigframe *frame; 4283 abi_ulong frame_addr = env->regs[15]; 4284 sigset_t set; 4285 4286 trace_user_do_rt_sigreturn(env, frame_addr); 4287 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4288 goto badframe; 4289 } 4290 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4291 4292 set_sigmask(&set); /* ~_BLOCKABLE? 
*/ 4293 4294 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4295 goto badframe; 4296 } 4297 4298 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4299 get_sp_from_cpustate(env)) == -EFAULT) { 4300 goto badframe; 4301 } 4302 unlock_user_struct(frame, frame_addr, 0); 4303 return -TARGET_QEMU_ESIGRETURN; 4304 4305 badframe: 4306 unlock_user_struct(frame, frame_addr, 0); 4307 force_sig(TARGET_SIGSEGV); 4308 return 0; 4309 } 4310 4311 #elif defined(TARGET_PPC) 4312 4313 /* Size of dummy stack frame allocated when calling signal handler. 4314 See arch/powerpc/include/asm/ptrace.h. */ 4315 #if defined(TARGET_PPC64) 4316 #define SIGNAL_FRAMESIZE 128 4317 #else 4318 #define SIGNAL_FRAMESIZE 64 4319 #endif 4320 4321 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4322 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4323 struct target_mcontext { 4324 target_ulong mc_gregs[48]; 4325 /* Includes fpscr. */ 4326 uint64_t mc_fregs[33]; 4327 target_ulong mc_pad[2]; 4328 /* We need to handle Altivec and SPE at the same time, which no 4329 kernel needs to do. Fortunately, the kernel defines this bit to 4330 be Altivec-register-large all the time, rather than trying to 4331 twiddle it based on the specific platform. */ 4332 union { 4333 /* SPE vector registers. One extra for SPEFSCR. */ 4334 uint32_t spe[33]; 4335 /* Altivec vector registers. The packing of VSCR and VRSAVE 4336 varies depending on whether we're PPC64 or not: PPC64 splits 4337 them apart; PPC32 stuffs them together. */ 4338 #if defined(TARGET_PPC64) 4339 #define QEMU_NVRREG 34 4340 #else 4341 #define QEMU_NVRREG 33 4342 #endif 4343 ppc_avr_t altivec[QEMU_NVRREG]; 4344 #undef QEMU_NVRREG 4345 } mc_vregs __attribute__((__aligned__(16))); 4346 }; 4347 4348 /* See arch/powerpc/include/asm/sigcontext.h. */ 4349 struct target_sigcontext { 4350 target_ulong _unused[4]; 4351 int32_t signal; 4352 #if defined(TARGET_PPC64) 4353 int32_t pad0; 4354 #endif 4355 target_ulong handler; 4356 target_ulong oldmask; 4357 target_ulong regs; /* struct pt_regs __user * */ 4358 #if defined(TARGET_PPC64) 4359 struct target_mcontext mcontext; 4360 #endif 4361 }; 4362 4363 /* Indices for target_mcontext.mc_gregs, below. 4364 See arch/powerpc/include/asm/ptrace.h for details. */ 4365 enum { 4366 TARGET_PT_R0 = 0, 4367 TARGET_PT_R1 = 1, 4368 TARGET_PT_R2 = 2, 4369 TARGET_PT_R3 = 3, 4370 TARGET_PT_R4 = 4, 4371 TARGET_PT_R5 = 5, 4372 TARGET_PT_R6 = 6, 4373 TARGET_PT_R7 = 7, 4374 TARGET_PT_R8 = 8, 4375 TARGET_PT_R9 = 9, 4376 TARGET_PT_R10 = 10, 4377 TARGET_PT_R11 = 11, 4378 TARGET_PT_R12 = 12, 4379 TARGET_PT_R13 = 13, 4380 TARGET_PT_R14 = 14, 4381 TARGET_PT_R15 = 15, 4382 TARGET_PT_R16 = 16, 4383 TARGET_PT_R17 = 17, 4384 TARGET_PT_R18 = 18, 4385 TARGET_PT_R19 = 19, 4386 TARGET_PT_R20 = 20, 4387 TARGET_PT_R21 = 21, 4388 TARGET_PT_R22 = 22, 4389 TARGET_PT_R23 = 23, 4390 TARGET_PT_R24 = 24, 4391 TARGET_PT_R25 = 25, 4392 TARGET_PT_R26 = 26, 4393 TARGET_PT_R27 = 27, 4394 TARGET_PT_R28 = 28, 4395 TARGET_PT_R29 = 29, 4396 TARGET_PT_R30 = 30, 4397 TARGET_PT_R31 = 31, 4398 TARGET_PT_NIP = 32, 4399 TARGET_PT_MSR = 33, 4400 TARGET_PT_ORIG_R3 = 34, 4401 TARGET_PT_CTR = 35, 4402 TARGET_PT_LNK = 36, 4403 TARGET_PT_XER = 37, 4404 TARGET_PT_CCR = 38, 4405 /* Yes, there are two registers with #39. One is 64-bit only. 
*/ 4406 TARGET_PT_MQ = 39, 4407 TARGET_PT_SOFTE = 39, 4408 TARGET_PT_TRAP = 40, 4409 TARGET_PT_DAR = 41, 4410 TARGET_PT_DSISR = 42, 4411 TARGET_PT_RESULT = 43, 4412 TARGET_PT_REGS_COUNT = 44 4413 }; 4414 4415 4416 struct target_ucontext { 4417 target_ulong tuc_flags; 4418 target_ulong tuc_link; /* struct ucontext __user * */ 4419 struct target_sigaltstack tuc_stack; 4420 #if !defined(TARGET_PPC64) 4421 int32_t tuc_pad[7]; 4422 target_ulong tuc_regs; /* struct mcontext __user * 4423 points to uc_mcontext field */ 4424 #endif 4425 target_sigset_t tuc_sigmask; 4426 #if defined(TARGET_PPC64) 4427 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4428 struct target_sigcontext tuc_sigcontext; 4429 #else 4430 int32_t tuc_maskext[30]; 4431 int32_t tuc_pad2[3]; 4432 struct target_mcontext tuc_mcontext; 4433 #endif 4434 }; 4435 4436 /* See arch/powerpc/kernel/signal_32.c. */ 4437 struct target_sigframe { 4438 struct target_sigcontext sctx; 4439 struct target_mcontext mctx; 4440 int32_t abigap[56]; 4441 }; 4442 4443 #if defined(TARGET_PPC64) 4444 4445 #define TARGET_TRAMP_SIZE 6 4446 4447 struct target_rt_sigframe { 4448 /* sys_rt_sigreturn requires the ucontext be the first field */ 4449 struct target_ucontext uc; 4450 target_ulong _unused[2]; 4451 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4452 target_ulong pinfo; /* struct siginfo __user * */ 4453 target_ulong puc; /* void __user * */ 4454 struct target_siginfo info; 4455 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4456 char abigap[288]; 4457 } __attribute__((aligned(16))); 4458 4459 #else 4460 4461 struct target_rt_sigframe { 4462 struct target_siginfo info; 4463 struct target_ucontext uc; 4464 int32_t abigap[56]; 4465 }; 4466 4467 #endif 4468 4469 #if defined(TARGET_PPC64) 4470 4471 struct target_func_ptr { 4472 target_ulong entry; 4473 target_ulong toc; 4474 }; 4475 4476 #endif 4477 4478 /* We use the mc_pad field for the signal return trampoline. */ 4479 #define tramp mc_pad 4480 4481 /* See arch/powerpc/kernel/signal.c. */ 4482 static target_ulong get_sigframe(struct target_sigaction *ka, 4483 CPUPPCState *env, 4484 int frame_size) 4485 { 4486 target_ulong oldsp, newsp; 4487 4488 oldsp = env->gpr[1]; 4489 4490 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4491 (sas_ss_flags(oldsp) == 0)) { 4492 oldsp = (target_sigaltstack_used.ss_sp 4493 + target_sigaltstack_used.ss_size); 4494 } 4495 4496 newsp = (oldsp - frame_size) & ~0xFUL; 4497 4498 return newsp; 4499 } 4500 4501 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4502 { 4503 target_ulong msr = env->msr; 4504 int i; 4505 target_ulong ccr = 0; 4506 4507 /* In general, the kernel attempts to be intelligent about what it 4508 needs to save for Altivec/FP/SPE registers. We don't care that 4509 much, so we just go ahead and save everything. */ 4510 4511 /* Save general registers. */ 4512 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4513 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4514 } 4515 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4516 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4517 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4518 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4519 4520 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4521 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4522 } 4523 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4524 4525 /* Save Altivec registers if necessary. 
*/ 4526 if (env->insns_flags & PPC_ALTIVEC) { 4527 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4528 ppc_avr_t *avr = &env->avr[i]; 4529 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4530 4531 __put_user(avr->u64[0], &vreg->u64[0]); 4532 __put_user(avr->u64[1], &vreg->u64[1]); 4533 } 4534 /* Set MSR_VR in the saved MSR value to indicate that 4535 frame->mc_vregs contains valid data. */ 4536 msr |= MSR_VR; 4537 __put_user((uint32_t)env->spr[SPR_VRSAVE], 4538 &frame->mc_vregs.altivec[32].u32[3]); 4539 } 4540 4541 /* Save floating point registers. */ 4542 if (env->insns_flags & PPC_FLOAT) { 4543 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4544 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4545 } 4546 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4547 } 4548 4549 /* Save SPE registers. The kernel only saves the high half. */ 4550 if (env->insns_flags & PPC_SPE) { 4551 #if defined(TARGET_PPC64) 4552 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4553 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4554 } 4555 #else 4556 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4557 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4558 } 4559 #endif 4560 /* Set MSR_SPE in the saved MSR value to indicate that 4561 frame->mc_vregs contains valid data. */ 4562 msr |= MSR_SPE; 4563 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4564 } 4565 4566 /* Store MSR. */ 4567 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4568 } 4569 4570 static void encode_trampoline(int sigret, uint32_t *tramp) 4571 { 4572 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4573 if (sigret) { 4574 __put_user(0x38000000 | sigret, &tramp[0]); 4575 __put_user(0x44000002, &tramp[1]); 4576 } 4577 } 4578 4579 static void restore_user_regs(CPUPPCState *env, 4580 struct target_mcontext *frame, int sig) 4581 { 4582 target_ulong save_r2 = 0; 4583 target_ulong msr; 4584 target_ulong ccr; 4585 4586 int i; 4587 4588 if (!sig) { 4589 save_r2 = env->gpr[2]; 4590 } 4591 4592 /* Restore general registers. */ 4593 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4594 __get_user(env->gpr[i], &frame->mc_gregs[i]); 4595 } 4596 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4597 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4598 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4599 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4600 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4601 4602 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4603 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4604 } 4605 4606 if (!sig) { 4607 env->gpr[2] = save_r2; 4608 } 4609 /* Restore MSR. */ 4610 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4611 4612 /* If doing signal return, restore the previous little-endian mode. */ 4613 if (sig) 4614 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 4615 4616 /* Restore Altivec registers if necessary. */ 4617 if (env->insns_flags & PPC_ALTIVEC) { 4618 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4619 ppc_avr_t *avr = &env->avr[i]; 4620 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4621 4622 __get_user(avr->u64[0], &vreg->u64[0]); 4623 __get_user(avr->u64[1], &vreg->u64[1]); 4624 } 4625 /* Set MSR_VEC in the saved MSR value to indicate that 4626 frame->mc_vregs contains valid data. */ 4627 __get_user(env->spr[SPR_VRSAVE], 4628 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3])); 4629 } 4630 4631 /* Restore floating point registers. 
*/ 4632 if (env->insns_flags & PPC_FLOAT) { 4633 uint64_t fpscr; 4634 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4635 __get_user(env->fpr[i], &frame->mc_fregs[i]); 4636 } 4637 __get_user(fpscr, &frame->mc_fregs[32]); 4638 env->fpscr = (uint32_t) fpscr; 4639 } 4640 4641 /* Save SPE registers. The kernel only saves the high half. */ 4642 if (env->insns_flags & PPC_SPE) { 4643 #if defined(TARGET_PPC64) 4644 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4645 uint32_t hi; 4646 4647 __get_user(hi, &frame->mc_vregs.spe[i]); 4648 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4649 } 4650 #else 4651 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4652 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4653 } 4654 #endif 4655 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4656 } 4657 } 4658 4659 static void setup_frame(int sig, struct target_sigaction *ka, 4660 target_sigset_t *set, CPUPPCState *env) 4661 { 4662 struct target_sigframe *frame; 4663 struct target_sigcontext *sc; 4664 target_ulong frame_addr, newsp; 4665 int err = 0; 4666 #if defined(TARGET_PPC64) 4667 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4668 #endif 4669 4670 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4671 trace_user_setup_frame(env, frame_addr); 4672 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4673 goto sigsegv; 4674 sc = &frame->sctx; 4675 4676 __put_user(ka->_sa_handler, &sc->handler); 4677 __put_user(set->sig[0], &sc->oldmask); 4678 #if TARGET_ABI_BITS == 64 4679 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4680 #else 4681 __put_user(set->sig[1], &sc->_unused[3]); 4682 #endif 4683 __put_user(h2g(&frame->mctx), &sc->regs); 4684 __put_user(sig, &sc->signal); 4685 4686 /* Save user regs. */ 4687 save_user_regs(env, &frame->mctx); 4688 4689 /* Construct the trampoline code on the stack. */ 4690 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 4691 4692 /* The kernel checks for the presence of a VDSO here. We don't 4693 emulate a vdso, so use a sigreturn system call. */ 4694 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4695 4696 /* Turn off all fp exceptions. */ 4697 env->fpscr = 0; 4698 4699 /* Create a stack frame for the caller of the handler. */ 4700 newsp = frame_addr - SIGNAL_FRAMESIZE; 4701 err |= put_user(env->gpr[1], newsp, target_ulong); 4702 4703 if (err) 4704 goto sigsegv; 4705 4706 /* Set up registers for signal handler. */ 4707 env->gpr[1] = newsp; 4708 env->gpr[3] = sig; 4709 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4710 4711 #if defined(TARGET_PPC64) 4712 if (get_ppc64_abi(image) < 2) { 4713 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4714 struct target_func_ptr *handler = 4715 (struct target_func_ptr *)g2h(ka->_sa_handler); 4716 env->nip = tswapl(handler->entry); 4717 env->gpr[2] = tswapl(handler->toc); 4718 } else { 4719 /* ELFv2 PPC64 function pointers are entry points, but R12 4720 * must also be set */ 4721 env->nip = tswapl((target_ulong) ka->_sa_handler); 4722 env->gpr[12] = env->nip; 4723 } 4724 #else 4725 env->nip = (target_ulong) ka->_sa_handler; 4726 #endif 4727 4728 /* Signal handlers are entered in big-endian mode. 
*/ 4729 env->msr &= ~(1ull << MSR_LE); 4730 4731 unlock_user_struct(frame, frame_addr, 1); 4732 return; 4733 4734 sigsegv: 4735 unlock_user_struct(frame, frame_addr, 1); 4736 force_sig(TARGET_SIGSEGV); 4737 } 4738 4739 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4740 target_siginfo_t *info, 4741 target_sigset_t *set, CPUPPCState *env) 4742 { 4743 struct target_rt_sigframe *rt_sf; 4744 uint32_t *trampptr = 0; 4745 struct target_mcontext *mctx = 0; 4746 target_ulong rt_sf_addr, newsp = 0; 4747 int i, err = 0; 4748 #if defined(TARGET_PPC64) 4749 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 4750 #endif 4751 4752 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4753 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4754 goto sigsegv; 4755 4756 tswap_siginfo(&rt_sf->info, info); 4757 4758 __put_user(0, &rt_sf->uc.tuc_flags); 4759 __put_user(0, &rt_sf->uc.tuc_link); 4760 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4761 &rt_sf->uc.tuc_stack.ss_sp); 4762 __put_user(sas_ss_flags(env->gpr[1]), 4763 &rt_sf->uc.tuc_stack.ss_flags); 4764 __put_user(target_sigaltstack_used.ss_size, 4765 &rt_sf->uc.tuc_stack.ss_size); 4766 #if !defined(TARGET_PPC64) 4767 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4768 &rt_sf->uc.tuc_regs); 4769 #endif 4770 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4771 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4772 } 4773 4774 #if defined(TARGET_PPC64) 4775 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 4776 trampptr = &rt_sf->trampoline[0]; 4777 #else 4778 mctx = &rt_sf->uc.tuc_mcontext; 4779 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 4780 #endif 4781 4782 save_user_regs(env, mctx); 4783 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 4784 4785 /* The kernel checks for the presence of a VDSO here. We don't 4786 emulate a vdso, so use a sigreturn system call. */ 4787 env->lr = (target_ulong) h2g(trampptr); 4788 4789 /* Turn off all fp exceptions. */ 4790 env->fpscr = 0; 4791 4792 /* Create a stack frame for the caller of the handler. */ 4793 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4794 err |= put_user(env->gpr[1], newsp, target_ulong); 4795 4796 if (err) 4797 goto sigsegv; 4798 4799 /* Set up registers for signal handler. */ 4800 env->gpr[1] = newsp; 4801 env->gpr[3] = (target_ulong) sig; 4802 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4803 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4804 env->gpr[6] = (target_ulong) h2g(rt_sf); 4805 4806 #if defined(TARGET_PPC64) 4807 if (get_ppc64_abi(image) < 2) { 4808 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 4809 struct target_func_ptr *handler = 4810 (struct target_func_ptr *)g2h(ka->_sa_handler); 4811 env->nip = tswapl(handler->entry); 4812 env->gpr[2] = tswapl(handler->toc); 4813 } else { 4814 /* ELFv2 PPC64 function pointers are entry points, but R12 4815 * must also be set */ 4816 env->nip = tswapl((target_ulong) ka->_sa_handler); 4817 env->gpr[12] = env->nip; 4818 } 4819 #else 4820 env->nip = (target_ulong) ka->_sa_handler; 4821 #endif 4822 4823 /* Signal handlers are entered in big-endian mode. 
*/ 4824 env->msr &= ~(1ull << MSR_LE); 4825 4826 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4827 return; 4828 4829 sigsegv: 4830 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4831 force_sig(TARGET_SIGSEGV); 4832 4833 } 4834 4835 long do_sigreturn(CPUPPCState *env) 4836 { 4837 struct target_sigcontext *sc = NULL; 4838 struct target_mcontext *sr = NULL; 4839 target_ulong sr_addr = 0, sc_addr; 4840 sigset_t blocked; 4841 target_sigset_t set; 4842 4843 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4844 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4845 goto sigsegv; 4846 4847 #if defined(TARGET_PPC64) 4848 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 4849 #else 4850 __get_user(set.sig[0], &sc->oldmask); 4851 __get_user(set.sig[1], &sc->_unused[3]); 4852 #endif 4853 target_to_host_sigset_internal(&blocked, &set); 4854 set_sigmask(&blocked); 4855 4856 __get_user(sr_addr, &sc->regs); 4857 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4858 goto sigsegv; 4859 restore_user_regs(env, sr, 1); 4860 4861 unlock_user_struct(sr, sr_addr, 1); 4862 unlock_user_struct(sc, sc_addr, 1); 4863 return -TARGET_QEMU_ESIGRETURN; 4864 4865 sigsegv: 4866 unlock_user_struct(sr, sr_addr, 1); 4867 unlock_user_struct(sc, sc_addr, 1); 4868 force_sig(TARGET_SIGSEGV); 4869 return 0; 4870 } 4871 4872 /* See arch/powerpc/kernel/signal_32.c. */ 4873 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 4874 { 4875 struct target_mcontext *mcp; 4876 target_ulong mcp_addr; 4877 sigset_t blocked; 4878 target_sigset_t set; 4879 4880 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 4881 sizeof (set))) 4882 return 1; 4883 4884 #if defined(TARGET_PPC64) 4885 mcp_addr = h2g(ucp) + 4886 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 4887 #else 4888 __get_user(mcp_addr, &ucp->tuc_regs); 4889 #endif 4890 4891 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 4892 return 1; 4893 4894 target_to_host_sigset_internal(&blocked, &set); 4895 set_sigmask(&blocked); 4896 restore_user_regs(env, mcp, sig); 4897 4898 unlock_user_struct(mcp, mcp_addr, 1); 4899 return 0; 4900 } 4901 4902 long do_rt_sigreturn(CPUPPCState *env) 4903 { 4904 struct target_rt_sigframe *rt_sf = NULL; 4905 target_ulong rt_sf_addr; 4906 4907 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 4908 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 4909 goto sigsegv; 4910 4911 if (do_setcontext(&rt_sf->uc, env, 1)) 4912 goto sigsegv; 4913 4914 do_sigaltstack(rt_sf_addr 4915 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 4916 0, env->gpr[1]); 4917 4918 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4919 return -TARGET_QEMU_ESIGRETURN; 4920 4921 sigsegv: 4922 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4923 force_sig(TARGET_SIGSEGV); 4924 return 0; 4925 } 4926 4927 #elif defined(TARGET_M68K) 4928 4929 struct target_sigcontext { 4930 abi_ulong sc_mask; 4931 abi_ulong sc_usp; 4932 abi_ulong sc_d0; 4933 abi_ulong sc_d1; 4934 abi_ulong sc_a0; 4935 abi_ulong sc_a1; 4936 unsigned short sc_sr; 4937 abi_ulong sc_pc; 4938 }; 4939 4940 struct target_sigframe 4941 { 4942 abi_ulong pretcode; 4943 int sig; 4944 int code; 4945 abi_ulong psc; 4946 char retcode[8]; 4947 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 4948 struct target_sigcontext sc; 4949 }; 4950 4951 typedef int target_greg_t; 4952 #define TARGET_NGREG 18 4953 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 4954 4955 typedef struct target_fpregset { 4956 int f_fpcntl[3]; 4957 int f_fpregs[8*3]; 4958 } target_fpregset_t; 4959 
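/*
 * Editor's note (sketch, not part of the original file): assuming the usual
 * m68k layout, f_fpcntl[] above would hold the three FP control registers
 * (FPCR, FPSR, FPIAR) and f_fpregs[8*3] the eight 96-bit extended-precision
 * data registers, three 32-bit words per register.  Under that assumption
 * the words of fp<n> can be located as shown below; the helper name is
 * hypothetical and the block is kept disabled so it has no effect on the
 * build.
 */
#if 0
static inline int *m68k_fpreg_words(target_fpregset_t *fp, int n)
{
    /* fp0..fp7: each extended-precision register spans three ints. */
    return &fp->f_fpregs[n * 3];
}
#endif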
4960 struct target_mcontext { 4961 int version; 4962 target_gregset_t gregs; 4963 target_fpregset_t fpregs; 4964 }; 4965 4966 #define TARGET_MCONTEXT_VERSION 2 4967 4968 struct target_ucontext { 4969 abi_ulong tuc_flags; 4970 abi_ulong tuc_link; 4971 target_stack_t tuc_stack; 4972 struct target_mcontext tuc_mcontext; 4973 abi_long tuc_filler[80]; 4974 target_sigset_t tuc_sigmask; 4975 }; 4976 4977 struct target_rt_sigframe 4978 { 4979 abi_ulong pretcode; 4980 int sig; 4981 abi_ulong pinfo; 4982 abi_ulong puc; 4983 char retcode[8]; 4984 struct target_siginfo info; 4985 struct target_ucontext uc; 4986 }; 4987 4988 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 4989 abi_ulong mask) 4990 { 4991 __put_user(mask, &sc->sc_mask); 4992 __put_user(env->aregs[7], &sc->sc_usp); 4993 __put_user(env->dregs[0], &sc->sc_d0); 4994 __put_user(env->dregs[1], &sc->sc_d1); 4995 __put_user(env->aregs[0], &sc->sc_a0); 4996 __put_user(env->aregs[1], &sc->sc_a1); 4997 __put_user(env->sr, &sc->sc_sr); 4998 __put_user(env->pc, &sc->sc_pc); 4999 } 5000 5001 static void 5002 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5003 { 5004 int temp; 5005 5006 __get_user(env->aregs[7], &sc->sc_usp); 5007 __get_user(env->dregs[0], &sc->sc_d0); 5008 __get_user(env->dregs[1], &sc->sc_d1); 5009 __get_user(env->aregs[0], &sc->sc_a0); 5010 __get_user(env->aregs[1], &sc->sc_a1); 5011 __get_user(env->pc, &sc->sc_pc); 5012 __get_user(temp, &sc->sc_sr); 5013 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5014 } 5015 5016 /* 5017 * Determine which stack to use.. 5018 */ 5019 static inline abi_ulong 5020 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5021 size_t frame_size) 5022 { 5023 unsigned long sp; 5024 5025 sp = regs->aregs[7]; 5026 5027 /* This is the X/Open sanctioned signal stack switching. */ 5028 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5029 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5030 } 5031 5032 return ((sp - frame_size) & -8UL); 5033 } 5034 5035 static void setup_frame(int sig, struct target_sigaction *ka, 5036 target_sigset_t *set, CPUM68KState *env) 5037 { 5038 struct target_sigframe *frame; 5039 abi_ulong frame_addr; 5040 abi_ulong retcode_addr; 5041 abi_ulong sc_addr; 5042 int i; 5043 5044 frame_addr = get_sigframe(ka, env, sizeof *frame); 5045 trace_user_setup_frame(env, frame_addr); 5046 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5047 goto give_sigsegv; 5048 } 5049 5050 __put_user(sig, &frame->sig); 5051 5052 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5053 __put_user(sc_addr, &frame->psc); 5054 5055 setup_sigcontext(&frame->sc, env, set->sig[0]); 5056 5057 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5058 __put_user(set->sig[i], &frame->extramask[i - 1]); 5059 } 5060 5061 /* Set up to return from userspace. 
*/ 5062 5063 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5064 __put_user(retcode_addr, &frame->pretcode); 5065 5066 /* moveq #,d0; trap #0 */ 5067 5068 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5069 (uint32_t *)(frame->retcode)); 5070 5071 /* Set up to return from userspace */ 5072 5073 env->aregs[7] = frame_addr; 5074 env->pc = ka->_sa_handler; 5075 5076 unlock_user_struct(frame, frame_addr, 1); 5077 return; 5078 5079 give_sigsegv: 5080 force_sig(TARGET_SIGSEGV); 5081 } 5082 5083 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5084 CPUM68KState *env) 5085 { 5086 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5087 5088 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5089 __put_user(env->dregs[0], &gregs[0]); 5090 __put_user(env->dregs[1], &gregs[1]); 5091 __put_user(env->dregs[2], &gregs[2]); 5092 __put_user(env->dregs[3], &gregs[3]); 5093 __put_user(env->dregs[4], &gregs[4]); 5094 __put_user(env->dregs[5], &gregs[5]); 5095 __put_user(env->dregs[6], &gregs[6]); 5096 __put_user(env->dregs[7], &gregs[7]); 5097 __put_user(env->aregs[0], &gregs[8]); 5098 __put_user(env->aregs[1], &gregs[9]); 5099 __put_user(env->aregs[2], &gregs[10]); 5100 __put_user(env->aregs[3], &gregs[11]); 5101 __put_user(env->aregs[4], &gregs[12]); 5102 __put_user(env->aregs[5], &gregs[13]); 5103 __put_user(env->aregs[6], &gregs[14]); 5104 __put_user(env->aregs[7], &gregs[15]); 5105 __put_user(env->pc, &gregs[16]); 5106 __put_user(env->sr, &gregs[17]); 5107 5108 return 0; 5109 } 5110 5111 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5112 struct target_ucontext *uc) 5113 { 5114 int temp; 5115 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5116 5117 __get_user(temp, &uc->tuc_mcontext.version); 5118 if (temp != TARGET_MCONTEXT_VERSION) 5119 goto badframe; 5120 5121 /* restore passed registers */ 5122 __get_user(env->dregs[0], &gregs[0]); 5123 __get_user(env->dregs[1], &gregs[1]); 5124 __get_user(env->dregs[2], &gregs[2]); 5125 __get_user(env->dregs[3], &gregs[3]); 5126 __get_user(env->dregs[4], &gregs[4]); 5127 __get_user(env->dregs[5], &gregs[5]); 5128 __get_user(env->dregs[6], &gregs[6]); 5129 __get_user(env->dregs[7], &gregs[7]); 5130 __get_user(env->aregs[0], &gregs[8]); 5131 __get_user(env->aregs[1], &gregs[9]); 5132 __get_user(env->aregs[2], &gregs[10]); 5133 __get_user(env->aregs[3], &gregs[11]); 5134 __get_user(env->aregs[4], &gregs[12]); 5135 __get_user(env->aregs[5], &gregs[13]); 5136 __get_user(env->aregs[6], &gregs[14]); 5137 __get_user(env->aregs[7], &gregs[15]); 5138 __get_user(env->pc, &gregs[16]); 5139 __get_user(temp, &gregs[17]); 5140 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5141 5142 return 0; 5143 5144 badframe: 5145 return 1; 5146 } 5147 5148 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5149 target_siginfo_t *info, 5150 target_sigset_t *set, CPUM68KState *env) 5151 { 5152 struct target_rt_sigframe *frame; 5153 abi_ulong frame_addr; 5154 abi_ulong retcode_addr; 5155 abi_ulong info_addr; 5156 abi_ulong uc_addr; 5157 int err = 0; 5158 int i; 5159 5160 frame_addr = get_sigframe(ka, env, sizeof *frame); 5161 trace_user_setup_rt_frame(env, frame_addr); 5162 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5163 goto give_sigsegv; 5164 } 5165 5166 __put_user(sig, &frame->sig); 5167 5168 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5169 __put_user(info_addr, &frame->pinfo); 5170 5171 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5172 
__put_user(uc_addr, &frame->puc); 5173 5174 tswap_siginfo(&frame->info, info); 5175 5176 /* Create the ucontext */ 5177 5178 __put_user(0, &frame->uc.tuc_flags); 5179 __put_user(0, &frame->uc.tuc_link); 5180 __put_user(target_sigaltstack_used.ss_sp, 5181 &frame->uc.tuc_stack.ss_sp); 5182 __put_user(sas_ss_flags(env->aregs[7]), 5183 &frame->uc.tuc_stack.ss_flags); 5184 __put_user(target_sigaltstack_used.ss_size, 5185 &frame->uc.tuc_stack.ss_size); 5186 err |= target_rt_setup_ucontext(&frame->uc, env); 5187 5188 if (err) 5189 goto give_sigsegv; 5190 5191 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5192 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5193 } 5194 5195 /* Set up to return from userspace. */ 5196 5197 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5198 __put_user(retcode_addr, &frame->pretcode); 5199 5200 /* moveq #,d0; notb d0; trap #0 */ 5201 5202 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 5203 (uint32_t *)(frame->retcode + 0)); 5204 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); 5205 5206 if (err) 5207 goto give_sigsegv; 5208 5209 /* Set up to return from userspace */ 5210 5211 env->aregs[7] = frame_addr; 5212 env->pc = ka->_sa_handler; 5213 5214 unlock_user_struct(frame, frame_addr, 1); 5215 return; 5216 5217 give_sigsegv: 5218 unlock_user_struct(frame, frame_addr, 1); 5219 force_sig(TARGET_SIGSEGV); 5220 } 5221 5222 long do_sigreturn(CPUM68KState *env) 5223 { 5224 struct target_sigframe *frame; 5225 abi_ulong frame_addr = env->aregs[7] - 4; 5226 target_sigset_t target_set; 5227 sigset_t set; 5228 int i; 5229 5230 trace_user_do_sigreturn(env, frame_addr); 5231 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5232 goto badframe; 5233 5234 /* set blocked signals */ 5235 5236 __get_user(target_set.sig[0], &frame->sc.sc_mask); 5237 5238 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5239 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 5240 } 5241 5242 target_to_host_sigset_internal(&set, &target_set); 5243 set_sigmask(&set); 5244 5245 /* restore registers */ 5246 5247 restore_sigcontext(env, &frame->sc); 5248 5249 unlock_user_struct(frame, frame_addr, 0); 5250 return -TARGET_QEMU_ESIGRETURN; 5251 5252 badframe: 5253 force_sig(TARGET_SIGSEGV); 5254 return 0; 5255 } 5256 5257 long do_rt_sigreturn(CPUM68KState *env) 5258 { 5259 struct target_rt_sigframe *frame; 5260 abi_ulong frame_addr = env->aregs[7] - 4; 5261 target_sigset_t target_set; 5262 sigset_t set; 5263 5264 trace_user_do_rt_sigreturn(env, frame_addr); 5265 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5266 goto badframe; 5267 5268 target_to_host_sigset_internal(&set, &target_set); 5269 set_sigmask(&set); 5270 5271 /* restore registers */ 5272 5273 if (target_rt_restore_ucontext(env, &frame->uc)) 5274 goto badframe; 5275 5276 if (do_sigaltstack(frame_addr + 5277 offsetof(struct target_rt_sigframe, uc.tuc_stack), 5278 0, get_sp_from_cpustate(env)) == -EFAULT) 5279 goto badframe; 5280 5281 unlock_user_struct(frame, frame_addr, 0); 5282 return -TARGET_QEMU_ESIGRETURN; 5283 5284 badframe: 5285 unlock_user_struct(frame, frame_addr, 0); 5286 force_sig(TARGET_SIGSEGV); 5287 return 0; 5288 } 5289 5290 #elif defined(TARGET_ALPHA) 5291 5292 struct target_sigcontext { 5293 abi_long sc_onstack; 5294 abi_long sc_mask; 5295 abi_long sc_pc; 5296 abi_long sc_ps; 5297 abi_long sc_regs[32]; 5298 abi_long sc_ownedfp; 5299 abi_long sc_fpregs[32]; 5300 abi_ulong sc_fpcr; 5301 abi_ulong sc_fp_control; 5302 abi_ulong sc_reserved1; 5303 abi_ulong sc_reserved2; 5304 
abi_ulong sc_ssize; 5305 abi_ulong sc_sbase; 5306 abi_ulong sc_traparg_a0; 5307 abi_ulong sc_traparg_a1; 5308 abi_ulong sc_traparg_a2; 5309 abi_ulong sc_fp_trap_pc; 5310 abi_ulong sc_fp_trigger_sum; 5311 abi_ulong sc_fp_trigger_inst; 5312 }; 5313 5314 struct target_ucontext { 5315 abi_ulong tuc_flags; 5316 abi_ulong tuc_link; 5317 abi_ulong tuc_osf_sigmask; 5318 target_stack_t tuc_stack; 5319 struct target_sigcontext tuc_mcontext; 5320 target_sigset_t tuc_sigmask; 5321 }; 5322 5323 struct target_sigframe { 5324 struct target_sigcontext sc; 5325 unsigned int retcode[3]; 5326 }; 5327 5328 struct target_rt_sigframe { 5329 target_siginfo_t info; 5330 struct target_ucontext uc; 5331 unsigned int retcode[3]; 5332 }; 5333 5334 #define INSN_MOV_R30_R16 0x47fe0410 5335 #define INSN_LDI_R0 0x201f0000 5336 #define INSN_CALLSYS 0x00000083 5337 5338 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5339 abi_ulong frame_addr, target_sigset_t *set) 5340 { 5341 int i; 5342 5343 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5344 __put_user(set->sig[0], &sc->sc_mask); 5345 __put_user(env->pc, &sc->sc_pc); 5346 __put_user(8, &sc->sc_ps); 5347 5348 for (i = 0; i < 31; ++i) { 5349 __put_user(env->ir[i], &sc->sc_regs[i]); 5350 } 5351 __put_user(0, &sc->sc_regs[31]); 5352 5353 for (i = 0; i < 31; ++i) { 5354 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5355 } 5356 __put_user(0, &sc->sc_fpregs[31]); 5357 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5358 5359 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5360 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5361 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5362 } 5363 5364 static void restore_sigcontext(CPUAlphaState *env, 5365 struct target_sigcontext *sc) 5366 { 5367 uint64_t fpcr; 5368 int i; 5369 5370 __get_user(env->pc, &sc->sc_pc); 5371 5372 for (i = 0; i < 31; ++i) { 5373 __get_user(env->ir[i], &sc->sc_regs[i]); 5374 } 5375 for (i = 0; i < 31; ++i) { 5376 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5377 } 5378 5379 __get_user(fpcr, &sc->sc_fpcr); 5380 cpu_alpha_store_fpcr(env, fpcr); 5381 } 5382 5383 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5384 CPUAlphaState *env, 5385 unsigned long framesize) 5386 { 5387 abi_ulong sp = env->ir[IR_SP]; 5388 5389 /* This is the X/Open sanctioned signal stack switching. 
*/ 5390 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5391 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5392 } 5393 return (sp - framesize) & -32; 5394 } 5395 5396 static void setup_frame(int sig, struct target_sigaction *ka, 5397 target_sigset_t *set, CPUAlphaState *env) 5398 { 5399 abi_ulong frame_addr, r26; 5400 struct target_sigframe *frame; 5401 int err = 0; 5402 5403 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5404 trace_user_setup_frame(env, frame_addr); 5405 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5406 goto give_sigsegv; 5407 } 5408 5409 setup_sigcontext(&frame->sc, env, frame_addr, set); 5410 5411 if (ka->sa_restorer) { 5412 r26 = ka->sa_restorer; 5413 } else { 5414 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5415 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5416 &frame->retcode[1]); 5417 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5418 /* imb() */ 5419 r26 = frame_addr; 5420 } 5421 5422 unlock_user_struct(frame, frame_addr, 1); 5423 5424 if (err) { 5425 give_sigsegv: 5426 if (sig == TARGET_SIGSEGV) { 5427 ka->_sa_handler = TARGET_SIG_DFL; 5428 } 5429 force_sig(TARGET_SIGSEGV); 5430 } 5431 5432 env->ir[IR_RA] = r26; 5433 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5434 env->ir[IR_A0] = sig; 5435 env->ir[IR_A1] = 0; 5436 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5437 env->ir[IR_SP] = frame_addr; 5438 } 5439 5440 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5441 target_siginfo_t *info, 5442 target_sigset_t *set, CPUAlphaState *env) 5443 { 5444 abi_ulong frame_addr, r26; 5445 struct target_rt_sigframe *frame; 5446 int i, err = 0; 5447 5448 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5449 trace_user_setup_rt_frame(env, frame_addr); 5450 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5451 goto give_sigsegv; 5452 } 5453 5454 tswap_siginfo(&frame->info, info); 5455 5456 __put_user(0, &frame->uc.tuc_flags); 5457 __put_user(0, &frame->uc.tuc_link); 5458 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5459 __put_user(target_sigaltstack_used.ss_sp, 5460 &frame->uc.tuc_stack.ss_sp); 5461 __put_user(sas_ss_flags(env->ir[IR_SP]), 5462 &frame->uc.tuc_stack.ss_flags); 5463 __put_user(target_sigaltstack_used.ss_size, 5464 &frame->uc.tuc_stack.ss_size); 5465 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5466 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5467 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5468 } 5469 5470 if (ka->sa_restorer) { 5471 r26 = ka->sa_restorer; 5472 } else { 5473 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5474 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5475 &frame->retcode[1]); 5476 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5477 /* imb(); */ 5478 r26 = frame_addr; 5479 } 5480 5481 if (err) { 5482 give_sigsegv: 5483 if (sig == TARGET_SIGSEGV) { 5484 ka->_sa_handler = TARGET_SIG_DFL; 5485 } 5486 force_sig(TARGET_SIGSEGV); 5487 } 5488 5489 env->ir[IR_RA] = r26; 5490 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5491 env->ir[IR_A0] = sig; 5492 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5493 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5494 env->ir[IR_SP] = frame_addr; 5495 } 5496 5497 long do_sigreturn(CPUAlphaState *env) 5498 { 5499 struct target_sigcontext *sc; 5500 abi_ulong sc_addr = env->ir[IR_A0]; 5501 target_sigset_t target_set; 5502 sigset_t set; 5503 5504 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5505 goto 
badframe; 5506 } 5507 5508 target_sigemptyset(&target_set); 5509 __get_user(target_set.sig[0], &sc->sc_mask); 5510 5511 target_to_host_sigset_internal(&set, &target_set); 5512 set_sigmask(&set); 5513 5514 restore_sigcontext(env, sc); 5515 unlock_user_struct(sc, sc_addr, 0); 5516 return -TARGET_QEMU_ESIGRETURN; 5517 5518 badframe: 5519 force_sig(TARGET_SIGSEGV); 5520 } 5521 5522 long do_rt_sigreturn(CPUAlphaState *env) 5523 { 5524 abi_ulong frame_addr = env->ir[IR_A0]; 5525 struct target_rt_sigframe *frame; 5526 sigset_t set; 5527 5528 trace_user_do_rt_sigreturn(env, frame_addr); 5529 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5530 goto badframe; 5531 } 5532 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5533 set_sigmask(&set); 5534 5535 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5536 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5537 uc.tuc_stack), 5538 0, env->ir[IR_SP]) == -EFAULT) { 5539 goto badframe; 5540 } 5541 5542 unlock_user_struct(frame, frame_addr, 0); 5543 return -TARGET_QEMU_ESIGRETURN; 5544 5545 5546 badframe: 5547 unlock_user_struct(frame, frame_addr, 0); 5548 force_sig(TARGET_SIGSEGV); 5549 } 5550 5551 #elif defined(TARGET_TILEGX) 5552 5553 struct target_sigcontext { 5554 union { 5555 /* General-purpose registers. */ 5556 abi_ulong gregs[56]; 5557 struct { 5558 abi_ulong __gregs[53]; 5559 abi_ulong tp; /* Aliases gregs[TREG_TP]. */ 5560 abi_ulong sp; /* Aliases gregs[TREG_SP]. */ 5561 abi_ulong lr; /* Aliases gregs[TREG_LR]. */ 5562 }; 5563 }; 5564 abi_ulong pc; /* Program counter. */ 5565 abi_ulong ics; /* In Interrupt Critical Section? */ 5566 abi_ulong faultnum; /* Fault number. */ 5567 abi_ulong pad[5]; 5568 }; 5569 5570 struct target_ucontext { 5571 abi_ulong tuc_flags; 5572 abi_ulong tuc_link; 5573 target_stack_t tuc_stack; 5574 struct target_sigcontext tuc_mcontext; 5575 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 5576 }; 5577 5578 struct target_rt_sigframe { 5579 unsigned char save_area[16]; /* caller save area */ 5580 struct target_siginfo info; 5581 struct target_ucontext uc; 5582 abi_ulong retcode[2]; 5583 }; 5584 5585 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */ 5586 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */ 5587 5588 5589 static void setup_sigcontext(struct target_sigcontext *sc, 5590 CPUArchState *env, int signo) 5591 { 5592 int i; 5593 5594 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5595 __put_user(env->regs[i], &sc->gregs[i]); 5596 } 5597 5598 __put_user(env->pc, &sc->pc); 5599 __put_user(0, &sc->ics); 5600 __put_user(signo, &sc->faultnum); 5601 } 5602 5603 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc) 5604 { 5605 int i; 5606 5607 for (i = 0; i < TILEGX_R_COUNT; ++i) { 5608 __get_user(env->regs[i], &sc->gregs[i]); 5609 } 5610 5611 __get_user(env->pc, &sc->pc); 5612 } 5613 5614 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, 5615 size_t frame_size) 5616 { 5617 unsigned long sp = env->regs[TILEGX_R_SP]; 5618 5619 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) { 5620 return -1UL; 5621 } 5622 5623 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 5624 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5625 } 5626 5627 sp -= frame_size; 5628 sp &= -16UL; 5629 return sp; 5630 } 5631 5632 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5633 target_siginfo_t *info, 5634 target_sigset_t *set, CPUArchState *env) 5635 { 5636 abi_ulong 
frame_addr; 5637 struct target_rt_sigframe *frame; 5638 unsigned long restorer; 5639 5640 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5641 trace_user_setup_rt_frame(env, frame_addr); 5642 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5643 goto give_sigsegv; 5644 } 5645 5646 /* Always write at least the signal number for the stack backtracer. */ 5647 if (ka->sa_flags & TARGET_SA_SIGINFO) { 5648 /* At sigreturn time, restore the callee-save registers too. */ 5649 tswap_siginfo(&frame->info, info); 5650 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */ 5651 } else { 5652 __put_user(info->si_signo, &frame->info.si_signo); 5653 } 5654 5655 /* Create the ucontext. */ 5656 __put_user(0, &frame->uc.tuc_flags); 5657 __put_user(0, &frame->uc.tuc_link); 5658 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 5659 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]), 5660 &frame->uc.tuc_stack.ss_flags); 5661 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 5662 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo); 5663 5664 if (ka->sa_flags & TARGET_SA_RESTORER) { 5665 restorer = (unsigned long) ka->sa_restorer; 5666 } else { 5667 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]); 5668 __put_user(INSN_SWINT1, &frame->retcode[1]); 5669 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode); 5670 } 5671 env->pc = (unsigned long) ka->_sa_handler; 5672 env->regs[TILEGX_R_SP] = (unsigned long) frame; 5673 env->regs[TILEGX_R_LR] = restorer; 5674 env->regs[0] = (unsigned long) sig; 5675 env->regs[1] = (unsigned long) &frame->info; 5676 env->regs[2] = (unsigned long) &frame->uc; 5677 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */ 5678 5679 unlock_user_struct(frame, frame_addr, 1); 5680 return; 5681 5682 give_sigsegv: 5683 if (sig == TARGET_SIGSEGV) { 5684 ka->_sa_handler = TARGET_SIG_DFL; 5685 } 5686 force_sig(TARGET_SIGSEGV /* , current */); 5687 } 5688 5689 long do_rt_sigreturn(CPUTLGState *env) 5690 { 5691 abi_ulong frame_addr = env->regs[TILEGX_R_SP]; 5692 struct target_rt_sigframe *frame; 5693 sigset_t set; 5694 5695 trace_user_do_rt_sigreturn(env, frame_addr); 5696 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5697 goto badframe; 5698 } 5699 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5700 set_sigmask(&set); 5701 5702 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5703 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5704 uc.tuc_stack), 5705 0, env->regs[TILEGX_R_SP]) == -EFAULT) { 5706 goto badframe; 5707 } 5708 5709 unlock_user_struct(frame, frame_addr, 0); 5710 return -TARGET_QEMU_ESIGRETURN; 5711 5712 5713 badframe: 5714 unlock_user_struct(frame, frame_addr, 0); 5715 force_sig(TARGET_SIGSEGV); 5716 } 5717 5718 #else 5719 5720 static void setup_frame(int sig, struct target_sigaction *ka, 5721 target_sigset_t *set, CPUArchState *env) 5722 { 5723 fprintf(stderr, "setup_frame: not implemented\n"); 5724 } 5725 5726 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5727 target_siginfo_t *info, 5728 target_sigset_t *set, CPUArchState *env) 5729 { 5730 fprintf(stderr, "setup_rt_frame: not implemented\n"); 5731 } 5732 5733 long do_sigreturn(CPUArchState *env) 5734 { 5735 fprintf(stderr, "do_sigreturn: not implemented\n"); 5736 return -TARGET_ENOSYS; 5737 } 5738 5739 long do_rt_sigreturn(CPUArchState *env) 5740 { 5741 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 5742 return -TARGET_ENOSYS; 5743 } 5744 5745 #endif 
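/*
 * Editor's sketch: a standalone guest-side program (not part of this file)
 * that exercises the rt signal path implemented above when run under
 * qemu-user.  Installing the handler with SA_SIGINFO makes
 * handle_pending_signal() below choose the target's setup_rt_frame(), and
 * returning from the handler goes through the signal trampoline into
 * do_rt_sigreturn().  Build it with a guest cross-compiler; it is guarded
 * out so this file still compiles unchanged.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
    /* Runs on the frame written by setup_rt_frame(); keep it signal-safe. */
    static const char msg[] = "caught SIGUSR1\n";
    (void)sig; (void)info; (void)ucontext;
    write(STDOUT_FILENO, msg, sizeof(msg) - 1);
}

int main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    if (sigaction(SIGUSR1, &sa, NULL) < 0) {
        perror("sigaction");
        return EXIT_FAILURE;
    }

    raise(SIGUSR1);    /* delivered via process_pending_signals() below */
    return EXIT_SUCCESS;
}
#endif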
5746 5747 static void handle_pending_signal(CPUArchState *cpu_env, int sig) 5748 { 5749 CPUState *cpu = ENV_GET_CPU(cpu_env); 5750 abi_ulong handler; 5751 sigset_t set; 5752 target_sigset_t target_old_set; 5753 struct target_sigaction *sa; 5754 TaskState *ts = cpu->opaque; 5755 struct emulated_sigtable *k = &ts->sigtab[sig - 1]; 5756 5757 trace_user_handle_signal(cpu_env, sig); 5758 /* dequeue signal */ 5759 k->pending = 0; 5760 5761 sig = gdb_handlesig(cpu, sig); 5762 if (!sig) { 5763 sa = NULL; 5764 handler = TARGET_SIG_IGN; 5765 } else { 5766 sa = &sigact_table[sig - 1]; 5767 handler = sa->_sa_handler; 5768 } 5769 5770 if (handler == TARGET_SIG_DFL) { 5771 /* default handler : ignore some signal. The other are job control or fatal */ 5772 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { 5773 kill(getpid(),SIGSTOP); 5774 } else if (sig != TARGET_SIGCHLD && 5775 sig != TARGET_SIGURG && 5776 sig != TARGET_SIGWINCH && 5777 sig != TARGET_SIGCONT) { 5778 force_sig(sig); 5779 } 5780 } else if (handler == TARGET_SIG_IGN) { 5781 /* ignore sig */ 5782 } else if (handler == TARGET_SIG_ERR) { 5783 force_sig(sig); 5784 } else { 5785 /* compute the blocked signals during the handler execution */ 5786 sigset_t *blocked_set; 5787 5788 target_to_host_sigset(&set, &sa->sa_mask); 5789 /* SA_NODEFER indicates that the current signal should not be 5790 blocked during the handler */ 5791 if (!(sa->sa_flags & TARGET_SA_NODEFER)) 5792 sigaddset(&set, target_to_host_signal(sig)); 5793 5794 /* save the previous blocked signal state to restore it at the 5795 end of the signal execution (see do_sigreturn) */ 5796 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask); 5797 5798 /* block signals in the handler */ 5799 blocked_set = ts->in_sigsuspend ? 5800 &ts->sigsuspend_mask : &ts->signal_mask; 5801 sigorset(&ts->signal_mask, blocked_set, &set); 5802 ts->in_sigsuspend = 0; 5803 5804 /* if the CPU is in VM86 mode, we restore the 32 bit values */ 5805 #if defined(TARGET_I386) && !defined(TARGET_X86_64) 5806 { 5807 CPUX86State *env = cpu_env; 5808 if (env->eflags & VM_MASK) 5809 save_v86_state(env); 5810 } 5811 #endif 5812 /* prepare the stack frame of the virtual CPU */ 5813 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \ 5814 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) 5815 /* These targets do not have traditional signals. */ 5816 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 5817 #else 5818 if (sa->sa_flags & TARGET_SA_SIGINFO) 5819 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 5820 else 5821 setup_frame(sig, sa, &target_old_set, cpu_env); 5822 #endif 5823 if (sa->sa_flags & TARGET_SA_RESETHAND) { 5824 sa->_sa_handler = TARGET_SIG_DFL; 5825 } 5826 } 5827 } 5828 5829 void process_pending_signals(CPUArchState *cpu_env) 5830 { 5831 CPUState *cpu = ENV_GET_CPU(cpu_env); 5832 int sig; 5833 TaskState *ts = cpu->opaque; 5834 sigset_t set; 5835 sigset_t *blocked_set; 5836 5837 while (atomic_read(&ts->signal_pending)) { 5838 /* FIXME: This is not threadsafe. */ 5839 sigfillset(&set); 5840 sigprocmask(SIG_SETMASK, &set, 0); 5841 5842 sig = ts->sync_signal.pending; 5843 if (sig) { 5844 /* Synchronous signals are forced, 5845 * see force_sig_info() and callers in Linux 5846 * Note that not all of our queue_signal() calls in QEMU correspond 5847 * to force_sig_info() calls in Linux (some are send_sig_info()). 
5848 * However it seems like a kernel bug to me to allow the process 5849 * to block a synchronous signal since it could then just end up 5850 * looping round and round indefinitely. 5851 */ 5852 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig]) 5853 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) { 5854 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]); 5855 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL; 5856 } 5857 5858 handle_pending_signal(cpu_env, sig); 5859 } 5860 5861 for (sig = 1; sig <= TARGET_NSIG; sig++) { 5862 blocked_set = ts->in_sigsuspend ? 5863 &ts->sigsuspend_mask : &ts->signal_mask; 5864 5865 if (ts->sigtab[sig - 1].pending && 5866 (!sigismember(blocked_set, 5867 target_to_host_signal_table[sig]))) { 5868 handle_pending_signal(cpu_env, sig); 5869 /* Restart scan from the beginning */ 5870 sig = 1; 5871 } 5872 } 5873 5874 /* if no signal is pending, unblock signals and recheck (the act 5875 * of unblocking might cause us to take another host signal which 5876 * will set signal_pending again). 5877 */ 5878 atomic_set(&ts->signal_pending, 0); 5879 ts->in_sigsuspend = 0; 5880 set = ts->signal_mask; 5881 sigdelset(&set, SIGSEGV); 5882 sigdelset(&set, SIGBUS); 5883 sigprocmask(SIG_SETMASK, &set, 0); 5884 } 5885 ts->in_sigsuspend = 0; 5886 } 5887
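/*
 * Editor's sketch: a second standalone guest-side test (not part of this
 * file) covering the alternate-stack path.  With sigaltstack() plus
 * SA_ONSTACK, each target's get_sigframe() above switches to
 * target_sigaltstack_used, and the old stack settings are restored on
 * return from the handler via do_rt_sigreturn()/do_sigaltstack().  The
 * 64 KiB stack size is an arbitrary choice for the example.  Guarded out
 * so this file still compiles unchanged.
 */
#if 0
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char altstack[64 * 1024];
static volatile sig_atomic_t ran_on_altstack;

static void handler(int sig)
{
    char probe;
    uintptr_t p = (uintptr_t)&probe;

    /* Record whether this stack frame lies inside the alternate stack. */
    ran_on_altstack = p >= (uintptr_t)altstack &&
                      p < (uintptr_t)altstack + sizeof(altstack);
    (void)sig;
}

int main(void)
{
    stack_t ss;
    struct sigaction sa;

    ss.ss_sp = altstack;
    ss.ss_size = sizeof(altstack);
    ss.ss_flags = 0;
    if (sigaltstack(&ss, NULL) < 0) {
        perror("sigaltstack");
        return EXIT_FAILURE;
    }

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = handler;
    sa.sa_flags = SA_ONSTACK;
    sigemptyset(&sa.sa_mask);
    if (sigaction(SIGUSR2, &sa, NULL) < 0) {
        perror("sigaction");
        return EXIT_FAILURE;
    }

    raise(SIGUSR2);
    printf("handler ran on alternate stack: %d\n", (int)ran_on_altstack);
    return EXIT_SUCCESS;
}
#endif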