/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"

//#define DEBUG_SIGNAL

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}
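
/*
 * Illustrative note on host_to_target_signal_table[] above: signal_init()
 * completes the table at startup by mapping every entry that is still zero
 * to itself and then building the inverse target_to_host_signal_table[].
 * Because of the SIGRTMIN/SIGRTMAX swap, a round trip is still the identity;
 * for example:
 *
 *     int t = host_to_target_signal(__SIGRTMIN);   // yields __SIGRTMAX
 *     int h = target_to_host_signal(t);            // back to __SIGRTMIN
 *
 * Only the numbering the guest sees for those two signals differs from the
 * host's.
 */
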
int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for(i = 1;i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}
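
/*
 * Illustrative note on the sigset helpers above: signal number N occupies
 * bit (N - 1) % TARGET_NSIG_BPW of word (N - 1) / TARGET_NSIG_BPW, so with
 * TARGET_NSIG_BPW == 32, signal 33 lands in sig[1], bit 0.  The *_internal
 * converters operate on host-endian words; host_to_target_sigset() and
 * target_to_host_sigset() additionally byte-swap each word with tswapal()
 * so the result matches the guest's memory representation.
 */
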
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. This wraps the sigprocmask host calls
 * that need to be protected (calls originating from the guest).
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    int ret;
    sigset_t val;
    sigset_t *temp = NULL;
    CPUState *cpu = thread_cpu;
    TaskState *ts = (TaskState *)cpu->opaque;
    bool segv_was_blocked = ts->sigsegv_blocked;

    if (set) {
        bool has_sigsegv = sigismember(set, SIGSEGV);
        val = *set;
        temp = &val;

        sigdelset(temp, SIGSEGV);

        switch (how) {
        case SIG_BLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = true;
            }
            break;
        case SIG_UNBLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = false;
            }
            break;
        case SIG_SETMASK:
            ts->sigsegv_blocked = has_sigsegv;
            break;
        default:
            g_assert_not_reached();
        }
    }

    ret = sigprocmask(how, temp, oldset);

    if (oldset && segv_was_blocked) {
        sigaddset(oldset, SIGSEGV);
    }

    return ret;
}
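
/*
 * Illustrative call pattern for do_sigprocmask() (a sketch mirroring the
 * sigreturn paths later in this file, where guest_set stands for whatever
 * target_sigset_t the caller obtained from guest memory):
 *
 *     sigset_t blocked;
 *     target_to_host_sigset(&blocked, &guest_set);
 *     do_sigprocmask(SIG_SETMASK, &blocked, NULL);
 *
 * SIGSEGV is never really blocked on the host, because QEMU itself relies on
 * receiving host SIGSEGV to detect guest faults; the guest's request is only
 * recorded in ts->sigsegv_blocked and reported back through oldset.
 */
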
/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        /* Should never come here, but who knows. The information for
           the target is irrelevant.  */
        tinfo->_sifields._sigfault._addr = 0;
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band = info->si_band;
        tinfo->_sifields._sigpoll._fd = info->si_fd;
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid = info->si_pid;
        tinfo->_sifields._sigchld._uid = info->si_uid;
        tinfo->_sifields._sigchld._status
            = host_to_target_waitstatus(info->si_status);
        tinfo->_sifields._sigchld._utime = info->si_utime;
        tinfo->_sifields._sigchld._stime = info->si_stime;
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = info->si_pid;
        tinfo->_sifields._rt._uid = info->si_uid;
        /* XXX: potential problem if 64 bit */
        tinfo->_sifields._rt._sigval.sival_ptr
            = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
    }
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int sig = info->si_signo;
    tinfo->si_signo = tswap32(sig);
    tinfo->si_errno = tswap32(info->si_errno);
    tinfo->si_code = tswap32(info->si_code);

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        tinfo->_sifields._sigfault._addr
            = tswapal(info->_sifields._sigfault._addr);
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band
            = tswap32(info->_sifields._sigpoll._band);
        tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid
            = tswap32(info->_sifields._sigchld._pid);
        tinfo->_sifields._sigchld._uid
            = tswap32(info->_sifields._sigchld._uid);
        tinfo->_sifields._sigchld._status
            = tswap32(info->_sifields._sigchld._status);
        tinfo->_sifields._sigchld._utime
            = tswapal(info->_sifields._sigchld._utime);
        tinfo->_sifields._sigchld._stime
            = tswapal(info->_sifields._sigchld._stime);
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
        tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
        tinfo->_sifields._rt._sigval.sival_ptr
            = tswapal(info->_sifields._rt._sigval.sival_ptr);
    }
}


void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    tswap_siginfo(tinfo, tinfo);
}

/* XXX: we only support POSIX RT signals here.  */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    info->si_signo = tswap32(tinfo->si_signo);
    info->si_errno = tswap32(tinfo->si_errno);
    info->si_code = tswap32(tinfo->si_code);
    info->si_pid = tswap32(tinfo->_sifields._rt._pid);
    info->si_uid = tswap32(tinfo->_sifields._rt._uid);
    info->si_value.sival_ptr =
        (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
}
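
/*
 * Note on the split between the two conversion steps above:
 * host_to_target_siginfo_noswap() produces the target's siginfo layout but
 * keeps host byte order; that is the form kept in the signal queue.
 * tswap_siginfo() does the byte swapping and runs later, when
 * copy_siginfo_to_user() writes the queued siginfo into the guest signal
 * frame.  host_to_target_siginfo() simply chains the two for callers that
 * need a fully converted structure in one step.
 */
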
static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

void signal_init(void)
{
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}

/* signal queue handling */

static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;
    struct sigqueue *q = ts->first_free;
    if (!q)
        return NULL;
    ts->first_free = q->next;
    return q;
}

static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    q->next = ts->first_free;
    ts->first_free = q;
}

/* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;
    host_sig = target_to_host_signal(target_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
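
/*
 * Pending-signal bookkeeping, as used by queue_signal() below: each target
 * signal has one emulated_sigtable slot in ts->sigtab[], with a 'pending'
 * flag, an embedded 'info' entry and a 'first' list head.  Non-real-time
 * signals keep at most one queued instance; real-time signals chain extra
 * entries taken from the per-task free list managed by alloc_sigqueue() and
 * free_sigqueue().  ts->signal_pending flags that at least one guest signal
 * is awaiting delivery.
 */
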
/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;
    struct emulated_sigtable *k;
    struct sigqueue *q, **pq;
    abi_ulong handler;
    int queue;

#if defined(DEBUG_SIGNAL)
    fprintf(stderr, "queue_signal: sig=%d\n",
            sig);
#endif
    k = &ts->sigtab[sig - 1];
    queue = gdb_queuesig ();
    handler = sigact_table[sig - 1]._sa_handler;

    if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
        /* Guest has blocked SIGSEGV but we got one anyway. Assume this
         * is a forced SIGSEGV (i.e. one the kernel handles via force_sig_info
         * because it got a real MMU fault). A blocked SIGSEGV in that
         * situation is treated as if using the default handler. This is
         * not correct if some other process has randomly sent us a SIGSEGV
         * via kill(), but that is not easy to distinguish at this point,
         * so we assume it doesn't happen.
         */
        handler = TARGET_SIG_DFL;
    }

    if (!queue && handler == TARGET_SIG_DFL) {
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
            return 0;
        } else
        /* default handler : ignore some signals. The others are fatal */
        if (sig != TARGET_SIGCHLD &&
            sig != TARGET_SIGURG &&
            sig != TARGET_SIGWINCH &&
            sig != TARGET_SIGCONT) {
            force_sig(sig);
        } else {
            return 0; /* indicate ignored */
        }
    } else if (!queue && handler == TARGET_SIG_IGN) {
        /* ignore signal */
        return 0;
    } else if (!queue && handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        pq = &k->first;
        if (sig < TARGET_SIGRTMIN) {
            /* if non real time signal, we queue exactly one signal */
            if (!k->pending)
                q = &k->info;
            else
                return 0;
        } else {
            if (!k->pending) {
                /* first signal */
                q = &k->info;
            } else {
                q = alloc_sigqueue(env);
                if (!q)
                    return -EAGAIN;
                while (*pq != NULL)
                    pq = &(*pq)->next;
            }
        }
        *pq = q;
        q->info = *info;
        q->next = NULL;
        k->pending = 1;
        /* signal that a new signal is pending */
        ts->signal_pending = 1;
        return 1; /* indicates that the signal was queued */
    }
}

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    int sig;
    target_siginfo_t tinfo;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
#if defined(DEBUG_SIGNAL)
    fprintf(stderr, "qemu: got signal %d\n", sig);
#endif
    host_to_target_siginfo_noswap(&tinfo, info);
    if (queue_signal(env, sig, &tinfo) == 1) {
        /* interrupt the virtual CPU as soon as possible */
        cpu_exit(thread_cpu);
    }
}
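
/*
 * Summary of the host-to-guest delivery path (descriptive only): the host
 * kernel invokes host_signal_handler() above, which first gives
 * cpu_signal_handler() a chance to treat SIGSEGV/SIGBUS as guest CPU
 * exceptions, then converts and queues everything else via queue_signal()
 * and kicks the CPU with cpu_exit().  The guest-visible delivery itself
 * happens later, outside the handler, through the per-target setup_frame()
 * and setup_rt_frame() implementations further down in this file.
 */
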
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)
            || __get_user(ss.ss_sp, &uss->ss_sp)
            || __get_user(ss.ss_size, &uss->ss_size)
            || __get_user(ss.ss_flags, &uss->ss_flags))
            goto out;
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < MINSIGSTKSZ)
                goto out;
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns host values and errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
        return -EINVAL;
    k = &sigact_table[sig - 1];
#if defined(DEBUG_SIGNAL)
    fprintf(stderr, "sigaction sig=%d act=0x%p, oact=0x%p\n",
            sig, act, oact);
#endif
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.
*/ 696 k->sa_mask = act->sa_mask; 697 698 /* we update the host linux signal state */ 699 host_sig = target_to_host_signal(sig); 700 if (host_sig != SIGSEGV && host_sig != SIGBUS) { 701 sigfillset(&act1.sa_mask); 702 act1.sa_flags = SA_SIGINFO; 703 if (k->sa_flags & TARGET_SA_RESTART) 704 act1.sa_flags |= SA_RESTART; 705 /* NOTE: it is important to update the host kernel signal 706 ignore state to avoid getting unexpected interrupted 707 syscalls */ 708 if (k->_sa_handler == TARGET_SIG_IGN) { 709 act1.sa_sigaction = (void *)SIG_IGN; 710 } else if (k->_sa_handler == TARGET_SIG_DFL) { 711 if (fatal_signal (sig)) 712 act1.sa_sigaction = host_signal_handler; 713 else 714 act1.sa_sigaction = (void *)SIG_DFL; 715 } else { 716 act1.sa_sigaction = host_signal_handler; 717 } 718 ret = sigaction(host_sig, &act1, NULL); 719 } 720 } 721 return ret; 722 } 723 724 static inline int copy_siginfo_to_user(target_siginfo_t *tinfo, 725 const target_siginfo_t *info) 726 { 727 tswap_siginfo(tinfo, info); 728 return 0; 729 } 730 731 static inline int current_exec_domain_sig(int sig) 732 { 733 return /* current->exec_domain && current->exec_domain->signal_invmap 734 && sig < 32 ? current->exec_domain->signal_invmap[sig] : */ sig; 735 } 736 737 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32 738 739 /* from the Linux kernel */ 740 741 struct target_fpreg { 742 uint16_t significand[4]; 743 uint16_t exponent; 744 }; 745 746 struct target_fpxreg { 747 uint16_t significand[4]; 748 uint16_t exponent; 749 uint16_t padding[3]; 750 }; 751 752 struct target_xmmreg { 753 abi_ulong element[4]; 754 }; 755 756 struct target_fpstate { 757 /* Regular FPU environment */ 758 abi_ulong cw; 759 abi_ulong sw; 760 abi_ulong tag; 761 abi_ulong ipoff; 762 abi_ulong cssel; 763 abi_ulong dataoff; 764 abi_ulong datasel; 765 struct target_fpreg _st[8]; 766 uint16_t status; 767 uint16_t magic; /* 0xffff = regular FPU data only */ 768 769 /* FXSR FPU environment */ 770 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */ 771 abi_ulong mxcsr; 772 abi_ulong reserved; 773 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 774 struct target_xmmreg _xmm[8]; 775 abi_ulong padding[56]; 776 }; 777 778 #define X86_FXSR_MAGIC 0x0000 779 780 struct target_sigcontext { 781 uint16_t gs, __gsh; 782 uint16_t fs, __fsh; 783 uint16_t es, __esh; 784 uint16_t ds, __dsh; 785 abi_ulong edi; 786 abi_ulong esi; 787 abi_ulong ebp; 788 abi_ulong esp; 789 abi_ulong ebx; 790 abi_ulong edx; 791 abi_ulong ecx; 792 abi_ulong eax; 793 abi_ulong trapno; 794 abi_ulong err; 795 abi_ulong eip; 796 uint16_t cs, __csh; 797 abi_ulong eflags; 798 abi_ulong esp_at_signal; 799 uint16_t ss, __ssh; 800 abi_ulong fpstate; /* pointer */ 801 abi_ulong oldmask; 802 abi_ulong cr2; 803 }; 804 805 struct target_ucontext { 806 abi_ulong tuc_flags; 807 abi_ulong tuc_link; 808 target_stack_t tuc_stack; 809 struct target_sigcontext tuc_mcontext; 810 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 811 }; 812 813 struct sigframe 814 { 815 abi_ulong pretcode; 816 int sig; 817 struct target_sigcontext sc; 818 struct target_fpstate fpstate; 819 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 820 char retcode[8]; 821 }; 822 823 struct rt_sigframe 824 { 825 abi_ulong pretcode; 826 int sig; 827 abi_ulong pinfo; 828 abi_ulong puc; 829 struct target_siginfo info; 830 struct target_ucontext uc; 831 struct target_fpstate fpstate; 832 char retcode[8]; 833 }; 834 835 /* 836 * Set up a signal frame. 
837 */ 838 839 /* XXX: save x87 state */ 840 static int 841 setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate, 842 CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr) 843 { 844 CPUState *cs = CPU(x86_env_get_cpu(env)); 845 int err = 0; 846 uint16_t magic; 847 848 /* already locked in setup_frame() */ 849 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 850 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 851 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 852 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 853 __put_user(env->regs[R_EDI], &sc->edi); 854 __put_user(env->regs[R_ESI], &sc->esi); 855 __put_user(env->regs[R_EBP], &sc->ebp); 856 __put_user(env->regs[R_ESP], &sc->esp); 857 __put_user(env->regs[R_EBX], &sc->ebx); 858 __put_user(env->regs[R_EDX], &sc->edx); 859 __put_user(env->regs[R_ECX], &sc->ecx); 860 __put_user(env->regs[R_EAX], &sc->eax); 861 __put_user(cs->exception_index, &sc->trapno); 862 __put_user(env->error_code, &sc->err); 863 __put_user(env->eip, &sc->eip); 864 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 865 __put_user(env->eflags, &sc->eflags); 866 __put_user(env->regs[R_ESP], &sc->esp_at_signal); 867 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 868 869 cpu_x86_fsave(env, fpstate_addr, 1); 870 fpstate->status = fpstate->sw; 871 magic = 0xffff; 872 __put_user(magic, &fpstate->magic); 873 __put_user(fpstate_addr, &sc->fpstate); 874 875 /* non-iBCS2 extensions.. */ 876 __put_user(mask, &sc->oldmask); 877 __put_user(env->cr[2], &sc->cr2); 878 return err; 879 } 880 881 /* 882 * Determine which stack to use.. 883 */ 884 885 static inline abi_ulong 886 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) 887 { 888 unsigned long esp; 889 890 /* Default to using normal stack */ 891 esp = env->regs[R_ESP]; 892 /* This is the X/Open sanctioned signal stack switching. */ 893 if (ka->sa_flags & TARGET_SA_ONSTACK) { 894 if (sas_ss_flags(esp) == 0) 895 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 896 } 897 898 /* This is the legacy signal stack switching. */ 899 else 900 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 901 !(ka->sa_flags & TARGET_SA_RESTORER) && 902 ka->sa_restorer) { 903 esp = (unsigned long) ka->sa_restorer; 904 } 905 return (esp - frame_size) & -8ul; 906 } 907 908 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 909 static void setup_frame(int sig, struct target_sigaction *ka, 910 target_sigset_t *set, CPUX86State *env) 911 { 912 abi_ulong frame_addr; 913 struct sigframe *frame; 914 int i, err = 0; 915 916 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 917 918 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 919 goto give_sigsegv; 920 921 __put_user(current_exec_domain_sig(sig), 922 &frame->sig); 923 if (err) 924 goto give_sigsegv; 925 926 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 927 frame_addr + offsetof(struct sigframe, fpstate)); 928 if (err) 929 goto give_sigsegv; 930 931 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 932 if (__put_user(set->sig[i], &frame->extramask[i - 1])) 933 goto give_sigsegv; 934 } 935 936 /* Set up to return from userspace. If provided, use a stub 937 already in userspace. 
*/ 938 if (ka->sa_flags & TARGET_SA_RESTORER) { 939 __put_user(ka->sa_restorer, &frame->pretcode); 940 } else { 941 uint16_t val16; 942 abi_ulong retcode_addr; 943 retcode_addr = frame_addr + offsetof(struct sigframe, retcode); 944 __put_user(retcode_addr, &frame->pretcode); 945 /* This is popl %eax ; movl $,%eax ; int $0x80 */ 946 val16 = 0xb858; 947 __put_user(val16, (uint16_t *)(frame->retcode+0)); 948 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); 949 val16 = 0x80cd; 950 __put_user(val16, (uint16_t *)(frame->retcode+6)); 951 } 952 953 if (err) 954 goto give_sigsegv; 955 956 /* Set up registers for signal handler */ 957 env->regs[R_ESP] = frame_addr; 958 env->eip = ka->_sa_handler; 959 960 cpu_x86_load_seg(env, R_DS, __USER_DS); 961 cpu_x86_load_seg(env, R_ES, __USER_DS); 962 cpu_x86_load_seg(env, R_SS, __USER_DS); 963 cpu_x86_load_seg(env, R_CS, __USER_CS); 964 env->eflags &= ~TF_MASK; 965 966 unlock_user_struct(frame, frame_addr, 1); 967 968 return; 969 970 give_sigsegv: 971 unlock_user_struct(frame, frame_addr, 1); 972 if (sig == TARGET_SIGSEGV) 973 ka->_sa_handler = TARGET_SIG_DFL; 974 force_sig(TARGET_SIGSEGV /* , current */); 975 } 976 977 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */ 978 static void setup_rt_frame(int sig, struct target_sigaction *ka, 979 target_siginfo_t *info, 980 target_sigset_t *set, CPUX86State *env) 981 { 982 abi_ulong frame_addr, addr; 983 struct rt_sigframe *frame; 984 int i, err = 0; 985 986 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 987 988 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 989 goto give_sigsegv; 990 991 __put_user(current_exec_domain_sig(sig), &frame->sig); 992 addr = frame_addr + offsetof(struct rt_sigframe, info); 993 __put_user(addr, &frame->pinfo); 994 addr = frame_addr + offsetof(struct rt_sigframe, uc); 995 __put_user(addr, &frame->puc); 996 err |= copy_siginfo_to_user(&frame->info, info); 997 if (err) 998 goto give_sigsegv; 999 1000 /* Create the ucontext. */ 1001 __put_user(0, &frame->uc.tuc_flags); 1002 __put_user(0, &frame->uc.tuc_link); 1003 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 1004 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 1005 &frame->uc.tuc_stack.ss_flags); 1006 __put_user(target_sigaltstack_used.ss_size, 1007 &frame->uc.tuc_stack.ss_size); 1008 err |= setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, 1009 env, set->sig[0], 1010 frame_addr + offsetof(struct rt_sigframe, fpstate)); 1011 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1012 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) 1013 goto give_sigsegv; 1014 } 1015 1016 /* Set up to return from userspace. If provided, use a stub 1017 already in userspace. 
*/ 1018 if (ka->sa_flags & TARGET_SA_RESTORER) { 1019 __put_user(ka->sa_restorer, &frame->pretcode); 1020 } else { 1021 uint16_t val16; 1022 addr = frame_addr + offsetof(struct rt_sigframe, retcode); 1023 __put_user(addr, &frame->pretcode); 1024 /* This is movl $,%eax ; int $0x80 */ 1025 __put_user(0xb8, (char *)(frame->retcode+0)); 1026 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); 1027 val16 = 0x80cd; 1028 __put_user(val16, (uint16_t *)(frame->retcode+5)); 1029 } 1030 1031 if (err) 1032 goto give_sigsegv; 1033 1034 /* Set up registers for signal handler */ 1035 env->regs[R_ESP] = frame_addr; 1036 env->eip = ka->_sa_handler; 1037 1038 cpu_x86_load_seg(env, R_DS, __USER_DS); 1039 cpu_x86_load_seg(env, R_ES, __USER_DS); 1040 cpu_x86_load_seg(env, R_SS, __USER_DS); 1041 cpu_x86_load_seg(env, R_CS, __USER_CS); 1042 env->eflags &= ~TF_MASK; 1043 1044 unlock_user_struct(frame, frame_addr, 1); 1045 1046 return; 1047 1048 give_sigsegv: 1049 unlock_user_struct(frame, frame_addr, 1); 1050 if (sig == TARGET_SIGSEGV) 1051 ka->_sa_handler = TARGET_SIG_DFL; 1052 force_sig(TARGET_SIGSEGV /* , current */); 1053 } 1054 1055 static int 1056 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax) 1057 { 1058 unsigned int err = 0; 1059 abi_ulong fpstate_addr; 1060 unsigned int tmpflags; 1061 1062 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); 1063 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); 1064 cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); 1065 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds)); 1066 1067 env->regs[R_EDI] = tswapl(sc->edi); 1068 env->regs[R_ESI] = tswapl(sc->esi); 1069 env->regs[R_EBP] = tswapl(sc->ebp); 1070 env->regs[R_ESP] = tswapl(sc->esp); 1071 env->regs[R_EBX] = tswapl(sc->ebx); 1072 env->regs[R_EDX] = tswapl(sc->edx); 1073 env->regs[R_ECX] = tswapl(sc->ecx); 1074 env->eip = tswapl(sc->eip); 1075 1076 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); 1077 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); 1078 1079 tmpflags = tswapl(sc->eflags); 1080 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 1081 // regs->orig_eax = -1; /* disable syscall checks */ 1082 1083 fpstate_addr = tswapl(sc->fpstate); 1084 if (fpstate_addr != 0) { 1085 if (!access_ok(VERIFY_READ, fpstate_addr, 1086 sizeof(struct target_fpstate))) 1087 goto badframe; 1088 cpu_x86_frstor(env, fpstate_addr, 1); 1089 } 1090 1091 *peax = tswapl(sc->eax); 1092 return err; 1093 badframe: 1094 return 1; 1095 } 1096 1097 long do_sigreturn(CPUX86State *env) 1098 { 1099 struct sigframe *frame; 1100 abi_ulong frame_addr = env->regs[R_ESP] - 8; 1101 target_sigset_t target_set; 1102 sigset_t set; 1103 int eax, i; 1104 1105 #if defined(DEBUG_SIGNAL) 1106 fprintf(stderr, "do_sigreturn\n"); 1107 #endif 1108 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1109 goto badframe; 1110 /* set blocked signals */ 1111 if (__get_user(target_set.sig[0], &frame->sc.oldmask)) 1112 goto badframe; 1113 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1114 if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) 1115 goto badframe; 1116 } 1117 1118 target_to_host_sigset_internal(&set, &target_set); 1119 do_sigprocmask(SIG_SETMASK, &set, NULL); 1120 1121 /* restore registers */ 1122 if (restore_sigcontext(env, &frame->sc, &eax)) 1123 goto badframe; 1124 unlock_user_struct(frame, frame_addr, 0); 1125 return eax; 1126 1127 badframe: 1128 unlock_user_struct(frame, frame_addr, 0); 1129 force_sig(TARGET_SIGSEGV); 1130 return 0; 1131 } 1132 1133 long do_rt_sigreturn(CPUX86State *env) 1134 { 1135 abi_ulong 
frame_addr; 1136 struct rt_sigframe *frame; 1137 sigset_t set; 1138 int eax; 1139 1140 frame_addr = env->regs[R_ESP] - 4; 1141 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1142 goto badframe; 1143 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 1144 do_sigprocmask(SIG_SETMASK, &set, NULL); 1145 1146 if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax)) 1147 goto badframe; 1148 1149 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, 1150 get_sp_from_cpustate(env)) == -EFAULT) 1151 goto badframe; 1152 1153 unlock_user_struct(frame, frame_addr, 0); 1154 return eax; 1155 1156 badframe: 1157 unlock_user_struct(frame, frame_addr, 0); 1158 force_sig(TARGET_SIGSEGV); 1159 return 0; 1160 } 1161 1162 #elif defined(TARGET_AARCH64) 1163 1164 struct target_sigcontext { 1165 uint64_t fault_address; 1166 /* AArch64 registers */ 1167 uint64_t regs[31]; 1168 uint64_t sp; 1169 uint64_t pc; 1170 uint64_t pstate; 1171 /* 4K reserved for FP/SIMD state and future expansion */ 1172 char __reserved[4096] __attribute__((__aligned__(16))); 1173 }; 1174 1175 struct target_ucontext { 1176 abi_ulong tuc_flags; 1177 abi_ulong tuc_link; 1178 target_stack_t tuc_stack; 1179 target_sigset_t tuc_sigmask; 1180 /* glibc uses a 1024-bit sigset_t */ 1181 char __unused[1024 / 8 - sizeof(target_sigset_t)]; 1182 /* last for future expansion */ 1183 struct target_sigcontext tuc_mcontext; 1184 }; 1185 1186 /* 1187 * Header to be used at the beginning of structures extending the user 1188 * context. Such structures must be placed after the rt_sigframe on the stack 1189 * and be 16-byte aligned. The last structure must be a dummy one with the 1190 * magic and size set to 0. 1191 */ 1192 struct target_aarch64_ctx { 1193 uint32_t magic; 1194 uint32_t size; 1195 }; 1196 1197 #define TARGET_FPSIMD_MAGIC 0x46508001 1198 1199 struct target_fpsimd_context { 1200 struct target_aarch64_ctx head; 1201 uint32_t fpsr; 1202 uint32_t fpcr; 1203 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */ 1204 }; 1205 1206 /* 1207 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to 1208 * user space as it will change with the addition of new context. User space 1209 * should check the magic/size information. 
1210 */ 1211 struct target_aux_context { 1212 struct target_fpsimd_context fpsimd; 1213 /* additional context to be added before "end" */ 1214 struct target_aarch64_ctx end; 1215 }; 1216 1217 struct target_rt_sigframe { 1218 struct target_siginfo info; 1219 struct target_ucontext uc; 1220 uint64_t fp; 1221 uint64_t lr; 1222 uint32_t tramp[2]; 1223 }; 1224 1225 static int target_setup_sigframe(struct target_rt_sigframe *sf, 1226 CPUARMState *env, target_sigset_t *set) 1227 { 1228 int i; 1229 struct target_aux_context *aux = 1230 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1231 1232 /* set up the stack frame for unwinding */ 1233 __put_user(env->xregs[29], &sf->fp); 1234 __put_user(env->xregs[30], &sf->lr); 1235 1236 for (i = 0; i < 31; i++) { 1237 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1238 } 1239 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1240 __put_user(env->pc, &sf->uc.tuc_mcontext.pc); 1241 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate); 1242 1243 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address); 1244 1245 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 1246 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]); 1247 } 1248 1249 for (i = 0; i < 32; i++) { 1250 #ifdef TARGET_WORDS_BIGENDIAN 1251 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1252 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1253 #else 1254 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1255 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1256 #endif 1257 } 1258 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr); 1259 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr); 1260 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic); 1261 __put_user(sizeof(struct target_fpsimd_context), 1262 &aux->fpsimd.head.size); 1263 1264 /* set the "end" magic */ 1265 __put_user(0, &aux->end.magic); 1266 __put_user(0, &aux->end.size); 1267 1268 return 0; 1269 } 1270 1271 static int target_restore_sigframe(CPUARMState *env, 1272 struct target_rt_sigframe *sf) 1273 { 1274 sigset_t set; 1275 int i; 1276 struct target_aux_context *aux = 1277 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved; 1278 uint32_t magic, size, fpsr, fpcr; 1279 uint64_t pstate; 1280 1281 target_to_host_sigset(&set, &sf->uc.tuc_sigmask); 1282 do_sigprocmask(SIG_SETMASK, &set, NULL); 1283 1284 for (i = 0; i < 31; i++) { 1285 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1286 } 1287 1288 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1289 __get_user(env->pc, &sf->uc.tuc_mcontext.pc); 1290 __get_user(pstate, &sf->uc.tuc_mcontext.pstate); 1291 pstate_write(env, pstate); 1292 1293 __get_user(magic, &aux->fpsimd.head.magic); 1294 __get_user(size, &aux->fpsimd.head.size); 1295 1296 if (magic != TARGET_FPSIMD_MAGIC 1297 || size != sizeof(struct target_fpsimd_context)) { 1298 return 1; 1299 } 1300 1301 for (i = 0; i < 32; i++) { 1302 #ifdef TARGET_WORDS_BIGENDIAN 1303 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]); 1304 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]); 1305 #else 1306 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]); 1307 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]); 1308 #endif 1309 } 1310 __get_user(fpsr, &aux->fpsimd.fpsr); 1311 vfp_set_fpsr(env, fpsr); 1312 __get_user(fpcr, &aux->fpsimd.fpcr); 1313 vfp_set_fpcr(env, fpcr); 1314 1315 return 0; 1316 } 1317 1318 static abi_ulong get_sigframe(struct target_sigaction *ka, 
CPUARMState *env) 1319 { 1320 abi_ulong sp; 1321 1322 sp = env->xregs[31]; 1323 1324 /* 1325 * This is the X/Open sanctioned signal stack switching. 1326 */ 1327 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 1328 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1329 } 1330 1331 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15; 1332 1333 return sp; 1334 } 1335 1336 static void target_setup_frame(int usig, struct target_sigaction *ka, 1337 target_siginfo_t *info, target_sigset_t *set, 1338 CPUARMState *env) 1339 { 1340 struct target_rt_sigframe *frame; 1341 abi_ulong frame_addr, return_addr; 1342 1343 frame_addr = get_sigframe(ka, env); 1344 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1345 goto give_sigsegv; 1346 } 1347 1348 __put_user(0, &frame->uc.tuc_flags); 1349 __put_user(0, &frame->uc.tuc_link); 1350 1351 __put_user(target_sigaltstack_used.ss_sp, 1352 &frame->uc.tuc_stack.ss_sp); 1353 __put_user(sas_ss_flags(env->xregs[31]), 1354 &frame->uc.tuc_stack.ss_flags); 1355 __put_user(target_sigaltstack_used.ss_size, 1356 &frame->uc.tuc_stack.ss_size); 1357 target_setup_sigframe(frame, env, set); 1358 if (ka->sa_flags & TARGET_SA_RESTORER) { 1359 return_addr = ka->sa_restorer; 1360 } else { 1361 /* mov x8,#__NR_rt_sigreturn; svc #0 */ 1362 __put_user(0xd2801168, &frame->tramp[0]); 1363 __put_user(0xd4000001, &frame->tramp[1]); 1364 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp); 1365 } 1366 env->xregs[0] = usig; 1367 env->xregs[31] = frame_addr; 1368 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp); 1369 env->pc = ka->_sa_handler; 1370 env->xregs[30] = return_addr; 1371 if (info) { 1372 if (copy_siginfo_to_user(&frame->info, info)) { 1373 goto give_sigsegv; 1374 } 1375 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); 1376 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 1377 } 1378 1379 unlock_user_struct(frame, frame_addr, 1); 1380 return; 1381 1382 give_sigsegv: 1383 unlock_user_struct(frame, frame_addr, 1); 1384 force_sig(TARGET_SIGSEGV); 1385 } 1386 1387 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1388 target_siginfo_t *info, target_sigset_t *set, 1389 CPUARMState *env) 1390 { 1391 target_setup_frame(sig, ka, info, set, env); 1392 } 1393 1394 static void setup_frame(int sig, struct target_sigaction *ka, 1395 target_sigset_t *set, CPUARMState *env) 1396 { 1397 target_setup_frame(sig, ka, 0, set, env); 1398 } 1399 1400 long do_rt_sigreturn(CPUARMState *env) 1401 { 1402 struct target_rt_sigframe *frame = NULL; 1403 abi_ulong frame_addr = env->xregs[31]; 1404 1405 if (frame_addr & 15) { 1406 goto badframe; 1407 } 1408 1409 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1410 goto badframe; 1411 } 1412 1413 if (target_restore_sigframe(env, frame)) { 1414 goto badframe; 1415 } 1416 1417 if (do_sigaltstack(frame_addr + 1418 offsetof(struct target_rt_sigframe, uc.tuc_stack), 1419 0, get_sp_from_cpustate(env)) == -EFAULT) { 1420 goto badframe; 1421 } 1422 1423 unlock_user_struct(frame, frame_addr, 0); 1424 return env->xregs[0]; 1425 1426 badframe: 1427 unlock_user_struct(frame, frame_addr, 0); 1428 force_sig(TARGET_SIGSEGV); 1429 return 0; 1430 } 1431 1432 long do_sigreturn(CPUARMState *env) 1433 { 1434 return do_rt_sigreturn(env); 1435 } 1436 1437 #elif defined(TARGET_ARM) 1438 1439 struct target_sigcontext { 1440 abi_ulong trap_no; 1441 abi_ulong error_code; 1442 abi_ulong oldmask; 1443 abi_ulong arm_r0; 1444 abi_ulong 
arm_r1; 1445 abi_ulong arm_r2; 1446 abi_ulong arm_r3; 1447 abi_ulong arm_r4; 1448 abi_ulong arm_r5; 1449 abi_ulong arm_r6; 1450 abi_ulong arm_r7; 1451 abi_ulong arm_r8; 1452 abi_ulong arm_r9; 1453 abi_ulong arm_r10; 1454 abi_ulong arm_fp; 1455 abi_ulong arm_ip; 1456 abi_ulong arm_sp; 1457 abi_ulong arm_lr; 1458 abi_ulong arm_pc; 1459 abi_ulong arm_cpsr; 1460 abi_ulong fault_address; 1461 }; 1462 1463 struct target_ucontext_v1 { 1464 abi_ulong tuc_flags; 1465 abi_ulong tuc_link; 1466 target_stack_t tuc_stack; 1467 struct target_sigcontext tuc_mcontext; 1468 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1469 }; 1470 1471 struct target_ucontext_v2 { 1472 abi_ulong tuc_flags; 1473 abi_ulong tuc_link; 1474 target_stack_t tuc_stack; 1475 struct target_sigcontext tuc_mcontext; 1476 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1477 char __unused[128 - sizeof(target_sigset_t)]; 1478 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8))); 1479 }; 1480 1481 struct target_user_vfp { 1482 uint64_t fpregs[32]; 1483 abi_ulong fpscr; 1484 }; 1485 1486 struct target_user_vfp_exc { 1487 abi_ulong fpexc; 1488 abi_ulong fpinst; 1489 abi_ulong fpinst2; 1490 }; 1491 1492 struct target_vfp_sigframe { 1493 abi_ulong magic; 1494 abi_ulong size; 1495 struct target_user_vfp ufp; 1496 struct target_user_vfp_exc ufp_exc; 1497 } __attribute__((__aligned__(8))); 1498 1499 struct target_iwmmxt_sigframe { 1500 abi_ulong magic; 1501 abi_ulong size; 1502 uint64_t regs[16]; 1503 /* Note that not all the coprocessor control registers are stored here */ 1504 uint32_t wcssf; 1505 uint32_t wcasf; 1506 uint32_t wcgr0; 1507 uint32_t wcgr1; 1508 uint32_t wcgr2; 1509 uint32_t wcgr3; 1510 } __attribute__((__aligned__(8))); 1511 1512 #define TARGET_VFP_MAGIC 0x56465001 1513 #define TARGET_IWMMXT_MAGIC 0x12ef842a 1514 1515 struct sigframe_v1 1516 { 1517 struct target_sigcontext sc; 1518 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 1519 abi_ulong retcode; 1520 }; 1521 1522 struct sigframe_v2 1523 { 1524 struct target_ucontext_v2 uc; 1525 abi_ulong retcode; 1526 }; 1527 1528 struct rt_sigframe_v1 1529 { 1530 abi_ulong pinfo; 1531 abi_ulong puc; 1532 struct target_siginfo info; 1533 struct target_ucontext_v1 uc; 1534 abi_ulong retcode; 1535 }; 1536 1537 struct rt_sigframe_v2 1538 { 1539 struct target_siginfo info; 1540 struct target_ucontext_v2 uc; 1541 abi_ulong retcode; 1542 }; 1543 1544 #define TARGET_CONFIG_CPU_32 1 1545 1546 /* 1547 * For ARM syscalls, we encode the syscall number into the instruction. 1548 */ 1549 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) 1550 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) 1551 1552 /* 1553 * For Thumb syscalls, we pass the syscall number via r7. We therefore 1554 * need two 16-bit instructions. 
1555 */ 1556 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 1557 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 1558 1559 static const abi_ulong retcodes[4] = { 1560 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 1561 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 1562 }; 1563 1564 1565 static inline int valid_user_regs(CPUARMState *regs) 1566 { 1567 return 1; 1568 } 1569 1570 static void 1571 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1572 CPUARMState *env, abi_ulong mask) 1573 { 1574 __put_user(env->regs[0], &sc->arm_r0); 1575 __put_user(env->regs[1], &sc->arm_r1); 1576 __put_user(env->regs[2], &sc->arm_r2); 1577 __put_user(env->regs[3], &sc->arm_r3); 1578 __put_user(env->regs[4], &sc->arm_r4); 1579 __put_user(env->regs[5], &sc->arm_r5); 1580 __put_user(env->regs[6], &sc->arm_r6); 1581 __put_user(env->regs[7], &sc->arm_r7); 1582 __put_user(env->regs[8], &sc->arm_r8); 1583 __put_user(env->regs[9], &sc->arm_r9); 1584 __put_user(env->regs[10], &sc->arm_r10); 1585 __put_user(env->regs[11], &sc->arm_fp); 1586 __put_user(env->regs[12], &sc->arm_ip); 1587 __put_user(env->regs[13], &sc->arm_sp); 1588 __put_user(env->regs[14], &sc->arm_lr); 1589 __put_user(env->regs[15], &sc->arm_pc); 1590 #ifdef TARGET_CONFIG_CPU_32 1591 __put_user(cpsr_read(env), &sc->arm_cpsr); 1592 #endif 1593 1594 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no); 1595 __put_user(/* current->thread.error_code */ 0, &sc->error_code); 1596 __put_user(/* current->thread.address */ 0, &sc->fault_address); 1597 __put_user(mask, &sc->oldmask); 1598 } 1599 1600 static inline abi_ulong 1601 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) 1602 { 1603 unsigned long sp = regs->regs[13]; 1604 1605 /* 1606 * This is the X/Open sanctioned signal stack switching. 1607 */ 1608 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) 1609 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1610 /* 1611 * ATPCS B01 mandates 8-byte alignment 1612 */ 1613 return (sp - framesize) & ~7; 1614 } 1615 1616 static int 1617 setup_return(CPUARMState *env, struct target_sigaction *ka, 1618 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) 1619 { 1620 abi_ulong handler = ka->_sa_handler; 1621 abi_ulong retcode; 1622 int thumb = handler & 1; 1623 uint32_t cpsr = cpsr_read(env); 1624 1625 cpsr &= ~CPSR_IT; 1626 if (thumb) { 1627 cpsr |= CPSR_T; 1628 } else { 1629 cpsr &= ~CPSR_T; 1630 } 1631 1632 if (ka->sa_flags & TARGET_SA_RESTORER) { 1633 retcode = ka->sa_restorer; 1634 } else { 1635 unsigned int idx = thumb; 1636 1637 if (ka->sa_flags & TARGET_SA_SIGINFO) 1638 idx += 2; 1639 1640 if (__put_user(retcodes[idx], rc)) 1641 return 1; 1642 1643 retcode = rc_addr + thumb; 1644 } 1645 1646 env->regs[0] = usig; 1647 env->regs[13] = frame_addr; 1648 env->regs[14] = retcode; 1649 env->regs[15] = handler & (thumb ? 
                                             ~1 : ~3);
    cpsr_write(env, cpsr, 0xffffffff);

    return 0;
}

static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe+1);
}

static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.
*/ 1710 regspace = uc->tuc_regspace; 1711 if (arm_feature(env, ARM_FEATURE_VFP)) { 1712 regspace = setup_sigframe_v2_vfp(regspace, env); 1713 } 1714 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 1715 regspace = setup_sigframe_v2_iwmmxt(regspace, env); 1716 } 1717 1718 /* Write terminating magic word */ 1719 __put_user(0, regspace); 1720 1721 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1722 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]); 1723 } 1724 } 1725 1726 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 1727 static void setup_frame_v1(int usig, struct target_sigaction *ka, 1728 target_sigset_t *set, CPUARMState *regs) 1729 { 1730 struct sigframe_v1 *frame; 1731 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1732 int i; 1733 1734 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1735 return; 1736 1737 setup_sigcontext(&frame->sc, regs, set->sig[0]); 1738 1739 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1740 if (__put_user(set->sig[i], &frame->extramask[i - 1])) 1741 goto end; 1742 } 1743 1744 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1745 frame_addr + offsetof(struct sigframe_v1, retcode)); 1746 1747 end: 1748 unlock_user_struct(frame, frame_addr, 1); 1749 } 1750 1751 static void setup_frame_v2(int usig, struct target_sigaction *ka, 1752 target_sigset_t *set, CPUARMState *regs) 1753 { 1754 struct sigframe_v2 *frame; 1755 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1756 1757 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1758 return; 1759 1760 setup_sigframe_v2(&frame->uc, set, regs); 1761 1762 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1763 frame_addr + offsetof(struct sigframe_v2, retcode)); 1764 1765 unlock_user_struct(frame, frame_addr, 1); 1766 } 1767 1768 static void setup_frame(int usig, struct target_sigaction *ka, 1769 target_sigset_t *set, CPUARMState *regs) 1770 { 1771 if (get_osversion() >= 0x020612) { 1772 setup_frame_v2(usig, ka, set, regs); 1773 } else { 1774 setup_frame_v1(usig, ka, set, regs); 1775 } 1776 } 1777 1778 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 1779 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, 1780 target_siginfo_t *info, 1781 target_sigset_t *set, CPUARMState *env) 1782 { 1783 struct rt_sigframe_v1 *frame; 1784 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1785 struct target_sigaltstack stack; 1786 int i; 1787 abi_ulong info_addr, uc_addr; 1788 1789 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1790 return /* 1 */; 1791 1792 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); 1793 __put_user(info_addr, &frame->pinfo); 1794 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); 1795 __put_user(uc_addr, &frame->puc); 1796 copy_siginfo_to_user(&frame->info, info); 1797 1798 /* Clear all the bits of the ucontext we don't use. 
*/ 1799 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 1800 1801 memset(&stack, 0, sizeof(stack)); 1802 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1803 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1804 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1805 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1806 1807 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 1808 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1809 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) 1810 goto end; 1811 } 1812 1813 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1814 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 1815 1816 env->regs[1] = info_addr; 1817 env->regs[2] = uc_addr; 1818 1819 end: 1820 unlock_user_struct(frame, frame_addr, 1); 1821 } 1822 1823 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 1824 target_siginfo_t *info, 1825 target_sigset_t *set, CPUARMState *env) 1826 { 1827 struct rt_sigframe_v2 *frame; 1828 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1829 abi_ulong info_addr, uc_addr; 1830 1831 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1832 return /* 1 */; 1833 1834 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 1835 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 1836 copy_siginfo_to_user(&frame->info, info); 1837 1838 setup_sigframe_v2(&frame->uc, set, env); 1839 1840 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1841 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 1842 1843 env->regs[1] = info_addr; 1844 env->regs[2] = uc_addr; 1845 1846 unlock_user_struct(frame, frame_addr, 1); 1847 } 1848 1849 static void setup_rt_frame(int usig, struct target_sigaction *ka, 1850 target_siginfo_t *info, 1851 target_sigset_t *set, CPUARMState *env) 1852 { 1853 if (get_osversion() >= 0x020612) { 1854 setup_rt_frame_v2(usig, ka, info, set, env); 1855 } else { 1856 setup_rt_frame_v1(usig, ka, info, set, env); 1857 } 1858 } 1859 1860 static int 1861 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 1862 { 1863 int err = 0; 1864 uint32_t cpsr; 1865 1866 __get_user(env->regs[0], &sc->arm_r0); 1867 __get_user(env->regs[1], &sc->arm_r1); 1868 __get_user(env->regs[2], &sc->arm_r2); 1869 __get_user(env->regs[3], &sc->arm_r3); 1870 __get_user(env->regs[4], &sc->arm_r4); 1871 __get_user(env->regs[5], &sc->arm_r5); 1872 __get_user(env->regs[6], &sc->arm_r6); 1873 __get_user(env->regs[7], &sc->arm_r7); 1874 __get_user(env->regs[8], &sc->arm_r8); 1875 __get_user(env->regs[9], &sc->arm_r9); 1876 __get_user(env->regs[10], &sc->arm_r10); 1877 __get_user(env->regs[11], &sc->arm_fp); 1878 __get_user(env->regs[12], &sc->arm_ip); 1879 __get_user(env->regs[13], &sc->arm_sp); 1880 __get_user(env->regs[14], &sc->arm_lr); 1881 __get_user(env->regs[15], &sc->arm_pc); 1882 #ifdef TARGET_CONFIG_CPU_32 1883 __get_user(cpsr, &sc->arm_cpsr); 1884 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC); 1885 #endif 1886 1887 err |= !valid_user_regs(env); 1888 1889 return err; 1890 } 1891 1892 static long do_sigreturn_v1(CPUARMState *env) 1893 { 1894 abi_ulong frame_addr; 1895 struct sigframe_v1 *frame = NULL; 1896 target_sigset_t set; 1897 sigset_t host_set; 1898 int i; 1899 1900 /* 1901 * Since we stacked the signal on a 64-bit boundary, 1902 * then 'sp' should be word aligned here. If it's 1903 * not, then the user is trying to mess with us. 
     */
    frame_addr = env->regs[13];
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    if (__get_user(set.sig[0], &frame->sc.oldmask))
        goto badframe;
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        if (__get_user(set.sig[i], &frame->extramask[i - 1]))
            goto badframe;
    }

    target_to_host_sigset_internal(&host_set, &set);
    do_sigprocmask(SIG_SETMASK, &host_set, NULL);

    if (restore_sigcontext(env, &frame->sc))
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return env->regs[0];

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}

static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}

static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
                                             abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;

    __get_user(magic, &iwmmxtframe->magic);
    __get_user(sz, &iwmmxtframe->size);
    if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
        return 0;
    }
    for (i = 0; i < 16; i++) {
        __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe + 1);
}

static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &host_set, NULL);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
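        /* Each coprocessor block starts with a magic/size header; the
         * restore helpers return NULL on a mismatch so the frame is
         * rejected just below. */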
regspace = restore_sigframe_v2_vfp(env, regspace); 2011 if (!regspace) { 2012 return 1; 2013 } 2014 } 2015 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2016 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2017 if (!regspace) { 2018 return 1; 2019 } 2020 } 2021 2022 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2023 return 1; 2024 2025 #if 0 2026 /* Send SIGTRAP if we're single-stepping */ 2027 if (ptrace_cancel_bpt(current)) 2028 send_sig(SIGTRAP, current, 1); 2029 #endif 2030 2031 return 0; 2032 } 2033 2034 static long do_sigreturn_v2(CPUARMState *env) 2035 { 2036 abi_ulong frame_addr; 2037 struct sigframe_v2 *frame = NULL; 2038 2039 /* 2040 * Since we stacked the signal on a 64-bit boundary, 2041 * then 'sp' should be word aligned here. If it's 2042 * not, then the user is trying to mess with us. 2043 */ 2044 frame_addr = env->regs[13]; 2045 if (frame_addr & 7) { 2046 goto badframe; 2047 } 2048 2049 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 2050 goto badframe; 2051 2052 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) 2053 goto badframe; 2054 2055 unlock_user_struct(frame, frame_addr, 0); 2056 return env->regs[0]; 2057 2058 badframe: 2059 unlock_user_struct(frame, frame_addr, 0); 2060 force_sig(TARGET_SIGSEGV /* , current */); 2061 return 0; 2062 } 2063 2064 long do_sigreturn(CPUARMState *env) 2065 { 2066 if (get_osversion() >= 0x020612) { 2067 return do_sigreturn_v2(env); 2068 } else { 2069 return do_sigreturn_v1(env); 2070 } 2071 } 2072 2073 static long do_rt_sigreturn_v1(CPUARMState *env) 2074 { 2075 abi_ulong frame_addr; 2076 struct rt_sigframe_v1 *frame = NULL; 2077 sigset_t host_set; 2078 2079 /* 2080 * Since we stacked the signal on a 64-bit boundary, 2081 * then 'sp' should be word aligned here. If it's 2082 * not, then the user is trying to mess with us. 2083 */ 2084 frame_addr = env->regs[13]; 2085 if (frame_addr & 7) { 2086 goto badframe; 2087 } 2088 2089 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 2090 goto badframe; 2091 2092 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2093 do_sigprocmask(SIG_SETMASK, &host_set, NULL); 2094 2095 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) 2096 goto badframe; 2097 2098 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2099 goto badframe; 2100 2101 #if 0 2102 /* Send SIGTRAP if we're single-stepping */ 2103 if (ptrace_cancel_bpt(current)) 2104 send_sig(SIGTRAP, current, 1); 2105 #endif 2106 unlock_user_struct(frame, frame_addr, 0); 2107 return env->regs[0]; 2108 2109 badframe: 2110 unlock_user_struct(frame, frame_addr, 0); 2111 force_sig(TARGET_SIGSEGV /* , current */); 2112 return 0; 2113 } 2114 2115 static long do_rt_sigreturn_v2(CPUARMState *env) 2116 { 2117 abi_ulong frame_addr; 2118 struct rt_sigframe_v2 *frame = NULL; 2119 2120 /* 2121 * Since we stacked the signal on a 64-bit boundary, 2122 * then 'sp' should be word aligned here. If it's 2123 * not, then the user is trying to mess with us. 
2124 */ 2125 frame_addr = env->regs[13]; 2126 if (frame_addr & 7) { 2127 goto badframe; 2128 } 2129 2130 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 2131 goto badframe; 2132 2133 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) 2134 goto badframe; 2135 2136 unlock_user_struct(frame, frame_addr, 0); 2137 return env->regs[0]; 2138 2139 badframe: 2140 unlock_user_struct(frame, frame_addr, 0); 2141 force_sig(TARGET_SIGSEGV /* , current */); 2142 return 0; 2143 } 2144 2145 long do_rt_sigreturn(CPUARMState *env) 2146 { 2147 if (get_osversion() >= 0x020612) { 2148 return do_rt_sigreturn_v2(env); 2149 } else { 2150 return do_rt_sigreturn_v1(env); 2151 } 2152 } 2153 2154 #elif defined(TARGET_SPARC) 2155 2156 #define __SUNOS_MAXWIN 31 2157 2158 /* This is what SunOS does, so shall I. */ 2159 struct target_sigcontext { 2160 abi_ulong sigc_onstack; /* state to restore */ 2161 2162 abi_ulong sigc_mask; /* sigmask to restore */ 2163 abi_ulong sigc_sp; /* stack pointer */ 2164 abi_ulong sigc_pc; /* program counter */ 2165 abi_ulong sigc_npc; /* next program counter */ 2166 abi_ulong sigc_psr; /* for condition codes etc */ 2167 abi_ulong sigc_g1; /* User uses these two registers */ 2168 abi_ulong sigc_o0; /* within the trampoline code. */ 2169 2170 /* Now comes information regarding the users window set 2171 * at the time of the signal. 2172 */ 2173 abi_ulong sigc_oswins; /* outstanding windows */ 2174 2175 /* stack ptrs for each regwin buf */ 2176 char *sigc_spbuf[__SUNOS_MAXWIN]; 2177 2178 /* Windows to restore after signal */ 2179 struct { 2180 abi_ulong locals[8]; 2181 abi_ulong ins[8]; 2182 } sigc_wbuf[__SUNOS_MAXWIN]; 2183 }; 2184 /* A Sparc stack frame */ 2185 struct sparc_stackf { 2186 abi_ulong locals[8]; 2187 abi_ulong ins[8]; 2188 /* It's simpler to treat fp and callers_pc as elements of ins[] 2189 * since we never need to access them ourselves. 
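 * By SPARC convention ins[6] holds the saved frame pointer (%i6/%fp) and
 * ins[7] the caller's return address (%i7); the register-window code
 * later in this file reads and writes exactly those two slots.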
2190 */ 2191 char *structptr; 2192 abi_ulong xargs[6]; 2193 abi_ulong xxargs[1]; 2194 }; 2195 2196 typedef struct { 2197 struct { 2198 abi_ulong psr; 2199 abi_ulong pc; 2200 abi_ulong npc; 2201 abi_ulong y; 2202 abi_ulong u_regs[16]; /* globals and ins */ 2203 } si_regs; 2204 int si_mask; 2205 } __siginfo_t; 2206 2207 typedef struct { 2208 abi_ulong si_float_regs[32]; 2209 unsigned long si_fsr; 2210 unsigned long si_fpqdepth; 2211 struct { 2212 unsigned long *insn_addr; 2213 unsigned long insn; 2214 } si_fpqueue [16]; 2215 } qemu_siginfo_fpu_t; 2216 2217 2218 struct target_signal_frame { 2219 struct sparc_stackf ss; 2220 __siginfo_t info; 2221 abi_ulong fpu_save; 2222 abi_ulong insns[2] __attribute__ ((aligned (8))); 2223 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2224 abi_ulong extra_size; /* Should be 0 */ 2225 qemu_siginfo_fpu_t fpu_state; 2226 }; 2227 struct target_rt_signal_frame { 2228 struct sparc_stackf ss; 2229 siginfo_t info; 2230 abi_ulong regs[20]; 2231 sigset_t mask; 2232 abi_ulong fpu_save; 2233 unsigned int insns[2]; 2234 stack_t stack; 2235 unsigned int extra_size; /* Should be 0 */ 2236 qemu_siginfo_fpu_t fpu_state; 2237 }; 2238 2239 #define UREG_O0 16 2240 #define UREG_O6 22 2241 #define UREG_I0 0 2242 #define UREG_I1 1 2243 #define UREG_I2 2 2244 #define UREG_I3 3 2245 #define UREG_I4 4 2246 #define UREG_I5 5 2247 #define UREG_I6 6 2248 #define UREG_I7 7 2249 #define UREG_L0 8 2250 #define UREG_FP UREG_I6 2251 #define UREG_SP UREG_O6 2252 2253 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2254 CPUSPARCState *env, 2255 unsigned long framesize) 2256 { 2257 abi_ulong sp; 2258 2259 sp = env->regwptr[UREG_FP]; 2260 2261 /* This is the X/Open sanctioned signal stack switching. */ 2262 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2263 if (!on_sig_stack(sp) 2264 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) 2265 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2266 } 2267 return sp - framesize; 2268 } 2269 2270 static int 2271 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2272 { 2273 int err = 0, i; 2274 2275 __put_user(env->psr, &si->si_regs.psr); 2276 __put_user(env->pc, &si->si_regs.pc); 2277 __put_user(env->npc, &si->si_regs.npc); 2278 __put_user(env->y, &si->si_regs.y); 2279 for (i=0; i < 8; i++) { 2280 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2281 } 2282 for (i=0; i < 8; i++) { 2283 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2284 } 2285 __put_user(mask, &si->si_mask); 2286 return err; 2287 } 2288 2289 #if 0 2290 static int 2291 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2292 CPUSPARCState *env, unsigned long mask) 2293 { 2294 int err = 0; 2295 2296 __put_user(mask, &sc->sigc_mask); 2297 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2298 __put_user(env->pc, &sc->sigc_pc); 2299 __put_user(env->npc, &sc->sigc_npc); 2300 __put_user(env->psr, &sc->sigc_psr); 2301 __put_user(env->gregs[1], &sc->sigc_g1); 2302 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2303 2304 return err; 2305 } 2306 #endif 2307 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2308 2309 static void setup_frame(int sig, struct target_sigaction *ka, 2310 target_sigset_t *set, CPUSPARCState *env) 2311 { 2312 abi_ulong sf_addr; 2313 struct target_signal_frame *sf; 2314 int sigframe_size, err, i; 2315 2316 /* 1. 
Make sure everything is clean */ 2317 //synchronize_user_stack(); 2318 2319 sigframe_size = NF_ALIGNEDSZ; 2320 sf_addr = get_sigframe(ka, env, sigframe_size); 2321 2322 sf = lock_user(VERIFY_WRITE, sf_addr, 2323 sizeof(struct target_signal_frame), 0); 2324 if (!sf) 2325 goto sigsegv; 2326 2327 //fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]); 2328 #if 0 2329 if (invalid_frame_pointer(sf, sigframe_size)) 2330 goto sigill_and_return; 2331 #endif 2332 /* 2. Save the current process state */ 2333 err = setup___siginfo(&sf->info, env, set->sig[0]); 2334 __put_user(0, &sf->extra_size); 2335 2336 //save_fpu_state(regs, &sf->fpu_state); 2337 //__put_user(&sf->fpu_state, &sf->fpu_save); 2338 2339 __put_user(set->sig[0], &sf->info.si_mask); 2340 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2341 __put_user(set->sig[i + 1], &sf->extramask[i]); 2342 } 2343 2344 for (i = 0; i < 8; i++) { 2345 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2346 } 2347 for (i = 0; i < 8; i++) { 2348 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2349 } 2350 if (err) 2351 goto sigsegv; 2352 2353 /* 3. signal handler back-trampoline and parameters */ 2354 env->regwptr[UREG_FP] = sf_addr; 2355 env->regwptr[UREG_I0] = sig; 2356 env->regwptr[UREG_I1] = sf_addr + 2357 offsetof(struct target_signal_frame, info); 2358 env->regwptr[UREG_I2] = sf_addr + 2359 offsetof(struct target_signal_frame, info); 2360 2361 /* 4. signal handler */ 2362 env->pc = ka->_sa_handler; 2363 env->npc = (env->pc + 4); 2364 /* 5. return to kernel instructions */ 2365 if (ka->sa_restorer) 2366 env->regwptr[UREG_I7] = ka->sa_restorer; 2367 else { 2368 uint32_t val32; 2369 2370 env->regwptr[UREG_I7] = sf_addr + 2371 offsetof(struct target_signal_frame, insns) - 2 * 4; 2372 2373 /* mov __NR_sigreturn, %g1 */ 2374 val32 = 0x821020d8; 2375 __put_user(val32, &sf->insns[0]); 2376 2377 /* t 0x10 */ 2378 val32 = 0x91d02010; 2379 __put_user(val32, &sf->insns[1]); 2380 if (err) 2381 goto sigsegv; 2382 2383 /* Flush instruction space. 
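 * The two words stored just above are what a real kernel would flush from
 * the I-cache here; they decode as
 *     0x821020d8 = 0x82102000 | 0xd8    mov 0xd8, %g1   (0xd8 == 216 == __NR_sigreturn)
 *     0x91d02010                        t   0x10        (trap into the kernel)
 * so a handler that simply returns re-enters QEMU via do_sigreturn().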
*/ 2384 //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2385 // tb_flush(env); 2386 } 2387 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2388 return; 2389 #if 0 2390 sigill_and_return: 2391 force_sig(TARGET_SIGILL); 2392 #endif 2393 sigsegv: 2394 //fprintf(stderr, "force_sig\n"); 2395 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2396 force_sig(TARGET_SIGSEGV); 2397 } 2398 static inline int 2399 restore_fpu_state(CPUSPARCState *env, qemu_siginfo_fpu_t *fpu) 2400 { 2401 int err; 2402 #if 0 2403 #ifdef CONFIG_SMP 2404 if (current->flags & PF_USEDFPU) 2405 regs->psr &= ~PSR_EF; 2406 #else 2407 if (current == last_task_used_math) { 2408 last_task_used_math = 0; 2409 regs->psr &= ~PSR_EF; 2410 } 2411 #endif 2412 current->used_math = 1; 2413 current->flags &= ~PF_USEDFPU; 2414 #endif 2415 #if 0 2416 if (verify_area (VERIFY_READ, fpu, sizeof(*fpu))) 2417 return -EFAULT; 2418 #endif 2419 2420 /* XXX: incorrect */ 2421 err = copy_from_user(&env->fpr[0], fpu->si_float_regs[0], 2422 (sizeof(abi_ulong) * 32)); 2423 err |= __get_user(env->fsr, &fpu->si_fsr); 2424 #if 0 2425 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); 2426 if (current->thread.fpqdepth != 0) 2427 err |= __copy_from_user(¤t->thread.fpqueue[0], 2428 &fpu->si_fpqueue[0], 2429 ((sizeof(unsigned long) + 2430 (sizeof(unsigned long *)))*16)); 2431 #endif 2432 return err; 2433 } 2434 2435 2436 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2437 target_siginfo_t *info, 2438 target_sigset_t *set, CPUSPARCState *env) 2439 { 2440 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2441 } 2442 2443 long do_sigreturn(CPUSPARCState *env) 2444 { 2445 abi_ulong sf_addr; 2446 struct target_signal_frame *sf; 2447 uint32_t up_psr, pc, npc; 2448 target_sigset_t set; 2449 sigset_t host_set; 2450 int err=0, i; 2451 2452 sf_addr = env->regwptr[UREG_FP]; 2453 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) 2454 goto segv_and_exit; 2455 #if 0 2456 fprintf(stderr, "sigreturn\n"); 2457 fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]); 2458 #endif 2459 //cpu_dump_state(env, stderr, fprintf, 0); 2460 2461 /* 1. Make sure we are not getting garbage from the user */ 2462 2463 if (sf_addr & 3) 2464 goto segv_and_exit; 2465 2466 __get_user(pc, &sf->info.si_regs.pc); 2467 __get_user(npc, &sf->info.si_regs.npc); 2468 2469 if ((pc | npc) & 3) 2470 goto segv_and_exit; 2471 2472 /* 2. Restore the state */ 2473 __get_user(up_psr, &sf->info.si_regs.psr); 2474 2475 /* User can only change condition codes and FPU enabling in %psr. */ 2476 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2477 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2478 2479 env->pc = pc; 2480 env->npc = npc; 2481 __get_user(env->y, &sf->info.si_regs.y); 2482 for (i=0; i < 8; i++) { 2483 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2484 } 2485 for (i=0; i < 8; i++) { 2486 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2487 } 2488 2489 /* FIXME: implement FPU save/restore: 2490 * __get_user(fpu_save, &sf->fpu_save); 2491 * if (fpu_save) 2492 * err |= restore_fpu_state(env, fpu_save); 2493 */ 2494 2495 /* This is pretty much atomic, no amount locking would prevent 2496 * the races which exist anyways. 
2497 */ 2498 __get_user(set.sig[0], &sf->info.si_mask); 2499 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2500 __get_user(set.sig[i], &sf->extramask[i - 1]); 2501 } 2502 2503 target_to_host_sigset_internal(&host_set, &set); 2504 do_sigprocmask(SIG_SETMASK, &host_set, NULL); 2505 2506 if (err) 2507 goto segv_and_exit; 2508 unlock_user_struct(sf, sf_addr, 0); 2509 return env->regwptr[0]; 2510 2511 segv_and_exit: 2512 unlock_user_struct(sf, sf_addr, 0); 2513 force_sig(TARGET_SIGSEGV); 2514 } 2515 2516 long do_rt_sigreturn(CPUSPARCState *env) 2517 { 2518 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2519 return -TARGET_ENOSYS; 2520 } 2521 2522 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2523 #define MC_TSTATE 0 2524 #define MC_PC 1 2525 #define MC_NPC 2 2526 #define MC_Y 3 2527 #define MC_G1 4 2528 #define MC_G2 5 2529 #define MC_G3 6 2530 #define MC_G4 7 2531 #define MC_G5 8 2532 #define MC_G6 9 2533 #define MC_G7 10 2534 #define MC_O0 11 2535 #define MC_O1 12 2536 #define MC_O2 13 2537 #define MC_O3 14 2538 #define MC_O4 15 2539 #define MC_O5 16 2540 #define MC_O6 17 2541 #define MC_O7 18 2542 #define MC_NGREG 19 2543 2544 typedef abi_ulong target_mc_greg_t; 2545 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2546 2547 struct target_mc_fq { 2548 abi_ulong *mcfq_addr; 2549 uint32_t mcfq_insn; 2550 }; 2551 2552 struct target_mc_fpu { 2553 union { 2554 uint32_t sregs[32]; 2555 uint64_t dregs[32]; 2556 //uint128_t qregs[16]; 2557 } mcfpu_fregs; 2558 abi_ulong mcfpu_fsr; 2559 abi_ulong mcfpu_fprs; 2560 abi_ulong mcfpu_gsr; 2561 struct target_mc_fq *mcfpu_fq; 2562 unsigned char mcfpu_qcnt; 2563 unsigned char mcfpu_qentsz; 2564 unsigned char mcfpu_enab; 2565 }; 2566 typedef struct target_mc_fpu target_mc_fpu_t; 2567 2568 typedef struct { 2569 target_mc_gregset_t mc_gregs; 2570 target_mc_greg_t mc_fp; 2571 target_mc_greg_t mc_i7; 2572 target_mc_fpu_t mc_fpregs; 2573 } target_mcontext_t; 2574 2575 struct target_ucontext { 2576 struct target_ucontext *tuc_link; 2577 abi_ulong tuc_flags; 2578 target_sigset_t tuc_sigmask; 2579 target_mcontext_t tuc_mcontext; 2580 }; 2581 2582 /* A V9 register window */ 2583 struct target_reg_window { 2584 abi_ulong locals[8]; 2585 abi_ulong ins[8]; 2586 }; 2587 2588 #define TARGET_STACK_BIAS 2047 2589 2590 /* {set, get}context() needed for 64-bit SparcLinux userland. 
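 * A guest would typically exercise these through getcontext()/setcontext();
 * assuming the guest libc maps those onto the corresponding traps, the flow
 * is conceptually (guest-side sketch, not QEMU code):
 *
 *     static volatile int resumed;
 *     ...
 *     ucontext_t uc;
 *     getcontext(&uc);         // state captured by sparc64_get_context()
 *     if (!resumed) {
 *         resumed = 1;
 *         setcontext(&uc);     // sparc64_set_context() resumes execution
 *     }                        // right after the getcontext() call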
*/ 2591 void sparc64_set_context(CPUSPARCState *env) 2592 { 2593 abi_ulong ucp_addr; 2594 struct target_ucontext *ucp; 2595 target_mc_gregset_t *grp; 2596 abi_ulong pc, npc, tstate; 2597 abi_ulong fp, i7, w_addr; 2598 int err = 0; 2599 unsigned int i; 2600 2601 ucp_addr = env->regwptr[UREG_I0]; 2602 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) 2603 goto do_sigsegv; 2604 grp = &ucp->tuc_mcontext.mc_gregs; 2605 __get_user(pc, &((*grp)[MC_PC])); 2606 __get_user(npc, &((*grp)[MC_NPC])); 2607 if (err || ((pc | npc) & 3)) 2608 goto do_sigsegv; 2609 if (env->regwptr[UREG_I1]) { 2610 target_sigset_t target_set; 2611 sigset_t set; 2612 2613 if (TARGET_NSIG_WORDS == 1) { 2614 if (__get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0])) 2615 goto do_sigsegv; 2616 } else { 2617 abi_ulong *src, *dst; 2618 src = ucp->tuc_sigmask.sig; 2619 dst = target_set.sig; 2620 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2621 __get_user(*dst, src); 2622 } 2623 if (err) 2624 goto do_sigsegv; 2625 } 2626 target_to_host_sigset_internal(&set, &target_set); 2627 do_sigprocmask(SIG_SETMASK, &set, NULL); 2628 } 2629 env->pc = pc; 2630 env->npc = npc; 2631 __get_user(env->y, &((*grp)[MC_Y])); 2632 __get_user(tstate, &((*grp)[MC_TSTATE])); 2633 env->asi = (tstate >> 24) & 0xff; 2634 cpu_put_ccr(env, tstate >> 32); 2635 cpu_put_cwp64(env, tstate & 0x1f); 2636 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2637 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2638 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2639 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2640 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2641 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2642 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2643 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2644 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2645 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2646 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2647 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2648 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2649 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2650 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2651 2652 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2653 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2654 2655 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2656 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2657 abi_ulong) != 0) 2658 goto do_sigsegv; 2659 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2660 abi_ulong) != 0) 2661 goto do_sigsegv; 2662 /* FIXME this does not match how the kernel handles the FPU in 2663 * its sparc64_set_context implementation. 
In particular the FPU 2664 * is only restored if fenab is non-zero in: 2665 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2666 */ 2667 err |= __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2668 { 2669 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2670 for (i = 0; i < 64; i++, src++) { 2671 if (i & 1) { 2672 __get_user(env->fpr[i/2].l.lower, src); 2673 } else { 2674 __get_user(env->fpr[i/2].l.upper, src); 2675 } 2676 } 2677 } 2678 __get_user(env->fsr, 2679 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2680 __get_user(env->gsr, 2681 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2682 if (err) 2683 goto do_sigsegv; 2684 unlock_user_struct(ucp, ucp_addr, 0); 2685 return; 2686 do_sigsegv: 2687 unlock_user_struct(ucp, ucp_addr, 0); 2688 force_sig(TARGET_SIGSEGV); 2689 } 2690 2691 void sparc64_get_context(CPUSPARCState *env) 2692 { 2693 abi_ulong ucp_addr; 2694 struct target_ucontext *ucp; 2695 target_mc_gregset_t *grp; 2696 target_mcontext_t *mcp; 2697 abi_ulong fp, i7, w_addr; 2698 int err; 2699 unsigned int i; 2700 target_sigset_t target_set; 2701 sigset_t set; 2702 2703 ucp_addr = env->regwptr[UREG_I0]; 2704 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) 2705 goto do_sigsegv; 2706 2707 mcp = &ucp->tuc_mcontext; 2708 grp = &mcp->mc_gregs; 2709 2710 /* Skip over the trap instruction, first. */ 2711 env->pc = env->npc; 2712 env->npc += 4; 2713 2714 err = 0; 2715 2716 do_sigprocmask(0, NULL, &set); 2717 host_to_target_sigset_internal(&target_set, &set); 2718 if (TARGET_NSIG_WORDS == 1) { 2719 __put_user(target_set.sig[0], 2720 (abi_ulong *)&ucp->tuc_sigmask); 2721 } else { 2722 abi_ulong *src, *dst; 2723 src = target_set.sig; 2724 dst = ucp->tuc_sigmask.sig; 2725 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2726 __put_user(*src, dst); 2727 } 2728 if (err) 2729 goto do_sigsegv; 2730 } 2731 2732 /* XXX: tstate must be saved properly */ 2733 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2734 __put_user(env->pc, &((*grp)[MC_PC])); 2735 __put_user(env->npc, &((*grp)[MC_NPC])); 2736 __put_user(env->y, &((*grp)[MC_Y])); 2737 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2738 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2739 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2740 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2741 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2742 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2743 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2744 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2745 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2746 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2747 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2748 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2749 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2750 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2751 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2752 2753 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2754 fp = i7 = 0; 2755 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2756 abi_ulong) != 0) 2757 goto do_sigsegv; 2758 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2759 abi_ulong) != 0) 2760 goto do_sigsegv; 2761 __put_user(fp, &(mcp->mc_fp)); 2762 __put_user(i7, &(mcp->mc_i7)); 2763 2764 { 2765 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2766 for (i = 0; i < 64; i++, dst++) { 2767 if (i & 1) { 2768 __put_user(env->fpr[i/2].l.lower, dst); 2769 } else { 2770 __put_user(env->fpr[i/2].l.upper, dst); 
2771 } 2772 } 2773 } 2774 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2775 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2776 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2777 2778 if (err) 2779 goto do_sigsegv; 2780 unlock_user_struct(ucp, ucp_addr, 1); 2781 return; 2782 do_sigsegv: 2783 unlock_user_struct(ucp, ucp_addr, 1); 2784 force_sig(TARGET_SIGSEGV); 2785 } 2786 #endif 2787 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2788 2789 # if defined(TARGET_ABI_MIPSO32) 2790 struct target_sigcontext { 2791 uint32_t sc_regmask; /* Unused */ 2792 uint32_t sc_status; 2793 uint64_t sc_pc; 2794 uint64_t sc_regs[32]; 2795 uint64_t sc_fpregs[32]; 2796 uint32_t sc_ownedfp; /* Unused */ 2797 uint32_t sc_fpc_csr; 2798 uint32_t sc_fpc_eir; /* Unused */ 2799 uint32_t sc_used_math; 2800 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2801 uint32_t pad0; 2802 uint64_t sc_mdhi; 2803 uint64_t sc_mdlo; 2804 target_ulong sc_hi1; /* Was sc_cause */ 2805 target_ulong sc_lo1; /* Was sc_badvaddr */ 2806 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2807 target_ulong sc_lo2; 2808 target_ulong sc_hi3; 2809 target_ulong sc_lo3; 2810 }; 2811 # else /* N32 || N64 */ 2812 struct target_sigcontext { 2813 uint64_t sc_regs[32]; 2814 uint64_t sc_fpregs[32]; 2815 uint64_t sc_mdhi; 2816 uint64_t sc_hi1; 2817 uint64_t sc_hi2; 2818 uint64_t sc_hi3; 2819 uint64_t sc_mdlo; 2820 uint64_t sc_lo1; 2821 uint64_t sc_lo2; 2822 uint64_t sc_lo3; 2823 uint64_t sc_pc; 2824 uint32_t sc_fpc_csr; 2825 uint32_t sc_used_math; 2826 uint32_t sc_dsp; 2827 uint32_t sc_reserved; 2828 }; 2829 # endif /* O32 */ 2830 2831 struct sigframe { 2832 uint32_t sf_ass[4]; /* argument save space for o32 */ 2833 uint32_t sf_code[2]; /* signal trampoline */ 2834 struct target_sigcontext sf_sc; 2835 target_sigset_t sf_mask; 2836 }; 2837 2838 struct target_ucontext { 2839 target_ulong tuc_flags; 2840 target_ulong tuc_link; 2841 target_stack_t tuc_stack; 2842 target_ulong pad0; 2843 struct target_sigcontext tuc_mcontext; 2844 target_sigset_t tuc_sigmask; 2845 }; 2846 2847 struct target_rt_sigframe { 2848 uint32_t rs_ass[4]; /* argument save space for o32 */ 2849 uint32_t rs_code[2]; /* signal trampoline */ 2850 struct target_siginfo rs_info; 2851 struct target_ucontext rs_uc; 2852 }; 2853 2854 /* Install trampoline to jump back from signal handler */ 2855 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2856 { 2857 int err = 0; 2858 2859 /* 2860 * Set up the return code ... 2861 * 2862 * li v0, __NR__foo_sigreturn 2863 * syscall 2864 */ 2865 2866 __put_user(0x24020000 + syscall, tramp + 0); 2867 __put_user(0x0000000c , tramp + 1); 2868 return err; 2869 } 2870 2871 static inline int 2872 setup_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2873 { 2874 int err = 0; 2875 int i; 2876 2877 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2878 regs->hflags &= ~MIPS_HFLAG_BMASK; 2879 2880 __put_user(0, &sc->sc_regs[0]); 2881 for (i = 1; i < 32; ++i) { 2882 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2883 } 2884 2885 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2886 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2887 2888 /* Rather than checking for dsp existence, always copy. The storage 2889 would just be garbage otherwise. 
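 * The DSP state in question is the three extra HI/LO accumulator pairs
 * (ac1-ac3) plus the DSPControl value, read below via cpu_rddsp() with
 * the full 0x3ff mask.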
*/ 2890 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2891 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2892 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2893 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2894 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2895 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2896 { 2897 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2898 __put_user(dsp, &sc->sc_dsp); 2899 } 2900 2901 __put_user(1, &sc->sc_used_math); 2902 2903 for (i = 0; i < 32; ++i) { 2904 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2905 } 2906 2907 return err; 2908 } 2909 2910 static inline int 2911 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2912 { 2913 int err = 0; 2914 int i; 2915 2916 __get_user(regs->CP0_EPC, &sc->sc_pc); 2917 2918 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2919 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2920 2921 for (i = 1; i < 32; ++i) { 2922 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2923 } 2924 2925 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2926 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2927 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2928 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2929 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2930 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2931 { 2932 uint32_t dsp; 2933 __get_user(dsp, &sc->sc_dsp); 2934 cpu_wrdsp(dsp, 0x3ff, regs); 2935 } 2936 2937 for (i = 0; i < 32; ++i) { 2938 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2939 } 2940 2941 return err; 2942 } 2943 2944 /* 2945 * Determine which stack to use.. 2946 */ 2947 static inline abi_ulong 2948 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 2949 { 2950 unsigned long sp; 2951 2952 /* Default to using normal stack */ 2953 sp = regs->active_tc.gpr[29]; 2954 2955 /* 2956 * FPU emulator may have its own trampoline active just 2957 * above the user stack, 16-bytes before the next lowest 2958 * 16 byte boundary. Try to avoid trashing it. 2959 */ 2960 sp -= 32; 2961 2962 /* This is the X/Open sanctioned signal stack switching. 
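 * The branch below only fires when the guest both registered an alternate
 * stack and asked for it, e.g. (guest-side illustration, not QEMU code;
 * "buf" stands for any suitably sized buffer):
 *
 *     stack_t ss = { .ss_sp = buf, .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *     sigaltstack(&ss, NULL);
 *     act.sa_flags |= SA_ONSTACK;
 *
 * in which case sas_ss_flags(sp) is 0 whenever sp is off that stack.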
*/ 2963 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 2964 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2965 } 2966 2967 return (sp - frame_size) & ~7; 2968 } 2969 2970 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 2971 { 2972 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 2973 env->hflags &= ~MIPS_HFLAG_M16; 2974 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 2975 env->active_tc.PC &= ~(target_ulong) 1; 2976 } 2977 } 2978 2979 # if defined(TARGET_ABI_MIPSO32) 2980 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 2981 static void setup_frame(int sig, struct target_sigaction * ka, 2982 target_sigset_t *set, CPUMIPSState *regs) 2983 { 2984 struct sigframe *frame; 2985 abi_ulong frame_addr; 2986 int i; 2987 2988 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 2989 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 2990 goto give_sigsegv; 2991 2992 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 2993 2994 if(setup_sigcontext(regs, &frame->sf_sc)) 2995 goto give_sigsegv; 2996 2997 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 2998 if(__put_user(set->sig[i], &frame->sf_mask.sig[i])) 2999 goto give_sigsegv; 3000 } 3001 3002 /* 3003 * Arguments to signal handler: 3004 * 3005 * a0 = signal number 3006 * a1 = 0 (should be cause) 3007 * a2 = pointer to struct sigcontext 3008 * 3009 * $25 and PC point to the signal handler, $29 points to the 3010 * struct sigframe. 3011 */ 3012 regs->active_tc.gpr[ 4] = sig; 3013 regs->active_tc.gpr[ 5] = 0; 3014 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3015 regs->active_tc.gpr[29] = frame_addr; 3016 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3017 /* The original kernel code sets CP0_EPC to the handler 3018 * since it returns to userland using eret 3019 * we cannot do this here, and we must set PC directly */ 3020 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3021 mips_set_hflags_isa_mode_from_pc(regs); 3022 unlock_user_struct(frame, frame_addr, 1); 3023 return; 3024 3025 give_sigsegv: 3026 unlock_user_struct(frame, frame_addr, 1); 3027 force_sig(TARGET_SIGSEGV/*, current*/); 3028 } 3029 3030 long do_sigreturn(CPUMIPSState *regs) 3031 { 3032 struct sigframe *frame; 3033 abi_ulong frame_addr; 3034 sigset_t blocked; 3035 target_sigset_t target_set; 3036 int i; 3037 3038 #if defined(DEBUG_SIGNAL) 3039 fprintf(stderr, "do_sigreturn\n"); 3040 #endif 3041 frame_addr = regs->active_tc.gpr[29]; 3042 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3043 goto badframe; 3044 3045 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3046 if(__get_user(target_set.sig[i], &frame->sf_mask.sig[i])) 3047 goto badframe; 3048 } 3049 3050 target_to_host_sigset_internal(&blocked, &target_set); 3051 do_sigprocmask(SIG_SETMASK, &blocked, NULL); 3052 3053 if (restore_sigcontext(regs, &frame->sf_sc)) 3054 goto badframe; 3055 3056 #if 0 3057 /* 3058 * Don't let your children do this ... 3059 */ 3060 __asm__ __volatile__( 3061 "move\t$29, %0\n\t" 3062 "j\tsyscall_exit" 3063 :/* no outputs */ 3064 :"r" (®s)); 3065 /* Unreached */ 3066 #endif 3067 3068 regs->active_tc.PC = regs->CP0_EPC; 3069 mips_set_hflags_isa_mode_from_pc(regs); 3070 /* I am not sure this is right, but it seems to work 3071 * maybe a problem with nested signals ? 
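 * Returning -TARGET_QEMU_ESIGRETURN below tells the MIPS cpu loop that
 * the guest registers were already rewritten by this sigreturn, so it
 * must not store a normal syscall return value over the restored $v0.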
*/ 3072 regs->CP0_EPC = 0; 3073 return -TARGET_QEMU_ESIGRETURN; 3074 3075 badframe: 3076 force_sig(TARGET_SIGSEGV/*, current*/); 3077 return 0; 3078 } 3079 # endif /* O32 */ 3080 3081 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3082 target_siginfo_t *info, 3083 target_sigset_t *set, CPUMIPSState *env) 3084 { 3085 struct target_rt_sigframe *frame; 3086 abi_ulong frame_addr; 3087 int i; 3088 3089 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3090 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3091 goto give_sigsegv; 3092 3093 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3094 3095 copy_siginfo_to_user(&frame->rs_info, info); 3096 3097 __put_user(0, &frame->rs_uc.tuc_flags); 3098 __put_user(0, &frame->rs_uc.tuc_link); 3099 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3100 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3101 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3102 &frame->rs_uc.tuc_stack.ss_flags); 3103 3104 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3105 3106 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3107 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3108 } 3109 3110 /* 3111 * Arguments to signal handler: 3112 * 3113 * a0 = signal number 3114 * a1 = pointer to siginfo_t 3115 * a2 = pointer to struct ucontext 3116 * 3117 * $25 and PC point to the signal handler, $29 points to the 3118 * struct sigframe. 3119 */ 3120 env->active_tc.gpr[ 4] = sig; 3121 env->active_tc.gpr[ 5] = frame_addr 3122 + offsetof(struct target_rt_sigframe, rs_info); 3123 env->active_tc.gpr[ 6] = frame_addr 3124 + offsetof(struct target_rt_sigframe, rs_uc); 3125 env->active_tc.gpr[29] = frame_addr; 3126 env->active_tc.gpr[31] = frame_addr 3127 + offsetof(struct target_rt_sigframe, rs_code); 3128 /* The original kernel code sets CP0_EPC to the handler 3129 * since it returns to userland using eret 3130 * we cannot do this here, and we must set PC directly */ 3131 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3132 mips_set_hflags_isa_mode_from_pc(env); 3133 unlock_user_struct(frame, frame_addr, 1); 3134 return; 3135 3136 give_sigsegv: 3137 unlock_user_struct(frame, frame_addr, 1); 3138 force_sig(TARGET_SIGSEGV/*, current*/); 3139 } 3140 3141 long do_rt_sigreturn(CPUMIPSState *env) 3142 { 3143 struct target_rt_sigframe *frame; 3144 abi_ulong frame_addr; 3145 sigset_t blocked; 3146 3147 #if defined(DEBUG_SIGNAL) 3148 fprintf(stderr, "do_rt_sigreturn\n"); 3149 #endif 3150 frame_addr = env->active_tc.gpr[29]; 3151 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3152 goto badframe; 3153 3154 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3155 do_sigprocmask(SIG_SETMASK, &blocked, NULL); 3156 3157 if (restore_sigcontext(env, &frame->rs_uc.tuc_mcontext)) 3158 goto badframe; 3159 3160 if (do_sigaltstack(frame_addr + 3161 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3162 0, get_sp_from_cpustate(env)) == -EFAULT) 3163 goto badframe; 3164 3165 env->active_tc.PC = env->CP0_EPC; 3166 mips_set_hflags_isa_mode_from_pc(env); 3167 /* I am not sure this is right, but it seems to work 3168 * maybe a problem with nested signals ? 
*/ 3169 env->CP0_EPC = 0; 3170 return -TARGET_QEMU_ESIGRETURN; 3171 3172 badframe: 3173 force_sig(TARGET_SIGSEGV/*, current*/); 3174 return 0; 3175 } 3176 3177 #elif defined(TARGET_SH4) 3178 3179 /* 3180 * code and data structures from linux kernel: 3181 * include/asm-sh/sigcontext.h 3182 * arch/sh/kernel/signal.c 3183 */ 3184 3185 struct target_sigcontext { 3186 target_ulong oldmask; 3187 3188 /* CPU registers */ 3189 target_ulong sc_gregs[16]; 3190 target_ulong sc_pc; 3191 target_ulong sc_pr; 3192 target_ulong sc_sr; 3193 target_ulong sc_gbr; 3194 target_ulong sc_mach; 3195 target_ulong sc_macl; 3196 3197 /* FPU registers */ 3198 target_ulong sc_fpregs[16]; 3199 target_ulong sc_xfpregs[16]; 3200 unsigned int sc_fpscr; 3201 unsigned int sc_fpul; 3202 unsigned int sc_ownedfp; 3203 }; 3204 3205 struct target_sigframe 3206 { 3207 struct target_sigcontext sc; 3208 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3209 uint16_t retcode[3]; 3210 }; 3211 3212 3213 struct target_ucontext { 3214 target_ulong tuc_flags; 3215 struct target_ucontext *tuc_link; 3216 target_stack_t tuc_stack; 3217 struct target_sigcontext tuc_mcontext; 3218 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3219 }; 3220 3221 struct target_rt_sigframe 3222 { 3223 struct target_siginfo info; 3224 struct target_ucontext uc; 3225 uint16_t retcode[3]; 3226 }; 3227 3228 3229 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3230 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3231 3232 static abi_ulong get_sigframe(struct target_sigaction *ka, 3233 unsigned long sp, size_t frame_size) 3234 { 3235 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3236 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3237 } 3238 3239 return (sp - frame_size) & -8ul; 3240 } 3241 3242 static int setup_sigcontext(struct target_sigcontext *sc, 3243 CPUSH4State *regs, unsigned long mask) 3244 { 3245 int err = 0; 3246 int i; 3247 3248 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3249 COPY(gregs[0]); COPY(gregs[1]); 3250 COPY(gregs[2]); COPY(gregs[3]); 3251 COPY(gregs[4]); COPY(gregs[5]); 3252 COPY(gregs[6]); COPY(gregs[7]); 3253 COPY(gregs[8]); COPY(gregs[9]); 3254 COPY(gregs[10]); COPY(gregs[11]); 3255 COPY(gregs[12]); COPY(gregs[13]); 3256 COPY(gregs[14]); COPY(gregs[15]); 3257 COPY(gbr); COPY(mach); 3258 COPY(macl); COPY(pr); 3259 COPY(sr); COPY(pc); 3260 #undef COPY 3261 3262 for (i=0; i<16; i++) { 3263 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3264 } 3265 __put_user(regs->fpscr, &sc->sc_fpscr); 3266 __put_user(regs->fpul, &sc->sc_fpul); 3267 3268 /* non-iBCS2 extensions.. 
*/ 3269 __put_user(mask, &sc->oldmask); 3270 3271 return err; 3272 } 3273 3274 static int restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc, 3275 target_ulong *r0_p) 3276 { 3277 unsigned int err = 0; 3278 int i; 3279 3280 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3281 COPY(gregs[1]); 3282 COPY(gregs[2]); COPY(gregs[3]); 3283 COPY(gregs[4]); COPY(gregs[5]); 3284 COPY(gregs[6]); COPY(gregs[7]); 3285 COPY(gregs[8]); COPY(gregs[9]); 3286 COPY(gregs[10]); COPY(gregs[11]); 3287 COPY(gregs[12]); COPY(gregs[13]); 3288 COPY(gregs[14]); COPY(gregs[15]); 3289 COPY(gbr); COPY(mach); 3290 COPY(macl); COPY(pr); 3291 COPY(sr); COPY(pc); 3292 #undef COPY 3293 3294 for (i=0; i<16; i++) { 3295 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3296 } 3297 __get_user(regs->fpscr, &sc->sc_fpscr); 3298 __get_user(regs->fpul, &sc->sc_fpul); 3299 3300 regs->tra = -1; /* disable syscall checks */ 3301 __get_user(*r0_p, &sc->sc_gregs[0]); 3302 return err; 3303 } 3304 3305 static void setup_frame(int sig, struct target_sigaction *ka, 3306 target_sigset_t *set, CPUSH4State *regs) 3307 { 3308 struct target_sigframe *frame; 3309 abi_ulong frame_addr; 3310 int i; 3311 int err = 0; 3312 int signal; 3313 3314 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3315 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3316 goto give_sigsegv; 3317 3318 signal = current_exec_domain_sig(sig); 3319 3320 err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); 3321 3322 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3323 __put_user(set->sig[i + 1], &frame->extramask[i]); 3324 } 3325 3326 /* Set up to return from userspace. If provided, use a stub 3327 already in userspace. */ 3328 if (ka->sa_flags & TARGET_SA_RESTORER) { 3329 regs->pr = (unsigned long) ka->sa_restorer; 3330 } else { 3331 /* Generate return code (system call to sigreturn) */ 3332 __put_user(MOVW(2), &frame->retcode[0]); 3333 __put_user(TRAP_NOARG, &frame->retcode[1]); 3334 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3335 regs->pr = (unsigned long) frame->retcode; 3336 } 3337 3338 if (err) 3339 goto give_sigsegv; 3340 3341 /* Set up registers for signal handler */ 3342 regs->gregs[15] = frame_addr; 3343 regs->gregs[4] = signal; /* Arg for signal handler */ 3344 regs->gregs[5] = 0; 3345 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3346 regs->pc = (unsigned long) ka->_sa_handler; 3347 3348 unlock_user_struct(frame, frame_addr, 1); 3349 return; 3350 3351 give_sigsegv: 3352 unlock_user_struct(frame, frame_addr, 1); 3353 force_sig(TARGET_SIGSEGV); 3354 } 3355 3356 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3357 target_siginfo_t *info, 3358 target_sigset_t *set, CPUSH4State *regs) 3359 { 3360 struct target_rt_sigframe *frame; 3361 abi_ulong frame_addr; 3362 int i; 3363 int err = 0; 3364 int signal; 3365 3366 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3367 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3368 goto give_sigsegv; 3369 3370 signal = current_exec_domain_sig(sig); 3371 3372 err |= copy_siginfo_to_user(&frame->info, info); 3373 3374 /* Create the ucontext. 
*/ 3375 __put_user(0, &frame->uc.tuc_flags); 3376 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3377 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3378 &frame->uc.tuc_stack.ss_sp); 3379 __put_user(sas_ss_flags(regs->gregs[15]), 3380 &frame->uc.tuc_stack.ss_flags); 3381 __put_user(target_sigaltstack_used.ss_size, 3382 &frame->uc.tuc_stack.ss_size); 3383 setup_sigcontext(&frame->uc.tuc_mcontext, 3384 regs, set->sig[0]); 3385 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3386 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3387 } 3388 3389 /* Set up to return from userspace. If provided, use a stub 3390 already in userspace. */ 3391 if (ka->sa_flags & TARGET_SA_RESTORER) { 3392 regs->pr = (unsigned long) ka->sa_restorer; 3393 } else { 3394 /* Generate return code (system call to sigreturn) */ 3395 __put_user(MOVW(2), &frame->retcode[0]); 3396 __put_user(TRAP_NOARG, &frame->retcode[1]); 3397 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3398 regs->pr = (unsigned long) frame->retcode; 3399 } 3400 3401 if (err) 3402 goto give_sigsegv; 3403 3404 /* Set up registers for signal handler */ 3405 regs->gregs[15] = frame_addr; 3406 regs->gregs[4] = signal; /* Arg for signal handler */ 3407 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3408 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3409 regs->pc = (unsigned long) ka->_sa_handler; 3410 3411 unlock_user_struct(frame, frame_addr, 1); 3412 return; 3413 3414 give_sigsegv: 3415 unlock_user_struct(frame, frame_addr, 1); 3416 force_sig(TARGET_SIGSEGV); 3417 } 3418 3419 long do_sigreturn(CPUSH4State *regs) 3420 { 3421 struct target_sigframe *frame; 3422 abi_ulong frame_addr; 3423 sigset_t blocked; 3424 target_sigset_t target_set; 3425 target_ulong r0; 3426 int i; 3427 int err = 0; 3428 3429 #if defined(DEBUG_SIGNAL) 3430 fprintf(stderr, "do_sigreturn\n"); 3431 #endif 3432 frame_addr = regs->gregs[15]; 3433 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3434 goto badframe; 3435 3436 __get_user(target_set.sig[0], &frame->sc.oldmask); 3437 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3438 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3439 } 3440 3441 if (err) 3442 goto badframe; 3443 3444 target_to_host_sigset_internal(&blocked, &target_set); 3445 do_sigprocmask(SIG_SETMASK, &blocked, NULL); 3446 3447 if (restore_sigcontext(regs, &frame->sc, &r0)) 3448 goto badframe; 3449 3450 unlock_user_struct(frame, frame_addr, 0); 3451 return r0; 3452 3453 badframe: 3454 unlock_user_struct(frame, frame_addr, 0); 3455 force_sig(TARGET_SIGSEGV); 3456 return 0; 3457 } 3458 3459 long do_rt_sigreturn(CPUSH4State *regs) 3460 { 3461 struct target_rt_sigframe *frame; 3462 abi_ulong frame_addr; 3463 sigset_t blocked; 3464 target_ulong r0; 3465 3466 #if defined(DEBUG_SIGNAL) 3467 fprintf(stderr, "do_rt_sigreturn\n"); 3468 #endif 3469 frame_addr = regs->gregs[15]; 3470 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3471 goto badframe; 3472 3473 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3474 do_sigprocmask(SIG_SETMASK, &blocked, NULL); 3475 3476 if (restore_sigcontext(regs, &frame->uc.tuc_mcontext, &r0)) 3477 goto badframe; 3478 3479 if (do_sigaltstack(frame_addr + 3480 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3481 0, get_sp_from_cpustate(regs)) == -EFAULT) 3482 goto badframe; 3483 3484 unlock_user_struct(frame, frame_addr, 0); 3485 return r0; 3486 3487 badframe: 3488 unlock_user_struct(frame, frame_addr, 0); 3489 force_sig(TARGET_SIGSEGV); 3490 return 0; 3491 } 3492 
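
/*
 * Guest-visible contract that each setup_frame()/do_sigreturn() pair in
 * this file implements.  The sketch below is plain guest-side C, for
 * illustration only (it is not QEMU code and is not compiled here): the
 * handler runs on the frame pushed by setup_frame(), and returning from it
 * goes through the retcode trampoline (or sa_restorer) into do_sigreturn(),
 * which restores the saved signal mask and registers.
 *
 *     #include <signal.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *
 *     static void handler(int sig)
 *     {
 *         write(1, "in handler\n", 11);    // running on the signal frame
 *     }
 *
 *     int main(void)
 *     {
 *         struct sigaction act;
 *         memset(&act, 0, sizeof(act));
 *         act.sa_handler = handler;
 *         sigaction(SIGUSR1, &act, NULL);
 *         raise(SIGUSR1);    // frame is built, handler runs, sigreturn
 *         return 0;          // restores the pre-signal state before here
 *     }
 */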
#elif defined(TARGET_MICROBLAZE) 3493 3494 struct target_sigcontext { 3495 struct target_pt_regs regs; /* needs to be first */ 3496 uint32_t oldmask; 3497 }; 3498 3499 struct target_stack_t { 3500 abi_ulong ss_sp; 3501 int ss_flags; 3502 unsigned int ss_size; 3503 }; 3504 3505 struct target_ucontext { 3506 abi_ulong tuc_flags; 3507 abi_ulong tuc_link; 3508 struct target_stack_t tuc_stack; 3509 struct target_sigcontext tuc_mcontext; 3510 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3511 }; 3512 3513 /* Signal frames. */ 3514 struct target_signal_frame { 3515 struct target_ucontext uc; 3516 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3517 uint32_t tramp[2]; 3518 }; 3519 3520 struct rt_signal_frame { 3521 siginfo_t info; 3522 struct ucontext uc; 3523 uint32_t tramp[2]; 3524 }; 3525 3526 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3527 { 3528 __put_user(env->regs[0], &sc->regs.r0); 3529 __put_user(env->regs[1], &sc->regs.r1); 3530 __put_user(env->regs[2], &sc->regs.r2); 3531 __put_user(env->regs[3], &sc->regs.r3); 3532 __put_user(env->regs[4], &sc->regs.r4); 3533 __put_user(env->regs[5], &sc->regs.r5); 3534 __put_user(env->regs[6], &sc->regs.r6); 3535 __put_user(env->regs[7], &sc->regs.r7); 3536 __put_user(env->regs[8], &sc->regs.r8); 3537 __put_user(env->regs[9], &sc->regs.r9); 3538 __put_user(env->regs[10], &sc->regs.r10); 3539 __put_user(env->regs[11], &sc->regs.r11); 3540 __put_user(env->regs[12], &sc->regs.r12); 3541 __put_user(env->regs[13], &sc->regs.r13); 3542 __put_user(env->regs[14], &sc->regs.r14); 3543 __put_user(env->regs[15], &sc->regs.r15); 3544 __put_user(env->regs[16], &sc->regs.r16); 3545 __put_user(env->regs[17], &sc->regs.r17); 3546 __put_user(env->regs[18], &sc->regs.r18); 3547 __put_user(env->regs[19], &sc->regs.r19); 3548 __put_user(env->regs[20], &sc->regs.r20); 3549 __put_user(env->regs[21], &sc->regs.r21); 3550 __put_user(env->regs[22], &sc->regs.r22); 3551 __put_user(env->regs[23], &sc->regs.r23); 3552 __put_user(env->regs[24], &sc->regs.r24); 3553 __put_user(env->regs[25], &sc->regs.r25); 3554 __put_user(env->regs[26], &sc->regs.r26); 3555 __put_user(env->regs[27], &sc->regs.r27); 3556 __put_user(env->regs[28], &sc->regs.r28); 3557 __put_user(env->regs[29], &sc->regs.r29); 3558 __put_user(env->regs[30], &sc->regs.r30); 3559 __put_user(env->regs[31], &sc->regs.r31); 3560 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3561 } 3562 3563 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3564 { 3565 __get_user(env->regs[0], &sc->regs.r0); 3566 __get_user(env->regs[1], &sc->regs.r1); 3567 __get_user(env->regs[2], &sc->regs.r2); 3568 __get_user(env->regs[3], &sc->regs.r3); 3569 __get_user(env->regs[4], &sc->regs.r4); 3570 __get_user(env->regs[5], &sc->regs.r5); 3571 __get_user(env->regs[6], &sc->regs.r6); 3572 __get_user(env->regs[7], &sc->regs.r7); 3573 __get_user(env->regs[8], &sc->regs.r8); 3574 __get_user(env->regs[9], &sc->regs.r9); 3575 __get_user(env->regs[10], &sc->regs.r10); 3576 __get_user(env->regs[11], &sc->regs.r11); 3577 __get_user(env->regs[12], &sc->regs.r12); 3578 __get_user(env->regs[13], &sc->regs.r13); 3579 __get_user(env->regs[14], &sc->regs.r14); 3580 __get_user(env->regs[15], &sc->regs.r15); 3581 __get_user(env->regs[16], &sc->regs.r16); 3582 __get_user(env->regs[17], &sc->regs.r17); 3583 __get_user(env->regs[18], &sc->regs.r18); 3584 __get_user(env->regs[19], &sc->regs.r19); 3585 __get_user(env->regs[20], &sc->regs.r20); 3586 __get_user(env->regs[21], &sc->regs.r21); 3587 
__get_user(env->regs[22], &sc->regs.r22); 3588 __get_user(env->regs[23], &sc->regs.r23); 3589 __get_user(env->regs[24], &sc->regs.r24); 3590 __get_user(env->regs[25], &sc->regs.r25); 3591 __get_user(env->regs[26], &sc->regs.r26); 3592 __get_user(env->regs[27], &sc->regs.r27); 3593 __get_user(env->regs[28], &sc->regs.r28); 3594 __get_user(env->regs[29], &sc->regs.r29); 3595 __get_user(env->regs[30], &sc->regs.r30); 3596 __get_user(env->regs[31], &sc->regs.r31); 3597 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3598 } 3599 3600 static abi_ulong get_sigframe(struct target_sigaction *ka, 3601 CPUMBState *env, int frame_size) 3602 { 3603 abi_ulong sp = env->regs[1]; 3604 3605 if ((ka->sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp)) 3606 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3607 3608 return ((sp - frame_size) & -8UL); 3609 } 3610 3611 static void setup_frame(int sig, struct target_sigaction *ka, 3612 target_sigset_t *set, CPUMBState *env) 3613 { 3614 struct target_signal_frame *frame; 3615 abi_ulong frame_addr; 3616 int err = 0; 3617 int i; 3618 3619 frame_addr = get_sigframe(ka, env, sizeof *frame); 3620 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3621 goto badframe; 3622 3623 /* Save the mask. */ 3624 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3625 if (err) 3626 goto badframe; 3627 3628 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3629 if (__put_user(set->sig[i], &frame->extramask[i - 1])) 3630 goto badframe; 3631 } 3632 3633 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3634 3635 /* Set up to return from userspace. If provided, use a stub 3636 already in userspace. */ 3637 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3638 if (ka->sa_flags & TARGET_SA_RESTORER) { 3639 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3640 } else { 3641 uint32_t t; 3642 /* Note, these encodings are _big endian_! */ 3643 /* addi r12, r0, __NR_sigreturn */ 3644 t = 0x31800000UL | TARGET_NR_sigreturn; 3645 __put_user(t, frame->tramp + 0); 3646 /* brki r14, 0x8 */ 3647 t = 0xb9cc0008UL; 3648 __put_user(t, frame->tramp + 1); 3649 3650 /* Return from sighandler will jump to the tramp. 3651 Negative 8 offset because return is rtsd r15, 8 */ 3652 env->regs[15] = ((unsigned long)frame->tramp) - 8; 3653 } 3654 3655 if (err) 3656 goto badframe; 3657 3658 /* Set up registers for signal handler */ 3659 env->regs[1] = frame_addr; 3660 /* Signal handler args: */ 3661 env->regs[5] = sig; /* Arg 0: signum */ 3662 env->regs[6] = 0; 3663 /* arg 1: sigcontext */ 3664 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3665 3666 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3667 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3668 3669 unlock_user_struct(frame, frame_addr, 1); 3670 return; 3671 badframe: 3672 unlock_user_struct(frame, frame_addr, 1); 3673 force_sig(TARGET_SIGSEGV); 3674 } 3675 3676 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3677 target_siginfo_t *info, 3678 target_sigset_t *set, CPUMBState *env) 3679 { 3680 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3681 } 3682 3683 long do_sigreturn(CPUMBState *env) 3684 { 3685 struct target_signal_frame *frame; 3686 abi_ulong frame_addr; 3687 target_sigset_t target_set; 3688 sigset_t set; 3689 int i; 3690 3691 frame_addr = env->regs[R_SP]; 3692 /* Make sure the guest isn't playing games. 
*/ 3693 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3694 goto badframe; 3695 3696 /* Restore blocked signals */ 3697 if (__get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask)) 3698 goto badframe; 3699 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3700 if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) 3701 goto badframe; 3702 } 3703 target_to_host_sigset_internal(&set, &target_set); 3704 do_sigprocmask(SIG_SETMASK, &set, NULL); 3705 3706 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3707 /* We got here through a sigreturn syscall, our path back is via an 3708 rtb insn so setup r14 for that. */ 3709 env->regs[14] = env->sregs[SR_PC]; 3710 3711 unlock_user_struct(frame, frame_addr, 0); 3712 return env->regs[10]; 3713 badframe: 3714 unlock_user_struct(frame, frame_addr, 0); 3715 force_sig(TARGET_SIGSEGV); 3716 } 3717 3718 long do_rt_sigreturn(CPUMBState *env) 3719 { 3720 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3721 return -TARGET_ENOSYS; 3722 } 3723 3724 #elif defined(TARGET_CRIS) 3725 3726 struct target_sigcontext { 3727 struct target_pt_regs regs; /* needs to be first */ 3728 uint32_t oldmask; 3729 uint32_t usp; /* usp before stacking this gunk on it */ 3730 }; 3731 3732 /* Signal frames. */ 3733 struct target_signal_frame { 3734 struct target_sigcontext sc; 3735 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3736 uint16_t retcode[4]; /* Trampoline code. */ 3737 }; 3738 3739 struct rt_signal_frame { 3740 siginfo_t *pinfo; 3741 void *puc; 3742 siginfo_t info; 3743 struct ucontext uc; 3744 uint16_t retcode[4]; /* Trampoline code. */ 3745 }; 3746 3747 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3748 { 3749 __put_user(env->regs[0], &sc->regs.r0); 3750 __put_user(env->regs[1], &sc->regs.r1); 3751 __put_user(env->regs[2], &sc->regs.r2); 3752 __put_user(env->regs[3], &sc->regs.r3); 3753 __put_user(env->regs[4], &sc->regs.r4); 3754 __put_user(env->regs[5], &sc->regs.r5); 3755 __put_user(env->regs[6], &sc->regs.r6); 3756 __put_user(env->regs[7], &sc->regs.r7); 3757 __put_user(env->regs[8], &sc->regs.r8); 3758 __put_user(env->regs[9], &sc->regs.r9); 3759 __put_user(env->regs[10], &sc->regs.r10); 3760 __put_user(env->regs[11], &sc->regs.r11); 3761 __put_user(env->regs[12], &sc->regs.r12); 3762 __put_user(env->regs[13], &sc->regs.r13); 3763 __put_user(env->regs[14], &sc->usp); 3764 __put_user(env->regs[15], &sc->regs.acr); 3765 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3766 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3767 __put_user(env->pc, &sc->regs.erp); 3768 } 3769 3770 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3771 { 3772 __get_user(env->regs[0], &sc->regs.r0); 3773 __get_user(env->regs[1], &sc->regs.r1); 3774 __get_user(env->regs[2], &sc->regs.r2); 3775 __get_user(env->regs[3], &sc->regs.r3); 3776 __get_user(env->regs[4], &sc->regs.r4); 3777 __get_user(env->regs[5], &sc->regs.r5); 3778 __get_user(env->regs[6], &sc->regs.r6); 3779 __get_user(env->regs[7], &sc->regs.r7); 3780 __get_user(env->regs[8], &sc->regs.r8); 3781 __get_user(env->regs[9], &sc->regs.r9); 3782 __get_user(env->regs[10], &sc->regs.r10); 3783 __get_user(env->regs[11], &sc->regs.r11); 3784 __get_user(env->regs[12], &sc->regs.r12); 3785 __get_user(env->regs[13], &sc->regs.r13); 3786 __get_user(env->regs[14], &sc->usp); 3787 __get_user(env->regs[15], &sc->regs.acr); 3788 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3789 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3790 __get_user(env->pc, 
&sc->regs.erp); 3791 } 3792 3793 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3794 { 3795 abi_ulong sp; 3796 /* Align the stack downwards to 4. */ 3797 sp = (env->regs[R_SP] & ~3); 3798 return sp - framesize; 3799 } 3800 3801 static void setup_frame(int sig, struct target_sigaction *ka, 3802 target_sigset_t *set, CPUCRISState *env) 3803 { 3804 struct target_signal_frame *frame; 3805 abi_ulong frame_addr; 3806 int err = 0; 3807 int i; 3808 3809 frame_addr = get_sigframe(env, sizeof *frame); 3810 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3811 goto badframe; 3812 3813 /* 3814 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3815 * use this trampoline anymore but it sets it up for GDB. 3816 * In QEMU, using the trampoline simplifies things a bit so we use it. 3817 * 3818 * This is movu.w __NR_sigreturn, r9; break 13; 3819 */ 3820 __put_user(0x9c5f, frame->retcode+0); 3821 __put_user(TARGET_NR_sigreturn, 3822 frame->retcode + 1); 3823 __put_user(0xe93d, frame->retcode + 2); 3824 3825 /* Save the mask. */ 3826 __put_user(set->sig[0], &frame->sc.oldmask); 3827 if (err) 3828 goto badframe; 3829 3830 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3831 if (__put_user(set->sig[i], &frame->extramask[i - 1])) 3832 goto badframe; 3833 } 3834 3835 setup_sigcontext(&frame->sc, env); 3836 3837 /* Move the stack and setup the arguments for the handler. */ 3838 env->regs[R_SP] = frame_addr; 3839 env->regs[10] = sig; 3840 env->pc = (unsigned long) ka->_sa_handler; 3841 /* Link SRP so the guest returns through the trampoline. */ 3842 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3843 3844 unlock_user_struct(frame, frame_addr, 1); 3845 return; 3846 badframe: 3847 unlock_user_struct(frame, frame_addr, 1); 3848 force_sig(TARGET_SIGSEGV); 3849 } 3850 3851 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3852 target_siginfo_t *info, 3853 target_sigset_t *set, CPUCRISState *env) 3854 { 3855 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3856 } 3857 3858 long do_sigreturn(CPUCRISState *env) 3859 { 3860 struct target_signal_frame *frame; 3861 abi_ulong frame_addr; 3862 target_sigset_t target_set; 3863 sigset_t set; 3864 int i; 3865 3866 frame_addr = env->regs[R_SP]; 3867 /* Make sure the guest isn't playing games. 
*/ 3868 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3869 goto badframe; 3870 3871 /* Restore blocked signals */ 3872 if (__get_user(target_set.sig[0], &frame->sc.oldmask)) 3873 goto badframe; 3874 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3875 if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) 3876 goto badframe; 3877 } 3878 target_to_host_sigset_internal(&set, &target_set); 3879 do_sigprocmask(SIG_SETMASK, &set, NULL); 3880 3881 restore_sigcontext(&frame->sc, env); 3882 unlock_user_struct(frame, frame_addr, 0); 3883 return env->regs[10]; 3884 badframe: 3885 unlock_user_struct(frame, frame_addr, 0); 3886 force_sig(TARGET_SIGSEGV); 3887 } 3888 3889 long do_rt_sigreturn(CPUCRISState *env) 3890 { 3891 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3892 return -TARGET_ENOSYS; 3893 } 3894 3895 #elif defined(TARGET_OPENRISC) 3896 3897 struct target_sigcontext { 3898 struct target_pt_regs regs; 3899 abi_ulong oldmask; 3900 abi_ulong usp; 3901 }; 3902 3903 struct target_ucontext { 3904 abi_ulong tuc_flags; 3905 abi_ulong tuc_link; 3906 target_stack_t tuc_stack; 3907 struct target_sigcontext tuc_mcontext; 3908 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3909 }; 3910 3911 struct target_rt_sigframe { 3912 abi_ulong pinfo; 3913 uint64_t puc; 3914 struct target_siginfo info; 3915 struct target_sigcontext sc; 3916 struct target_ucontext uc; 3917 unsigned char retcode[16]; /* trampoline code */ 3918 }; 3919 3920 /* This is the asm-generic/ucontext.h version */ 3921 #if 0 3922 static int restore_sigcontext(CPUOpenRISCState *regs, 3923 struct target_sigcontext *sc) 3924 { 3925 unsigned int err = 0; 3926 unsigned long old_usp; 3927 3928 /* Alwys make any pending restarted system call return -EINTR */ 3929 current_thread_info()->restart_block.fn = do_no_restart_syscall; 3930 3931 /* restore the regs from &sc->regs (same as sc, since regs is first) 3932 * (sc is already checked for VERIFY_READ since the sigframe was 3933 * checked in sys_sigreturn previously) 3934 */ 3935 3936 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 3937 goto badframe; 3938 } 3939 3940 /* make sure the U-flag is set so user-mode cannot fool us */ 3941 3942 regs->sr &= ~SR_SM; 3943 3944 /* restore the old USP as it was before we stacked the sc etc. 3945 * (we cannot just pop the sigcontext since we aligned the sp and 3946 * stuff after pushing it) 3947 */ 3948 3949 __get_user(old_usp, &sc->usp); 3950 phx_signal("old_usp 0x%lx", old_usp); 3951 3952 __PHX__ REALLY /* ??? */ 3953 wrusp(old_usp); 3954 regs->gpr[1] = old_usp; 3955 3956 /* TODO: the other ports use regs->orig_XX to disable syscall checks 3957 * after this completes, but we don't use that mechanism. maybe we can 3958 * use it now ? 3959 */ 3960 3961 return err; 3962 3963 badframe: 3964 return 1; 3965 } 3966 #endif 3967 3968 /* Set up a signal frame. */ 3969 3970 static int setup_sigcontext(struct target_sigcontext *sc, 3971 CPUOpenRISCState *regs, 3972 unsigned long mask) 3973 { 3974 int err = 0; 3975 unsigned long usp = regs->gpr[1]; 3976 3977 /* copy the regs. they are first in sc so we can use sc directly */ 3978 3979 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 3980 3981 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 3982 the signal handler. The frametype will be restored to its previous 3983 value in restore_sigcontext. 
 */
    /*regs->frametype = CRIS_FRAME_NORMAL;*/

    /* then some other stuff */
    __put_user(mask, &sc->oldmask);
    __put_user(usp, &sc->usp);

    return err;
}

static inline unsigned long align_sigframe(unsigned long sp)
{
    unsigned long i;
    i = sp & ~3UL;
    return i;
}

static inline abi_ulong get_sigframe(struct target_sigaction *ka,
                                     CPUOpenRISCState *regs,
                                     size_t frame_size)
{
    unsigned long sp = regs->gpr[1];
    int onsigstack = on_sig_stack(sp);

    /* redzone */
    /* This is the X/Open sanctioned signal stack switching.  */
    if ((ka->sa_flags & SA_ONSTACK) != 0 && !onsigstack) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = align_sigframe(sp - frame_size);

    /*
     * If we are on the alternate signal stack and would overflow it, don't.
     * Return an always-bogus address instead so we will die with SIGSEGV.
     */
    if (onsigstack && !likely(on_sig_stack(sp))) {
        return -1L;
    }

    return sp;
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUOpenRISCState *env)
{
    qemu_log("setup_frame: not implemented\n");
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUOpenRISCState *env)
{
    int err = 0;
    abi_ulong frame_addr;
    unsigned long return_ip;
    struct target_rt_sigframe *frame;
    abi_ulong info_addr, uc_addr;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
    __put_user(uc_addr, &frame->puc);

    if (ka->sa_flags & SA_SIGINFO) {
        err |= copy_siginfo_to_user(&frame->info, info);
    }
    if (err) {
        goto give_sigsegv;
    }

    /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    err |= setup_sigcontext(&frame->sc, env, set->sig[0]);

    /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/

    if (err) {
        goto give_sigsegv;
    }

    /* trampoline - the desired return ip is the retcode itself */
    return_ip = (unsigned long)&frame->retcode;
    /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1 */
    __put_user(0xa960, (short *)(frame->retcode + 0));
    __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
    __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
    __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));

    if (err) {
        goto give_sigsegv;
    }

    /* TODO what is the current->exec_domain stuff and invmap ?
*/ 4087 4088 /* Set up registers for signal handler */ 4089 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4090 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4091 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4092 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4093 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4094 4095 /* actually move the usp to reflect the stacked frame */ 4096 env->gpr[1] = (unsigned long)frame; 4097 4098 return; 4099 4100 give_sigsegv: 4101 unlock_user_struct(frame, frame_addr, 1); 4102 if (sig == TARGET_SIGSEGV) { 4103 ka->_sa_handler = TARGET_SIG_DFL; 4104 } 4105 force_sig(TARGET_SIGSEGV); 4106 } 4107 4108 long do_sigreturn(CPUOpenRISCState *env) 4109 { 4110 4111 qemu_log("do_sigreturn: not implemented\n"); 4112 return -TARGET_ENOSYS; 4113 } 4114 4115 long do_rt_sigreturn(CPUOpenRISCState *env) 4116 { 4117 qemu_log("do_rt_sigreturn: not implemented\n"); 4118 return -TARGET_ENOSYS; 4119 } 4120 /* TARGET_OPENRISC */ 4121 4122 #elif defined(TARGET_S390X) 4123 4124 #define __NUM_GPRS 16 4125 #define __NUM_FPRS 16 4126 #define __NUM_ACRS 16 4127 4128 #define S390_SYSCALL_SIZE 2 4129 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4130 4131 #define _SIGCONTEXT_NSIG 64 4132 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4133 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4134 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4135 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4136 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4137 4138 typedef struct { 4139 target_psw_t psw; 4140 target_ulong gprs[__NUM_GPRS]; 4141 unsigned int acrs[__NUM_ACRS]; 4142 } target_s390_regs_common; 4143 4144 typedef struct { 4145 unsigned int fpc; 4146 double fprs[__NUM_FPRS]; 4147 } target_s390_fp_regs; 4148 4149 typedef struct { 4150 target_s390_regs_common regs; 4151 target_s390_fp_regs fpregs; 4152 } target_sigregs; 4153 4154 struct target_sigcontext { 4155 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4156 target_sigregs *sregs; 4157 }; 4158 4159 typedef struct { 4160 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4161 struct target_sigcontext sc; 4162 target_sigregs sregs; 4163 int signo; 4164 uint8_t retcode[S390_SYSCALL_SIZE]; 4165 } sigframe; 4166 4167 struct target_ucontext { 4168 target_ulong tuc_flags; 4169 struct target_ucontext *tuc_link; 4170 target_stack_t tuc_stack; 4171 target_sigregs tuc_mcontext; 4172 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4173 }; 4174 4175 typedef struct { 4176 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4177 uint8_t retcode[S390_SYSCALL_SIZE]; 4178 struct target_siginfo info; 4179 struct target_ucontext uc; 4180 } rt_sigframe; 4181 4182 static inline abi_ulong 4183 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4184 { 4185 abi_ulong sp; 4186 4187 /* Default to using normal stack */ 4188 sp = env->regs[15]; 4189 4190 /* This is the X/Open sanctioned signal stack switching. */ 4191 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4192 if (!sas_ss_flags(sp)) { 4193 sp = target_sigaltstack_used.ss_sp + 4194 target_sigaltstack_used.ss_size; 4195 } 4196 } 4197 4198 /* This is the legacy signal stack switching. 
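       The branch below is deliberately dead (note the hard-coded 0): it
       would only apply when the interrupted context was not user mode,
       which never happens under user-mode emulation.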
*/ 4199 else if (/* FIXME !user_mode(regs) */ 0 && 4200 !(ka->sa_flags & TARGET_SA_RESTORER) && 4201 ka->sa_restorer) { 4202 sp = (abi_ulong) ka->sa_restorer; 4203 } 4204 4205 return (sp - frame_size) & -8ul; 4206 } 4207 4208 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4209 { 4210 int i; 4211 //save_access_regs(current->thread.acrs); FIXME 4212 4213 /* Copy a 'clean' PSW mask to the user to avoid leaking 4214 information about whether PER is currently on. */ 4215 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4216 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4217 for (i = 0; i < 16; i++) { 4218 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4219 } 4220 for (i = 0; i < 16; i++) { 4221 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4222 } 4223 /* 4224 * We have to store the fp registers to current->thread.fp_regs 4225 * to merge them with the emulated registers. 4226 */ 4227 //save_fp_regs(¤t->thread.fp_regs); FIXME 4228 for (i = 0; i < 16; i++) { 4229 __put_user(env->fregs[i].ll, &sregs->fpregs.fprs[i]); 4230 } 4231 } 4232 4233 static void setup_frame(int sig, struct target_sigaction *ka, 4234 target_sigset_t *set, CPUS390XState *env) 4235 { 4236 sigframe *frame; 4237 abi_ulong frame_addr; 4238 4239 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4240 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, 4241 (unsigned long long)frame_addr); 4242 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4243 goto give_sigsegv; 4244 } 4245 4246 qemu_log("%s: 1\n", __FUNCTION__); 4247 if (__put_user(set->sig[0], &frame->sc.oldmask[0])) { 4248 goto give_sigsegv; 4249 } 4250 4251 save_sigregs(env, &frame->sregs); 4252 4253 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4254 (abi_ulong *)&frame->sc.sregs); 4255 4256 /* Set up to return from userspace. If provided, use a stub 4257 already in userspace. */ 4258 if (ka->sa_flags & TARGET_SA_RESTORER) { 4259 env->regs[14] = (unsigned long) 4260 ka->sa_restorer | PSW_ADDR_AMODE; 4261 } else { 4262 env->regs[14] = (unsigned long) 4263 frame->retcode | PSW_ADDR_AMODE; 4264 if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4265 (uint16_t *)(frame->retcode))) 4266 goto give_sigsegv; 4267 } 4268 4269 /* Set up backchain. */ 4270 if (__put_user(env->regs[15], (abi_ulong *) frame)) { 4271 goto give_sigsegv; 4272 } 4273 4274 /* Set up registers for signal handler */ 4275 env->regs[15] = frame_addr; 4276 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4277 4278 env->regs[2] = sig; //map_signal(sig); 4279 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4280 4281 /* We forgot to include these in the sigcontext. 4282 To avoid breaking binary compatibility, they are passed as args. */ 4283 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4284 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4285 4286 /* Place signal number on stack to allow backtrace from handler. 
*/ 4287 if (__put_user(env->regs[2], (int *) &frame->signo)) { 4288 goto give_sigsegv; 4289 } 4290 unlock_user_struct(frame, frame_addr, 1); 4291 return; 4292 4293 give_sigsegv: 4294 qemu_log("%s: give_sigsegv\n", __FUNCTION__); 4295 unlock_user_struct(frame, frame_addr, 1); 4296 force_sig(TARGET_SIGSEGV); 4297 } 4298 4299 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4300 target_siginfo_t *info, 4301 target_sigset_t *set, CPUS390XState *env) 4302 { 4303 int i; 4304 rt_sigframe *frame; 4305 abi_ulong frame_addr; 4306 4307 frame_addr = get_sigframe(ka, env, sizeof *frame); 4308 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, 4309 (unsigned long long)frame_addr); 4310 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4311 goto give_sigsegv; 4312 } 4313 4314 qemu_log("%s: 1\n", __FUNCTION__); 4315 if (copy_siginfo_to_user(&frame->info, info)) { 4316 goto give_sigsegv; 4317 } 4318 4319 /* Create the ucontext. */ 4320 __put_user(0, &frame->uc.tuc_flags); 4321 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4322 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4323 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4324 &frame->uc.tuc_stack.ss_flags); 4325 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4326 save_sigregs(env, &frame->uc.tuc_mcontext); 4327 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4328 __put_user((abi_ulong)set->sig[i], 4329 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4330 } 4331 4332 /* Set up to return from userspace. If provided, use a stub 4333 already in userspace. */ 4334 if (ka->sa_flags & TARGET_SA_RESTORER) { 4335 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4336 } else { 4337 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4338 if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4339 (uint16_t *)(frame->retcode))) { 4340 goto give_sigsegv; 4341 } 4342 } 4343 4344 /* Set up backchain. 
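       The s390 ABI keeps a back chain: the first word of each stack frame
       holds the caller's stack pointer, so the old r15 is stored at the
       start of the signal frame to keep the stack walkable from the
       handler.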
*/ 4345 if (__put_user(env->regs[15], (abi_ulong *) frame)) { 4346 goto give_sigsegv; 4347 } 4348 4349 /* Set up registers for signal handler */ 4350 env->regs[15] = frame_addr; 4351 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4352 4353 env->regs[2] = sig; //map_signal(sig); 4354 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4355 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4356 return; 4357 4358 give_sigsegv: 4359 qemu_log("%s: give_sigsegv\n", __FUNCTION__); 4360 unlock_user_struct(frame, frame_addr, 1); 4361 force_sig(TARGET_SIGSEGV); 4362 } 4363 4364 static int 4365 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4366 { 4367 int err = 0; 4368 int i; 4369 4370 for (i = 0; i < 16; i++) { 4371 __get_user(env->regs[i], &sc->regs.gprs[i]); 4372 } 4373 4374 __get_user(env->psw.mask, &sc->regs.psw.mask); 4375 qemu_log("%s: sc->regs.psw.addr 0x%llx env->psw.addr 0x%llx\n", 4376 __FUNCTION__, (unsigned long long)sc->regs.psw.addr, 4377 (unsigned long long)env->psw.addr); 4378 __get_user(env->psw.addr, &sc->regs.psw.addr); 4379 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4380 4381 for (i = 0; i < 16; i++) { 4382 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4383 } 4384 for (i = 0; i < 16; i++) { 4385 __get_user(env->fregs[i].ll, &sc->fpregs.fprs[i]); 4386 } 4387 4388 return err; 4389 } 4390 4391 long do_sigreturn(CPUS390XState *env) 4392 { 4393 sigframe *frame; 4394 abi_ulong frame_addr = env->regs[15]; 4395 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, 4396 (unsigned long long)frame_addr); 4397 target_sigset_t target_set; 4398 sigset_t set; 4399 4400 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4401 goto badframe; 4402 } 4403 if (__get_user(target_set.sig[0], &frame->sc.oldmask[0])) { 4404 goto badframe; 4405 } 4406 4407 target_to_host_sigset_internal(&set, &target_set); 4408 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ 4409 4410 if (restore_sigregs(env, &frame->sregs)) { 4411 goto badframe; 4412 } 4413 4414 unlock_user_struct(frame, frame_addr, 0); 4415 return env->regs[2]; 4416 4417 badframe: 4418 unlock_user_struct(frame, frame_addr, 0); 4419 force_sig(TARGET_SIGSEGV); 4420 return 0; 4421 } 4422 4423 long do_rt_sigreturn(CPUS390XState *env) 4424 { 4425 rt_sigframe *frame; 4426 abi_ulong frame_addr = env->regs[15]; 4427 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, 4428 (unsigned long long)frame_addr); 4429 sigset_t set; 4430 4431 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4432 goto badframe; 4433 } 4434 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4435 4436 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ 4437 4438 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4439 goto badframe; 4440 } 4441 4442 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4443 get_sp_from_cpustate(env)) == -EFAULT) { 4444 goto badframe; 4445 } 4446 unlock_user_struct(frame, frame_addr, 0); 4447 return env->regs[2]; 4448 4449 badframe: 4450 unlock_user_struct(frame, frame_addr, 0); 4451 force_sig(TARGET_SIGSEGV); 4452 return 0; 4453 } 4454 4455 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) 4456 4457 /* FIXME: Many of the structures are defined for both PPC and PPC64, but 4458 the signal handling is different enough that we haven't implemented 4459 support for PPC64 yet. Hence the restriction above. 4460 4461 There are various #if'd blocks for code for TARGET_PPC64. 
These 4462 blocks should go away so that we can successfully run 32-bit and 4463 64-bit binaries on a QEMU configured for PPC64. */ 4464 4465 /* Size of dummy stack frame allocated when calling signal handler. 4466 See arch/powerpc/include/asm/ptrace.h. */ 4467 #if defined(TARGET_PPC64) 4468 #define SIGNAL_FRAMESIZE 128 4469 #else 4470 #define SIGNAL_FRAMESIZE 64 4471 #endif 4472 4473 /* See arch/powerpc/include/asm/sigcontext.h. */ 4474 struct target_sigcontext { 4475 target_ulong _unused[4]; 4476 int32_t signal; 4477 #if defined(TARGET_PPC64) 4478 int32_t pad0; 4479 #endif 4480 target_ulong handler; 4481 target_ulong oldmask; 4482 target_ulong regs; /* struct pt_regs __user * */ 4483 /* TODO: PPC64 includes extra bits here. */ 4484 }; 4485 4486 /* Indices for target_mcontext.mc_gregs, below. 4487 See arch/powerpc/include/asm/ptrace.h for details. */ 4488 enum { 4489 TARGET_PT_R0 = 0, 4490 TARGET_PT_R1 = 1, 4491 TARGET_PT_R2 = 2, 4492 TARGET_PT_R3 = 3, 4493 TARGET_PT_R4 = 4, 4494 TARGET_PT_R5 = 5, 4495 TARGET_PT_R6 = 6, 4496 TARGET_PT_R7 = 7, 4497 TARGET_PT_R8 = 8, 4498 TARGET_PT_R9 = 9, 4499 TARGET_PT_R10 = 10, 4500 TARGET_PT_R11 = 11, 4501 TARGET_PT_R12 = 12, 4502 TARGET_PT_R13 = 13, 4503 TARGET_PT_R14 = 14, 4504 TARGET_PT_R15 = 15, 4505 TARGET_PT_R16 = 16, 4506 TARGET_PT_R17 = 17, 4507 TARGET_PT_R18 = 18, 4508 TARGET_PT_R19 = 19, 4509 TARGET_PT_R20 = 20, 4510 TARGET_PT_R21 = 21, 4511 TARGET_PT_R22 = 22, 4512 TARGET_PT_R23 = 23, 4513 TARGET_PT_R24 = 24, 4514 TARGET_PT_R25 = 25, 4515 TARGET_PT_R26 = 26, 4516 TARGET_PT_R27 = 27, 4517 TARGET_PT_R28 = 28, 4518 TARGET_PT_R29 = 29, 4519 TARGET_PT_R30 = 30, 4520 TARGET_PT_R31 = 31, 4521 TARGET_PT_NIP = 32, 4522 TARGET_PT_MSR = 33, 4523 TARGET_PT_ORIG_R3 = 34, 4524 TARGET_PT_CTR = 35, 4525 TARGET_PT_LNK = 36, 4526 TARGET_PT_XER = 37, 4527 TARGET_PT_CCR = 38, 4528 /* Yes, there are two registers with #39. One is 64-bit only. */ 4529 TARGET_PT_MQ = 39, 4530 TARGET_PT_SOFTE = 39, 4531 TARGET_PT_TRAP = 40, 4532 TARGET_PT_DAR = 41, 4533 TARGET_PT_DSISR = 42, 4534 TARGET_PT_RESULT = 43, 4535 TARGET_PT_REGS_COUNT = 44 4536 }; 4537 4538 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4539 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4540 struct target_mcontext { 4541 target_ulong mc_gregs[48]; 4542 /* Includes fpscr. */ 4543 uint64_t mc_fregs[33]; 4544 target_ulong mc_pad[2]; 4545 /* We need to handle Altivec and SPE at the same time, which no 4546 kernel needs to do. Fortunately, the kernel defines this bit to 4547 be Altivec-register-large all the time, rather than trying to 4548 twiddle it based on the specific platform. */ 4549 union { 4550 /* SPE vector registers. One extra for SPEFSCR. */ 4551 uint32_t spe[33]; 4552 /* Altivec vector registers. The packing of VSCR and VRSAVE 4553 varies depending on whether we're PPC64 or not: PPC64 splits 4554 them apart; PPC32 stuffs them together. 
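       In this code QEMU_NVRREG below is 34 on PPC64 and 33 on PPC32;
       save_user_regs() and restore_user_regs() keep VRSAVE in
       altivec[32].u32[3].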
*/ 4555 #if defined(TARGET_PPC64) 4556 #define QEMU_NVRREG 34 4557 #else 4558 #define QEMU_NVRREG 33 4559 #endif 4560 ppc_avr_t altivec[QEMU_NVRREG]; 4561 #undef QEMU_NVRREG 4562 } mc_vregs __attribute__((__aligned__(16))); 4563 }; 4564 4565 struct target_ucontext { 4566 target_ulong tuc_flags; 4567 target_ulong tuc_link; /* struct ucontext __user * */ 4568 struct target_sigaltstack tuc_stack; 4569 #if !defined(TARGET_PPC64) 4570 int32_t tuc_pad[7]; 4571 target_ulong tuc_regs; /* struct mcontext __user * 4572 points to uc_mcontext field */ 4573 #endif 4574 target_sigset_t tuc_sigmask; 4575 #if defined(TARGET_PPC64) 4576 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4577 struct target_sigcontext tuc_mcontext; 4578 #else 4579 int32_t tuc_maskext[30]; 4580 int32_t tuc_pad2[3]; 4581 struct target_mcontext tuc_mcontext; 4582 #endif 4583 }; 4584 4585 /* See arch/powerpc/kernel/signal_32.c. */ 4586 struct target_sigframe { 4587 struct target_sigcontext sctx; 4588 struct target_mcontext mctx; 4589 int32_t abigap[56]; 4590 }; 4591 4592 struct target_rt_sigframe { 4593 struct target_siginfo info; 4594 struct target_ucontext uc; 4595 int32_t abigap[56]; 4596 }; 4597 4598 /* We use the mc_pad field for the signal return trampoline. */ 4599 #define tramp mc_pad 4600 4601 /* See arch/powerpc/kernel/signal.c. */ 4602 static target_ulong get_sigframe(struct target_sigaction *ka, 4603 CPUPPCState *env, 4604 int frame_size) 4605 { 4606 target_ulong oldsp, newsp; 4607 4608 oldsp = env->gpr[1]; 4609 4610 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4611 (sas_ss_flags(oldsp) == 0)) { 4612 oldsp = (target_sigaltstack_used.ss_sp 4613 + target_sigaltstack_used.ss_size); 4614 } 4615 4616 newsp = (oldsp - frame_size) & ~0xFUL; 4617 4618 return newsp; 4619 } 4620 4621 static int save_user_regs(CPUPPCState *env, struct target_mcontext *frame, 4622 int sigret) 4623 { 4624 target_ulong msr = env->msr; 4625 int i; 4626 target_ulong ccr = 0; 4627 4628 /* In general, the kernel attempts to be intelligent about what it 4629 needs to save for Altivec/FP/SPE registers. We don't care that 4630 much, so we just go ahead and save everything. */ 4631 4632 /* Save general registers. */ 4633 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4634 if (__put_user(env->gpr[i], &frame->mc_gregs[i])) { 4635 return 1; 4636 } 4637 } 4638 if (__put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]) 4639 || __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]) 4640 || __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]) 4641 || __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER])) 4642 return 1; 4643 4644 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4645 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4646 } 4647 if (__put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR])) 4648 return 1; 4649 4650 /* Save Altivec registers if necessary. */ 4651 if (env->insns_flags & PPC_ALTIVEC) { 4652 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4653 ppc_avr_t *avr = &env->avr[i]; 4654 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4655 4656 if (__put_user(avr->u64[0], &vreg->u64[0]) || 4657 __put_user(avr->u64[1], &vreg->u64[1])) { 4658 return 1; 4659 } 4660 } 4661 /* Set MSR_VR in the saved MSR value to indicate that 4662 frame->mc_vregs contains valid data. */ 4663 msr |= MSR_VR; 4664 if (__put_user((uint32_t)env->spr[SPR_VRSAVE], 4665 &frame->mc_vregs.altivec[32].u32[3])) 4666 return 1; 4667 } 4668 4669 /* Save floating point registers. 
*/ 4670 if (env->insns_flags & PPC_FLOAT) { 4671 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4672 if (__put_user(env->fpr[i], &frame->mc_fregs[i])) { 4673 return 1; 4674 } 4675 } 4676 if (__put_user((uint64_t) env->fpscr, &frame->mc_fregs[32])) 4677 return 1; 4678 } 4679 4680 /* Save SPE registers. The kernel only saves the high half. */ 4681 if (env->insns_flags & PPC_SPE) { 4682 #if defined(TARGET_PPC64) 4683 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4684 if (__put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i])) { 4685 return 1; 4686 } 4687 } 4688 #else 4689 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4690 if (__put_user(env->gprh[i], &frame->mc_vregs.spe[i])) { 4691 return 1; 4692 } 4693 } 4694 #endif 4695 /* Set MSR_SPE in the saved MSR value to indicate that 4696 frame->mc_vregs contains valid data. */ 4697 msr |= MSR_SPE; 4698 if (__put_user(env->spe_fscr, &frame->mc_vregs.spe[32])) 4699 return 1; 4700 } 4701 4702 /* Store MSR. */ 4703 if (__put_user(msr, &frame->mc_gregs[TARGET_PT_MSR])) 4704 return 1; 4705 4706 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4707 if (sigret) { 4708 if (__put_user(0x38000000UL | sigret, &frame->tramp[0]) || 4709 __put_user(0x44000002UL, &frame->tramp[1])) { 4710 return 1; 4711 } 4712 } 4713 4714 return 0; 4715 } 4716 4717 static int restore_user_regs(CPUPPCState *env, 4718 struct target_mcontext *frame, int sig) 4719 { 4720 target_ulong save_r2 = 0; 4721 target_ulong msr; 4722 target_ulong ccr; 4723 4724 int i; 4725 4726 if (!sig) { 4727 save_r2 = env->gpr[2]; 4728 } 4729 4730 /* Restore general registers. */ 4731 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4732 if (__get_user(env->gpr[i], &frame->mc_gregs[i])) { 4733 return 1; 4734 } 4735 } 4736 if (__get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]) 4737 || __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]) 4738 || __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]) 4739 || __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER])) 4740 return 1; 4741 if (__get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR])) 4742 return 1; 4743 4744 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4745 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 4746 } 4747 4748 if (!sig) { 4749 env->gpr[2] = save_r2; 4750 } 4751 /* Restore MSR. */ 4752 if (__get_user(msr, &frame->mc_gregs[TARGET_PT_MSR])) 4753 return 1; 4754 4755 /* If doing signal return, restore the previous little-endian mode. */ 4756 if (sig) 4757 env->msr = (env->msr & ~MSR_LE) | (msr & MSR_LE); 4758 4759 /* Restore Altivec registers if necessary. */ 4760 if (env->insns_flags & PPC_ALTIVEC) { 4761 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4762 ppc_avr_t *avr = &env->avr[i]; 4763 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; 4764 4765 if (__get_user(avr->u64[0], &vreg->u64[0]) || 4766 __get_user(avr->u64[1], &vreg->u64[1])) { 4767 return 1; 4768 } 4769 } 4770 /* Set MSR_VEC in the saved MSR value to indicate that 4771 frame->mc_vregs contains valid data. */ 4772 if (__get_user(env->spr[SPR_VRSAVE], 4773 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]))) 4774 return 1; 4775 } 4776 4777 /* Restore floating point registers. */ 4778 if (env->insns_flags & PPC_FLOAT) { 4779 uint64_t fpscr; 4780 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4781 if (__get_user(env->fpr[i], &frame->mc_fregs[i])) { 4782 return 1; 4783 } 4784 } 4785 if (__get_user(fpscr, &frame->mc_fregs[32])) 4786 return 1; 4787 env->fpscr = (uint32_t) fpscr; 4788 } 4789 4790 /* Save SPE registers. The kernel only saves the high half. 
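       (Despite the wording, this is the restore path: the upper 32 bits of
       each GPR come back from mc_vregs.spe, while the low halves were
       already restored with the general registers above.)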
*/ 4791 if (env->insns_flags & PPC_SPE) { 4792 #if defined(TARGET_PPC64) 4793 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4794 uint32_t hi; 4795 4796 if (__get_user(hi, &frame->mc_vregs.spe[i])) { 4797 return 1; 4798 } 4799 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 4800 } 4801 #else 4802 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4803 if (__get_user(env->gprh[i], &frame->mc_vregs.spe[i])) { 4804 return 1; 4805 } 4806 } 4807 #endif 4808 if (__get_user(env->spe_fscr, &frame->mc_vregs.spe[32])) 4809 return 1; 4810 } 4811 4812 return 0; 4813 } 4814 4815 static void setup_frame(int sig, struct target_sigaction *ka, 4816 target_sigset_t *set, CPUPPCState *env) 4817 { 4818 struct target_sigframe *frame; 4819 struct target_sigcontext *sc; 4820 target_ulong frame_addr, newsp; 4821 int err = 0; 4822 int signal; 4823 4824 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4825 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4826 goto sigsegv; 4827 sc = &frame->sctx; 4828 4829 signal = current_exec_domain_sig(sig); 4830 4831 __put_user(ka->_sa_handler, &sc->handler); 4832 __put_user(set->sig[0], &sc->oldmask); 4833 #if defined(TARGET_PPC64) 4834 __put_user(set->sig[0] >> 32, &sc->_unused[3]); 4835 #else 4836 __put_user(set->sig[1], &sc->_unused[3]); 4837 #endif 4838 __put_user(h2g(&frame->mctx), &sc->regs); 4839 __put_user(sig, &sc->signal); 4840 4841 /* Save user regs. */ 4842 err |= save_user_regs(env, &frame->mctx, TARGET_NR_sigreturn); 4843 4844 /* The kernel checks for the presence of a VDSO here. We don't 4845 emulate a vdso, so use a sigreturn system call. */ 4846 env->lr = (target_ulong) h2g(frame->mctx.tramp); 4847 4848 /* Turn off all fp exceptions. */ 4849 env->fpscr = 0; 4850 4851 /* Create a stack frame for the caller of the handler. */ 4852 newsp = frame_addr - SIGNAL_FRAMESIZE; 4853 err |= put_user(env->gpr[1], newsp, target_ulong); 4854 4855 if (err) 4856 goto sigsegv; 4857 4858 /* Set up registers for signal handler. */ 4859 env->gpr[1] = newsp; 4860 env->gpr[3] = signal; 4861 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 4862 env->nip = (target_ulong) ka->_sa_handler; 4863 /* Signal handlers are entered in big-endian mode. 
*/ 4864 env->msr &= ~MSR_LE; 4865 4866 unlock_user_struct(frame, frame_addr, 1); 4867 return; 4868 4869 sigsegv: 4870 unlock_user_struct(frame, frame_addr, 1); 4871 qemu_log("segfaulting from setup_frame\n"); 4872 force_sig(TARGET_SIGSEGV); 4873 } 4874 4875 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4876 target_siginfo_t *info, 4877 target_sigset_t *set, CPUPPCState *env) 4878 { 4879 struct target_rt_sigframe *rt_sf; 4880 struct target_mcontext *frame; 4881 target_ulong rt_sf_addr, newsp = 0; 4882 int i, err = 0; 4883 int signal; 4884 4885 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 4886 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 4887 goto sigsegv; 4888 4889 signal = current_exec_domain_sig(sig); 4890 4891 err |= copy_siginfo_to_user(&rt_sf->info, info); 4892 4893 __put_user(0, &rt_sf->uc.tuc_flags); 4894 __put_user(0, &rt_sf->uc.tuc_link); 4895 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 4896 &rt_sf->uc.tuc_stack.ss_sp); 4897 __put_user(sas_ss_flags(env->gpr[1]), 4898 &rt_sf->uc.tuc_stack.ss_flags); 4899 __put_user(target_sigaltstack_used.ss_size, 4900 &rt_sf->uc.tuc_stack.ss_size); 4901 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 4902 &rt_sf->uc.tuc_regs); 4903 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 4904 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 4905 } 4906 4907 frame = &rt_sf->uc.tuc_mcontext; 4908 err |= save_user_regs(env, frame, TARGET_NR_rt_sigreturn); 4909 4910 /* The kernel checks for the presence of a VDSO here. We don't 4911 emulate a vdso, so use a sigreturn system call. */ 4912 env->lr = (target_ulong) h2g(frame->tramp); 4913 4914 /* Turn off all fp exceptions. */ 4915 env->fpscr = 0; 4916 4917 /* Create a stack frame for the caller of the handler. */ 4918 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 4919 __put_user(env->gpr[1], (target_ulong *)(uintptr_t) newsp); 4920 4921 if (err) 4922 goto sigsegv; 4923 4924 /* Set up registers for signal handler. */ 4925 env->gpr[1] = newsp; 4926 env->gpr[3] = (target_ulong) signal; 4927 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 4928 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 4929 env->gpr[6] = (target_ulong) h2g(rt_sf); 4930 env->nip = (target_ulong) ka->_sa_handler; 4931 /* Signal handlers are entered in big-endian mode. 
*/ 4932 env->msr &= ~MSR_LE; 4933 4934 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4935 return; 4936 4937 sigsegv: 4938 unlock_user_struct(rt_sf, rt_sf_addr, 1); 4939 qemu_log("segfaulting from setup_rt_frame\n"); 4940 force_sig(TARGET_SIGSEGV); 4941 4942 } 4943 4944 long do_sigreturn(CPUPPCState *env) 4945 { 4946 struct target_sigcontext *sc = NULL; 4947 struct target_mcontext *sr = NULL; 4948 target_ulong sr_addr = 0, sc_addr; 4949 sigset_t blocked; 4950 target_sigset_t set; 4951 4952 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 4953 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 4954 goto sigsegv; 4955 4956 #if defined(TARGET_PPC64) 4957 set.sig[0] = sc->oldmask + ((long)(sc->_unused[3]) << 32); 4958 #else 4959 if(__get_user(set.sig[0], &sc->oldmask) || 4960 __get_user(set.sig[1], &sc->_unused[3])) 4961 goto sigsegv; 4962 #endif 4963 target_to_host_sigset_internal(&blocked, &set); 4964 do_sigprocmask(SIG_SETMASK, &blocked, NULL); 4965 4966 if (__get_user(sr_addr, &sc->regs)) 4967 goto sigsegv; 4968 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 4969 goto sigsegv; 4970 if (restore_user_regs(env, sr, 1)) 4971 goto sigsegv; 4972 4973 unlock_user_struct(sr, sr_addr, 1); 4974 unlock_user_struct(sc, sc_addr, 1); 4975 return -TARGET_QEMU_ESIGRETURN; 4976 4977 sigsegv: 4978 unlock_user_struct(sr, sr_addr, 1); 4979 unlock_user_struct(sc, sc_addr, 1); 4980 qemu_log("segfaulting from do_sigreturn\n"); 4981 force_sig(TARGET_SIGSEGV); 4982 return 0; 4983 } 4984 4985 /* See arch/powerpc/kernel/signal_32.c. */ 4986 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 4987 { 4988 struct target_mcontext *mcp; 4989 target_ulong mcp_addr; 4990 sigset_t blocked; 4991 target_sigset_t set; 4992 4993 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 4994 sizeof (set))) 4995 return 1; 4996 4997 #if defined(TARGET_PPC64) 4998 fprintf (stderr, "do_setcontext: not implemented\n"); 4999 return 0; 5000 #else 5001 if (__get_user(mcp_addr, &ucp->tuc_regs)) 5002 return 1; 5003 5004 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 5005 return 1; 5006 5007 target_to_host_sigset_internal(&blocked, &set); 5008 do_sigprocmask(SIG_SETMASK, &blocked, NULL); 5009 if (restore_user_regs(env, mcp, sig)) 5010 goto sigsegv; 5011 5012 unlock_user_struct(mcp, mcp_addr, 1); 5013 return 0; 5014 5015 sigsegv: 5016 unlock_user_struct(mcp, mcp_addr, 1); 5017 return 1; 5018 #endif 5019 } 5020 5021 long do_rt_sigreturn(CPUPPCState *env) 5022 { 5023 struct target_rt_sigframe *rt_sf = NULL; 5024 target_ulong rt_sf_addr; 5025 5026 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5027 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5028 goto sigsegv; 5029 5030 if (do_setcontext(&rt_sf->uc, env, 1)) 5031 goto sigsegv; 5032 5033 do_sigaltstack(rt_sf_addr 5034 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5035 0, env->gpr[1]); 5036 5037 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5038 return -TARGET_QEMU_ESIGRETURN; 5039 5040 sigsegv: 5041 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5042 qemu_log("segfaulting from do_rt_sigreturn\n"); 5043 force_sig(TARGET_SIGSEGV); 5044 return 0; 5045 } 5046 5047 #elif defined(TARGET_M68K) 5048 5049 struct target_sigcontext { 5050 abi_ulong sc_mask; 5051 abi_ulong sc_usp; 5052 abi_ulong sc_d0; 5053 abi_ulong sc_d1; 5054 abi_ulong sc_a0; 5055 abi_ulong sc_a1; 5056 unsigned short sc_sr; 5057 abi_ulong sc_pc; 5058 }; 5059 5060 struct target_sigframe 5061 { 5062 abi_ulong pretcode; 5063 int sig; 5064 int code; 5065 abi_ulong psc; 
5066 char retcode[8]; 5067 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5068 struct target_sigcontext sc; 5069 }; 5070 5071 typedef int target_greg_t; 5072 #define TARGET_NGREG 18 5073 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5074 5075 typedef struct target_fpregset { 5076 int f_fpcntl[3]; 5077 int f_fpregs[8*3]; 5078 } target_fpregset_t; 5079 5080 struct target_mcontext { 5081 int version; 5082 target_gregset_t gregs; 5083 target_fpregset_t fpregs; 5084 }; 5085 5086 #define TARGET_MCONTEXT_VERSION 2 5087 5088 struct target_ucontext { 5089 abi_ulong tuc_flags; 5090 abi_ulong tuc_link; 5091 target_stack_t tuc_stack; 5092 struct target_mcontext tuc_mcontext; 5093 abi_long tuc_filler[80]; 5094 target_sigset_t tuc_sigmask; 5095 }; 5096 5097 struct target_rt_sigframe 5098 { 5099 abi_ulong pretcode; 5100 int sig; 5101 abi_ulong pinfo; 5102 abi_ulong puc; 5103 char retcode[8]; 5104 struct target_siginfo info; 5105 struct target_ucontext uc; 5106 }; 5107 5108 static int 5109 setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5110 abi_ulong mask) 5111 { 5112 int err = 0; 5113 5114 __put_user(mask, &sc->sc_mask); 5115 __put_user(env->aregs[7], &sc->sc_usp); 5116 __put_user(env->dregs[0], &sc->sc_d0); 5117 __put_user(env->dregs[1], &sc->sc_d1); 5118 __put_user(env->aregs[0], &sc->sc_a0); 5119 __put_user(env->aregs[1], &sc->sc_a1); 5120 __put_user(env->sr, &sc->sc_sr); 5121 __put_user(env->pc, &sc->sc_pc); 5122 5123 return err; 5124 } 5125 5126 static int 5127 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc, int *pd0) 5128 { 5129 int err = 0; 5130 int temp; 5131 5132 __get_user(env->aregs[7], &sc->sc_usp); 5133 __get_user(env->dregs[1], &sc->sc_d1); 5134 __get_user(env->aregs[0], &sc->sc_a0); 5135 __get_user(env->aregs[1], &sc->sc_a1); 5136 __get_user(env->pc, &sc->sc_pc); 5137 __get_user(temp, &sc->sc_sr); 5138 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5139 5140 *pd0 = tswapl(sc->sc_d0); 5141 5142 return err; 5143 } 5144 5145 /* 5146 * Determine which stack to use.. 5147 */ 5148 static inline abi_ulong 5149 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5150 size_t frame_size) 5151 { 5152 unsigned long sp; 5153 5154 sp = regs->aregs[7]; 5155 5156 /* This is the X/Open sanctioned signal stack switching. */ 5157 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5158 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5159 } 5160 5161 return ((sp - frame_size) & -8UL); 5162 } 5163 5164 static void setup_frame(int sig, struct target_sigaction *ka, 5165 target_sigset_t *set, CPUM68KState *env) 5166 { 5167 struct target_sigframe *frame; 5168 abi_ulong frame_addr; 5169 abi_ulong retcode_addr; 5170 abi_ulong sc_addr; 5171 int err = 0; 5172 int i; 5173 5174 frame_addr = get_sigframe(ka, env, sizeof *frame); 5175 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 5176 goto give_sigsegv; 5177 5178 __put_user(sig, &frame->sig); 5179 5180 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5181 __put_user(sc_addr, &frame->psc); 5182 5183 err |= setup_sigcontext(&frame->sc, env, set->sig[0]); 5184 if (err) 5185 goto give_sigsegv; 5186 5187 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5188 if (__put_user(set->sig[i], &frame->extramask[i - 1])) 5189 goto give_sigsegv; 5190 } 5191 5192 /* Set up to return from userspace. 
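       The handler returns through a small trampoline written into the
       frame (moveq #__NR_sigreturn,d0; trap #0); pretcode is pointed at
       it below.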
*/ 5193 5194 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5195 __put_user(retcode_addr, &frame->pretcode); 5196 5197 /* moveq #,d0; trap #0 */ 5198 5199 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5200 (long *)(frame->retcode)); 5201 5202 if (err) 5203 goto give_sigsegv; 5204 5205 /* Set up to return from userspace */ 5206 5207 env->aregs[7] = frame_addr; 5208 env->pc = ka->_sa_handler; 5209 5210 unlock_user_struct(frame, frame_addr, 1); 5211 return; 5212 5213 give_sigsegv: 5214 unlock_user_struct(frame, frame_addr, 1); 5215 force_sig(TARGET_SIGSEGV); 5216 } 5217 5218 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5219 CPUM68KState *env) 5220 { 5221 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5222 5223 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5224 __put_user(env->dregs[0], &gregs[0]); 5225 __put_user(env->dregs[1], &gregs[1]); 5226 __put_user(env->dregs[2], &gregs[2]); 5227 __put_user(env->dregs[3], &gregs[3]); 5228 __put_user(env->dregs[4], &gregs[4]); 5229 __put_user(env->dregs[5], &gregs[5]); 5230 __put_user(env->dregs[6], &gregs[6]); 5231 __put_user(env->dregs[7], &gregs[7]); 5232 __put_user(env->aregs[0], &gregs[8]); 5233 __put_user(env->aregs[1], &gregs[9]); 5234 __put_user(env->aregs[2], &gregs[10]); 5235 __put_user(env->aregs[3], &gregs[11]); 5236 __put_user(env->aregs[4], &gregs[12]); 5237 __put_user(env->aregs[5], &gregs[13]); 5238 __put_user(env->aregs[6], &gregs[14]); 5239 __put_user(env->aregs[7], &gregs[15]); 5240 __put_user(env->pc, &gregs[16]); 5241 __put_user(env->sr, &gregs[17]); 5242 5243 return 0; 5244 } 5245 5246 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5247 struct target_ucontext *uc, 5248 int *pd0) 5249 { 5250 int temp; 5251 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5252 5253 __get_user(temp, &uc->tuc_mcontext.version); 5254 if (temp != TARGET_MCONTEXT_VERSION) 5255 goto badframe; 5256 5257 /* restore passed registers */ 5258 __get_user(env->dregs[0], &gregs[0]); 5259 __get_user(env->dregs[1], &gregs[1]); 5260 __get_user(env->dregs[2], &gregs[2]); 5261 __get_user(env->dregs[3], &gregs[3]); 5262 __get_user(env->dregs[4], &gregs[4]); 5263 __get_user(env->dregs[5], &gregs[5]); 5264 __get_user(env->dregs[6], &gregs[6]); 5265 __get_user(env->dregs[7], &gregs[7]); 5266 __get_user(env->aregs[0], &gregs[8]); 5267 __get_user(env->aregs[1], &gregs[9]); 5268 __get_user(env->aregs[2], &gregs[10]); 5269 __get_user(env->aregs[3], &gregs[11]); 5270 __get_user(env->aregs[4], &gregs[12]); 5271 __get_user(env->aregs[5], &gregs[13]); 5272 __get_user(env->aregs[6], &gregs[14]); 5273 __get_user(env->aregs[7], &gregs[15]); 5274 __get_user(env->pc, &gregs[16]); 5275 __get_user(temp, &gregs[17]); 5276 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5277 5278 *pd0 = env->dregs[0]; 5279 return 0; 5280 5281 badframe: 5282 return 1; 5283 } 5284 5285 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5286 target_siginfo_t *info, 5287 target_sigset_t *set, CPUM68KState *env) 5288 { 5289 struct target_rt_sigframe *frame; 5290 abi_ulong frame_addr; 5291 abi_ulong retcode_addr; 5292 abi_ulong info_addr; 5293 abi_ulong uc_addr; 5294 int err = 0; 5295 int i; 5296 5297 frame_addr = get_sigframe(ka, env, sizeof *frame); 5298 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 5299 goto give_sigsegv; 5300 5301 __put_user(sig, &frame->sig); 5302 5303 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5304 __put_user(info_addr, &frame->pinfo); 5305 5306 
uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5307 __put_user(uc_addr, &frame->puc); 5308 5309 err |= copy_siginfo_to_user(&frame->info, info); 5310 5311 /* Create the ucontext */ 5312 5313 __put_user(0, &frame->uc.tuc_flags); 5314 __put_user(0, &frame->uc.tuc_link); 5315 __put_user(target_sigaltstack_used.ss_sp, 5316 &frame->uc.tuc_stack.ss_sp); 5317 __put_user(sas_ss_flags(env->aregs[7]), 5318 &frame->uc.tuc_stack.ss_flags); 5319 __put_user(target_sigaltstack_used.ss_size, 5320 &frame->uc.tuc_stack.ss_size); 5321 err |= target_rt_setup_ucontext(&frame->uc, env); 5322 5323 if (err) 5324 goto give_sigsegv; 5325 5326 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5327 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) 5328 goto give_sigsegv; 5329 } 5330 5331 /* Set up to return from userspace. */ 5332 5333 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5334 __put_user(retcode_addr, &frame->pretcode); 5335 5336 /* moveq #,d0; notb d0; trap #0 */ 5337 5338 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 5339 (long *)(frame->retcode + 0)); 5340 __put_user(0x4e40, (short *)(frame->retcode + 4)); 5341 5342 if (err) 5343 goto give_sigsegv; 5344 5345 /* Set up to return from userspace */ 5346 5347 env->aregs[7] = frame_addr; 5348 env->pc = ka->_sa_handler; 5349 5350 unlock_user_struct(frame, frame_addr, 1); 5351 return; 5352 5353 give_sigsegv: 5354 unlock_user_struct(frame, frame_addr, 1); 5355 force_sig(TARGET_SIGSEGV); 5356 } 5357 5358 long do_sigreturn(CPUM68KState *env) 5359 { 5360 struct target_sigframe *frame; 5361 abi_ulong frame_addr = env->aregs[7] - 4; 5362 target_sigset_t target_set; 5363 sigset_t set; 5364 int d0, i; 5365 5366 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5367 goto badframe; 5368 5369 /* set blocked signals */ 5370 5371 if (__get_user(target_set.sig[0], &frame->sc.sc_mask)) 5372 goto badframe; 5373 5374 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5375 if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) 5376 goto badframe; 5377 } 5378 5379 target_to_host_sigset_internal(&set, &target_set); 5380 do_sigprocmask(SIG_SETMASK, &set, NULL); 5381 5382 /* restore registers */ 5383 5384 if (restore_sigcontext(env, &frame->sc, &d0)) 5385 goto badframe; 5386 5387 unlock_user_struct(frame, frame_addr, 0); 5388 return d0; 5389 5390 badframe: 5391 unlock_user_struct(frame, frame_addr, 0); 5392 force_sig(TARGET_SIGSEGV); 5393 return 0; 5394 } 5395 5396 long do_rt_sigreturn(CPUM68KState *env) 5397 { 5398 struct target_rt_sigframe *frame; 5399 abi_ulong frame_addr = env->aregs[7] - 4; 5400 target_sigset_t target_set; 5401 sigset_t set; 5402 int d0; 5403 5404 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 5405 goto badframe; 5406 5407 target_to_host_sigset_internal(&set, &target_set); 5408 do_sigprocmask(SIG_SETMASK, &set, NULL); 5409 5410 /* restore registers */ 5411 5412 if (target_rt_restore_ucontext(env, &frame->uc, &d0)) 5413 goto badframe; 5414 5415 if (do_sigaltstack(frame_addr + 5416 offsetof(struct target_rt_sigframe, uc.tuc_stack), 5417 0, get_sp_from_cpustate(env)) == -EFAULT) 5418 goto badframe; 5419 5420 unlock_user_struct(frame, frame_addr, 0); 5421 return d0; 5422 5423 badframe: 5424 unlock_user_struct(frame, frame_addr, 0); 5425 force_sig(TARGET_SIGSEGV); 5426 return 0; 5427 } 5428 5429 #elif defined(TARGET_ALPHA) 5430 5431 struct target_sigcontext { 5432 abi_long sc_onstack; 5433 abi_long sc_mask; 5434 abi_long sc_pc; 5435 abi_long sc_ps; 5436 abi_long sc_regs[32]; 5437 abi_long 
sc_ownedfp; 5438 abi_long sc_fpregs[32]; 5439 abi_ulong sc_fpcr; 5440 abi_ulong sc_fp_control; 5441 abi_ulong sc_reserved1; 5442 abi_ulong sc_reserved2; 5443 abi_ulong sc_ssize; 5444 abi_ulong sc_sbase; 5445 abi_ulong sc_traparg_a0; 5446 abi_ulong sc_traparg_a1; 5447 abi_ulong sc_traparg_a2; 5448 abi_ulong sc_fp_trap_pc; 5449 abi_ulong sc_fp_trigger_sum; 5450 abi_ulong sc_fp_trigger_inst; 5451 }; 5452 5453 struct target_ucontext { 5454 abi_ulong tuc_flags; 5455 abi_ulong tuc_link; 5456 abi_ulong tuc_osf_sigmask; 5457 target_stack_t tuc_stack; 5458 struct target_sigcontext tuc_mcontext; 5459 target_sigset_t tuc_sigmask; 5460 }; 5461 5462 struct target_sigframe { 5463 struct target_sigcontext sc; 5464 unsigned int retcode[3]; 5465 }; 5466 5467 struct target_rt_sigframe { 5468 target_siginfo_t info; 5469 struct target_ucontext uc; 5470 unsigned int retcode[3]; 5471 }; 5472 5473 #define INSN_MOV_R30_R16 0x47fe0410 5474 #define INSN_LDI_R0 0x201f0000 5475 #define INSN_CALLSYS 0x00000083 5476 5477 static int setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5478 abi_ulong frame_addr, target_sigset_t *set) 5479 { 5480 int i, err = 0; 5481 5482 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5483 __put_user(set->sig[0], &sc->sc_mask); 5484 __put_user(env->pc, &sc->sc_pc); 5485 __put_user(8, &sc->sc_ps); 5486 5487 for (i = 0; i < 31; ++i) { 5488 __put_user(env->ir[i], &sc->sc_regs[i]); 5489 } 5490 __put_user(0, &sc->sc_regs[31]); 5491 5492 for (i = 0; i < 31; ++i) { 5493 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5494 } 5495 __put_user(0, &sc->sc_fpregs[31]); 5496 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5497 5498 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5499 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5500 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5501 5502 return err; 5503 } 5504 5505 static int restore_sigcontext(CPUAlphaState *env, 5506 struct target_sigcontext *sc) 5507 { 5508 uint64_t fpcr; 5509 int i, err = 0; 5510 5511 __get_user(env->pc, &sc->sc_pc); 5512 5513 for (i = 0; i < 31; ++i) { 5514 __get_user(env->ir[i], &sc->sc_regs[i]); 5515 } 5516 for (i = 0; i < 31; ++i) { 5517 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5518 } 5519 5520 __get_user(fpcr, &sc->sc_fpcr); 5521 cpu_alpha_store_fpcr(env, fpcr); 5522 5523 return err; 5524 } 5525 5526 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5527 CPUAlphaState *env, 5528 unsigned long framesize) 5529 { 5530 abi_ulong sp = env->ir[IR_SP]; 5531 5532 /* This is the X/Open sanctioned signal stack switching. 
*/ 5533 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5534 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5535 } 5536 return (sp - framesize) & -32; 5537 } 5538 5539 static void setup_frame(int sig, struct target_sigaction *ka, 5540 target_sigset_t *set, CPUAlphaState *env) 5541 { 5542 abi_ulong frame_addr, r26; 5543 struct target_sigframe *frame; 5544 int err = 0; 5545 5546 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5547 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5548 goto give_sigsegv; 5549 } 5550 5551 err |= setup_sigcontext(&frame->sc, env, frame_addr, set); 5552 5553 if (ka->sa_restorer) { 5554 r26 = ka->sa_restorer; 5555 } else { 5556 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5557 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5558 &frame->retcode[1]); 5559 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5560 /* imb() */ 5561 r26 = frame_addr; 5562 } 5563 5564 unlock_user_struct(frame, frame_addr, 1); 5565 5566 if (err) { 5567 give_sigsegv: 5568 if (sig == TARGET_SIGSEGV) { 5569 ka->_sa_handler = TARGET_SIG_DFL; 5570 } 5571 force_sig(TARGET_SIGSEGV); 5572 } 5573 5574 env->ir[IR_RA] = r26; 5575 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5576 env->ir[IR_A0] = sig; 5577 env->ir[IR_A1] = 0; 5578 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5579 env->ir[IR_SP] = frame_addr; 5580 } 5581 5582 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5583 target_siginfo_t *info, 5584 target_sigset_t *set, CPUAlphaState *env) 5585 { 5586 abi_ulong frame_addr, r26; 5587 struct target_rt_sigframe *frame; 5588 int i, err = 0; 5589 5590 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5591 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5592 goto give_sigsegv; 5593 } 5594 5595 err |= copy_siginfo_to_user(&frame->info, info); 5596 5597 __put_user(0, &frame->uc.tuc_flags); 5598 __put_user(0, &frame->uc.tuc_link); 5599 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5600 __put_user(target_sigaltstack_used.ss_sp, 5601 &frame->uc.tuc_stack.ss_sp); 5602 __put_user(sas_ss_flags(env->ir[IR_SP]), 5603 &frame->uc.tuc_stack.ss_flags); 5604 __put_user(target_sigaltstack_used.ss_size, 5605 &frame->uc.tuc_stack.ss_size); 5606 err |= setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5607 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5608 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5609 } 5610 5611 if (ka->sa_restorer) { 5612 r26 = ka->sa_restorer; 5613 } else { 5614 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5615 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5616 &frame->retcode[1]); 5617 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5618 /* imb(); */ 5619 r26 = frame_addr; 5620 } 5621 5622 if (err) { 5623 give_sigsegv: 5624 if (sig == TARGET_SIGSEGV) { 5625 ka->_sa_handler = TARGET_SIG_DFL; 5626 } 5627 force_sig(TARGET_SIGSEGV); 5628 } 5629 5630 env->ir[IR_RA] = r26; 5631 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5632 env->ir[IR_A0] = sig; 5633 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5634 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5635 env->ir[IR_SP] = frame_addr; 5636 } 5637 5638 long do_sigreturn(CPUAlphaState *env) 5639 { 5640 struct target_sigcontext *sc; 5641 abi_ulong sc_addr = env->ir[IR_A0]; 5642 target_sigset_t target_set; 5643 sigset_t set; 5644 5645 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5646 goto badframe; 5647 } 5648 5649 target_sigemptyset(&target_set); 5650 if 
(__get_user(target_set.sig[0], &sc->sc_mask)) { 5651 goto badframe; 5652 } 5653 5654 target_to_host_sigset_internal(&set, &target_set); 5655 do_sigprocmask(SIG_SETMASK, &set, NULL); 5656 5657 if (restore_sigcontext(env, sc)) { 5658 goto badframe; 5659 } 5660 unlock_user_struct(sc, sc_addr, 0); 5661 return env->ir[IR_V0]; 5662 5663 badframe: 5664 unlock_user_struct(sc, sc_addr, 0); 5665 force_sig(TARGET_SIGSEGV); 5666 } 5667 5668 long do_rt_sigreturn(CPUAlphaState *env) 5669 { 5670 abi_ulong frame_addr = env->ir[IR_A0]; 5671 struct target_rt_sigframe *frame; 5672 sigset_t set; 5673 5674 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5675 goto badframe; 5676 } 5677 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5678 do_sigprocmask(SIG_SETMASK, &set, NULL); 5679 5680 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 5681 goto badframe; 5682 } 5683 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5684 uc.tuc_stack), 5685 0, env->ir[IR_SP]) == -EFAULT) { 5686 goto badframe; 5687 } 5688 5689 unlock_user_struct(frame, frame_addr, 0); 5690 return env->ir[IR_V0]; 5691 5692 5693 badframe: 5694 unlock_user_struct(frame, frame_addr, 0); 5695 force_sig(TARGET_SIGSEGV); 5696 } 5697 5698 #else 5699 5700 static void setup_frame(int sig, struct target_sigaction *ka, 5701 target_sigset_t *set, CPUArchState *env) 5702 { 5703 fprintf(stderr, "setup_frame: not implemented\n"); 5704 } 5705 5706 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5707 target_siginfo_t *info, 5708 target_sigset_t *set, CPUArchState *env) 5709 { 5710 fprintf(stderr, "setup_rt_frame: not implemented\n"); 5711 } 5712 5713 long do_sigreturn(CPUArchState *env) 5714 { 5715 fprintf(stderr, "do_sigreturn: not implemented\n"); 5716 return -TARGET_ENOSYS; 5717 } 5718 5719 long do_rt_sigreturn(CPUArchState *env) 5720 { 5721 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 5722 return -TARGET_ENOSYS; 5723 } 5724 5725 #endif 5726 5727 void process_pending_signals(CPUArchState *cpu_env) 5728 { 5729 CPUState *cpu = ENV_GET_CPU(cpu_env); 5730 int sig; 5731 abi_ulong handler; 5732 sigset_t set, old_set; 5733 target_sigset_t target_old_set; 5734 struct emulated_sigtable *k; 5735 struct target_sigaction *sa; 5736 struct sigqueue *q; 5737 TaskState *ts = cpu->opaque; 5738 5739 if (!ts->signal_pending) 5740 return; 5741 5742 /* FIXME: This is not threadsafe. */ 5743 k = ts->sigtab; 5744 for(sig = 1; sig <= TARGET_NSIG; sig++) { 5745 if (k->pending) 5746 goto handle_signal; 5747 k++; 5748 } 5749 /* if no signal is pending, just return */ 5750 ts->signal_pending = 0; 5751 return; 5752 5753 handle_signal: 5754 #ifdef DEBUG_SIGNAL 5755 fprintf(stderr, "qemu: process signal %d\n", sig); 5756 #endif 5757 /* dequeue signal */ 5758 q = k->first; 5759 k->first = q->next; 5760 if (!k->first) 5761 k->pending = 0; 5762 5763 sig = gdb_handlesig(cpu, sig); 5764 if (!sig) { 5765 sa = NULL; 5766 handler = TARGET_SIG_IGN; 5767 } else { 5768 sa = &sigact_table[sig - 1]; 5769 handler = sa->_sa_handler; 5770 } 5771 5772 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) { 5773 /* Guest has blocked SIGSEGV but we got one anyway. Assume this 5774 * is a forced SIGSEGV (ie one the kernel handles via force_sig_info 5775 * because it got a real MMU fault), and treat as if default handler. 5776 */ 5777 handler = TARGET_SIG_DFL; 5778 } 5779 5780 if (handler == TARGET_SIG_DFL) { 5781 /* default handler : ignore some signal. 
   The others are job control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            force_sig(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore signal */
    } else if (handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        /* compute the signals that are blocked while the handler runs */
        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* block the signals via the host's sigprocmask */
        do_sigprocmask(SIG_BLOCK, &set, &old_set);
        /* save the previously blocked signal state so it can be restored
           at the end of the handler (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &old_set);

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND)
            sa->_sa_handler = TARGET_SIG_DFL;
    }
    if (q != &k->info)
        free_sigqueue(cpu_env, q);
}
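
/*
 * Illustrative sketch only: a hypothetical guest test program (not part of
 * QEMU) that would exercise the machinery above through ordinary
 * sigaction()/kill() calls:
 *
 *     #include <signal.h>
 *     #include <unistd.h>
 *
 *     static void handler(int sig) { _exit(42); }
 *
 *     int main(void)
 *     {
 *         struct sigaction sa = { .sa_handler = handler };
 *         sigaction(SIGUSR1, &sa, NULL);   // recorded in sigact_table[]
 *         kill(getpid(), SIGUSR1);         // queued, then delivered by
 *                                          // process_pending_signals()
 *         return 0;
 *     }
 *
 * setup_frame()/setup_rt_frame() build the target-ABI stack frame before the
 * handler runs, and do_sigreturn()/do_rt_sigreturn() unwind it when the
 * handler returns through the sigreturn trampoline.
 */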