1 /* 2 * Emulation of Linux signals 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 #include <stdlib.h> 21 #include <stdio.h> 22 #include <string.h> 23 #include <stdarg.h> 24 #include <unistd.h> 25 #include <signal.h> 26 #include <errno.h> 27 #include <sys/ucontext.h> 28 29 #include "qemu.h" 30 #include "target_signal.h" 31 32 //#define DEBUG_SIGNAL 33 34 #define MAX_SIGQUEUE_SIZE 1024 35 36 struct sigqueue { 37 struct sigqueue *next; 38 target_siginfo_t info; 39 }; 40 41 struct emulated_sigaction { 42 struct target_sigaction sa; 43 int pending; /* true if signal is pending */ 44 struct sigqueue *first; 45 struct sigqueue info; /* in order to always have memory for the 46 first signal, we put it here */ 47 }; 48 49 struct target_sigaltstack target_sigaltstack_used = { 50 .ss_sp = 0, 51 .ss_size = 0, 52 .ss_flags = TARGET_SS_DISABLE, 53 }; 54 55 static struct emulated_sigaction sigact_table[TARGET_NSIG]; 56 static struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */ 57 static struct sigqueue *first_free; /* first free siginfo queue entry */ 58 static int signal_pending; /* non zero if a signal may be pending */ 59 60 static void host_signal_handler(int host_signum, siginfo_t *info, 61 void *puc); 62 63 static uint8_t host_to_target_signal_table[65] = { 64 [SIGHUP] = TARGET_SIGHUP, 65 [SIGINT] = TARGET_SIGINT, 66 [SIGQUIT] = TARGET_SIGQUIT, 67 [SIGILL] = TARGET_SIGILL, 68 [SIGTRAP] = TARGET_SIGTRAP, 69 [SIGABRT] = TARGET_SIGABRT, 70 /* [SIGIOT] = TARGET_SIGIOT,*/ 71 [SIGBUS] = TARGET_SIGBUS, 72 [SIGFPE] = TARGET_SIGFPE, 73 [SIGKILL] = TARGET_SIGKILL, 74 [SIGUSR1] = TARGET_SIGUSR1, 75 [SIGSEGV] = TARGET_SIGSEGV, 76 [SIGUSR2] = TARGET_SIGUSR2, 77 [SIGPIPE] = TARGET_SIGPIPE, 78 [SIGALRM] = TARGET_SIGALRM, 79 [SIGTERM] = TARGET_SIGTERM, 80 #ifdef SIGSTKFLT 81 [SIGSTKFLT] = TARGET_SIGSTKFLT, 82 #endif 83 [SIGCHLD] = TARGET_SIGCHLD, 84 [SIGCONT] = TARGET_SIGCONT, 85 [SIGSTOP] = TARGET_SIGSTOP, 86 [SIGTSTP] = TARGET_SIGTSTP, 87 [SIGTTIN] = TARGET_SIGTTIN, 88 [SIGTTOU] = TARGET_SIGTTOU, 89 [SIGURG] = TARGET_SIGURG, 90 [SIGXCPU] = TARGET_SIGXCPU, 91 [SIGXFSZ] = TARGET_SIGXFSZ, 92 [SIGVTALRM] = TARGET_SIGVTALRM, 93 [SIGPROF] = TARGET_SIGPROF, 94 [SIGWINCH] = TARGET_SIGWINCH, 95 [SIGIO] = TARGET_SIGIO, 96 [SIGPWR] = TARGET_SIGPWR, 97 [SIGSYS] = TARGET_SIGSYS, 98 /* next signals stay the same */ 99 }; 100 static uint8_t target_to_host_signal_table[65]; 101 102 static inline int on_sig_stack(unsigned long sp) 103 { 104 return (sp - target_sigaltstack_used.ss_sp 105 < target_sigaltstack_used.ss_size); 106 } 107 108 static inline int sas_ss_flags(unsigned long sp) 109 { 110 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE 111 : on_sig_stack(sp) ? 
SS_ONSTACK : 0); 112 } 113 114 static inline int host_to_target_signal(int sig) 115 { 116 return host_to_target_signal_table[sig]; 117 } 118 119 static inline int target_to_host_signal(int sig) 120 { 121 return target_to_host_signal_table[sig]; 122 } 123 124 static void host_to_target_sigset_internal(target_sigset_t *d, 125 const sigset_t *s) 126 { 127 int i; 128 unsigned long sigmask; 129 uint32_t target_sigmask; 130 131 sigmask = ((unsigned long *)s)[0]; 132 target_sigmask = 0; 133 for(i = 0; i < 32; i++) { 134 if (sigmask & (1 << i)) 135 target_sigmask |= 1 << (host_to_target_signal(i + 1) - 1); 136 } 137 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 32 138 d->sig[0] = target_sigmask; 139 for(i = 1;i < TARGET_NSIG_WORDS; i++) { 140 d->sig[i] = ((unsigned long *)s)[i]; 141 } 142 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 && TARGET_NSIG_WORDS == 2 143 d->sig[0] = target_sigmask; 144 d->sig[1] = sigmask >> 32; 145 #else 146 /* XXX: do it */ 147 #endif 148 } 149 150 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) 151 { 152 target_sigset_t d1; 153 int i; 154 155 host_to_target_sigset_internal(&d1, s); 156 for(i = 0;i < TARGET_NSIG_WORDS; i++) 157 d->sig[i] = tswapl(d1.sig[i]); 158 } 159 160 void target_to_host_sigset_internal(sigset_t *d, const target_sigset_t *s) 161 { 162 int i; 163 unsigned long sigmask; 164 abi_ulong target_sigmask; 165 166 target_sigmask = s->sig[0]; 167 sigmask = 0; 168 for(i = 0; i < 32; i++) { 169 if (target_sigmask & (1 << i)) 170 sigmask |= 1 << (target_to_host_signal(i + 1) - 1); 171 } 172 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 32 173 ((unsigned long *)d)[0] = sigmask; 174 for(i = 1;i < TARGET_NSIG_WORDS; i++) { 175 ((unsigned long *)d)[i] = s->sig[i]; 176 } 177 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 && TARGET_NSIG_WORDS == 2 178 ((unsigned long *)d)[0] = sigmask | ((unsigned long)(s->sig[1]) << 32); 179 #else 180 /* XXX: do it */ 181 #endif /* TARGET_ABI_BITS */ 182 } 183 184 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) 185 { 186 target_sigset_t s1; 187 int i; 188 189 for(i = 0;i < TARGET_NSIG_WORDS; i++) 190 s1.sig[i] = tswapl(s->sig[i]); 191 target_to_host_sigset_internal(d, &s1); 192 } 193 194 void host_to_target_old_sigset(abi_ulong *old_sigset, 195 const sigset_t *sigset) 196 { 197 target_sigset_t d; 198 host_to_target_sigset(&d, sigset); 199 *old_sigset = d.sig[0]; 200 } 201 202 void target_to_host_old_sigset(sigset_t *sigset, 203 const abi_ulong *old_sigset) 204 { 205 target_sigset_t d; 206 int i; 207 208 d.sig[0] = *old_sigset; 209 for(i = 1;i < TARGET_NSIG_WORDS; i++) 210 d.sig[i] = 0; 211 target_to_host_sigset(sigset, &d); 212 } 213 214 /* siginfo conversion */ 215 216 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, 217 const siginfo_t *info) 218 { 219 int sig; 220 sig = host_to_target_signal(info->si_signo); 221 tinfo->si_signo = sig; 222 tinfo->si_errno = 0; 223 tinfo->si_code = 0; 224 if (sig == SIGILL || sig == SIGFPE || sig == SIGSEGV || 225 sig == SIGBUS || sig == SIGTRAP) { 226 /* should never come here, but who knows. 
The information for 227 the target is irrelevant */ 228 tinfo->_sifields._sigfault._addr = 0; 229 } else if (sig == SIGIO) { 230 tinfo->_sifields._sigpoll._fd = info->si_fd; 231 } else if (sig >= TARGET_SIGRTMIN) { 232 tinfo->_sifields._rt._pid = info->si_pid; 233 tinfo->_sifields._rt._uid = info->si_uid; 234 /* XXX: potential problem if 64 bit */ 235 tinfo->_sifields._rt._sigval.sival_ptr = 236 (abi_ulong)(unsigned long)info->si_value.sival_ptr; 237 } 238 } 239 240 static void tswap_siginfo(target_siginfo_t *tinfo, 241 const target_siginfo_t *info) 242 { 243 int sig; 244 sig = info->si_signo; 245 tinfo->si_signo = tswap32(sig); 246 tinfo->si_errno = tswap32(info->si_errno); 247 tinfo->si_code = tswap32(info->si_code); 248 if (sig == SIGILL || sig == SIGFPE || sig == SIGSEGV || 249 sig == SIGBUS || sig == SIGTRAP) { 250 tinfo->_sifields._sigfault._addr = 251 tswapl(info->_sifields._sigfault._addr); 252 } else if (sig == SIGIO) { 253 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd); 254 } else if (sig >= TARGET_SIGRTMIN) { 255 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid); 256 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid); 257 tinfo->_sifields._rt._sigval.sival_ptr = 258 tswapl(info->_sifields._rt._sigval.sival_ptr); 259 } 260 } 261 262 263 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) 264 { 265 host_to_target_siginfo_noswap(tinfo, info); 266 tswap_siginfo(tinfo, tinfo); 267 } 268 269 /* XXX: we support only POSIX RT signals are used. */ 270 /* XXX: find a solution for 64 bit (additional malloced data is needed) */ 271 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo) 272 { 273 info->si_signo = tswap32(tinfo->si_signo); 274 info->si_errno = tswap32(tinfo->si_errno); 275 info->si_code = tswap32(tinfo->si_code); 276 info->si_pid = tswap32(tinfo->_sifields._rt._pid); 277 info->si_uid = tswap32(tinfo->_sifields._rt._uid); 278 info->si_value.sival_ptr = 279 (void *)(long)tswapl(tinfo->_sifields._rt._sigval.sival_ptr); 280 } 281 282 void signal_init(void) 283 { 284 struct sigaction act; 285 int i, j; 286 287 /* generate signal conversion tables */ 288 for(i = 1; i <= 64; i++) { 289 if (host_to_target_signal_table[i] == 0) 290 host_to_target_signal_table[i] = i; 291 } 292 for(i = 1; i <= 64; i++) { 293 j = host_to_target_signal_table[i]; 294 target_to_host_signal_table[j] = i; 295 } 296 297 /* set all host signal handlers. ALL signals are blocked during 298 the handlers to serialize them. 
*/ 299 sigfillset(&act.sa_mask); 300 act.sa_flags = SA_SIGINFO; 301 act.sa_sigaction = host_signal_handler; 302 for(i = 1; i < NSIG; i++) { 303 sigaction(i, &act, NULL); 304 } 305 306 memset(sigact_table, 0, sizeof(sigact_table)); 307 308 first_free = &sigqueue_table[0]; 309 for(i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) 310 sigqueue_table[i].next = &sigqueue_table[i + 1]; 311 sigqueue_table[MAX_SIGQUEUE_SIZE - 1].next = NULL; 312 } 313 314 /* signal queue handling */ 315 316 static inline struct sigqueue *alloc_sigqueue(void) 317 { 318 struct sigqueue *q = first_free; 319 if (!q) 320 return NULL; 321 first_free = q->next; 322 return q; 323 } 324 325 static inline void free_sigqueue(struct sigqueue *q) 326 { 327 q->next = first_free; 328 first_free = q; 329 } 330 331 /* abort execution with signal */ 332 void __attribute((noreturn)) force_sig(int sig) 333 { 334 int host_sig; 335 host_sig = target_to_host_signal(sig); 336 fprintf(stderr, "qemu: uncaught target signal %d (%s) - exiting\n", 337 sig, strsignal(host_sig)); 338 #if 1 339 _exit(-host_sig); 340 #else 341 { 342 struct sigaction act; 343 sigemptyset(&act.sa_mask); 344 act.sa_flags = SA_SIGINFO; 345 act.sa_sigaction = SIG_DFL; 346 sigaction(SIGABRT, &act, NULL); 347 abort(); 348 } 349 #endif 350 } 351 352 /* queue a signal so that it will be send to the virtual CPU as soon 353 as possible */ 354 int queue_signal(int sig, target_siginfo_t *info) 355 { 356 struct emulated_sigaction *k; 357 struct sigqueue *q, **pq; 358 abi_ulong handler; 359 360 #if defined(DEBUG_SIGNAL) 361 fprintf(stderr, "queue_signal: sig=%d\n", 362 sig); 363 #endif 364 k = &sigact_table[sig - 1]; 365 handler = k->sa._sa_handler; 366 if (handler == TARGET_SIG_DFL) { 367 /* default handler : ignore some signal. The other are fatal */ 368 if (sig != TARGET_SIGCHLD && 369 sig != TARGET_SIGURG && 370 sig != TARGET_SIGWINCH) { 371 force_sig(sig); 372 } else { 373 return 0; /* indicate ignored */ 374 } 375 } else if (handler == TARGET_SIG_IGN) { 376 /* ignore signal */ 377 return 0; 378 } else if (handler == TARGET_SIG_ERR) { 379 force_sig(sig); 380 } else { 381 pq = &k->first; 382 if (sig < TARGET_SIGRTMIN) { 383 /* if non real time signal, we queue exactly one signal */ 384 if (!k->pending) 385 q = &k->info; 386 else 387 return 0; 388 } else { 389 if (!k->pending) { 390 /* first signal */ 391 q = &k->info; 392 } else { 393 q = alloc_sigqueue(); 394 if (!q) 395 return -EAGAIN; 396 while (*pq != NULL) 397 pq = &(*pq)->next; 398 } 399 } 400 *pq = q; 401 q->info = *info; 402 q->next = NULL; 403 k->pending = 1; 404 /* signal that a new signal is pending */ 405 signal_pending = 1; 406 return 1; /* indicates that the signal was queued */ 407 } 408 } 409 410 static void host_signal_handler(int host_signum, siginfo_t *info, 411 void *puc) 412 { 413 int sig; 414 target_siginfo_t tinfo; 415 416 /* the CPU emulator uses some host signals to detect exceptions, 417 we we forward to it some signals */ 418 if (host_signum == SIGSEGV || host_signum == SIGBUS) { 419 if (cpu_signal_handler(host_signum, info, puc)) 420 return; 421 } 422 423 /* get target signal number */ 424 sig = host_to_target_signal(host_signum); 425 if (sig < 1 || sig > TARGET_NSIG) 426 return; 427 #if defined(DEBUG_SIGNAL) 428 fprintf(stderr, "qemu: got signal %d\n", sig); 429 #endif 430 host_to_target_siginfo_noswap(&tinfo, info); 431 if (queue_signal(sig, &tinfo) == 1) { 432 /* interrupt the virtual CPU as soon as possible */ 433 cpu_interrupt(global_env, CPU_INTERRUPT_EXIT); 434 } 435 } 436 437 /* do_sigaltstack() 
returns target values and errnos. */ 438 /* compare linux/kernel/signal.c:do_sigaltstack() */ 439 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) 440 { 441 int ret; 442 struct target_sigaltstack oss; 443 444 /* XXX: test errors */ 445 if(uoss_addr) 446 { 447 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp); 448 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size); 449 __put_user(sas_ss_flags(sp), &oss.ss_flags); 450 } 451 452 if(uss_addr) 453 { 454 struct target_sigaltstack *uss; 455 struct target_sigaltstack ss; 456 457 ret = -TARGET_EFAULT; 458 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1) 459 || __get_user(ss.ss_sp, &uss->ss_sp) 460 || __get_user(ss.ss_size, &uss->ss_size) 461 || __get_user(ss.ss_flags, &uss->ss_flags)) 462 goto out; 463 unlock_user_struct(uss, uss_addr, 0); 464 465 ret = -TARGET_EPERM; 466 if (on_sig_stack(sp)) 467 goto out; 468 469 ret = -TARGET_EINVAL; 470 if (ss.ss_flags != TARGET_SS_DISABLE 471 && ss.ss_flags != TARGET_SS_ONSTACK 472 && ss.ss_flags != 0) 473 goto out; 474 475 if (ss.ss_flags == TARGET_SS_DISABLE) { 476 ss.ss_size = 0; 477 ss.ss_sp = 0; 478 } else { 479 ret = -TARGET_ENOMEM; 480 if (ss.ss_size < MINSIGSTKSZ) 481 goto out; 482 } 483 484 target_sigaltstack_used.ss_sp = ss.ss_sp; 485 target_sigaltstack_used.ss_size = ss.ss_size; 486 } 487 488 if (uoss_addr) { 489 ret = -TARGET_EFAULT; 490 if (copy_to_user(uoss_addr, &oss, sizeof(oss))) 491 goto out; 492 } 493 494 ret = 0; 495 out: 496 return ret; 497 } 498 499 /* do_sigaction() return host values and errnos */ 500 int do_sigaction(int sig, const struct target_sigaction *act, 501 struct target_sigaction *oact) 502 { 503 struct emulated_sigaction *k; 504 struct sigaction act1; 505 int host_sig; 506 int ret = 0; 507 508 if (sig < 1 || sig > TARGET_NSIG || sig == SIGKILL || sig == SIGSTOP) 509 return -EINVAL; 510 k = &sigact_table[sig - 1]; 511 #if defined(DEBUG_SIGNAL) 512 fprintf(stderr, "sigaction sig=%d act=0x%08x, oact=0x%08x\n", 513 sig, (int)act, (int)oact); 514 #endif 515 if (oact) { 516 oact->_sa_handler = tswapl(k->sa._sa_handler); 517 oact->sa_flags = tswapl(k->sa.sa_flags); 518 #if !defined(TARGET_MIPS) 519 oact->sa_restorer = tswapl(k->sa.sa_restorer); 520 #endif 521 oact->sa_mask = k->sa.sa_mask; 522 } 523 if (act) { 524 k->sa._sa_handler = tswapl(act->_sa_handler); 525 k->sa.sa_flags = tswapl(act->sa_flags); 526 #if !defined(TARGET_MIPS) 527 k->sa.sa_restorer = tswapl(act->sa_restorer); 528 #endif 529 k->sa.sa_mask = act->sa_mask; 530 531 /* we update the host linux signal state */ 532 host_sig = target_to_host_signal(sig); 533 if (host_sig != SIGSEGV && host_sig != SIGBUS) { 534 sigfillset(&act1.sa_mask); 535 act1.sa_flags = SA_SIGINFO; 536 if (k->sa.sa_flags & TARGET_SA_RESTART) 537 act1.sa_flags |= SA_RESTART; 538 /* NOTE: it is important to update the host kernel signal 539 ignore state to avoid getting unexpected interrupted 540 syscalls */ 541 if (k->sa._sa_handler == TARGET_SIG_IGN) { 542 act1.sa_sigaction = (void *)SIG_IGN; 543 } else if (k->sa._sa_handler == TARGET_SIG_DFL) { 544 act1.sa_sigaction = (void *)SIG_DFL; 545 } else { 546 act1.sa_sigaction = host_signal_handler; 547 } 548 ret = sigaction(host_sig, &act1, NULL); 549 } 550 } 551 return ret; 552 } 553 554 #ifndef offsetof 555 #define offsetof(type, field) ((size_t) &((type *)0)->field) 556 #endif 557 558 static inline int copy_siginfo_to_user(target_siginfo_t *tinfo, 559 const target_siginfo_t *info) 560 { 561 tswap_siginfo(tinfo, info); 562 return 0; 563 } 564 565 #if 
defined(TARGET_I386) && TARGET_ABI_BITS == 32 566 567 /* from the Linux kernel */ 568 569 struct target_fpreg { 570 uint16_t significand[4]; 571 uint16_t exponent; 572 }; 573 574 struct target_fpxreg { 575 uint16_t significand[4]; 576 uint16_t exponent; 577 uint16_t padding[3]; 578 }; 579 580 struct target_xmmreg { 581 abi_ulong element[4]; 582 }; 583 584 struct target_fpstate { 585 /* Regular FPU environment */ 586 abi_ulong cw; 587 abi_ulong sw; 588 abi_ulong tag; 589 abi_ulong ipoff; 590 abi_ulong cssel; 591 abi_ulong dataoff; 592 abi_ulong datasel; 593 struct target_fpreg _st[8]; 594 uint16_t status; 595 uint16_t magic; /* 0xffff = regular FPU data only */ 596 597 /* FXSR FPU environment */ 598 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */ 599 abi_ulong mxcsr; 600 abi_ulong reserved; 601 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 602 struct target_xmmreg _xmm[8]; 603 abi_ulong padding[56]; 604 }; 605 606 #define X86_FXSR_MAGIC 0x0000 607 608 struct target_sigcontext { 609 uint16_t gs, __gsh; 610 uint16_t fs, __fsh; 611 uint16_t es, __esh; 612 uint16_t ds, __dsh; 613 abi_ulong edi; 614 abi_ulong esi; 615 abi_ulong ebp; 616 abi_ulong esp; 617 abi_ulong ebx; 618 abi_ulong edx; 619 abi_ulong ecx; 620 abi_ulong eax; 621 abi_ulong trapno; 622 abi_ulong err; 623 abi_ulong eip; 624 uint16_t cs, __csh; 625 abi_ulong eflags; 626 abi_ulong esp_at_signal; 627 uint16_t ss, __ssh; 628 abi_ulong fpstate; /* pointer */ 629 abi_ulong oldmask; 630 abi_ulong cr2; 631 }; 632 633 struct target_ucontext { 634 abi_ulong tuc_flags; 635 abi_ulong tuc_link; 636 target_stack_t tuc_stack; 637 struct target_sigcontext tuc_mcontext; 638 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 639 }; 640 641 struct sigframe 642 { 643 abi_ulong pretcode; 644 int sig; 645 struct target_sigcontext sc; 646 struct target_fpstate fpstate; 647 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 648 char retcode[8]; 649 }; 650 651 struct rt_sigframe 652 { 653 abi_ulong pretcode; 654 int sig; 655 abi_ulong pinfo; 656 abi_ulong puc; 657 struct target_siginfo info; 658 struct target_ucontext uc; 659 struct target_fpstate fpstate; 660 char retcode[8]; 661 }; 662 663 /* 664 * Set up a signal frame. 
665 */ 666 667 /* XXX: save x87 state */ 668 static int 669 setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate, 670 CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr) 671 { 672 int err = 0; 673 uint16_t magic; 674 675 /* already locked in setup_frame() */ 676 err |= __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 677 err |= __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 678 err |= __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 679 err |= __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 680 err |= __put_user(env->regs[R_EDI], &sc->edi); 681 err |= __put_user(env->regs[R_ESI], &sc->esi); 682 err |= __put_user(env->regs[R_EBP], &sc->ebp); 683 err |= __put_user(env->regs[R_ESP], &sc->esp); 684 err |= __put_user(env->regs[R_EBX], &sc->ebx); 685 err |= __put_user(env->regs[R_EDX], &sc->edx); 686 err |= __put_user(env->regs[R_ECX], &sc->ecx); 687 err |= __put_user(env->regs[R_EAX], &sc->eax); 688 err |= __put_user(env->exception_index, &sc->trapno); 689 err |= __put_user(env->error_code, &sc->err); 690 err |= __put_user(env->eip, &sc->eip); 691 err |= __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 692 err |= __put_user(env->eflags, &sc->eflags); 693 err |= __put_user(env->regs[R_ESP], &sc->esp_at_signal); 694 err |= __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 695 696 cpu_x86_fsave(env, fpstate_addr, 1); 697 fpstate->status = fpstate->sw; 698 magic = 0xffff; 699 err |= __put_user(magic, &fpstate->magic); 700 err |= __put_user(fpstate_addr, &sc->fpstate); 701 702 /* non-iBCS2 extensions.. */ 703 err |= __put_user(mask, &sc->oldmask); 704 err |= __put_user(env->cr[2], &sc->cr2); 705 return err; 706 } 707 708 /* 709 * Determine which stack to use.. 710 */ 711 712 static inline abi_ulong 713 get_sigframe(struct emulated_sigaction *ka, CPUX86State *env, size_t frame_size) 714 { 715 unsigned long esp; 716 717 /* Default to using normal stack */ 718 esp = env->regs[R_ESP]; 719 /* This is the X/Open sanctioned signal stack switching. */ 720 if (ka->sa.sa_flags & TARGET_SA_ONSTACK) { 721 if (sas_ss_flags(esp) == 0) 722 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 723 } 724 725 /* This is the legacy signal stack switching. */ 726 else 727 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 728 !(ka->sa.sa_flags & TARGET_SA_RESTORER) && 729 ka->sa.sa_restorer) { 730 esp = (unsigned long) ka->sa.sa_restorer; 731 } 732 return (esp - frame_size) & -8ul; 733 } 734 735 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 736 static void setup_frame(int sig, struct emulated_sigaction *ka, 737 target_sigset_t *set, CPUX86State *env) 738 { 739 abi_ulong frame_addr; 740 struct sigframe *frame; 741 int i, err = 0; 742 743 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 744 745 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 746 goto give_sigsegv; 747 748 err |= __put_user((/*current->exec_domain 749 && current->exec_domain->signal_invmap 750 && sig < 32 751 ? current->exec_domain->signal_invmap[sig] 752 : */ sig), 753 &frame->sig); 754 if (err) 755 goto give_sigsegv; 756 757 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 758 frame_addr + offsetof(struct sigframe, fpstate)); 759 if (err) 760 goto give_sigsegv; 761 762 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 763 if (__put_user(set->sig[i], &frame->extramask[i - 1])) 764 goto give_sigsegv; 765 } 766 767 /* Set up to return from userspace. 
If provided, use a stub 768 already in userspace. */ 769 if (ka->sa.sa_flags & TARGET_SA_RESTORER) { 770 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 771 } else { 772 uint16_t val16; 773 abi_ulong retcode_addr; 774 retcode_addr = frame_addr + offsetof(struct sigframe, retcode); 775 err |= __put_user(retcode_addr, &frame->pretcode); 776 /* This is popl %eax ; movl $,%eax ; int $0x80 */ 777 val16 = 0xb858; 778 err |= __put_user(val16, (uint16_t *)(frame->retcode+0)); 779 err |= __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); 780 val16 = 0x80cd; 781 err |= __put_user(val16, (uint16_t *)(frame->retcode+6)); 782 } 783 784 if (err) 785 goto give_sigsegv; 786 787 /* Set up registers for signal handler */ 788 env->regs[R_ESP] = frame_addr; 789 env->eip = ka->sa._sa_handler; 790 791 cpu_x86_load_seg(env, R_DS, __USER_DS); 792 cpu_x86_load_seg(env, R_ES, __USER_DS); 793 cpu_x86_load_seg(env, R_SS, __USER_DS); 794 cpu_x86_load_seg(env, R_CS, __USER_CS); 795 env->eflags &= ~TF_MASK; 796 797 unlock_user_struct(frame, frame_addr, 1); 798 799 return; 800 801 give_sigsegv: 802 unlock_user_struct(frame, frame_addr, 1); 803 if (sig == TARGET_SIGSEGV) 804 ka->sa._sa_handler = TARGET_SIG_DFL; 805 force_sig(TARGET_SIGSEGV /* , current */); 806 } 807 808 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */ 809 static void setup_rt_frame(int sig, struct emulated_sigaction *ka, 810 target_siginfo_t *info, 811 target_sigset_t *set, CPUX86State *env) 812 { 813 abi_ulong frame_addr, addr; 814 struct rt_sigframe *frame; 815 int i, err = 0; 816 817 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 818 819 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 820 goto give_sigsegv; 821 822 err |= __put_user((/*current->exec_domain 823 && current->exec_domain->signal_invmap 824 && sig < 32 825 ? current->exec_domain->signal_invmap[sig] 826 : */sig), 827 &frame->sig); 828 addr = frame_addr + offsetof(struct rt_sigframe, info); 829 err |= __put_user(addr, &frame->pinfo); 830 addr = frame_addr + offsetof(struct rt_sigframe, uc); 831 err |= __put_user(addr, &frame->puc); 832 err |= copy_siginfo_to_user(&frame->info, info); 833 if (err) 834 goto give_sigsegv; 835 836 /* Create the ucontext. */ 837 err |= __put_user(0, &frame->uc.tuc_flags); 838 err |= __put_user(0, &frame->uc.tuc_link); 839 err |= __put_user(target_sigaltstack_used.ss_sp, 840 &frame->uc.tuc_stack.ss_sp); 841 err |= __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 842 &frame->uc.tuc_stack.ss_flags); 843 err |= __put_user(target_sigaltstack_used.ss_size, 844 &frame->uc.tuc_stack.ss_size); 845 err |= setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, 846 env, set->sig[0], 847 frame_addr + offsetof(struct rt_sigframe, fpstate)); 848 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 849 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) 850 goto give_sigsegv; 851 } 852 853 /* Set up to return from userspace. If provided, use a stub 854 already in userspace. 
*/ 855 if (ka->sa.sa_flags & TARGET_SA_RESTORER) { 856 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 857 } else { 858 uint16_t val16; 859 addr = frame_addr + offsetof(struct rt_sigframe, retcode); 860 err |= __put_user(addr, &frame->pretcode); 861 /* This is movl $,%eax ; int $0x80 */ 862 err |= __put_user(0xb8, (char *)(frame->retcode+0)); 863 err |= __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); 864 val16 = 0x80cd; 865 err |= __put_user(val16, (uint16_t *)(frame->retcode+5)); 866 } 867 868 if (err) 869 goto give_sigsegv; 870 871 /* Set up registers for signal handler */ 872 env->regs[R_ESP] = frame_addr; 873 env->eip = ka->sa._sa_handler; 874 875 cpu_x86_load_seg(env, R_DS, __USER_DS); 876 cpu_x86_load_seg(env, R_ES, __USER_DS); 877 cpu_x86_load_seg(env, R_SS, __USER_DS); 878 cpu_x86_load_seg(env, R_CS, __USER_CS); 879 env->eflags &= ~TF_MASK; 880 881 unlock_user_struct(frame, frame_addr, 1); 882 883 return; 884 885 give_sigsegv: 886 unlock_user_struct(frame, frame_addr, 1); 887 if (sig == TARGET_SIGSEGV) 888 ka->sa._sa_handler = TARGET_SIG_DFL; 889 force_sig(TARGET_SIGSEGV /* , current */); 890 } 891 892 static int 893 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax) 894 { 895 unsigned int err = 0; 896 abi_ulong fpstate_addr; 897 unsigned int tmpflags; 898 899 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); 900 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); 901 cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); 902 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds)); 903 904 env->regs[R_EDI] = tswapl(sc->edi); 905 env->regs[R_ESI] = tswapl(sc->esi); 906 env->regs[R_EBP] = tswapl(sc->ebp); 907 env->regs[R_ESP] = tswapl(sc->esp); 908 env->regs[R_EBX] = tswapl(sc->ebx); 909 env->regs[R_EDX] = tswapl(sc->edx); 910 env->regs[R_ECX] = tswapl(sc->ecx); 911 env->eip = tswapl(sc->eip); 912 913 cpu_x86_load_seg(env, R_CS, lduw(&sc->cs) | 3); 914 cpu_x86_load_seg(env, R_SS, lduw(&sc->ss) | 3); 915 916 tmpflags = tswapl(sc->eflags); 917 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 918 // regs->orig_eax = -1; /* disable syscall checks */ 919 920 fpstate_addr = tswapl(sc->fpstate); 921 if (fpstate_addr != 0) { 922 if (!access_ok(VERIFY_READ, fpstate_addr, 923 sizeof(struct target_fpstate))) 924 goto badframe; 925 cpu_x86_frstor(env, fpstate_addr, 1); 926 } 927 928 *peax = tswapl(sc->eax); 929 return err; 930 badframe: 931 return 1; 932 } 933 934 long do_sigreturn(CPUX86State *env) 935 { 936 struct sigframe *frame; 937 abi_ulong frame_addr = env->regs[R_ESP] - 8; 938 target_sigset_t target_set; 939 sigset_t set; 940 int eax, i; 941 942 #if defined(DEBUG_SIGNAL) 943 fprintf(stderr, "do_sigreturn\n"); 944 #endif 945 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 946 goto badframe; 947 /* set blocked signals */ 948 if (__get_user(target_set.sig[0], &frame->sc.oldmask)) 949 goto badframe; 950 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 951 if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) 952 goto badframe; 953 } 954 955 target_to_host_sigset_internal(&set, &target_set); 956 sigprocmask(SIG_SETMASK, &set, NULL); 957 958 /* restore registers */ 959 if (restore_sigcontext(env, &frame->sc, &eax)) 960 goto badframe; 961 unlock_user_struct(frame, frame_addr, 0); 962 return eax; 963 964 badframe: 965 unlock_user_struct(frame, frame_addr, 0); 966 force_sig(TARGET_SIGSEGV); 967 return 0; 968 } 969 970 long do_rt_sigreturn(CPUX86State *env) 971 { 972 abi_ulong frame_addr; 973 struct rt_sigframe *frame; 974 sigset_t set; 975 int eax; 976 
977 frame_addr = env->regs[R_ESP] - 4; 978 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 979 goto badframe; 980 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 981 sigprocmask(SIG_SETMASK, &set, NULL); 982 983 if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax)) 984 goto badframe; 985 986 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, 987 get_sp_from_cpustate(env)) == -EFAULT) 988 goto badframe; 989 990 unlock_user_struct(frame, frame_addr, 0); 991 return eax; 992 993 badframe: 994 unlock_user_struct(frame, frame_addr, 0); 995 force_sig(TARGET_SIGSEGV); 996 return 0; 997 } 998 999 #elif defined(TARGET_ARM) 1000 1001 struct target_sigcontext { 1002 abi_ulong trap_no; 1003 abi_ulong error_code; 1004 abi_ulong oldmask; 1005 abi_ulong arm_r0; 1006 abi_ulong arm_r1; 1007 abi_ulong arm_r2; 1008 abi_ulong arm_r3; 1009 abi_ulong arm_r4; 1010 abi_ulong arm_r5; 1011 abi_ulong arm_r6; 1012 abi_ulong arm_r7; 1013 abi_ulong arm_r8; 1014 abi_ulong arm_r9; 1015 abi_ulong arm_r10; 1016 abi_ulong arm_fp; 1017 abi_ulong arm_ip; 1018 abi_ulong arm_sp; 1019 abi_ulong arm_lr; 1020 abi_ulong arm_pc; 1021 abi_ulong arm_cpsr; 1022 abi_ulong fault_address; 1023 }; 1024 1025 struct target_ucontext { 1026 abi_ulong tuc_flags; 1027 abi_ulong tuc_link; 1028 target_stack_t tuc_stack; 1029 struct target_sigcontext tuc_mcontext; 1030 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1031 }; 1032 1033 struct sigframe 1034 { 1035 struct target_sigcontext sc; 1036 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 1037 abi_ulong retcode; 1038 }; 1039 1040 struct rt_sigframe 1041 { 1042 struct target_siginfo *pinfo; 1043 void *puc; 1044 struct target_siginfo info; 1045 struct target_ucontext uc; 1046 abi_ulong retcode; 1047 }; 1048 1049 #define TARGET_CONFIG_CPU_32 1 1050 1051 /* 1052 * For ARM syscalls, we encode the syscall number into the instruction. 1053 */ 1054 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) 1055 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) 1056 1057 /* 1058 * For Thumb syscalls, we pass the syscall number via r7. We therefore 1059 * need two 16-bit instructions. 
1060 */ 1061 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 1062 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 1063 1064 static const abi_ulong retcodes[4] = { 1065 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 1066 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 1067 }; 1068 1069 1070 #define __put_user_error(x,p,e) __put_user(x, p) 1071 #define __get_user_error(x,p,e) __get_user(x, p) 1072 1073 static inline int valid_user_regs(CPUState *regs) 1074 { 1075 return 1; 1076 } 1077 1078 static int 1079 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1080 CPUState *env, unsigned long mask) 1081 { 1082 int err = 0; 1083 1084 __put_user_error(env->regs[0], &sc->arm_r0, err); 1085 __put_user_error(env->regs[1], &sc->arm_r1, err); 1086 __put_user_error(env->regs[2], &sc->arm_r2, err); 1087 __put_user_error(env->regs[3], &sc->arm_r3, err); 1088 __put_user_error(env->regs[4], &sc->arm_r4, err); 1089 __put_user_error(env->regs[5], &sc->arm_r5, err); 1090 __put_user_error(env->regs[6], &sc->arm_r6, err); 1091 __put_user_error(env->regs[7], &sc->arm_r7, err); 1092 __put_user_error(env->regs[8], &sc->arm_r8, err); 1093 __put_user_error(env->regs[9], &sc->arm_r9, err); 1094 __put_user_error(env->regs[10], &sc->arm_r10, err); 1095 __put_user_error(env->regs[11], &sc->arm_fp, err); 1096 __put_user_error(env->regs[12], &sc->arm_ip, err); 1097 __put_user_error(env->regs[13], &sc->arm_sp, err); 1098 __put_user_error(env->regs[14], &sc->arm_lr, err); 1099 __put_user_error(env->regs[15], &sc->arm_pc, err); 1100 #ifdef TARGET_CONFIG_CPU_32 1101 __put_user_error(cpsr_read(env), &sc->arm_cpsr, err); 1102 #endif 1103 1104 __put_user_error(/* current->thread.trap_no */ 0, &sc->trap_no, err); 1105 __put_user_error(/* current->thread.error_code */ 0, &sc->error_code, err); 1106 __put_user_error(/* current->thread.address */ 0, &sc->fault_address, err); 1107 __put_user_error(mask, &sc->oldmask, err); 1108 1109 return err; 1110 } 1111 1112 static inline abi_ulong 1113 get_sigframe(struct emulated_sigaction *ka, CPUState *regs, int framesize) 1114 { 1115 unsigned long sp = regs->regs[13]; 1116 1117 /* 1118 * This is the X/Open sanctioned signal stack switching. 1119 */ 1120 if ((ka->sa.sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) 1121 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1122 /* 1123 * ATPCS B01 mandates 8-byte alignment 1124 */ 1125 return (sp - framesize) & ~7; 1126 } 1127 1128 static int 1129 setup_return(CPUState *env, struct emulated_sigaction *ka, 1130 abi_ulong *rc, void *frame, int usig) 1131 { 1132 abi_ulong handler = (abi_ulong)ka->sa._sa_handler; 1133 abi_ulong retcode; 1134 int thumb = 0; 1135 #if defined(TARGET_CONFIG_CPU_32) 1136 #if 0 1137 abi_ulong cpsr = env->cpsr; 1138 1139 /* 1140 * Maybe we need to deliver a 32-bit signal to a 26-bit task. 1141 */ 1142 if (ka->sa.sa_flags & SA_THIRTYTWO) 1143 cpsr = (cpsr & ~MODE_MASK) | USR_MODE; 1144 1145 #ifdef CONFIG_ARM_THUMB 1146 if (elf_hwcap & HWCAP_THUMB) { 1147 /* 1148 * The LSB of the handler determines if we're going to 1149 * be using THUMB or ARM mode for this signal handler. 
1150 */ 1151 thumb = handler & 1; 1152 1153 if (thumb) 1154 cpsr |= T_BIT; 1155 else 1156 cpsr &= ~T_BIT; 1157 } 1158 #endif /* CONFIG_ARM_THUMB */ 1159 #endif /* 0 */ 1160 #endif /* TARGET_CONFIG_CPU_32 */ 1161 1162 if (ka->sa.sa_flags & TARGET_SA_RESTORER) { 1163 retcode = (abi_ulong)ka->sa.sa_restorer; 1164 } else { 1165 unsigned int idx = thumb; 1166 1167 if (ka->sa.sa_flags & TARGET_SA_SIGINFO) 1168 idx += 2; 1169 1170 if (__put_user(retcodes[idx], rc)) 1171 return 1; 1172 #if 0 1173 flush_icache_range((abi_ulong)rc, 1174 (abi_ulong)(rc + 1)); 1175 #endif 1176 retcode = ((abi_ulong)rc) + thumb; 1177 } 1178 1179 env->regs[0] = usig; 1180 env->regs[13] = h2g(frame); 1181 env->regs[14] = retcode; 1182 env->regs[15] = handler & (thumb ? ~1 : ~3); 1183 1184 #if 0 1185 #ifdef TARGET_CONFIG_CPU_32 1186 env->cpsr = cpsr; 1187 #endif 1188 #endif 1189 1190 return 0; 1191 } 1192 1193 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 1194 static void setup_frame(int usig, struct emulated_sigaction *ka, 1195 target_sigset_t *set, CPUState *regs) 1196 { 1197 struct sigframe *frame; 1198 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1199 int i, err = 0; 1200 1201 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1202 return; 1203 1204 err |= setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]); 1205 1206 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1207 if (__put_user(set->sig[i], &frame->extramask[i - 1])) 1208 goto end; 1209 } 1210 1211 if (err == 0) 1212 err = setup_return(regs, ka, &frame->retcode, frame, usig); 1213 1214 end: 1215 unlock_user_struct(frame, frame_addr, 1); 1216 // return err; 1217 } 1218 1219 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 1220 static void setup_rt_frame(int usig, struct emulated_sigaction *ka, 1221 target_siginfo_t *info, 1222 target_sigset_t *set, CPUState *env) 1223 { 1224 struct rt_sigframe *frame; 1225 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1226 struct target_sigaltstack stack; 1227 int i, err = 0; 1228 1229 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1230 return /* 1 */; 1231 1232 __put_user_error(&frame->info, (abi_ulong *)&frame->pinfo, err); 1233 __put_user_error(&frame->uc, (abi_ulong *)&frame->puc, err); 1234 err |= copy_siginfo_to_user(&frame->info, info); 1235 1236 /* Clear all the bits of the ucontext we don't use. */ 1237 memset(&frame->uc, 0, offsetof(struct target_ucontext, tuc_mcontext)); 1238 1239 memset(&stack, 0, sizeof(stack)); 1240 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1241 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1242 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1243 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1244 1245 err |= setup_sigcontext(&frame->uc.tuc_mcontext, /*&frame->fpstate,*/ 1246 env, set->sig[0]); 1247 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1248 if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) 1249 goto end; 1250 } 1251 1252 if (err == 0) 1253 err = setup_return(env, ka, &frame->retcode, frame, usig); 1254 1255 if (err == 0) { 1256 /* 1257 * For realtime signals we must also set the second and third 1258 * arguments for the signal handler. 
1259 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06 1260 */ 1261 env->regs[1] = (abi_ulong)frame->pinfo; 1262 env->regs[2] = (abi_ulong)frame->puc; 1263 } 1264 1265 end: 1266 unlock_user_struct(frame, frame_addr, 1); 1267 1268 // return err; 1269 } 1270 1271 static int 1272 restore_sigcontext(CPUState *env, struct target_sigcontext *sc) 1273 { 1274 int err = 0; 1275 uint32_t cpsr; 1276 1277 __get_user_error(env->regs[0], &sc->arm_r0, err); 1278 __get_user_error(env->regs[1], &sc->arm_r1, err); 1279 __get_user_error(env->regs[2], &sc->arm_r2, err); 1280 __get_user_error(env->regs[3], &sc->arm_r3, err); 1281 __get_user_error(env->regs[4], &sc->arm_r4, err); 1282 __get_user_error(env->regs[5], &sc->arm_r5, err); 1283 __get_user_error(env->regs[6], &sc->arm_r6, err); 1284 __get_user_error(env->regs[7], &sc->arm_r7, err); 1285 __get_user_error(env->regs[8], &sc->arm_r8, err); 1286 __get_user_error(env->regs[9], &sc->arm_r9, err); 1287 __get_user_error(env->regs[10], &sc->arm_r10, err); 1288 __get_user_error(env->regs[11], &sc->arm_fp, err); 1289 __get_user_error(env->regs[12], &sc->arm_ip, err); 1290 __get_user_error(env->regs[13], &sc->arm_sp, err); 1291 __get_user_error(env->regs[14], &sc->arm_lr, err); 1292 __get_user_error(env->regs[15], &sc->arm_pc, err); 1293 #ifdef TARGET_CONFIG_CPU_32 1294 __get_user_error(cpsr, &sc->arm_cpsr, err); 1295 cpsr_write(env, cpsr, 0xffffffff); 1296 #endif 1297 1298 err |= !valid_user_regs(env); 1299 1300 return err; 1301 } 1302 1303 long do_sigreturn(CPUState *env) 1304 { 1305 struct sigframe *frame; 1306 target_sigset_t set; 1307 sigset_t host_set; 1308 int i; 1309 1310 /* 1311 * Since we stacked the signal on a 64-bit boundary, 1312 * then 'sp' should be word aligned here. If it's 1313 * not, then the user is trying to mess with us. 1314 */ 1315 if (env->regs[13] & 7) 1316 goto badframe; 1317 1318 frame = (struct sigframe *)g2h(env->regs[13]); 1319 1320 #if 0 1321 if (verify_area(VERIFY_READ, frame, sizeof (*frame))) 1322 goto badframe; 1323 #endif 1324 if (__get_user(set.sig[0], &frame->sc.oldmask)) 1325 goto badframe; 1326 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1327 if (__get_user(set.sig[i], &frame->extramask[i - 1])) 1328 goto badframe; 1329 } 1330 1331 target_to_host_sigset_internal(&host_set, &set); 1332 sigprocmask(SIG_SETMASK, &host_set, NULL); 1333 1334 if (restore_sigcontext(env, &frame->sc)) 1335 goto badframe; 1336 1337 #if 0 1338 /* Send SIGTRAP if we're single-stepping */ 1339 if (ptrace_cancel_bpt(current)) 1340 send_sig(SIGTRAP, current, 1); 1341 #endif 1342 return env->regs[0]; 1343 1344 badframe: 1345 force_sig(SIGSEGV /* , current */); 1346 return 0; 1347 } 1348 1349 long do_rt_sigreturn(CPUState *env) 1350 { 1351 struct rt_sigframe *frame; 1352 sigset_t host_set; 1353 1354 /* 1355 * Since we stacked the signal on a 64-bit boundary, 1356 * then 'sp' should be word aligned here. If it's 1357 * not, then the user is trying to mess with us. 
1358 */ 1359 if (env->regs[13] & 7) 1360 goto badframe; 1361 1362 frame = (struct rt_sigframe *)env->regs[13]; 1363 1364 #if 0 1365 if (verify_area(VERIFY_READ, frame, sizeof (*frame))) 1366 goto badframe; 1367 #endif 1368 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 1369 sigprocmask(SIG_SETMASK, &host_set, NULL); 1370 1371 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) 1372 goto badframe; 1373 1374 if (do_sigaltstack(h2g(&frame->uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 1375 goto badframe; 1376 1377 #if 0 1378 /* Send SIGTRAP if we're single-stepping */ 1379 if (ptrace_cancel_bpt(current)) 1380 send_sig(SIGTRAP, current, 1); 1381 #endif 1382 return env->regs[0]; 1383 1384 badframe: 1385 force_sig(SIGSEGV /* , current */); 1386 return 0; 1387 } 1388 1389 #elif defined(TARGET_SPARC) 1390 1391 #define __SUNOS_MAXWIN 31 1392 1393 /* This is what SunOS does, so shall I. */ 1394 struct target_sigcontext { 1395 abi_ulong sigc_onstack; /* state to restore */ 1396 1397 abi_ulong sigc_mask; /* sigmask to restore */ 1398 abi_ulong sigc_sp; /* stack pointer */ 1399 abi_ulong sigc_pc; /* program counter */ 1400 abi_ulong sigc_npc; /* next program counter */ 1401 abi_ulong sigc_psr; /* for condition codes etc */ 1402 abi_ulong sigc_g1; /* User uses these two registers */ 1403 abi_ulong sigc_o0; /* within the trampoline code. */ 1404 1405 /* Now comes information regarding the users window set 1406 * at the time of the signal. 1407 */ 1408 abi_ulong sigc_oswins; /* outstanding windows */ 1409 1410 /* stack ptrs for each regwin buf */ 1411 char *sigc_spbuf[__SUNOS_MAXWIN]; 1412 1413 /* Windows to restore after signal */ 1414 struct { 1415 abi_ulong locals[8]; 1416 abi_ulong ins[8]; 1417 } sigc_wbuf[__SUNOS_MAXWIN]; 1418 }; 1419 /* A Sparc stack frame */ 1420 struct sparc_stackf { 1421 abi_ulong locals[8]; 1422 abi_ulong ins[6]; 1423 struct sparc_stackf *fp; 1424 abi_ulong callers_pc; 1425 char *structptr; 1426 abi_ulong xargs[6]; 1427 abi_ulong xxargs[1]; 1428 }; 1429 1430 typedef struct { 1431 struct { 1432 abi_ulong psr; 1433 abi_ulong pc; 1434 abi_ulong npc; 1435 abi_ulong y; 1436 abi_ulong u_regs[16]; /* globals and ins */ 1437 } si_regs; 1438 int si_mask; 1439 } __siginfo_t; 1440 1441 typedef struct { 1442 unsigned long si_float_regs [32]; 1443 unsigned long si_fsr; 1444 unsigned long si_fpqdepth; 1445 struct { 1446 unsigned long *insn_addr; 1447 unsigned long insn; 1448 } si_fpqueue [16]; 1449 } qemu_siginfo_fpu_t; 1450 1451 1452 struct target_signal_frame { 1453 struct sparc_stackf ss; 1454 __siginfo_t info; 1455 qemu_siginfo_fpu_t *fpu_save; 1456 abi_ulong insns[2] __attribute__ ((aligned (8))); 1457 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 1458 abi_ulong extra_size; /* Should be 0 */ 1459 qemu_siginfo_fpu_t fpu_state; 1460 }; 1461 struct target_rt_signal_frame { 1462 struct sparc_stackf ss; 1463 siginfo_t info; 1464 abi_ulong regs[20]; 1465 sigset_t mask; 1466 qemu_siginfo_fpu_t *fpu_save; 1467 unsigned int insns[2]; 1468 stack_t stack; 1469 unsigned int extra_size; /* Should be 0 */ 1470 qemu_siginfo_fpu_t fpu_state; 1471 }; 1472 1473 #define UREG_O0 16 1474 #define UREG_O6 22 1475 #define UREG_I0 0 1476 #define UREG_I1 1 1477 #define UREG_I2 2 1478 #define UREG_I3 3 1479 #define UREG_I4 4 1480 #define UREG_I5 5 1481 #define UREG_I6 6 1482 #define UREG_I7 7 1483 #define UREG_L0 8 1484 #define UREG_FP UREG_I6 1485 #define UREG_SP UREG_O6 1486 1487 static inline abi_ulong get_sigframe(struct emulated_sigaction *sa, 1488 CPUState *env, unsigned long framesize) 
1489 { 1490 abi_ulong sp; 1491 1492 sp = env->regwptr[UREG_FP]; 1493 1494 /* This is the X/Open sanctioned signal stack switching. */ 1495 if (sa->sa.sa_flags & TARGET_SA_ONSTACK) { 1496 if (!on_sig_stack(sp) 1497 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) 1498 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1499 } 1500 return sp - framesize; 1501 } 1502 1503 static int 1504 setup___siginfo(__siginfo_t *si, CPUState *env, abi_ulong mask) 1505 { 1506 int err = 0, i; 1507 1508 err |= __put_user(env->psr, &si->si_regs.psr); 1509 err |= __put_user(env->pc, &si->si_regs.pc); 1510 err |= __put_user(env->npc, &si->si_regs.npc); 1511 err |= __put_user(env->y, &si->si_regs.y); 1512 for (i=0; i < 8; i++) { 1513 err |= __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 1514 } 1515 for (i=0; i < 8; i++) { 1516 err |= __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 1517 } 1518 err |= __put_user(mask, &si->si_mask); 1519 return err; 1520 } 1521 1522 #if 0 1523 static int 1524 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 1525 CPUState *env, unsigned long mask) 1526 { 1527 int err = 0; 1528 1529 err |= __put_user(mask, &sc->sigc_mask); 1530 err |= __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 1531 err |= __put_user(env->pc, &sc->sigc_pc); 1532 err |= __put_user(env->npc, &sc->sigc_npc); 1533 err |= __put_user(env->psr, &sc->sigc_psr); 1534 err |= __put_user(env->gregs[1], &sc->sigc_g1); 1535 err |= __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 1536 1537 return err; 1538 } 1539 #endif 1540 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 1541 1542 static void setup_frame(int sig, struct emulated_sigaction *ka, 1543 target_sigset_t *set, CPUState *env) 1544 { 1545 abi_ulong sf_addr; 1546 struct target_signal_frame *sf; 1547 int sigframe_size, err, i; 1548 1549 /* 1. Make sure everything is clean */ 1550 //synchronize_user_stack(); 1551 1552 sigframe_size = NF_ALIGNEDSZ; 1553 sf_addr = get_sigframe(ka, env, sigframe_size); 1554 1555 sf = lock_user(VERIFY_WRITE, sf_addr, 1556 sizeof(struct target_signal_frame), 0); 1557 if (!sf) 1558 goto sigsegv; 1559 1560 //fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]); 1561 #if 0 1562 if (invalid_frame_pointer(sf, sigframe_size)) 1563 goto sigill_and_return; 1564 #endif 1565 /* 2. Save the current process state */ 1566 err = setup___siginfo(&sf->info, env, set->sig[0]); 1567 err |= __put_user(0, &sf->extra_size); 1568 1569 //err |= save_fpu_state(regs, &sf->fpu_state); 1570 //err |= __put_user(&sf->fpu_state, &sf->fpu_save); 1571 1572 err |= __put_user(set->sig[0], &sf->info.si_mask); 1573 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 1574 err |= __put_user(set->sig[i + 1], &sf->extramask[i]); 1575 } 1576 1577 for (i = 0; i < 8; i++) { 1578 err |= __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 1579 } 1580 for (i = 0; i < 8; i++) { 1581 err |= __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 1582 } 1583 if (err) 1584 goto sigsegv; 1585 1586 /* 3. signal handler back-trampoline and parameters */ 1587 env->regwptr[UREG_FP] = sf_addr; 1588 env->regwptr[UREG_I0] = sig; 1589 env->regwptr[UREG_I1] = sf_addr + 1590 offsetof(struct target_signal_frame, info); 1591 env->regwptr[UREG_I2] = sf_addr + 1592 offsetof(struct target_signal_frame, info); 1593 1594 /* 4. signal handler */ 1595 env->pc = ka->sa._sa_handler; 1596 env->npc = (env->pc + 4); 1597 /* 5. 
return to kernel instructions */ 1598 if (ka->sa.sa_restorer) 1599 env->regwptr[UREG_I7] = ka->sa.sa_restorer; 1600 else { 1601 uint32_t val32; 1602 1603 env->regwptr[UREG_I7] = sf_addr + 1604 offsetof(struct target_signal_frame, insns) - 2 * 4; 1605 1606 /* mov __NR_sigreturn, %g1 */ 1607 val32 = 0x821020d8; 1608 err |= __put_user(val32, &sf->insns[0]); 1609 1610 /* t 0x10 */ 1611 val32 = 0x91d02010; 1612 err |= __put_user(val32, &sf->insns[1]); 1613 if (err) 1614 goto sigsegv; 1615 1616 /* Flush instruction space. */ 1617 //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 1618 // tb_flush(env); 1619 } 1620 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 1621 return; 1622 #if 0 1623 sigill_and_return: 1624 force_sig(TARGET_SIGILL); 1625 #endif 1626 sigsegv: 1627 //fprintf(stderr, "force_sig\n"); 1628 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 1629 force_sig(TARGET_SIGSEGV); 1630 } 1631 static inline int 1632 restore_fpu_state(CPUState *env, qemu_siginfo_fpu_t *fpu) 1633 { 1634 int err; 1635 #if 0 1636 #ifdef CONFIG_SMP 1637 if (current->flags & PF_USEDFPU) 1638 regs->psr &= ~PSR_EF; 1639 #else 1640 if (current == last_task_used_math) { 1641 last_task_used_math = 0; 1642 regs->psr &= ~PSR_EF; 1643 } 1644 #endif 1645 current->used_math = 1; 1646 current->flags &= ~PF_USEDFPU; 1647 #endif 1648 #if 0 1649 if (verify_area (VERIFY_READ, fpu, sizeof(*fpu))) 1650 return -EFAULT; 1651 #endif 1652 1653 #if 0 1654 /* XXX: incorrect */ 1655 err = __copy_from_user(&env->fpr[0], &fpu->si_float_regs[0], 1656 (sizeof(unsigned long) * 32)); 1657 #endif 1658 err |= __get_user(env->fsr, &fpu->si_fsr); 1659 #if 0 1660 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); 1661 if (current->thread.fpqdepth != 0) 1662 err |= __copy_from_user(¤t->thread.fpqueue[0], 1663 &fpu->si_fpqueue[0], 1664 ((sizeof(unsigned long) + 1665 (sizeof(unsigned long *)))*16)); 1666 #endif 1667 return err; 1668 } 1669 1670 1671 static void setup_rt_frame(int sig, struct emulated_sigaction *ka, 1672 target_siginfo_t *info, 1673 target_sigset_t *set, CPUState *env) 1674 { 1675 fprintf(stderr, "setup_rt_frame: not implemented\n"); 1676 } 1677 1678 long do_sigreturn(CPUState *env) 1679 { 1680 struct target_signal_frame *sf; 1681 uint32_t up_psr, pc, npc; 1682 target_sigset_t set; 1683 sigset_t host_set; 1684 abi_ulong fpu_save; 1685 int err, i; 1686 1687 sf = (struct target_signal_frame *)g2h(env->regwptr[UREG_FP]); 1688 #if 0 1689 fprintf(stderr, "sigreturn\n"); 1690 fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]); 1691 #endif 1692 //cpu_dump_state(env, stderr, fprintf, 0); 1693 1694 /* 1. Make sure we are not getting garbage from the user */ 1695 #if 0 1696 if (verify_area (VERIFY_READ, sf, sizeof (*sf))) 1697 goto segv_and_exit; 1698 #endif 1699 1700 if (((uint) sf) & 3) 1701 goto segv_and_exit; 1702 1703 err = __get_user(pc, &sf->info.si_regs.pc); 1704 err |= __get_user(npc, &sf->info.si_regs.npc); 1705 1706 if ((pc | npc) & 3) 1707 goto segv_and_exit; 1708 1709 /* 2. Restore the state */ 1710 err |= __get_user(up_psr, &sf->info.si_regs.psr); 1711 1712 /* User can only change condition codes and FPU enabling in %psr. 
*/ 1713 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 1714 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 1715 1716 env->pc = pc; 1717 env->npc = npc; 1718 err |= __get_user(env->y, &sf->info.si_regs.y); 1719 for (i=0; i < 8; i++) { 1720 err |= __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 1721 } 1722 for (i=0; i < 8; i++) { 1723 err |= __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 1724 } 1725 1726 err |= __get_user(fpu_save, (abi_ulong *)&sf->fpu_save); 1727 1728 //if (fpu_save) 1729 // err |= restore_fpu_state(env, fpu_save); 1730 1731 /* This is pretty much atomic, no amount locking would prevent 1732 * the races which exist anyways. 1733 */ 1734 err |= __get_user(set.sig[0], &sf->info.si_mask); 1735 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1736 err |= (__get_user(set.sig[i], &sf->extramask[i - 1])); 1737 } 1738 1739 target_to_host_sigset_internal(&host_set, &set); 1740 sigprocmask(SIG_SETMASK, &host_set, NULL); 1741 1742 if (err) 1743 goto segv_and_exit; 1744 1745 return env->regwptr[0]; 1746 1747 segv_and_exit: 1748 force_sig(TARGET_SIGSEGV); 1749 } 1750 1751 long do_rt_sigreturn(CPUState *env) 1752 { 1753 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 1754 return -ENOSYS; 1755 } 1756 1757 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 1758 #define MC_TSTATE 0 1759 #define MC_PC 1 1760 #define MC_NPC 2 1761 #define MC_Y 3 1762 #define MC_G1 4 1763 #define MC_G2 5 1764 #define MC_G3 6 1765 #define MC_G4 7 1766 #define MC_G5 8 1767 #define MC_G6 9 1768 #define MC_G7 10 1769 #define MC_O0 11 1770 #define MC_O1 12 1771 #define MC_O2 13 1772 #define MC_O3 14 1773 #define MC_O4 15 1774 #define MC_O5 16 1775 #define MC_O6 17 1776 #define MC_O7 18 1777 #define MC_NGREG 19 1778 1779 typedef abi_ulong target_mc_greg_t; 1780 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 1781 1782 struct target_mc_fq { 1783 abi_ulong *mcfq_addr; 1784 uint32_t mcfq_insn; 1785 }; 1786 1787 struct target_mc_fpu { 1788 union { 1789 uint32_t sregs[32]; 1790 uint64_t dregs[32]; 1791 //uint128_t qregs[16]; 1792 } mcfpu_fregs; 1793 abi_ulong mcfpu_fsr; 1794 abi_ulong mcfpu_fprs; 1795 abi_ulong mcfpu_gsr; 1796 struct target_mc_fq *mcfpu_fq; 1797 unsigned char mcfpu_qcnt; 1798 unsigned char mcfpu_qentsz; 1799 unsigned char mcfpu_enab; 1800 }; 1801 typedef struct target_mc_fpu target_mc_fpu_t; 1802 1803 typedef struct { 1804 target_mc_gregset_t mc_gregs; 1805 target_mc_greg_t mc_fp; 1806 target_mc_greg_t mc_i7; 1807 target_mc_fpu_t mc_fpregs; 1808 } target_mcontext_t; 1809 1810 struct target_ucontext { 1811 struct target_ucontext *uc_link; 1812 abi_ulong uc_flags; 1813 target_sigset_t uc_sigmask; 1814 target_mcontext_t uc_mcontext; 1815 }; 1816 1817 /* A V9 register window */ 1818 struct target_reg_window { 1819 abi_ulong locals[8]; 1820 abi_ulong ins[8]; 1821 }; 1822 1823 #define TARGET_STACK_BIAS 2047 1824 1825 /* {set, get}context() needed for 64-bit SparcLinux userland. 
*/ 1826 void sparc64_set_context(CPUSPARCState *env) 1827 { 1828 abi_ulong ucp_addr; 1829 struct target_ucontext *ucp; 1830 target_mc_gregset_t *grp; 1831 abi_ulong pc, npc, tstate; 1832 abi_ulong fp, i7, w_addr; 1833 unsigned char fenab; 1834 int err; 1835 unsigned int i; 1836 1837 ucp_addr = env->regwptr[UREG_I0]; 1838 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) 1839 goto do_sigsegv; 1840 grp = &ucp->uc_mcontext.mc_gregs; 1841 err = __get_user(pc, &((*grp)[MC_PC])); 1842 err |= __get_user(npc, &((*grp)[MC_NPC])); 1843 if (err || ((pc | npc) & 3)) 1844 goto do_sigsegv; 1845 if (env->regwptr[UREG_I1]) { 1846 target_sigset_t target_set; 1847 sigset_t set; 1848 1849 if (TARGET_NSIG_WORDS == 1) { 1850 if (__get_user(target_set.sig[0], &ucp->uc_sigmask.sig[0])) 1851 goto do_sigsegv; 1852 } else { 1853 abi_ulong *src, *dst; 1854 src = ucp->uc_sigmask.sig; 1855 dst = target_set.sig; 1856 for (i = 0; i < sizeof(target_sigset_t) / sizeof(abi_ulong); 1857 i++, dst++, src++) 1858 err |= __get_user(*dst, src); 1859 if (err) 1860 goto do_sigsegv; 1861 } 1862 target_to_host_sigset_internal(&set, &target_set); 1863 sigprocmask(SIG_SETMASK, &set, NULL); 1864 } 1865 env->pc = pc; 1866 env->npc = npc; 1867 err |= __get_user(env->y, &((*grp)[MC_Y])); 1868 err |= __get_user(tstate, &((*grp)[MC_TSTATE])); 1869 env->asi = (tstate >> 24) & 0xff; 1870 PUT_CCR(env, tstate >> 32); 1871 PUT_CWP64(env, tstate & 0x1f); 1872 err |= __get_user(env->gregs[1], (&(*grp)[MC_G1])); 1873 err |= __get_user(env->gregs[2], (&(*grp)[MC_G2])); 1874 err |= __get_user(env->gregs[3], (&(*grp)[MC_G3])); 1875 err |= __get_user(env->gregs[4], (&(*grp)[MC_G4])); 1876 err |= __get_user(env->gregs[5], (&(*grp)[MC_G5])); 1877 err |= __get_user(env->gregs[6], (&(*grp)[MC_G6])); 1878 err |= __get_user(env->gregs[7], (&(*grp)[MC_G7])); 1879 err |= __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 1880 err |= __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 1881 err |= __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 1882 err |= __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 1883 err |= __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 1884 err |= __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 1885 err |= __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 1886 err |= __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 1887 1888 err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp)); 1889 err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7)); 1890 1891 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 1892 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 1893 abi_ulong) != 0) 1894 goto do_sigsegv; 1895 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 1896 abi_ulong) != 0) 1897 goto do_sigsegv; 1898 err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab)); 1899 err |= __get_user(env->fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs)); 1900 { 1901 uint32_t *src, *dst; 1902 src = ucp->uc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 1903 dst = env->fpr; 1904 /* XXX: check that the CPU storage is the same as user context */ 1905 for (i = 0; i < 64; i++, dst++, src++) 1906 err |= __get_user(*dst, src); 1907 } 1908 err |= __get_user(env->fsr, 1909 &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr)); 1910 err |= __get_user(env->gsr, 1911 &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr)); 1912 if (err) 1913 goto do_sigsegv; 1914 unlock_user_struct(ucp, ucp_addr, 0); 1915 return; 1916 do_sigsegv: 1917 unlock_user_struct(ucp, ucp_addr, 0); 1918 force_sig(SIGSEGV); 1919 } 1920 1921 void 
void
sparc64_get_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    target_mcontext_t *mcp;
    abi_ulong fp, i7, w_addr;
    int err;
    unsigned int i;
    target_sigset_t target_set;
    sigset_t set;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0))
        goto do_sigsegv;

    mcp = &ucp->uc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    env->pc = env->npc;
    env->npc += 4;

    err = 0;

    sigprocmask(0, NULL, &set);
    host_to_target_sigset_internal(&target_set, &set);
    if (TARGET_NSIG_WORDS == 1) {
        err |= __put_user(target_set.sig[0],
                          (abi_ulong *)&ucp->uc_sigmask);
    } else {
        abi_ulong *src, *dst;
        src = target_set.sig;
        dst = ucp->uc_sigmask.sig;
        for (i = 0; i < sizeof(target_sigset_t) / sizeof(abi_ulong);
             i++, dst++, src++)
            err |= __put_user(*src, dst);
        if (err)
            goto do_sigsegv;
    }

    /* XXX: tstate must be saved properly */
    //    err |= __put_user(env->tstate, &((*grp)[MC_TSTATE]));
    err |= __put_user(env->pc, &((*grp)[MC_PC]));
    err |= __put_user(env->npc, &((*grp)[MC_NPC]));
    err |= __put_user(env->y, &((*grp)[MC_Y]));
    err |= __put_user(env->gregs[1], &((*grp)[MC_G1]));
    err |= __put_user(env->gregs[2], &((*grp)[MC_G2]));
    err |= __put_user(env->gregs[3], &((*grp)[MC_G3]));
    err |= __put_user(env->gregs[4], &((*grp)[MC_G4]));
    err |= __put_user(env->gregs[5], &((*grp)[MC_G5]));
    err |= __put_user(env->gregs[6], &((*grp)[MC_G6]));
    err |= __put_user(env->gregs[7], &((*grp)[MC_G7]));
    err |= __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
    err |= __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
    err |= __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
    err |= __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
    err |= __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
    err |= __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
    err |= __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
    err |= __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));

    w_addr = TARGET_STACK_BIAS + env->regwptr[UREG_I6];
    fp = i7 = 0;
    if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0)
        goto do_sigsegv;
    if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0)
        goto do_sigsegv;
    err |= __put_user(fp, &(mcp->mc_fp));
    err |= __put_user(i7, &(mcp->mc_i7));

    {
        uint32_t *src, *dst;
        src = env->fpr;
        dst = ucp->uc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        /* XXX: check that the CPU storage is the same as user context */
        for (i = 0; i < 64; i++, dst++, src++)
            err |= __put_user(*src, dst);
    }
    err |= __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
    err |= __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
    err |= __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));

    if (err)
        goto do_sigsegv;
    unlock_user_struct(ucp, ucp_addr, 1);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 1);
    force_sig(SIGSEGV);
}
#endif
#elif defined(TARGET_ABI_MIPSN64)

# warning signal handling not implemented

static void setup_frame(int sig, struct emulated_sigaction *ka,
                        target_sigset_t *set, CPUState *env)
{
    fprintf(stderr,
"setup_frame: not implemented\n"); 2023 } 2024 2025 static void setup_rt_frame(int sig, struct emulated_sigaction *ka, 2026 target_siginfo_t *info, 2027 target_sigset_t *set, CPUState *env) 2028 { 2029 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2030 } 2031 2032 long do_sigreturn(CPUState *env) 2033 { 2034 fprintf(stderr, "do_sigreturn: not implemented\n"); 2035 return -ENOSYS; 2036 } 2037 2038 long do_rt_sigreturn(CPUState *env) 2039 { 2040 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2041 return -ENOSYS; 2042 } 2043 2044 #elif defined(TARGET_ABI_MIPSN32) 2045 2046 # warning signal handling not implemented 2047 2048 static void setup_frame(int sig, struct emulated_sigaction *ka, 2049 target_sigset_t *set, CPUState *env) 2050 { 2051 fprintf(stderr, "setup_frame: not implemented\n"); 2052 } 2053 2054 static void setup_rt_frame(int sig, struct emulated_sigaction *ka, 2055 target_siginfo_t *info, 2056 target_sigset_t *set, CPUState *env) 2057 { 2058 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2059 } 2060 2061 long do_sigreturn(CPUState *env) 2062 { 2063 fprintf(stderr, "do_sigreturn: not implemented\n"); 2064 return -ENOSYS; 2065 } 2066 2067 long do_rt_sigreturn(CPUState *env) 2068 { 2069 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2070 return -ENOSYS; 2071 } 2072 2073 #elif defined(TARGET_ABI_MIPSO32) 2074 2075 struct target_sigcontext { 2076 uint32_t sc_regmask; /* Unused */ 2077 uint32_t sc_status; 2078 uint64_t sc_pc; 2079 uint64_t sc_regs[32]; 2080 uint64_t sc_fpregs[32]; 2081 uint32_t sc_ownedfp; /* Unused */ 2082 uint32_t sc_fpc_csr; 2083 uint32_t sc_fpc_eir; /* Unused */ 2084 uint32_t sc_used_math; 2085 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2086 uint64_t sc_mdhi; 2087 uint64_t sc_mdlo; 2088 target_ulong sc_hi1; /* Was sc_cause */ 2089 target_ulong sc_lo1; /* Was sc_badvaddr */ 2090 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2091 target_ulong sc_lo2; 2092 target_ulong sc_hi3; 2093 target_ulong sc_lo3; 2094 }; 2095 2096 struct sigframe { 2097 uint32_t sf_ass[4]; /* argument save space for o32 */ 2098 uint32_t sf_code[2]; /* signal trampoline */ 2099 struct target_sigcontext sf_sc; 2100 target_sigset_t sf_mask; 2101 }; 2102 2103 /* Install trampoline to jump back from signal handler */ 2104 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2105 { 2106 int err; 2107 2108 /* 2109 * Set up the return code ... 
/* Install trampoline to jump back from signal handler */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    int err;

    /*
     * Set up the return code ...
     *
     *         li      v0, __NR__foo_sigreturn
     *         syscall
     */

    /* 0x24020000 is "addiu $v0, $zero, 0", i.e. the encoding behind the
       "li v0, n" pseudo-op; 0x0000000c is the syscall instruction. */
    err = __put_user(0x24020000 + syscall, tramp + 0);
    err |= __put_user(0x0000000c, tramp + 1);
    /* flush_cache_sigtramp((unsigned long) tramp); */
    return err;
}

static inline int
setup_sigcontext(CPUState *regs, struct target_sigcontext *sc)
{
    int err = 0;

    err |= __put_user(regs->PC[regs->current_tc], &sc->sc_pc);

#define save_gp_reg(i) do {                                                 \
        err |= __put_user(regs->gpr[i][regs->current_tc], &sc->sc_regs[i]); \
    } while(0)
    __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
    save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
    save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
    save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
    save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
    save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
    save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
    save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
    save_gp_reg(31);
#undef save_gp_reg

    err |= __put_user(regs->HI[0][regs->current_tc], &sc->sc_mdhi);
    err |= __put_user(regs->LO[0][regs->current_tc], &sc->sc_mdlo);

    /* Not used yet, but might be useful if we ever have DSP support */
#if 0
    if (cpu_has_dsp) {
        err |= __put_user(mfhi1(), &sc->sc_hi1);
        err |= __put_user(mflo1(), &sc->sc_lo1);
        err |= __put_user(mfhi2(), &sc->sc_hi2);
        err |= __put_user(mflo2(), &sc->sc_lo2);
        err |= __put_user(mfhi3(), &sc->sc_hi3);
        err |= __put_user(mflo3(), &sc->sc_lo3);
        err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
    }
    /* same with 64 bit */
#ifdef CONFIG_64BIT
    err |= __put_user(regs->hi, &sc->sc_hi[0]);
    err |= __put_user(regs->lo, &sc->sc_lo[0]);
    if (cpu_has_dsp) {
        err |= __put_user(mfhi1(), &sc->sc_hi[1]);
        err |= __put_user(mflo1(), &sc->sc_lo[1]);
        err |= __put_user(mfhi2(), &sc->sc_hi[2]);
        err |= __put_user(mflo2(), &sc->sc_lo[2]);
        err |= __put_user(mfhi3(), &sc->sc_hi[3]);
        err |= __put_user(mflo3(), &sc->sc_lo[3]);
        err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
    }
#endif
#endif

#if 0
    err |= __put_user(!!used_math(), &sc->sc_used_math);

    if (!used_math())
        goto out;

    /*
     * Save FPU state to signal context.  Signal handler will "inherit"
     * current FPU state.
2181 */ 2182 preempt_disable(); 2183 2184 if (!is_fpu_owner()) { 2185 own_fpu(); 2186 restore_fp(current); 2187 } 2188 err |= save_fp_context(sc); 2189 2190 preempt_enable(); 2191 out: 2192 #endif 2193 return err; 2194 } 2195 2196 static inline int 2197 restore_sigcontext(CPUState *regs, struct target_sigcontext *sc) 2198 { 2199 int err = 0; 2200 2201 err |= __get_user(regs->CP0_EPC, &sc->sc_pc); 2202 2203 err |= __get_user(regs->HI[0][regs->current_tc], &sc->sc_mdhi); 2204 err |= __get_user(regs->LO[0][regs->current_tc], &sc->sc_mdlo); 2205 2206 #define restore_gp_reg(i) do { \ 2207 err |= __get_user(regs->gpr[i][regs->current_tc], &sc->sc_regs[i]); \ 2208 } while(0) 2209 restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3); 2210 restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6); 2211 restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9); 2212 restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12); 2213 restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15); 2214 restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18); 2215 restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21); 2216 restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24); 2217 restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27); 2218 restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30); 2219 restore_gp_reg(31); 2220 #undef restore_gp_reg 2221 2222 #if 0 2223 if (cpu_has_dsp) { 2224 err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); 2225 err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); 2226 err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); 2227 err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); 2228 err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); 2229 err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); 2230 err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 2231 } 2232 #ifdef CONFIG_64BIT 2233 err |= __get_user(regs->hi, &sc->sc_hi[0]); 2234 err |= __get_user(regs->lo, &sc->sc_lo[0]); 2235 if (cpu_has_dsp) { 2236 err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg); 2237 err |= __get_user(treg, &sc->sc_lo[1]); mthi1(treg); 2238 err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg); 2239 err |= __get_user(treg, &sc->sc_lo[2]); mthi2(treg); 2240 err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg); 2241 err |= __get_user(treg, &sc->sc_lo[3]); mthi3(treg); 2242 err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 2243 } 2244 #endif 2245 2246 err |= __get_user(used_math, &sc->sc_used_math); 2247 conditional_used_math(used_math); 2248 2249 preempt_disable(); 2250 2251 if (used_math()) { 2252 /* restore fpu context if we have used it before */ 2253 own_fpu(); 2254 err |= restore_fp_context(sc); 2255 } else { 2256 /* signal handler may have used FPU. Give it up. */ 2257 lose_fpu(); 2258 } 2259 2260 preempt_enable(); 2261 #endif 2262 return err; 2263 } 2264 /* 2265 * Determine which stack to use.. 2266 */ 2267 static inline abi_ulong 2268 get_sigframe(struct emulated_sigaction *ka, CPUState *regs, size_t frame_size) 2269 { 2270 unsigned long sp; 2271 2272 /* Default to using normal stack */ 2273 sp = regs->gpr[29][regs->current_tc]; 2274 2275 /* 2276 * FPU emulator may have it's own trampoline active just 2277 * above the user stack, 16-bytes before the next lowest 2278 * 16 byte boundary. Try to avoid trashing it. 2279 */ 2280 sp -= 32; 2281 2282 /* This is the X/Open sanctioned signal stack switching. 
/*
 * Determine which stack to use.
 */
static inline abi_ulong
get_sigframe(struct emulated_sigaction *ka, CPUState *regs, size_t frame_size)
{
    unsigned long sp;

    /* Default to using normal stack */
    sp = regs->gpr[29][regs->current_tc];

    /*
     * The FPU emulator may have its own trampoline active just
     * above the user stack, 16 bytes before the next lowest
     * 16-byte boundary.  Try to avoid trashing it.
     */
    sp -= 32;

    /* This is the X/Open sanctioned signal stack switching. */
    if ((ka->sa.sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    return (sp - frame_size) & ~7;
}

/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct emulated_sigaction * ka,
                        target_sigset_t *set, CPUState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    if (setup_sigcontext(regs, &frame->sf_sc))
        goto give_sigsegv;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        if (__put_user(set->sig[i], &frame->sf_mask.sig[i]))
            goto give_sigsegv;
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = 0 (should be cause)
     *   a2 = pointer to struct sigcontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    regs->gpr[ 4][regs->current_tc] = sig;
    regs->gpr[ 5][regs->current_tc] = 0;
    regs->gpr[ 6][regs->current_tc] = h2g(&frame->sf_sc);
    regs->gpr[29][regs->current_tc] = h2g(frame);
    regs->gpr[31][regs->current_tc] = h2g(frame->sf_code);
    /* The original kernel code sets CP0_EPC to the handler because it
     * returns to userland via eret; we cannot do that here, so we set
     * PC directly instead. */
    regs->PC[regs->current_tc] = regs->gpr[25][regs->current_tc] = ka->sa._sa_handler;
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV/*, current*/);
    return;
}
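
/* Illustrative sketch (guest-side, not part of the emulator): with the
 * register setup above, a non-SA_SIGINFO o32 handler is entered with the
 * historical three-argument MIPS convention, so a guest handler could be
 * written as below ("example_o32_handler" is hypothetical):
 */
#if 0
void example_o32_handler(int sig, int code, struct sigcontext *sc)
{
    /* sig  <- $a0, the translated signal number
     * code <- $a1, always 0 here (the kernel would pass the cause)
     * sc   <- $a2, the saved CPU state written by setup_sigcontext() */
}
#endif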
long do_sigreturn(CPUState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

#if defined(DEBUG_SIGNAL)
    fprintf(stderr, "do_sigreturn\n");
#endif
    frame_addr = regs->gpr[29][regs->current_tc];
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        if (__get_user(target_set.sig[i], &frame->sf_mask.sig[i]))
            goto badframe;
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    sigprocmask(SIG_SETMASK, &blocked, NULL);

    if (restore_sigcontext(regs, &frame->sf_sc))
        goto badframe;

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
        "move\t$29, %0\n\t"
        "j\tsyscall_exit"
        : /* no outputs */
        : "r" (&regs));
    /* Unreached */
#endif

    regs->PC[regs->current_tc] = regs->CP0_EPC;
    /* I am not sure this is right, but it seems to work;
     * maybe there is a problem with nested signals. */
    regs->CP0_EPC = 0;
    return 0;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}

static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}

long do_rt_sigreturn(CPUState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -ENOSYS;
}

#else

static void setup_frame(int sig, struct emulated_sigaction *ka,
                        target_sigset_t *set, CPUState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}

static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}

long do_sigreturn(CPUState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -ENOSYS;
}

long do_rt_sigreturn(CPUState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -ENOSYS;
}

#endif

void process_pending_signals(void *cpu_env)
{
    int sig;
    abi_ulong handler;
    sigset_t set, old_set;
    target_sigset_t target_old_set;
    struct emulated_sigaction *k;
    struct sigqueue *q;

    if (!signal_pending)
        return;

    k = sigact_table;
    for (sig = 1; sig <= TARGET_NSIG; sig++) {
        if (k->pending)
            goto handle_signal;
        k++;
    }
    /* if no signal is pending, just return */
    signal_pending = 0;
    return;

handle_signal:
#ifdef DEBUG_SIGNAL
    fprintf(stderr, "qemu: process signal %d\n", sig);
#endif
    /* dequeue signal */
    q = k->first;
    k->first = q->next;
    if (!k->first)
        k->pending = 0;

    sig = gdb_handlesig(cpu_env, sig);
    if (!sig) {
        fprintf(stderr, "Lost signal\n");
        abort();
    }

    handler = k->sa._sa_handler;
    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals.
           The others are fatal. */
        if (sig != TARGET_SIGCHLD &&
            sig != TARGET_SIGURG &&
            sig != TARGET_SIGWINCH) {
            force_sig(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        target_to_host_sigset(&set, &k->sa.sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(k->sa.sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* block the signals on the host side while the handler runs */
        sigprocmask(SIG_BLOCK, &set, &old_set);
        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &old_set);

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
        if (k->sa.sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, k, &q->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, k, &target_old_set, cpu_env);
        if (k->sa.sa_flags & TARGET_SA_RESETHAND)
            k->sa._sa_handler = TARGET_SIG_DFL;
    }
    if (q != &k->info)
        free_sigqueue(q);
}
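
/* Illustrative sketch (guest-side, not part of the emulator): the mask logic
 * above mirrors what a guest asks for through sigaction().  In the example
 * below ("on_usr1" and "example_mask_during_handler" are hypothetical),
 * SIGUSR2 stays blocked for the duration of the handler via sa_mask, and
 * SIGUSR1 itself is also blocked because SA_NODEFER is not set;
 * do_sigreturn() restores the previous mask afterwards.
 */
#if 0
#include <signal.h>
#include <string.h>

static void on_usr1(int sig)
{
    /* while we are here, both SIGUSR1 and SIGUSR2 stay blocked */
}

static void example_mask_during_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_handler = on_usr1;
    sigemptyset(&act.sa_mask);
    sigaddset(&act.sa_mask, SIGUSR2);   /* extra signal blocked in handler */
    act.sa_flags = 0;                   /* no SA_NODEFER: SIGUSR1 blocked too */
    sigaction(SIGUSR1, &act, NULL);
}
#endif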