/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/cutils.h"
#include "gdbstub/user.h"
#include "exec/page-protection.h"
#include "accel/tcg/cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/cpu_loop.h"
#include "user/page-protection.h"
#include "user/safe-syscall.h"
#include "user/signal.h"
#include "tcg/tcg.h"

/* target_siginfo_t must fit in gdbstub's siginfo save area. */
QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
        MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig >= _NSIG) {
        return TARGET_NSIG + 1;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig > TARGET_NSIG) {
        return _NSIG;
    }
    return target_to_host_signal_table[sig];
}

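/*
 * Guest sigset helpers. Signal numbers are 1-based, so signal n is
 * represented by bit (n - 1) % TARGET_NSIG_BPW of word
 * (n - 1) / TARGET_NSIG_BPW, mirroring the kernel's sigset layout.
 */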
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
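/*
 * Illustrative call pattern (a sketch only; names here are illustrative,
 * but the rt_sigprocmask syscall path does roughly this after converting
 * the guest sigset):
 *
 *     sigset_t set, oldset;
 *     target_to_host_sigset(&set, &target_set);
 *     ret = do_sigprocmask(how, &set, &oldset);
 *     if (ret == -QEMU_ERESTARTSYS) {
 *         return ret;    // deliver the pending signal, restart the syscall
 *     }
 */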
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = get_task_state(thread_cpu);

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = get_task_state(thread_cpu);

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = get_task_state(thread_cpu);

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                      | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

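    /*
     * Pack our guess into the top half of si_code: for example,
     * deposit32(SI_USER, 16, 16, QEMU_SI_KILL) leaves SI_USER in the low
     * 16 bits and records QEMU_SI_KILL in the high 16 bits, which
     * tswap_siginfo() recovers with extract32()/sextract32() below.
     */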
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

int host_interrupt_signal;

static void signal_table_init(const char *rtsig_map)
{
    int hsig, tsig, count;

    if (rtsig_map) {
        /*
         * Map host RT signals to target RT signals according to the
         * user-provided specification.
         */
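        /*
         * The map is parsed as comma-separated "tsig hsig count"
         * triples; for example (values illustrative only),
         * QEMU_RTSIG_MAP="34 40 2,36 44 1" maps target signals 34-35
         * to host signals 40-41 and target signal 36 to host signal 44.
         */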
        const char *s = rtsig_map;

        while (true) {
            int i;

            if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) {
                fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }

            for (i = 0; i < count; i++, tsig++, hsig++) {
                if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) {
                    fprintf(stderr, "%d is not a target rt signal\n", tsig);
                    exit(EXIT_FAILURE);
                }
                if (hsig < SIGRTMIN || hsig > SIGRTMAX) {
                    fprintf(stderr, "%d is not a host rt signal\n", hsig);
                    exit(EXIT_FAILURE);
                }
                if (host_to_target_signal_table[hsig]) {
                    fprintf(stderr, "%d already maps %d\n",
                            hsig, host_to_target_signal_table[hsig]);
                    exit(EXIT_FAILURE);
                }
                host_to_target_signal_table[hsig] = tsig;
            }

            if (*s) {
                s++;
            } else {
                break;
            }
        }
    } else {
        /*
         * Default host-to-target RT signal mapping.
         *
         * Signals are supported starting from TARGET_SIGRTMIN and going up
         * until we run out of host realtime signals. Glibc uses the lower 2
         * RT signals and (hopefully) nobody uses the upper ones.
         * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
         * To fix this properly we would need to do manual signal delivery
         * multiplexed over a single host signal.
         * Attempts to configure "missing" signals via sigaction will be
         * silently ignored.
         *
         * Reserve two signals for internal usage (see below).
         */

        hsig = SIGRTMIN + 2;
        for (tsig = TARGET_SIGRTMIN;
             hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
             hsig++, tsig++) {
            host_to_target_signal_table[hsig] = tsig;
        }
    }

    /*
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort. When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */

    host_to_target_signal_table[SIGABRT] = 0;
    for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) {
        if (!host_to_target_signal_table[hsig]) {
            if (host_interrupt_signal) {
                host_to_target_signal_table[hsig] = TARGET_SIGABRT;
                break;
            } else {
                host_interrupt_signal = hsig;
            }
        }
    }
    if (hsig > SIGRTMAX) {
        fprintf(stderr,
                "No rt signals left for interrupt and SIGABRT mapping\n");
        exit(EXIT_FAILURE);
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            if (target_to_host_signal_table[tsig]) {
                fprintf(stderr, "%d is already mapped to %d\n",
                        tsig, target_to_host_signal_table[tsig]);
                exit(EXIT_FAILURE);
            }
            target_to_host_signal_table[tsig] = hsig;
        }
    }

    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

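    /*
     * From here on both tables are total: unmapped host signals read as
     * TARGET_NSIG + 1 and unmapped target signals read as _NSIG, both of
     * which fail the range checks in host_to_target_signal() and
     * target_to_host_signal().
     */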
    trace_signal_table_init(count);
}

void signal_init(const char *rtsig_map)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init(rtsig_map);

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may have configured ignored signals; all other
     * signals are at their defaults. For any target signal that has no
     * host mapping, set the guest disposition to ignore. For every
     * core_dump_signal(), install our host signal handler so that we
     * may invoke dump_core_and_abort(). This includes SIGSEGV and
     * SIGBUS, which also need our signal handler for paging and
     * exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }

    sigaction(host_interrupt_signal, &act, NULL);
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
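    /*
     * EXCP_INTERRUPT returns control to the cpu loop, where
     * process_pending_signals() will deliver the fault we just queued.
     */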
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal. Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}

static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process;
         * we don't want a coredump of qemu itself. */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}


/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

static G_NORETURN
void die_from_signal(siginfo_t *info)
{
    char sigbuf[4], codebuf[12];
    const char *sig, *code = NULL;

    switch (info->si_signo) {
    case SIGSEGV:
        sig = "SEGV";
        switch (info->si_code) {
        case SEGV_MAPERR:
            code = "MAPERR";
            break;
        case SEGV_ACCERR:
            code = "ACCERR";
            break;
        }
        break;
    case SIGBUS:
        sig = "BUS";
        switch (info->si_code) {
        case BUS_ADRALN:
            code = "ADRALN";
            break;
        case BUS_ADRERR:
            code = "ADRERR";
            break;
        }
        break;
    case SIGILL:
        sig = "ILL";
        switch (info->si_code) {
        case ILL_ILLOPC:
            code = "ILLOPC";
            break;
        case ILL_ILLOPN:
            code = "ILLOPN";
            break;
        case ILL_ILLADR:
            code = "ILLADR";
            break;
        case ILL_PRVOPC:
            code = "PRVOPC";
            break;
        case ILL_PRVREG:
            code = "PRVREG";
            break;
        case ILL_COPROC:
            code = "COPROC";
            break;
        }
        break;
    case SIGFPE:
        sig = "FPE";
        switch (info->si_code) {
        case FPE_INTDIV:
            code = "INTDIV";
            break;
        case FPE_INTOVF:
            code = "INTOVF";
            break;
        }
        break;
    case SIGTRAP:
        sig = "TRAP";
        break;
    default:
        snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
        sig = sigbuf;
        break;
    }
    if (code == NULL) {
        snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
        code = codebuf;
    }

    error_report("QEMU internal SIG%s {code=%s, addr=%p}",
                 sig, code, info->si_addr);
    die_with_signal(info->si_signo);
}

static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}

static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                     host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
    return pc;
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask;

    if (host_sig == host_interrupt_signal) {
        ts->signal_pending = 1;
        cpu_exit(thread_cpu);
        return;
    }

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding. Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            pc = host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for every possible rt signal even if they
             * don't need it. An error would abort them, whereas there is
             * no problem in not having the signal available later.
             * This is the case for golang,
             * see https://github.com/golang/go/issues/33746,
             * so we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_siginfo_t unswapped;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = get_task_state(cpu);

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /*
     * Writes out siginfo values byteswapped as appropriate for the target.
     * It also cleans the si_type from si_code, making it correct for
     * the target. We must hold on to the original unswapped copy for
     * strace below, because si_type is still required there.
     */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        unswapped = k->info;
    }
    tswap_siginfo(&k->info, &k->info);

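    /*
     * Let the gdbstub see the signal first: a result of 0 means the
     * debugger consumed it, which we treat below as SIG_IGN.
     */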
    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &unswapped);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = get_task_state(cpu);
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}