/*
 * Emulation of BSD signals
 *
 * Copyright (c) 2003 - 2008 Fabrice Bellard
 * Copyright (c) 2013 Stacey Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "trace.h"
#include "hw/core/tcg-cpu-ops.h"
#include "host-signal.h"

/*
 * Stubbed out routines until we merge signal support from bsd-user
 * fork.
 */

/* Emulated guest signal dispositions, indexed by target signal number - 1. */
static struct target_sigaction sigact_table[TARGET_NSIG];
/* Forward declaration: installed for default-fatal signals in signal_init(). */
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc);

/*
 * The BSD ABIs use the same signal numbers across all the CPU architectures, so
 * (unlike Linux) these functions are just the identity mapping. This might not
 * be true for XyzBSD running on AbcBSD, which doesn't currently work.
40 */ 41 int host_to_target_signal(int sig) 42 { 43 return sig; 44 } 45 46 int target_to_host_signal(int sig) 47 { 48 return sig; 49 } 50 51 /* Adjust the signal context to rewind out of safe-syscall if we're in it */ 52 static inline void rewind_if_in_safe_syscall(void *puc) 53 { 54 ucontext_t *uc = (ucontext_t *)puc; 55 uintptr_t pcreg = host_signal_pc(uc); 56 57 if (pcreg > (uintptr_t)safe_syscall_start 58 && pcreg < (uintptr_t)safe_syscall_end) { 59 host_signal_set_pc(uc, (uintptr_t)safe_syscall_start); 60 } 61 } 62 63 static bool has_trapno(int tsig) 64 { 65 return tsig == TARGET_SIGILL || 66 tsig == TARGET_SIGFPE || 67 tsig == TARGET_SIGSEGV || 68 tsig == TARGET_SIGBUS || 69 tsig == TARGET_SIGTRAP; 70 } 71 72 /* Siginfo conversion. */ 73 74 /* 75 * Populate tinfo w/o swapping based on guessing which fields are valid. 76 */ 77 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, 78 const siginfo_t *info) 79 { 80 int sig = host_to_target_signal(info->si_signo); 81 int si_code = info->si_code; 82 int si_type; 83 84 /* 85 * Make sure we that the variable portion of the target siginfo is zeroed 86 * out so we don't leak anything into that. 87 */ 88 memset(&tinfo->_reason, 0, sizeof(tinfo->_reason)); 89 90 /* 91 * This is awkward, because we have to use a combination of the si_code and 92 * si_signo to figure out which of the union's members are valid.o We 93 * therefore make our best guess. 94 * 95 * Once we have made our guess, we record it in the top 16 bits of 96 * the si_code, so that tswap_siginfo() later can use it. 97 * tswap_siginfo() will strip these top bits out before writing 98 * si_code to the guest (sign-extending the lower bits). 
99 */ 100 tinfo->si_signo = sig; 101 tinfo->si_errno = info->si_errno; 102 tinfo->si_code = info->si_code; 103 tinfo->si_pid = info->si_pid; 104 tinfo->si_uid = info->si_uid; 105 tinfo->si_status = info->si_status; 106 tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr; 107 /* 108 * si_value is opaque to kernel. On all FreeBSD platforms, 109 * sizeof(sival_ptr) >= sizeof(sival_int) so the following 110 * always will copy the larger element. 111 */ 112 tinfo->si_value.sival_ptr = 113 (abi_ulong)(unsigned long)info->si_value.sival_ptr; 114 115 switch (si_code) { 116 /* 117 * All the SI_xxx codes that are defined here are global to 118 * all the signals (they have values that none of the other, 119 * more specific signal info will set). 120 */ 121 case SI_USER: 122 case SI_LWP: 123 case SI_KERNEL: 124 case SI_QUEUE: 125 case SI_ASYNCIO: 126 /* 127 * Only the fixed parts are valid (though FreeBSD doesn't always 128 * set all the fields to non-zero values. 129 */ 130 si_type = QEMU_SI_NOINFO; 131 break; 132 case SI_TIMER: 133 tinfo->_reason._timer._timerid = info->_reason._timer._timerid; 134 tinfo->_reason._timer._overrun = info->_reason._timer._overrun; 135 si_type = QEMU_SI_TIMER; 136 break; 137 case SI_MESGQ: 138 tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd; 139 si_type = QEMU_SI_MESGQ; 140 break; 141 default: 142 /* 143 * We have to go based on the signal number now to figure out 144 * what's valid. 145 */ 146 if (has_trapno(sig)) { 147 tinfo->_reason._fault._trapno = info->_reason._fault._trapno; 148 si_type = QEMU_SI_FAULT; 149 } 150 #ifdef TARGET_SIGPOLL 151 /* 152 * FreeBSD never had SIGPOLL, but emulates it for Linux so there's 153 * a chance it may popup in the future. 
154 */ 155 if (sig == TARGET_SIGPOLL) { 156 tinfo->_reason._poll._band = info->_reason._poll._band; 157 si_type = QEMU_SI_POLL; 158 } 159 #endif 160 /* 161 * Unsure that this can actually be generated, and our support for 162 * capsicum is somewhere between weak and non-existant, but if we get 163 * one, then we know what to save. 164 */ 165 if (sig == TARGET_SIGTRAP) { 166 tinfo->_reason._capsicum._syscall = 167 info->_reason._capsicum._syscall; 168 si_type = QEMU_SI_CAPSICUM; 169 } 170 break; 171 } 172 tinfo->si_code = deposit32(si_code, 24, 8, si_type); 173 } 174 175 /* 176 * Queue a signal so that it will be send to the virtual CPU as soon as 177 * possible. 178 */ 179 void queue_signal(CPUArchState *env, int sig, int si_type, 180 target_siginfo_t *info) 181 { 182 qemu_log_mask(LOG_UNIMP, "No signal queueing, dropping signal %d\n", sig); 183 } 184 185 static int fatal_signal(int sig) 186 { 187 188 switch (sig) { 189 case TARGET_SIGCHLD: 190 case TARGET_SIGURG: 191 case TARGET_SIGWINCH: 192 case TARGET_SIGINFO: 193 /* Ignored by default. */ 194 return 0; 195 case TARGET_SIGCONT: 196 case TARGET_SIGSTOP: 197 case TARGET_SIGTSTP: 198 case TARGET_SIGTTIN: 199 case TARGET_SIGTTOU: 200 /* Job control signals. */ 201 return 0; 202 default: 203 return 1; 204 } 205 } 206 207 /* 208 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the 209 * 'force' part is handled in process_pending_signals(). 
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    /* Build a minimal QEMU_SI_FAULT siginfo and hand it to the queue. */
    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info.si_addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

/*
 * Host SA_SIGINFO handler installed by signal_init(). Converts the host
 * siginfo into target form, records it in the per-task sigtab, and kicks
 * the CPU out of the translation loop. Genuine (non-spoofed) SIGSEGV and
 * SIGBUS from guest memory accesses are intercepted here and turned into
 * synchronous guest exceptions via cpu_loop_exit_sigsegv()/sigbus().
 *
 * Runs in async-signal context: only the ucontext and the TaskState
 * sigtab are touched before exiting.
 */
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  (si_code > 0 means the
     * signal was raised by the kernel for a fault, not sent by a process.)
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
                                                pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            /* Restore the pre-handler mask before leaving signal context. */
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
            /*
             * NOTE(review): a SIGBUS with si_code != BUS_ADRALN falls
             * through to the generic delivery path below with the mask
             * already restored — presumably intentional; confirm.
             */
        }

        sync_sig = true;
    }

    /* Get the target signal number. */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(cpu, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);

    /* Record the signal as pending for later delivery to the guest. */
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Interrupt the virtual CPU as soon as possible. */
    cpu_exit(thread_cpu);
}

/*
 * Record the guest-visible default/ignore disposition for each target
 * signal and install host_signal_handler for every default-fatal signal.
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_sigaction = host_signal_handler;
    act.sa_flags = SA_SIGINFO;

    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        /* Leave SIGPROF to the host profiler. */
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        /* Mirror the inherited host disposition into sigact_table. */
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /*
         * If there's already a handler installed then something has
         * gone horribly wrong, so don't even try to handle that case.
         * Install some handlers for our own use. We need at least
         * SIGSEGV and SIGBUS, to detect exceptions. We can not just
         * trap all signals because it affects syscall interrupt
         * behavior. But do trap all default-fatal signals.
         */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Stub: guest signal delivery is not implemented yet (see queue_signal). */
void process_pending_signals(CPUArchState *cpu_env)
{
}

/*
 * Raise a guest SIGSEGV for a faulting guest access at addr, after giving
 * the CPU backend a chance to record the fault, then exit the cpu loop
 * restoring state at host return address ra.  Does not return.
 */
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/*
 * Raise a guest SIGBUS (misaligned access) for addr, mirroring
 * cpu_loop_exit_sigsegv() above.  Does not return.
 */
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}