1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5  * Copyright (C) 2004 PathScale, Inc
6  * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7  */
8 
9 #include <stdlib.h>
10 #include <stdarg.h>
11 #include <stdbool.h>
12 #include <errno.h>
13 #include <signal.h>
14 #include <string.h>
15 #include <strings.h>
16 #include <as-layout.h>
17 #include <kern_util.h>
18 #include <os.h>
19 #include <sysdep/mcontext.h>
20 #include <um_malloc.h>
21 #include <sys/ucontext.h>
22 #include <timetravel.h>
23 
/*
 * Second-level (kernel-side) signal handlers, indexed by host signal
 * number; invoked by sig_handler_common() after the host-side
 * bookkeeping (regs capture, soft-unblock) has been done.
 */
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *, void *mc) = {
	[SIGTRAP]	= relay_signal,
	[SIGFPE]	= relay_signal,
	[SIGILL]	= relay_signal,
	[SIGWINCH]	= winch,
	[SIGBUS]	= relay_signal,
	[SIGSEGV]	= segv_handler,
	[SIGIO]		= sigio_handler,
	[SIGCHLD]	= sigchld_handler,
};
34 
/*
 * Common dispatch for host signals: build a struct uml_pt_regs and call
 * the kernel-side handler registered in sig_info[].  Runs in signal
 * context, so the host's errno is saved and restored around the call.
 * @mc may be NULL when replayed from unblock_signals() (only for
 * signals whose handlers don't use it — see the comment there).
 */
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
	struct uml_pt_regs r;
	int save_errno = errno;

	r.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		get_regs_from_mc(&r, mc);
		GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
	}

	/* enable signals if sig isn't IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGCHLD))
		unblock_signals_trace();

	(*sig_info[sig])(sig, si, &r, mc);

	errno = save_errno;
}
55 
/*
 * These are the asynchronous signals.  SIGPROF is excluded because we want to
 * be able to profile all of UML, not just the non-critical sections.  If
 * profiling is not thread-safe, then that is not my problem.  We can disable
 * profiling when SMP is enabled in that case.
 */
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)

#define SIGALRM_BIT 1
#define SIGALRM_MASK (1 << SIGALRM_BIT)

#define SIGCHLD_BIT 2
#define SIGCHLD_MASK (1 << SIGCHLD_BIT)

/* Non-zero iff "soft" signal delivery is currently enabled. */
int signals_enabled;
#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
/*
 * "Hard" block state for time-travel mode: nesting depth, and the count
 * of SIGIOs that arrived while hard-blocked (replayed on hard-unblock).
 */
static int signals_blocked, signals_blocked_pending;
#endif
/* Bitmask of SIG*_MASK bits recorded while delivery was soft-blocked. */
static unsigned int signals_pending;
/* Bitmask of handlers currently running; used as a re-entry guard. */
static unsigned int signals_active = 0;
77 
/*
 * First-level handler for the process/IRQ signals.  If delivery is
 * blocked, the signal is recorded (signals_blocked_pending for the
 * hard-block case, signals_pending otherwise) and replayed later by
 * unblock_signals()/unblock_signals_hard(); otherwise it is dispatched
 * through sig_handler_common() with signals blocked around the call.
 */
static void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
	int enabled = signals_enabled;

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	if ((signals_blocked ||
	     __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
	    (sig == SIGIO)) {
		/* increment so unblock will do another round */
		__atomic_add_fetch(&signals_blocked_pending, 1,
				   __ATOMIC_SEQ_CST);
		return;
	}
#endif

	if (!enabled && (sig == SIGIO)) {
		/*
		 * In TT_MODE_EXTERNAL, need to still call time-travel
		 * handlers. This will mark signals_pending by itself
		 * (only if necessary.)
		 * Note we won't get here if signals are hard-blocked
		 * (which is handled above), in that case the hard-
		 * unblock will handle things.
		 */
		if (time_travel_mode == TT_MODE_EXTERNAL)
			sigio_run_timetravel_handlers();
		else
			signals_pending |= SIGIO_MASK;
		return;
	}

	if (!enabled && (sig == SIGCHLD)) {
		signals_pending |= SIGCHLD_MASK;
		return;
	}

	block_signals_trace();

	sig_handler_common(sig, si, mc);

	um_set_signals_trace(enabled);
}
120 
121 static void timer_real_alarm_handler(mcontext_t *mc)
122 {
123 	struct uml_pt_regs regs;
124 
125 	if (mc != NULL)
126 		get_regs_from_mc(&regs, mc);
127 	else
128 		memset(&regs, 0, sizeof(regs));
129 	timer_handler(SIGALRM, NULL, &regs);
130 }
131 
/*
 * First-level SIGALRM handler.  When delivery is soft-blocked the tick
 * is only recorded in signals_pending (replayed by unblock_signals());
 * otherwise the timer runs with signals blocked, with SIGALRM_MASK set
 * in signals_active so the replay loop won't re-enter it.
 */
static void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!signals_enabled) {
		signals_pending |= SIGALRM_MASK;
		return;
	}

	block_signals_trace();

	/* mark the timer as running: re-entry guard for unblock_signals() */
	signals_active |= SIGALRM_MASK;

	timer_real_alarm_handler(mc);

	signals_active &= ~SIGALRM_MASK;

	um_set_signals_trace(enabled);
}
152 
153 void deliver_alarm(void) {
154     timer_alarm_handler(SIGALRM, NULL, NULL);
155 }
156 
/* Register the timer (SIGALRM) handler with the host. */
void timer_set_signal_handler(void)
{
	set_handler(SIGALRM);
}
161 
162 void set_sigstack(void *sig_stack, int size)
163 {
164 	stack_t stack = {
165 		.ss_flags = 0,
166 		.ss_sp = sig_stack,
167 		.ss_size = size
168 	};
169 
170 	if (sigaltstack(&stack, NULL) != 0)
171 		panic("enabling signal stack failed, errno = %d\n", errno);
172 }
173 
/* SIGUSR1 is used only as a PM wakeup; all arguments are unused. */
static void sigusr1_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	uml_pm_wake();
}
178 
/* Register SIGUSR1 so the instance can be woken from suspend. */
void register_pm_wake_signal(void)
{
	set_handler(SIGUSR1);
}
183 
/*
 * First-level dispatch table used by hard_handler(), indexed by host
 * signal number.  The process/IRQ signals share sig_handler(); the
 * timer signal has its own first-level handler.
 */
static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
	[SIGSEGV] = sig_handler,
	[SIGBUS] = sig_handler,
	[SIGILL] = sig_handler,
	[SIGFPE] = sig_handler,
	[SIGTRAP] = sig_handler,

	[SIGIO] = sig_handler,
	[SIGWINCH] = sig_handler,
	/* SIGCHLD is only actually registered in seccomp mode. */
	[SIGCHLD] = sig_handler,
	[SIGALRM] = timer_alarm_handler,

	[SIGUSR1] = sigusr1_handler,
};
199 
200 static void hard_handler(int sig, siginfo_t *si, void *p)
201 {
202 	ucontext_t *uc = p;
203 	mcontext_t *mc = &uc->uc_mcontext;
204 
205 	(*handlers[sig])(sig, (struct siginfo *)si, mc);
206 }
207 
208 void set_handler(int sig)
209 {
210 	struct sigaction action;
211 	int flags = SA_SIGINFO | SA_ONSTACK;
212 	sigset_t sig_mask;
213 
214 	action.sa_sigaction = hard_handler;
215 
216 	/* block irq ones */
217 	sigemptyset(&action.sa_mask);
218 	sigaddset(&action.sa_mask, SIGIO);
219 	sigaddset(&action.sa_mask, SIGWINCH);
220 	sigaddset(&action.sa_mask, SIGALRM);
221 
222 	if (sig == SIGSEGV)
223 		flags |= SA_NODEFER;
224 
225 	if (sigismember(&action.sa_mask, sig))
226 		flags |= SA_RESTART; /* if it's an irq signal */
227 
228 	action.sa_flags = flags;
229 	action.sa_restorer = NULL;
230 	if (sigaction(sig, &action, NULL) < 0)
231 		panic("sigaction failed - errno = %d\n", errno);
232 
233 	sigemptyset(&sig_mask);
234 	sigaddset(&sig_mask, sig);
235 	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
236 		panic("sigprocmask failed - errno = %d\n", errno);
237 }
238 
/* Raise SIGIO in the current process. */
void send_sigio_to_self(void)
{
	int pid = os_getpid();

	kill(pid, SIGIO);
}
243 
244 int change_sig(int signal, int on)
245 {
246 	sigset_t sigset;
247 
248 	sigemptyset(&sigset);
249 	sigaddset(&sigset, signal);
250 	if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
251 		return -errno;
252 
253 	return 0;
254 }
255 
/*
 * Soft-disable signal delivery: handlers that fire after this only
 * record the signal in signals_pending instead of running.
 */
void block_signals(void)
{
	signals_enabled = 0;
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}
267 
/*
 * Soft-enable signal delivery and replay any signals recorded in
 * signals_pending while delivery was blocked.  Loops because the
 * replayed handlers run with signals disabled, so new signals may
 * become pending while they execute.
 */
void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	signals_enabled = 1;
#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	deliver_time_travel_irqs();
#endif

	/*
	 * We loop because the IRQ handler returns with interrupts off.  So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals.  This
		 * way, signals_pending won't be changed while we're reading it.
		 *
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order, so have the barrier here.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called.  They will
		 * be enabled again above. We need to trace this, as we're
		 * expected to be enabling interrupts already, but any more
		 * tracing that happens inside the handlers we call for the
		 * pending signals will mess up the tracing state.
		 */
		signals_enabled = 0;
		um_trace_signals_off();

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 *
		 * SIGIO's handler doesn't use siginfo or mcontext,
		 * so they can be NULL.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL, NULL);

		if (save_pending & SIGCHLD_MASK) {
			struct uml_pt_regs regs = {};

			sigchld_handler(SIGCHLD, NULL, &regs, NULL);
		}

		/* Do not reenter the handler */

		if ((save_pending & SIGALRM_MASK) && (!(signals_active & SIGALRM_MASK)))
			timer_real_alarm_handler(NULL);

		/* Rerun the loop only if there is still pending SIGIO and not in TIMER handler */

		if (!(signals_pending & SIGIO_MASK) && (signals_active & SIGALRM_MASK))
			return;

		/* Re-enable signals and trace that we're doing so. */
		um_trace_signals_on();
		signals_enabled = 1;
	}
}
344 
345 int um_set_signals(int enable)
346 {
347 	int ret;
348 	if (signals_enabled == enable)
349 		return enable;
350 
351 	ret = signals_enabled;
352 	if (enable)
353 		unblock_signals();
354 	else block_signals();
355 
356 	return ret;
357 }
358 
359 int um_set_signals_trace(int enable)
360 {
361 	int ret;
362 	if (signals_enabled == enable)
363 		return enable;
364 
365 	ret = signals_enabled;
366 	if (enable)
367 		unblock_signals_trace();
368 	else
369 		block_signals_trace();
370 
371 	return ret;
372 }
373 
374 #if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
/* Record a SIGIO to be handled by the next unblock_signals(). */
void mark_sigio_pending(void)
{
	/*
	 * It would seem that this should be atomic so
	 * it isn't a read-modify-write with a signal
	 * that could happen in the middle, losing the
	 * value set by the signal.
	 *
	 * However, this function is only called when in
	 * time-travel=ext simulation mode, in which case
	 * the only signal ever pending is SIGIO, which
	 * is blocked while this can be called, and the
	 * timer signal (SIGALRM) cannot happen.
	 */
	signals_pending |= SIGIO_MASK;
}
391 
/*
 * Hard-block SIGIO delivery (nestable): while hard-blocked,
 * sig_handler() only counts arriving SIGIOs in
 * signals_blocked_pending; they are replayed by
 * unblock_signals_hard().
 */
void block_signals_hard(void)
{
	signals_blocked++;
	barrier();
}
397 
/*
 * Undo one level of block_signals_hard().  When the outermost level is
 * released, replay every SIGIO counted in signals_blocked_pending
 * while hard-blocked; only the outermost (non-recursive) call does
 * the replay — see the 'unblocking' ownership flag below.
 */
void unblock_signals_hard(void)
{
	static bool unblocking;

	if (!signals_blocked)
		panic("unblocking signals while not blocked");

	if (--signals_blocked)
		return;
	/*
	 * Must be set to 0 before we check pending so the
	 * SIGIO handler will run as normal unless we're still
	 * going to process signals_blocked_pending.
	 */
	barrier();

	/*
	 * Note that block_signals_hard()/unblock_signals_hard() can be called
	 * within the unblock_signals()/sigio_run_timetravel_handlers() below.
	 * This would still be prone to race conditions since it's actually a
	 * call _within_ e.g. vu_req_read_message(), where we observed this
	 * issue, which loops. Thus, if the inner call handles the recorded
	 * pending signals, we can get out of the inner call with the real
	 * signal hander no longer blocked, and still have a race. Thus don't
	 * handle unblocking in the inner call, if it happens, but only in
	 * the outermost call - 'unblocking' serves as an ownership for the
	 * signals_blocked_pending decrement.
	 */
	if (unblocking)
		return;
	unblocking = true;

	while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
		if (signals_enabled) {
			/* signals are enabled so we can touch this */
			signals_pending |= SIGIO_MASK;
			/*
			 * this is a bit inefficient, but that's
			 * not really important
			 */
			block_signals();
			unblock_signals();
		} else {
			/*
			 * we need to run time-travel handlers even
			 * if not enabled
			 */
			sigio_run_timetravel_handlers();
		}

		/*
		 * The decrement of signals_blocked_pending must be atomic so
		 * that the signal handler will either happen before or after
		 * the decrement, not during a read-modify-write:
		 *  - If it happens before, it can increment it and we'll
		 *    decrement it and do another round in the loop.
		 *  - If it happens after it'll see 0 for both signals_blocked
		 *    and signals_blocked_pending and thus run the handler as
		 *    usual (subject to signals_enabled, but that's unrelated.)
		 *
		 * Note that a call to unblock_signals_hard() within the calls
		 * to unblock_signals() or sigio_run_timetravel_handlers() above
		 * will do nothing due to the 'unblocking' state, so this cannot
		 * underflow as the only one decrementing will be the outermost
		 * one.
		 */
		if (__atomic_sub_fetch(&signals_blocked_pending, 1,
				       __ATOMIC_SEQ_CST) < 0)
			panic("signals_blocked_pending underflow");
	}

	unblocking = false;
}
471 #endif
472