// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>
#include <uapi/linux/pidfd.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

#include "time/posix-timers.h"

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

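/*
 * Return true if @sig can be dropped at generation time because @t would
 * ignore it: blocked signals are never dropped, and a ptraced task gets
 * to see everything except SIGKILL.
 */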
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

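/*
 * Recompute TIF_SIGPENDING for @t; returns true if the flag is set.
 * Deliberately never clears the flag here, see the comment below.
 */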
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know they should
	 * clear it do so.
	 */
	return false;
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current)) {
		if (unlikely(test_thread_flag(TIF_SIGPENDING)))
			clear_thread_flag(TIF_SIGPENDING);
	}
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

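/*
 * The first sigset word is scanned with SYNCHRONOUS_MASK priority: with,
 * say, both SIGSEGV and SIGUSR1 pending and unblocked, next_signal()
 * reports SIGSEGV first so the fault that raised it is handled promptly.
 */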
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask was set, %false if the call became a no-op because
 * @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

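/*
 * Have a newly created thread join a group stop that is already in
 * progress, or pick up a stop that has just completed, in its signal
 * group.
 */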
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
				       int override_rlimit)
{
	struct ucounts *ucounts;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
					    override_rlimit);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
		print_dropped_signal(sig);
		return NULL;
	}

	return ucounts;
}

static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
			    const unsigned int sigqueue_flags)
{
	INIT_LIST_HEAD(&q->list);
	q->flags = sigqueue_flags;
	q->ucounts = ucounts;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
				       int override_rlimit)
{
	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
	struct sigqueue *q;

	if (!ucounts)
		return NULL;

	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	if (!q) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
		return NULL;
	}

	__sigqueue_init(q, ucounts, 0);
	return q;
}

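/*
 * Release a queue entry: preallocated posix-timer entries only drop their
 * reference, everything else returns its RLIMIT_SIGPENDING slot and goes
 * back to the slab cache.
 */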
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC) {
		posixtimer_sigqueue_putref(q);
		return;
	}
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If dying, we handle all new signals by ignoring them */
	if (fatal_signal_pending(tsk))
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   struct sigqueue **timer_sigq)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		/*
		 * posix-timer signals are preallocated and freed when the last
		 * reference count is dropped in posixtimer_deliver_signal() or
		 * immediately on timer deletion when the signal is not pending.
		 * Spare the extra round through __sigqueue_free() which is
		 * ignoring preallocated signals.
		 */
		if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
			*timer_sigq = first;
		else
			__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, timer_sigq);
	return sig;
}

/*
 * Try to dequeue a signal. If a deliverable signal is found fill in the
 * caller provided siginfo and return the signal number. Otherwise return
 * 0.
 */
int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
{
	struct task_struct *tsk = current;
	struct sigqueue *timer_sigq;
	int signr;

	lockdep_assert_held(&tsk->sighand->siglock);

again:
	*type = PIDTYPE_PID;
	timer_sigq = NULL;
	signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &timer_sigq);

		if (unlikely(signr == SIGALRM))
			posixtimer_rearm_itimer(tsk);
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}

	if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
		if (!posixtimer_deliver_signal(info, timer_sigq))
			goto again;
	}

	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

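/*
 * Pull a queued synchronous (fault) signal out of the private pending
 * list so it can be handled ahead of unrelated asynchronous signals.
 */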
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! We rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);

static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
{
	if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
		__sigqueue_free(q);
	else
		posixtimer_sig_ignore(tsk, q);
}

/* Remove signals in mask from the pending set and queue. */
static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	lockdep_assert_held(&p->sighand->siglock);

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			sigqueue_free_ignored(p, q);
		}
	}
}

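/*
 * SEND_SIG_NOINFO and SEND_SIG_PRIV are sentinel siginfo pointers (0 and 1
 * respectively), so plain pointer comparisons identify them.
 */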
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(p, &flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(p, &flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

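/*
 * Pick a thread to wake up for a freshly generated signal and, for
 * group-fatal signals, start taking the whole thread group down by
 * putting SIGKILL into every thread's private pending set.
 */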
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}

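/*
 * Legacy (non-realtime) signals do not queue: a second instance arriving
 * while the first is still pending is silently coalesced.
 */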
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

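/*
 * Slow path of signal generation: with the target's siglock held, allocate
 * and fill a sigqueue entry (if the rlimit permits), mark the signal
 * pending and hand off to complete_signal() to pick a thread to wake.
 */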
static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special or have ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

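/* Parse the "print-fatal-signals=" boot option. */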
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int count = 0;

	p->signal->group_stop_count = 0;

	for_other_threads(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
					 unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a thread group or to the
 * individual thread if type == PIDTYPE_PID.
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}

static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
				struct pid *pid, enum pid_type type)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, type);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;
		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user 	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer, those low 32bits will be stored at a
 * higher address than they would be in a 32bit pointer.  So userspace
 * will not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr)
{
	return force_sig_fault_to_task(sig, code, addr, current);
}

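/*
 * force_sig_fault() above and send_sig_fault() below are the usual way
 * for architecture fault handlers to raise a fault signal.  A typical
 * call (a sketch, not code from this file; "address" stands in for the
 * faulting address):
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
 */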
send_sig_fault(int sig,int code,void __user * addr,struct task_struct * t)1707 int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1708 {
1709 	struct kernel_siginfo info;
1710 
1711 	clear_siginfo(&info);
1712 	info.si_signo = sig;
1713 	info.si_errno = 0;
1714 	info.si_code  = code;
1715 	info.si_addr  = addr;
1716 	return send_sig_info(info.si_signo, &info, t);
1717 }
1718 
force_sig_mceerr(int code,void __user * addr,short lsb)1719 int force_sig_mceerr(int code, void __user *addr, short lsb)
1720 {
1721 	struct kernel_siginfo info;
1722 
1723 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1724 	clear_siginfo(&info);
1725 	info.si_signo = SIGBUS;
1726 	info.si_errno = 0;
1727 	info.si_code = code;
1728 	info.si_addr = addr;
1729 	info.si_addr_lsb = lsb;
1730 	return force_sig_info(&info);
1731 }
1732 
send_sig_mceerr(int code,void __user * addr,short lsb,struct task_struct * t)1733 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1734 {
1735 	struct kernel_siginfo info;
1736 
1737 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1738 	clear_siginfo(&info);
1739 	info.si_signo = SIGBUS;
1740 	info.si_errno = 0;
1741 	info.si_code = code;
1742 	info.si_addr = addr;
1743 	info.si_addr_lsb = lsb;
1744 	return send_sig_info(info.si_signo, &info, t);
1745 }
1746 EXPORT_SYMBOL(send_sig_mceerr);
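
/*
 * Sketch of a memory-failure style caller (hypothetical): reporting an
 * action-optional machine check for a poisoned page to the owning task,
 * with the least significant bit of the corrupted range in si_addr_lsb:
 *
 *	send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr, PAGE_SHIFT, tsk);
 */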
1747 
1748 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1749 {
1750 	struct kernel_siginfo info;
1751 
1752 	clear_siginfo(&info);
1753 	info.si_signo = SIGSEGV;
1754 	info.si_errno = 0;
1755 	info.si_code  = SEGV_BNDERR;
1756 	info.si_addr  = addr;
1757 	info.si_lower = lower;
1758 	info.si_upper = upper;
1759 	return force_sig_info(&info);
1760 }
1761 
1762 #ifdef SEGV_PKUERR
1763 int force_sig_pkuerr(void __user *addr, u32 pkey)
1764 {
1765 	struct kernel_siginfo info;
1766 
1767 	clear_siginfo(&info);
1768 	info.si_signo = SIGSEGV;
1769 	info.si_errno = 0;
1770 	info.si_code  = SEGV_PKUERR;
1771 	info.si_addr  = addr;
1772 	info.si_pkey  = pkey;
1773 	return force_sig_info(&info);
1774 }
1775 #endif
1776 
1777 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1778 {
1779 	struct kernel_siginfo info;
1780 
1781 	clear_siginfo(&info);
1782 	info.si_signo     = SIGTRAP;
1783 	info.si_errno     = 0;
1784 	info.si_code      = TRAP_PERF;
1785 	info.si_addr      = addr;
1786 	info.si_perf_data = sig_data;
1787 	info.si_perf_type = type;
1788 
1789 	/*
1790 	 * Signals generated by perf events should not terminate the whole
1791 	 * process if SIGTRAP is blocked; however, delivering the signal
1792 	 * asynchronously is better than not delivering at all. But tell user
1793 	 * space if the signal was asynchronous, so it can clearly be
1794 	 * distinguished from normal synchronous ones.
1795 	 */
1796 	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1797 				     TRAP_PERF_FLAG_ASYNC :
1798 				     0;
1799 
1800 	return send_sig_info(info.si_signo, &info, current);
1801 }
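
/*
 * What a SIGTRAP handler sees from the above (sketch): si_code is
 * TRAP_PERF, si_perf_type/si_perf_data identify the event, and
 * si_perf_flags carries TRAP_PERF_FLAG_ASYNC when the signal could not
 * be delivered synchronously because SIGTRAP was blocked.
 */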
1802 
1803 /**
1804  * force_sig_seccomp - signals the task to allow in-process syscall emulation
1805  * @syscall: syscall number to send to userland
1806  * @reason: filter-supplied reason code to send to userland (via si_errno)
1807  * @force_coredump: true to trigger a coredump
1808  *
1809  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1810  */
1811 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1812 {
1813 	struct kernel_siginfo info;
1814 
1815 	clear_siginfo(&info);
1816 	info.si_signo = SIGSYS;
1817 	info.si_code = SYS_SECCOMP;
1818 	info.si_call_addr = (void __user *)KSTK_EIP(current);
1819 	info.si_errno = reason;
1820 	info.si_arch = syscall_get_arch(current);
1821 	info.si_syscall = syscall;
1822 	return force_sig_info_to_task(&info, current,
1823 		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1824 }
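
/*
 * Sketch of the siginfo a SIGSYS handler in the filtered task observes
 * from the above: si_code == SYS_SECCOMP, si_syscall/si_arch identify
 * the trapped call, si_call_addr is the user space instruction pointer
 * and si_errno carries the filter-supplied reason code.
 */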
1825 
1826 /* For the crazy architectures that include trap information in
1827  * the errno field, instead of an actual errno value.
1828  */
1829 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1830 {
1831 	struct kernel_siginfo info;
1832 
1833 	clear_siginfo(&info);
1834 	info.si_signo = SIGTRAP;
1835 	info.si_errno = errno;
1836 	info.si_code  = TRAP_HWBKPT;
1837 	info.si_addr  = addr;
1838 	return force_sig_info(&info);
1839 }
1840 
1841 /* For the rare architectures that include trap information using
1842  * si_trapno.
1843  */
1844 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1845 {
1846 	struct kernel_siginfo info;
1847 
1848 	clear_siginfo(&info);
1849 	info.si_signo = sig;
1850 	info.si_errno = 0;
1851 	info.si_code  = code;
1852 	info.si_addr  = addr;
1853 	info.si_trapno = trapno;
1854 	return force_sig_info(&info);
1855 }
1856 
1857 /* For the rare architectures that include trap information using
1858  * si_trapno.
1859  */
1860 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1861 			  struct task_struct *t)
1862 {
1863 	struct kernel_siginfo info;
1864 
1865 	clear_siginfo(&info);
1866 	info.si_signo = sig;
1867 	info.si_errno = 0;
1868 	info.si_code  = code;
1869 	info.si_addr  = addr;
1870 	info.si_trapno = trapno;
1871 	return send_sig_info(info.si_signo, &info, t);
1872 }
1873 
1874 static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1875 {
1876 	int ret;
1877 	read_lock(&tasklist_lock);
1878 	ret = __kill_pgrp_info(sig, info, pgrp);
1879 	read_unlock(&tasklist_lock);
1880 	return ret;
1881 }
1882 
1883 int kill_pgrp(struct pid *pid, int sig, int priv)
1884 {
1885 	return kill_pgrp_info(sig, __si_special(priv), pid);
1886 }
1887 EXPORT_SYMBOL(kill_pgrp);
1888 
1889 int kill_pid(struct pid *pid, int sig, int priv)
1890 {
1891 	return kill_pid_info(sig, __si_special(priv), pid);
1892 }
1893 EXPORT_SYMBOL(kill_pid);
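
/*
 * Minimal usage sketch (hypothetical caller) for the struct pid based
 * helpers, which avoid the lookup races of raw pid_t values:
 *
 *	struct pid *pid = find_get_pid(nr);
 *
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);
 *		put_pid(pid);
 *	}
 */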
1894 
1895 #ifdef CONFIG_POSIX_TIMERS
1896 /*
1897  * These functions handle POSIX timer signals. POSIX timers use
1898  * preallocated sigqueue structs for sending signals.
1899  */
1900 static void __flush_itimer_signals(struct sigpending *pending)
1901 {
1902 	sigset_t signal, retain;
1903 	struct sigqueue *q, *n;
1904 
1905 	signal = pending->signal;
1906 	sigemptyset(&retain);
1907 
1908 	list_for_each_entry_safe(q, n, &pending->list, list) {
1909 		int sig = q->info.si_signo;
1910 
1911 		if (likely(q->info.si_code != SI_TIMER)) {
1912 			sigaddset(&retain, sig);
1913 		} else {
1914 			sigdelset(&signal, sig);
1915 			list_del_init(&q->list);
1916 			__sigqueue_free(q);
1917 		}
1918 	}
1919 
1920 	sigorsets(&pending->signal, &signal, &retain);
1921 }
1922 
1923 void flush_itimer_signals(void)
1924 {
1925 	struct task_struct *tsk = current;
1926 
1927 	guard(spinlock_irqsave)(&tsk->sighand->siglock);
1928 	__flush_itimer_signals(&tsk->pending);
1929 	__flush_itimer_signals(&tsk->signal->shared_pending);
1930 }
1931 
1932 bool posixtimer_init_sigqueue(struct sigqueue *q)
1933 {
1934 	struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);
1935 
1936 	if (!ucounts)
1937 		return false;
1938 	clear_siginfo(&q->info);
1939 	__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
1940 	return true;
1941 }
1942 
1943 static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
1944 {
1945 	struct sigpending *pending;
1946 	int sig = q->info.si_signo;
1947 
1948 	signalfd_notify(t, sig);
1949 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1950 	list_add_tail(&q->list, &pending->list);
1951 	sigaddset(&pending->signal, sig);
1952 	complete_signal(sig, t, type);
1953 }
1954 
1955 /*
1956  * This function is used by POSIX timers to deliver a timer signal.
1957  * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1958  * set), the signal must be delivered to the specific thread (queues
1959  * into t->pending).
1960  *
1961  * Where type is not PIDTYPE_PID, signals must be delivered to the
1962  * process. In this case, prefer to deliver to current if it is in
1963  * the same thread group as the target process and its sighand is
1964  * stable, which avoids unnecessarily waking up a potentially idle task.
1965  */
1966 static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
1967 {
1968 	struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
1969 
1970 	if (t && tmr->it_pid_type != PIDTYPE_PID &&
1971 	    same_thread_group(t, current) && !current->exit_state)
1972 		t = current;
1973 	return t;
1974 }
1975 
1976 void posixtimer_send_sigqueue(struct k_itimer *tmr)
1977 {
1978 	struct sigqueue *q = &tmr->sigq;
1979 	int sig = q->info.si_signo;
1980 	struct task_struct *t;
1981 	unsigned long flags;
1982 	int result;
1983 
1984 	guard(rcu)();
1985 
1986 	t = posixtimer_get_target(tmr);
1987 	if (!t)
1988 		return;
1989 
1990 	if (!likely(lock_task_sighand(t, &flags)))
1991 		return;
1992 
1993 	/*
1994 	 * Update @tmr::it_sigqueue_seq for posix timer signals with sighand
1995 	 * locked to prevent a race against dequeue_signal().
1996 	 */
1997 	tmr->it_sigqueue_seq = tmr->it_signal_seq;
1998 
1999 	/*
2000 	 * Set the signal delivery status under sighand lock, so that the
2001 	 * ignored signal handling can distinguish between a periodic and a
2002 	 * non-periodic timer.
2003 	 */
2004 	tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
2005 
2006 	if (!prepare_signal(sig, t, false)) {
2007 		result = TRACE_SIGNAL_IGNORED;
2008 
2009 		if (!list_empty(&q->list)) {
2010 			/*
2011 			 * The signal was ignored and blocked. The timer
2012 			 * expiry queued it because blocked signals are
2013 			 * queued independent of the ignored state.
2014 			 *
2015 			 * The unblocking set SIGPENDING, but the signal
2016 			 * was not yet dequeued from the pending list.
2017 			 * So prepare_signal() sees unblocked and ignored,
2018 			 * which ends up here. Leave it queued like a
2019 			 * regular signal.
2020 			 *
2021 			 * The same happens when the task group is exiting
2022 			 * and the signal is already queued.
2023 			 * prepare_signal() treats SIGNAL_GROUP_EXIT as
2024 			 * ignored independent of its queued state. This
2025 			 * gets cleaned up in __exit_signal().
2026 			 */
2027 			goto out;
2028 		}
2029 
2030 		/* Periodic timers with SIG_IGN are queued on the ignored list */
2031 		if (tmr->it_sig_periodic) {
2032 			/*
2033 			 * Already queued means the timer was rearmed after
2034 			 * the previous expiry got it on the ignore list.
2035 			 * Nothing to do for that case.
2036 			 */
2037 			if (hlist_unhashed(&tmr->ignored_list)) {
2038 				/*
2039 				 * Take a signal reference and queue it on
2040 				 * the ignored list.
2041 				 */
2042 				posixtimer_sigqueue_getref(q);
2043 				posixtimer_sig_ignore(t, q);
2044 			}
2045 		} else if (!hlist_unhashed(&tmr->ignored_list)) {
2046 			/*
2047 			 * Covers the case where a timer was periodic and
2048 			 * then the signal was ignored. Later it was rearmed
2049 			 * as a oneshot timer. The previous signal is invalid
2050 			 * now, and this oneshot signal has to be dropped.
2051 			 * Remove it from the ignored list and drop the
2052 			 * reference count as the signal is no longer
2053 			 * queued.
2054 			 */
2055 			hlist_del_init(&tmr->ignored_list);
2056 			posixtimer_putref(tmr);
2057 		}
2058 		goto out;
2059 	}
2060 
2061 	if (unlikely(!list_empty(&q->list))) {
2062 		/* This holds a reference count already */
2063 		result = TRACE_SIGNAL_ALREADY_PENDING;
2064 		goto out;
2065 	}
2066 
2067 	/*
2068 	 * If the signal is on the ignore list, it got blocked after it was
2069 	 * ignored earlier. But nothing lifted the ignore. Move it back to
2070 	 * the pending list to be consistent with the regular signal
2071 	 * handling. This already holds a reference count.
2072 	 *
2073 	 * If it's not on the ignore list acquire a reference count.
2074 	 */
2075 	if (likely(hlist_unhashed(&tmr->ignored_list)))
2076 		posixtimer_sigqueue_getref(q);
2077 	else
2078 		hlist_del_init(&tmr->ignored_list);
2079 
2080 	posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
2081 	result = TRACE_SIGNAL_DELIVERED;
2082 out:
2083 	trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
2084 	unlock_task_sighand(t, &flags);
2085 }
2086 
2087 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
2088 {
2089 	struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
2090 
2091 	/*
2092 	 * If the timer is marked deleted already or the signal originates
2093 	 * from a non-periodic timer, then just drop the reference
2094 	 * count. Otherwise queue it on the ignored list.
2095 	 */
2096 	if (posixtimer_valid(tmr) && tmr->it_sig_periodic)
2097 		hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
2098 	else
2099 		posixtimer_putref(tmr);
2100 }
2101 
2102 static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
2103 {
2104 	struct hlist_head *head = &tsk->signal->ignored_posix_timers;
2105 	struct hlist_node *tmp;
2106 	struct k_itimer *tmr;
2107 
2108 	if (likely(hlist_empty(head)))
2109 		return;
2110 
2111 	/*
2112 	 * Rearming a timer with sighand lock held is not possible due to
2113 	 * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
2114 	 * let the signal delivery path deal with it whether it needs to be
2115 	 * rearmed or not. This cannot be decided here w/o dropping sighand
2116 	 * lock and creating a loop retry horror show.
2117 	 */
2118 	hlist_for_each_entry_safe(tmr, tmp, head, ignored_list) {
2119 		struct task_struct *target;
2120 
2121 		/*
2122 		 * tmr::sigq.info.si_signo is immutable, so accessing it
2123 		 * without holding tmr::it_lock is safe.
2124 		 */
2125 		if (tmr->sigq.info.si_signo != sig)
2126 			continue;
2127 
2128 		hlist_del_init(&tmr->ignored_list);
2129 
2130 		/* This should never happen and leaks a reference count */
2131 		if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
2132 			continue;
2133 
2134 		/*
2135 		 * Get the target for the signal. If target is a thread and
2136 		 * has exited by now, drop the reference count.
2137 		 */
2138 		guard(rcu)();
2139 		target = posixtimer_get_target(tmr);
2140 		if (target)
2141 			posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
2142 		else
2143 			posixtimer_putref(tmr);
2144 	}
2145 }
2146 #else /* CONFIG_POSIX_TIMERS */
2147 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
2148 static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
2149 #endif /* !CONFIG_POSIX_TIMERS */
2150 
2151 void do_notify_pidfd(struct task_struct *task)
2152 {
2153 	struct pid *pid = task_pid(task);
2154 
2155 	WARN_ON(task->exit_state == 0);
2156 
2157 	__wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2158 			poll_to_key(EPOLLIN | EPOLLRDNORM));
2159 }
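
/*
 * This wakeup is what makes poll(2) on a pidfd report EPOLLIN once the
 * task has exited: a (hypothetical) waiter polling an fd obtained via
 * pidfd_open(2) is woken here from the exit path.
 */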
2160 
2161 /*
2162  * Let a parent know about the death of a child.
2163  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2164  *
2165  * Returns true if our parent ignored us and so we've switched to
2166  * self-reaping.
2167  */
2168 bool do_notify_parent(struct task_struct *tsk, int sig)
2169 {
2170 	struct kernel_siginfo info;
2171 	unsigned long flags;
2172 	struct sighand_struct *psig;
2173 	bool autoreap = false;
2174 	u64 utime, stime;
2175 
2176 	WARN_ON_ONCE(sig == -1);
2177 
2178 	/* do_notify_parent_cldstop should have been called instead.  */
2179 	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2180 
2181 	WARN_ON_ONCE(!tsk->ptrace &&
2182 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2183 
2184 	/* ptraced, or group-leader without sub-threads */
2185 	do_notify_pidfd(tsk);
2186 
2187 	if (sig != SIGCHLD) {
2188 		/*
2189 		 * This is only possible if parent == real_parent.
2190 		 * Check if it has changed security domain.
2191 		 */
2192 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2193 			sig = SIGCHLD;
2194 	}
2195 
2196 	clear_siginfo(&info);
2197 	info.si_signo = sig;
2198 	info.si_errno = 0;
2199 	/*
2200 	 * We are under tasklist_lock here so our parent is tied to
2201 	 * us and cannot change.
2202 	 *
2203 	 * task_active_pid_ns will always return the same pid namespace
2204 	 * until a task passes through release_task.
2205 	 *
2206 	 * write_lock() currently calls preempt_disable() which is the
2207 	 * same as rcu_read_lock(), but according to Oleg, it is not
2208 	 * correct to rely on this.
2209 	 */
2210 	rcu_read_lock();
2211 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2212 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2213 				       task_uid(tsk));
2214 	rcu_read_unlock();
2215 
2216 	task_cputime(tsk, &utime, &stime);
2217 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2218 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2219 
2220 	info.si_status = tsk->exit_code & 0x7f;
2221 	if (tsk->exit_code & 0x80)
2222 		info.si_code = CLD_DUMPED;
2223 	else if (tsk->exit_code & 0x7f)
2224 		info.si_code = CLD_KILLED;
2225 	else {
2226 		info.si_code = CLD_EXITED;
2227 		info.si_status = tsk->exit_code >> 8;
2228 	}
2229 
2230 	psig = tsk->parent->sighand;
2231 	spin_lock_irqsave(&psig->siglock, flags);
2232 	if (!tsk->ptrace && sig == SIGCHLD &&
2233 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2234 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2235 		/*
2236 		 * We are exiting and our parent doesn't care.  POSIX.1
2237 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2238 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2239 		 * automatically and not left for our parent's wait4 call.
2240 		 * Rather than having the parent do it as a magic kind of
2241 		 * signal handler, we just set this to tell do_exit that we
2242 		 * can be cleaned up without becoming a zombie.  Note that
2243 		 * we still call __wake_up_parent in this case, because a
2244 		 * blocked sys_wait4 might now return -ECHILD.
2245 		 *
2246 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2247 		 * is implementation-defined: we do (if you don't want
2248 		 * it, just use SIG_IGN instead).
2249 		 */
2250 		autoreap = true;
2251 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2252 			sig = 0;
2253 	}
2254 	if (!tsk->ptrace && tsk->signal->autoreap) {
2255 		autoreap = true;
2256 		sig = 0;
2257 	}
2258 	/*
2259 	 * Send with __send_signal as si_pid and si_uid are in the
2260 	 * parent's namespaces.
2261 	 */
2262 	if (valid_signal(sig) && sig)
2263 		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2264 	__wake_up_parent(tsk, tsk->parent);
2265 	spin_unlock_irqrestore(&psig->siglock, flags);
2266 
2267 	return autoreap;
2268 }
2269 
2270 /**
2271  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2272  * @tsk: task reporting the state change
2273  * @for_ptracer: the notification is for ptracer
2274  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2275  *
2276  * Notify @tsk's parent that the stopped/continued state has changed.  If
2277  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2278  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2279  *
2280  * CONTEXT:
2281  * Must be called with tasklist_lock at least read locked.
2282  */
2283 static void do_notify_parent_cldstop(struct task_struct *tsk,
2284 				     bool for_ptracer, int why)
2285 {
2286 	struct kernel_siginfo info;
2287 	unsigned long flags;
2288 	struct task_struct *parent;
2289 	struct sighand_struct *sighand;
2290 	u64 utime, stime;
2291 
2292 	if (for_ptracer) {
2293 		parent = tsk->parent;
2294 	} else {
2295 		tsk = tsk->group_leader;
2296 		parent = tsk->real_parent;
2297 	}
2298 
2299 	clear_siginfo(&info);
2300 	info.si_signo = SIGCHLD;
2301 	info.si_errno = 0;
2302 	/*
2303 	 * see comment in do_notify_parent() about the following 4 lines
2304 	 */
2305 	rcu_read_lock();
2306 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2307 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2308 	rcu_read_unlock();
2309 
2310 	task_cputime(tsk, &utime, &stime);
2311 	info.si_utime = nsec_to_clock_t(utime);
2312 	info.si_stime = nsec_to_clock_t(stime);
2313 
2314 	info.si_code = why;
2315 	switch (why) {
2316 	case CLD_CONTINUED:
2317 		info.si_status = SIGCONT;
2318 		break;
2319 	case CLD_STOPPED:
2320 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2321 		break;
2322 	case CLD_TRAPPED:
2323 		info.si_status = tsk->exit_code & 0x7f;
2324 		break;
2325 	default:
2326 		BUG();
2327 	}
2328 
2329 	sighand = parent->sighand;
2330 	spin_lock_irqsave(&sighand->siglock, flags);
2331 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2332 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2333 		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2334 	/*
2335 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2336 	 */
2337 	__wake_up_parent(tsk, parent);
2338 	spin_unlock_irqrestore(&sighand->siglock, flags);
2339 }
2340 
2341 /*
2342  * This must be called with current->sighand->siglock held.
2343  *
2344  * This should be the path for all ptrace stops.
2345  * We always set current->last_siginfo while stopped here.
2346  * That makes it a way to test a stopped process for
2347  * being ptrace-stopped vs being job-control-stopped.
2348  *
2349  * Returns the signal the ptracer requested the code resume
2350  * with.  If the task did not stop because the tracer is gone or
2351  * a fatal signal is pending, the original exit_code is returned unchanged.
2352  */
2353 static int ptrace_stop(int exit_code, int why, unsigned long message,
2354 		       kernel_siginfo_t *info)
2355 	__releases(&current->sighand->siglock)
2356 	__acquires(&current->sighand->siglock)
2357 {
2358 	bool gstop_done = false;
2359 
2360 	if (arch_ptrace_stop_needed()) {
2361 		/*
2362 		 * The arch code has something special to do before a
2363 		 * ptrace stop.  This is allowed to block, e.g. for faults
2364 		 * on user stack pages.  We can't keep the siglock while
2365 		 * calling arch_ptrace_stop, so we must release it now.
2366 		 * To preserve proper semantics, we must do this before
2367 		 * any signal bookkeeping like checking group_stop_count.
2368 		 */
2369 		spin_unlock_irq(&current->sighand->siglock);
2370 		arch_ptrace_stop();
2371 		spin_lock_irq(&current->sighand->siglock);
2372 	}
2373 
2374 	/*
2375 	 * After this point ptrace_signal_wake_up or signal_wake_up
2376 	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2377 	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2378 	 * signals here to prevent ptrace_stop sleeping in schedule.
2379 	 */
2380 	if (!current->ptrace || __fatal_signal_pending(current))
2381 		return exit_code;
2382 
2383 	set_special_state(TASK_TRACED);
2384 	current->jobctl |= JOBCTL_TRACED;
2385 
2386 	/*
2387 	 * We're committing to trapping.  TRACED should be visible before
2388 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2389 	 * Also, transition to TRACED and updates to ->jobctl should be
2390 	 * atomic with respect to siglock and should be done after the arch
2391 	 * hook as siglock is released and regrabbed across it.
2392 	 *
2393 	 *     TRACER				    TRACEE
2394 	 *
2395 	 *     ptrace_attach()
2396 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2397 	 *     do_wait()
2398 	 *       set_current_state()                smp_wmb();
2399 	 *       ptrace_do_wait()
2400 	 *         wait_task_stopped()
2401 	 *           task_stopped_code()
2402 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2403 	 */
2404 	smp_wmb();
2405 
2406 	current->ptrace_message = message;
2407 	current->last_siginfo = info;
2408 	current->exit_code = exit_code;
2409 
2410 	/*
2411 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2412 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2413 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2414 	 * could be clear now.  We act as if SIGCONT is received after
2415 	 * TASK_TRACED is entered - ignore it.
2416 	 */
2417 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2418 		gstop_done = task_participate_group_stop(current);
2419 
2420 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2421 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2422 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2423 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2424 
2425 	/* entering a trap, clear TRAPPING */
2426 	task_clear_jobctl_trapping(current);
2427 
2428 	spin_unlock_irq(&current->sighand->siglock);
2429 	read_lock(&tasklist_lock);
2430 	/*
2431 	 * Notify parents of the stop.
2432 	 *
2433 	 * While ptraced, there are two parents - the ptracer and
2434 	 * the real_parent of the group_leader.  The ptracer should
2435 	 * know about every stop while the real parent is only
2436 	 * interested in the completion of group stop.  The states
2437 	 * for the two don't interact with each other.  Notify
2438 	 * separately unless they're gonna be duplicates.
2439 	 */
2440 	if (current->ptrace)
2441 		do_notify_parent_cldstop(current, true, why);
2442 	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2443 		do_notify_parent_cldstop(current, false, why);
2444 
2445 	/*
2446 	 * The previous do_notify_parent_cldstop() invocation woke the ptracer.
2447 	 * On a PREEMPTION kernel this can result in a preemption requirement
2448 	 * which will be fulfilled after read_unlock() and the ptracer will be
2449 	 * put on the CPU.
2450 	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2451 	 * this task to wait in schedule(). If this task gets preempted then it
2452 	 * remains enqueued on the runqueue. The ptracer will observe this and
2453 	 * then sleep for a delay of one HZ tick. In the meantime this task
2454 	 * gets scheduled, enters schedule() and will wait for the ptracer.
2455 	 *
2456 	 * This preemption point is not bad from a correctness point of
2457 	 * view but extends the runtime by one HZ tick time due to the
2458 	 * ptracer's sleep.  The preempt-disable section ensures that there
2459 	 * will be no preemption between unlock and schedule(), which
2460 	 * improves performance since the ptracer will observe that
2461 	 * the tracee is scheduled out once it gets on the CPU.
2462 	 *
2463 	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2464 	 * Therefore the task can be preempted after do_notify_parent_cldstop()
2465 	 * before unlocking tasklist_lock so there is no benefit in doing this.
2466 	 *
2467 	 * In fact disabling preemption is harmful on PREEMPT_RT because
2468 	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2469 	 * with preemption disabled due to the 'sleeping' spinlock
2470 	 * substitution of RT.
2471 	 */
2472 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2473 		preempt_disable();
2474 	read_unlock(&tasklist_lock);
2475 	cgroup_enter_frozen();
2476 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2477 		preempt_enable_no_resched();
2478 	schedule();
2479 	cgroup_leave_frozen(true);
2480 
2481 	/*
2482 	 * We are back.  Now reacquire the siglock before touching
2483 	 * last_siginfo, so that we are sure to have synchronized with
2484 	 * any signal-sending on another CPU that wants to examine it.
2485 	 */
2486 	spin_lock_irq(&current->sighand->siglock);
2487 	exit_code = current->exit_code;
2488 	current->last_siginfo = NULL;
2489 	current->ptrace_message = 0;
2490 	current->exit_code = 0;
2491 
2492 	/* LISTENING can be set only during STOP traps, clear it */
2493 	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2494 
2495 	/*
2496 	 * Queued signals ignored us while we were stopped for tracing.
2497 	 * So check for any that we should take before resuming user mode.
2498 	 * This sets TIF_SIGPENDING, but never clears it.
2499 	 */
2500 	recalc_sigpending_tsk(current);
2501 	return exit_code;
2502 }
2503 
2504 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2505 {
2506 	kernel_siginfo_t info;
2507 
2508 	clear_siginfo(&info);
2509 	info.si_signo = signr;
2510 	info.si_code = exit_code;
2511 	info.si_pid = task_pid_vnr(current);
2512 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2513 
2514 	/* Let the debugger run.  */
2515 	return ptrace_stop(exit_code, why, message, &info);
2516 }
2517 
2518 int ptrace_notify(int exit_code, unsigned long message)
2519 {
2520 	int signr;
2521 
2522 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2523 	if (unlikely(task_work_pending(current)))
2524 		task_work_run();
2525 
2526 	spin_lock_irq(&current->sighand->siglock);
2527 	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2528 	spin_unlock_irq(&current->sighand->siglock);
2529 	return signr;
2530 }
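
/*
 * The BUG_ON above enforces the exit_code encoding: the low seven bits
 * must be SIGTRAP and only bits 8-15 may carry a ptrace event. A
 * typical caller therefore looks like (sketch, following the ptrace
 * event convention):
 *
 *	ptrace_notify(SIGTRAP | (PTRACE_EVENT_EXEC << 8), message);
 */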
2531 
2532 /**
2533  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2534  * @signr: signr causing group stop if initiating
2535  *
2536  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2537  * and participate in it.  If already set, participate in the existing
2538  * group stop.  If participated in a group stop (and thus slept), %true is
2539  * returned with siglock released.
2540  *
2541  * If ptraced, this function doesn't handle stop itself.  Instead,
2542  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2543  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2544  * places afterwards.
2545  * place afterwards.
2546  * CONTEXT:
2547  * Must be called with @current->sighand->siglock held, which is released
2548  * on %true return.
2549  *
2550  * RETURNS:
2551  * %false if group stop is already cancelled or ptrace trap is scheduled.
2552  * %true if participated in group stop.
2553  */
2554 static bool do_signal_stop(int signr)
2555 	__releases(&current->sighand->siglock)
2556 {
2557 	struct signal_struct *sig = current->signal;
2558 
2559 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2560 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2561 		struct task_struct *t;
2562 
2563 		/* signr will be recorded in task->jobctl for retries */
2564 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2565 
2566 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2567 		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2568 		    unlikely(sig->group_exec_task))
2569 			return false;
2570 		/*
2571 		 * There is no group stop already in progress.  We must
2572 		 * initiate one now.
2573 		 *
2574 		 * While ptraced, a task may be resumed while group stop is
2575 		 * still in effect and then receive a stop signal and
2576 		 * initiate another group stop.  This deviates from the
2577 		 * usual behavior as two consecutive stop signals can't
2578 		 * cause two group stops when !ptraced.  That is why we
2579 		 * also check !task_is_stopped(t) below.
2580 		 *
2581 		 * The condition can be distinguished by testing whether
2582 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2583 		 * group_exit_code in such case.
2584 		 *
2585 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2586 		 * an intervening stop signal is required to cause two
2587 		 * continued events regardless of ptrace.
2588 		 */
2589 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2590 			sig->group_exit_code = signr;
2591 
2592 		sig->group_stop_count = 0;
2593 		if (task_set_jobctl_pending(current, signr | gstop))
2594 			sig->group_stop_count++;
2595 
2596 		for_other_threads(current, t) {
2597 			/*
2598 			 * Setting state to TASK_STOPPED for a group
2599 			 * stop is always done with the siglock held,
2600 			 * so this check has no races.
2601 			 */
2602 			if (!task_is_stopped(t) &&
2603 			    task_set_jobctl_pending(t, signr | gstop)) {
2604 				sig->group_stop_count++;
2605 				if (likely(!(t->ptrace & PT_SEIZED)))
2606 					signal_wake_up(t, 0);
2607 				else
2608 					ptrace_trap_notify(t);
2609 			}
2610 		}
2611 	}
2612 
2613 	if (likely(!current->ptrace)) {
2614 		int notify = 0;
2615 
2616 		/*
2617 		 * If there are no other threads in the group, or if there
2618 		 * is a group stop in progress and we are the last to stop,
2619 		 * report to the parent.
2620 		 */
2621 		if (task_participate_group_stop(current))
2622 			notify = CLD_STOPPED;
2623 
2624 		current->jobctl |= JOBCTL_STOPPED;
2625 		set_special_state(TASK_STOPPED);
2626 		spin_unlock_irq(&current->sighand->siglock);
2627 
2628 		/*
2629 		 * Notify the parent of the group stop completion.  Because
2630 		 * we're not holding either the siglock or tasklist_lock
2631 		 * here, a ptracer may attach in between; however, this is for
2632 		 * group stop and should always be delivered to the real
2633 		 * parent of the group leader.  The new ptracer will get
2634 		 * its notification when this task transitions into
2635 		 * TASK_TRACED.
2636 		 */
2637 		if (notify) {
2638 			read_lock(&tasklist_lock);
2639 			do_notify_parent_cldstop(current, false, notify);
2640 			read_unlock(&tasklist_lock);
2641 		}
2642 
2643 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2644 		cgroup_enter_frozen();
2645 		schedule();
2646 		return true;
2647 	} else {
2648 		/*
2649 		 * While ptraced, group stop is handled by STOP trap.
2650 		 * Schedule it and let the caller deal with it.
2651 		 */
2652 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2653 		return false;
2654 	}
2655 }
2656 
2657 /**
2658  * do_jobctl_trap - take care of ptrace jobctl traps
2659  *
2660  * When PT_SEIZED, it's used for both group stop and explicit
2661  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2662  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2663  * the stop signal; otherwise, %SIGTRAP.
2664  *
2665  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2666  * number as exit_code and no siginfo.
2667  *
2668  * CONTEXT:
2669  * Must be called with @current->sighand->siglock held, which may be
2670  * released and re-acquired before returning with intervening sleep.
2671  */
2672 static void do_jobctl_trap(void)
2673 {
2674 	struct signal_struct *signal = current->signal;
2675 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2676 
2677 	if (current->ptrace & PT_SEIZED) {
2678 		if (!signal->group_stop_count &&
2679 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2680 			signr = SIGTRAP;
2681 		WARN_ON_ONCE(!signr);
2682 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2683 				 CLD_STOPPED, 0);
2684 	} else {
2685 		WARN_ON_ONCE(!signr);
2686 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2687 	}
2688 }
2689 
2690 /**
2691  * do_freezer_trap - handle the freezer jobctl trap
2692  *
2693  * Puts the task into the frozen state, unless the task is about to quit,
2694  * in which case it drops JOBCTL_TRAP_FREEZE.
2695  *
2696  * CONTEXT:
2697  * Must be called with @current->sighand->siglock held,
2698  * which is always released before returning.
2699  */
2700 static void do_freezer_trap(void)
2701 	__releases(&current->sighand->siglock)
2702 {
2703 	/*
2704 	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2705 	 * let's make another loop to give it a chance to be handled.
2706 	 * In any case, we'll return back.
2707 	 * In any case, we'll return.
2708 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2709 	     JOBCTL_TRAP_FREEZE) {
2710 		spin_unlock_irq(&current->sighand->siglock);
2711 		return;
2712 	}
2713 
2714 	/*
2715 	 * Now we're sure that there is no pending fatal signal and no
2716 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2717 	 * immediately (if there is a non-fatal signal pending), and
2718 	 * put the task to sleep.
2719 	 */
2720 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2721 	clear_thread_flag(TIF_SIGPENDING);
2722 	spin_unlock_irq(&current->sighand->siglock);
2723 	cgroup_enter_frozen();
2724 	schedule();
2725 
2726 	/*
2727 	 * We could've been woken by task_work, run it to clear
2728 	 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2729 	 */
2730 	clear_notify_signal();
2731 	if (unlikely(task_work_pending(current)))
2732 		task_work_run();
2733 }
2734 
2735 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2736 {
2737 	/*
2738 	 * We do not check sig_kernel_stop(signr) but set this marker
2739 	 * unconditionally because we do not know whether debugger will
2740 	 * change signr. This flag has no meaning unless we are going
2741 	 * to stop after return from ptrace_stop(). In this case it will
2742 	 * be checked in do_signal_stop(), we should only stop if it was
2743 	 * not cleared by SIGCONT while we were sleeping. See also the
2744 	 * comment in dequeue_signal().
2745 	 */
2746 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2747 	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2748 
2749 	/* We're back.  Did the debugger cancel the sig?  */
2750 	if (signr == 0)
2751 		return signr;
2752 
2753 	/*
2754 	 * Update the siginfo structure if the signal has
2755 	 * changed.  If the debugger wanted something
2756 	 * specific in the siginfo structure then it should
2757 	 * have updated *info via PTRACE_SETSIGINFO.
2758 	 */
2759 	if (signr != info->si_signo) {
2760 		clear_siginfo(info);
2761 		info->si_signo = signr;
2762 		info->si_errno = 0;
2763 		info->si_code = SI_USER;
2764 		rcu_read_lock();
2765 		info->si_pid = task_pid_vnr(current->parent);
2766 		info->si_uid = from_kuid_munged(current_user_ns(),
2767 						task_uid(current->parent));
2768 		rcu_read_unlock();
2769 	}
2770 
2771 	/* If the (new) signal is now blocked, requeue it.  */
2772 	if (sigismember(&current->blocked, signr) ||
2773 	    fatal_signal_pending(current)) {
2774 		send_signal_locked(signr, info, current, type);
2775 		signr = 0;
2776 	}
2777 
2778 	return signr;
2779 }
2780 
2781 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2782 {
2783 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2784 	case SIL_FAULT:
2785 	case SIL_FAULT_TRAPNO:
2786 	case SIL_FAULT_MCEERR:
2787 	case SIL_FAULT_BNDERR:
2788 	case SIL_FAULT_PKUERR:
2789 	case SIL_FAULT_PERF_EVENT:
2790 		ksig->info.si_addr = arch_untagged_si_addr(
2791 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2792 		break;
2793 	case SIL_KILL:
2794 	case SIL_TIMER:
2795 	case SIL_POLL:
2796 	case SIL_CHLD:
2797 	case SIL_RT:
2798 	case SIL_SYS:
2799 		break;
2800 	}
2801 }
2802 
2803 bool get_signal(struct ksignal *ksig)
2804 {
2805 	struct sighand_struct *sighand = current->sighand;
2806 	struct signal_struct *signal = current->signal;
2807 	int signr;
2808 
2809 	clear_notify_signal();
2810 	if (unlikely(task_work_pending(current)))
2811 		task_work_run();
2812 
2813 	if (!task_sigpending(current))
2814 		return false;
2815 
2816 	if (unlikely(uprobe_deny_signal()))
2817 		return false;
2818 
2819 	/*
2820 	 * Do this once, we can't return to user-mode if freezing() == T.
2821 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2822 	 * thus do not need another check after return.
2823 	 */
2824 	try_to_freeze();
2825 
2826 relock:
2827 	spin_lock_irq(&sighand->siglock);
2828 
2829 	/*
2830 	 * Every stopped thread goes here after wakeup. Check to see if
2831 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2832 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2833 	 */
2834 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2835 		int why;
2836 
2837 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2838 			why = CLD_CONTINUED;
2839 		else
2840 			why = CLD_STOPPED;
2841 
2842 		signal->flags &= ~SIGNAL_CLD_MASK;
2843 
2844 		spin_unlock_irq(&sighand->siglock);
2845 
2846 		/*
2847 		 * Notify the parent that we're continuing.  This event is
2848 		 * always per-process and doesn't make a whole lot of sense
2849 		 * for ptracers, who shouldn't consume the state via
2850 		 * wait(2) either, but, for backward compatibility, notify
2851 		 * the ptracer of the group leader too unless it's gonna be
2852 		 * a duplicate.
2853 		 */
2854 		read_lock(&tasklist_lock);
2855 		do_notify_parent_cldstop(current, false, why);
2856 
2857 		if (ptrace_reparented(current->group_leader))
2858 			do_notify_parent_cldstop(current->group_leader,
2859 						true, why);
2860 		read_unlock(&tasklist_lock);
2861 
2862 		goto relock;
2863 	}
2864 
2865 	for (;;) {
2866 		struct k_sigaction *ka;
2867 		enum pid_type type;
2868 
2869 		/* Has this task already been marked for death? */
2870 		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2871 		     signal->group_exec_task) {
2872 			signr = SIGKILL;
2873 			sigdelset(&current->pending.signal, SIGKILL);
2874 			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2875 					     &sighand->action[SIGKILL-1]);
2876 			recalc_sigpending();
2877 			/*
2878 			 * implies do_group_exit() or return to PF_USER_WORKER,
2879 			 * no need to initialize ksig->info/etc.
2880 			 */
2881 			goto fatal;
2882 		}
2883 
2884 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2885 		    do_signal_stop(0))
2886 			goto relock;
2887 
2888 		if (unlikely(current->jobctl &
2889 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2890 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2891 				do_jobctl_trap();
2892 				spin_unlock_irq(&sighand->siglock);
2893 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2894 				do_freezer_trap();
2895 
2896 			goto relock;
2897 		}
2898 
2899 		/*
2900 		 * If the task is leaving the frozen state, let's update
2901 		 * cgroup counters and reset the frozen bit.
2902 		 */
2903 		if (unlikely(cgroup_task_frozen(current))) {
2904 			spin_unlock_irq(&sighand->siglock);
2905 			cgroup_leave_frozen(false);
2906 			goto relock;
2907 		}
2908 
2909 		/*
2910 		 * Signals generated by the execution of an instruction
2911 		 * need to be delivered before any other pending signals
2912 		 * so that the instruction pointer in the signal stack
2913 		 * frame points to the faulting instruction.
2914 		 */
2915 		type = PIDTYPE_PID;
2916 		signr = dequeue_synchronous_signal(&ksig->info);
2917 		if (!signr)
2918 			signr = dequeue_signal(&current->blocked, &ksig->info, &type);
2919 
2920 		if (!signr)
2921 			break; /* will return 0 */
2922 
2923 		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2924 		    !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2925 			signr = ptrace_signal(signr, &ksig->info, type);
2926 			if (!signr)
2927 				continue;
2928 		}
2929 
2930 		ka = &sighand->action[signr-1];
2931 
2932 		/* Trace actually delivered signals. */
2933 		trace_signal_deliver(signr, &ksig->info, ka);
2934 
2935 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2936 			continue;
2937 		if (ka->sa.sa_handler != SIG_DFL) {
2938 			/* Run the handler.  */
2939 			ksig->ka = *ka;
2940 
2941 			if (ka->sa.sa_flags & SA_ONESHOT)
2942 				ka->sa.sa_handler = SIG_DFL;
2943 
2944 			break; /* will return non-zero "signr" value */
2945 		}
2946 
2947 		/*
2948 		 * Now we are doing the default action for this signal.
2949 		 */
2950 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2951 			continue;
2952 
2953 		/*
2954 		 * Global init gets no signals it doesn't want.
2955 		 * Container-init gets no signals it doesn't want from same
2956 		 * container.
2957 		 *
2958 		 * Note that if global/container-init sees a sig_kernel_only()
2959 		 * signal here, the signal must have been generated internally
2960 		 * or must have come from an ancestor namespace. In either
2961 		 * case, the signal cannot be dropped.
2962 		 */
2963 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2964 				!sig_kernel_only(signr))
2965 			continue;
2966 
2967 		if (sig_kernel_stop(signr)) {
2968 			/*
2969 			 * The default action is to stop all threads in
2970 			 * the thread group.  The job control signals
2971 			 * do nothing in an orphaned pgrp, but SIGSTOP
2972 			 * always works.  Note that siglock needs to be
2973 			 * dropped during the call to is_orphaned_pgrp()
2974 			 * because of lock ordering with tasklist_lock.
2975 			 * This allows an intervening SIGCONT to be posted.
2976 			 * We need to check for that and bail out if necessary.
2977 			 */
2978 			if (signr != SIGSTOP) {
2979 				spin_unlock_irq(&sighand->siglock);
2980 
2981 				/* signals can be posted during this window */
2982 
2983 				if (is_current_pgrp_orphaned())
2984 					goto relock;
2985 
2986 				spin_lock_irq(&sighand->siglock);
2987 			}
2988 
2989 			if (likely(do_signal_stop(signr))) {
2990 				/* It released the siglock.  */
2991 				goto relock;
2992 			}
2993 
2994 			/*
2995 			 * We didn't actually stop, due to a race
2996 			 * with SIGCONT or something like that.
2997 			 */
2998 			continue;
2999 		}
3000 
3001 	fatal:
3002 		spin_unlock_irq(&sighand->siglock);
3003 		if (unlikely(cgroup_task_frozen(current)))
3004 			cgroup_leave_frozen(true);
3005 
3006 		/*
3007 		 * Anything else is fatal, maybe with a core dump.
3008 		 */
3009 		current->flags |= PF_SIGNALED;
3010 
3011 		if (sig_kernel_coredump(signr)) {
3012 			if (print_fatal_signals)
3013 				print_fatal_signal(signr);
3014 			proc_coredump_connector(current);
3015 			/*
3016 			 * If it was able to dump core, this kills all
3017 			 * other threads in the group and synchronizes with
3018 			 * their demise.  If we lost the race with another
3019 			 * thread getting here, it set group_exit_code
3020 			 * first and our do_group_exit call below will use
3021 			 * that value and ignore the one we pass it.
3022 			 */
3023 			vfs_coredump(&ksig->info);
3024 		}
3025 
3026 		/*
3027 		 * PF_USER_WORKER threads will catch and exit on fatal signals
3028 		 * themselves. They have cleanup that must be performed, so we
3029 		 * cannot call do_exit() on their behalf. Note that ksig won't
3030 		 * be properly initialized, so PF_USER_WORKERs shouldn't use it.
3031 		 */
3032 		if (current->flags & PF_USER_WORKER)
3033 			goto out;
3034 
3035 		/*
3036 		 * Death signals, no core dump.
3037 		 */
3038 		do_group_exit(signr);
3039 		/* NOTREACHED */
3040 	}
3041 	spin_unlock_irq(&sighand->siglock);
3042 
3043 	ksig->sig = signr;
3044 
3045 	if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
3046 		hide_si_addr_tag_bits(ksig);
3047 out:
3048 	return signr > 0;
3049 }
3050 
3051 /**
3052  * signal_delivered - called after signal delivery to update blocked signals
3053  * @ksig:		kernel signal struct
3054  * @stepping:		nonzero if debugger single-step or block-step in use
3055  *
3056  * This function should be called when a signal has successfully been
3057  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
3058  * is always blocked), and the signal itself is blocked unless %SA_NODEFER
3059  * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
3060  */
3061 static void signal_delivered(struct ksignal *ksig, int stepping)
3062 {
3063 	sigset_t blocked;
3064 
3065 	/* A signal was successfully delivered, and the
3066 	   saved sigmask was stored on the signal frame,
3067 	   and will be restored by sigreturn.  So we can
3068 	   simply clear the restore sigmask flag.  */
3069 	clear_restore_sigmask();
3070 
3071 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
3072 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
3073 		sigaddset(&blocked, ksig->sig);
3074 	set_current_blocked(&blocked);
3075 	if (current->sas_ss_flags & SS_AUTODISARM)
3076 		sas_ss_reset(current);
3077 	if (stepping)
3078 		ptrace_notify(SIGTRAP, 0);
3079 }
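
/*
 * Worked example of the mask update above (hypothetical handler): with
 * sa_mask = { SIGUSR2 } and SA_NODEFER clear, delivering SIGUSR1 leaves
 * both SIGUSR1 and SIGUSR2 blocked until sigreturn restores the mask
 * that was saved on the signal frame.
 */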
3080 
3081 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
3082 {
3083 	if (failed)
3084 		force_sigsegv(ksig->sig);
3085 	else
3086 		signal_delivered(ksig, stepping);
3087 }
3088 
3089 /*
3090  * It could be that complete_signal() picked us to notify about the
3091  * group-wide signal. Other threads should be notified now to take
3092  * the shared signals in @which since we will not.
3093  */
3094 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
3095 {
3096 	sigset_t retarget;
3097 	struct task_struct *t;
3098 
3099 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
3100 	if (sigisemptyset(&retarget))
3101 		return;
3102 
3103 	for_other_threads(tsk, t) {
3104 		if (t->flags & PF_EXITING)
3105 			continue;
3106 
3107 		if (!has_pending_signals(&retarget, &t->blocked))
3108 			continue;
3109 		/* Remove the signals this thread can handle. */
3110 		sigandsets(&retarget, &retarget, &t->blocked);
3111 
3112 		if (!task_sigpending(t))
3113 			signal_wake_up(t, 0);
3114 
3115 		if (sigisemptyset(&retarget))
3116 			break;
3117 	}
3118 }
3119 
3120 void exit_signals(struct task_struct *tsk)
3121 {
3122 	int group_stop = 0;
3123 	sigset_t unblocked;
3124 
3125 	/*
3126 	 * @tsk is about to have PF_EXITING set - lock out users which
3127 	 * expect stable threadgroup.
3128 	 */
3129 	cgroup_threadgroup_change_begin(tsk);
3130 
3131 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3132 		tsk->flags |= PF_EXITING;
3133 		cgroup_threadgroup_change_end(tsk);
3134 		return;
3135 	}
3136 
3137 	spin_lock_irq(&tsk->sighand->siglock);
3138 	/*
3139 	 * From now this task is not visible for group-wide signals,
3140 	 * see wants_signal(), do_signal_stop().
3141 	 */
3142 	tsk->flags |= PF_EXITING;
3143 
3144 	cgroup_threadgroup_change_end(tsk);
3145 
3146 	if (!task_sigpending(tsk))
3147 		goto out;
3148 
3149 	unblocked = tsk->blocked;
3150 	signotset(&unblocked);
3151 	retarget_shared_pending(tsk, &unblocked);
3152 
3153 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3154 	    task_participate_group_stop(tsk))
3155 		group_stop = CLD_STOPPED;
3156 out:
3157 	spin_unlock_irq(&tsk->sighand->siglock);
3158 
3159 	/*
3160 	 * If group stop has completed, deliver the notification.  This
3161 	 * should always go to the real parent of the group leader.
3162 	 */
3163 	if (unlikely(group_stop)) {
3164 		read_lock(&tasklist_lock);
3165 		do_notify_parent_cldstop(tsk, false, group_stop);
3166 		read_unlock(&tasklist_lock);
3167 	}
3168 }
3169 
3170 /*
3171  * System call entry points.
3172  */
3173 
3174 /**
3175  *  sys_restart_syscall - restart a system call
3176  */
3177 SYSCALL_DEFINE0(restart_syscall)
3178 {
3179 	struct restart_block *restart = &current->restart_block;
3180 	return restart->fn(restart);
3181 }
3182 
3183 long do_no_restart_syscall(struct restart_block *param)
3184 {
3185 	return -EINTR;
3186 }
3187 
3188 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3189 {
3190 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3191 		sigset_t newblocked;
3192 		/* A set of now blocked but previously unblocked signals. */
3193 		sigandnsets(&newblocked, newset, &current->blocked);
3194 		retarget_shared_pending(tsk, &newblocked);
3195 	}
3196 	tsk->blocked = *newset;
3197 	recalc_sigpending();
3198 }
3199 
3200 /**
3201  * set_current_blocked - change current->blocked mask
3202  * @newset: new mask
3203  *
3204  * It is wrong to change ->blocked directly, this helper should be used
3205  * to ensure the process can't miss a shared signal we are going to block.
3206  */
3207 void set_current_blocked(sigset_t *newset)
3208 {
3209 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3210 	__set_current_blocked(newset);
3211 }
3212 
3213 void __set_current_blocked(const sigset_t *newset)
3214 {
3215 	struct task_struct *tsk = current;
3216 
3217 	/*
3218 	 * In case the signal mask hasn't changed, there is nothing we need
3219 	 * to do. The current->blocked shouldn't be modified by other task.
3220 	 */
3221 	if (sigequalsets(&tsk->blocked, newset))
3222 		return;
3223 
3224 	spin_lock_irq(&tsk->sighand->siglock);
3225 	__set_task_blocked(tsk, newset);
3226 	spin_unlock_irq(&tsk->sighand->siglock);
3227 }
3228 
3229 /*
3230  * This is also useful for kernel threads that want to temporarily
3231  * (or permanently) block certain signals.
3232  *
3233  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3234  * interface happily blocks "unblockable" signals like SIGKILL
3235  * and friends.
3236  */
3237 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3238 {
3239 	struct task_struct *tsk = current;
3240 	sigset_t newset;
3241 
3242 	/* Lockless, only current can change ->blocked, never from irq */
3243 	if (oldset)
3244 		*oldset = tsk->blocked;
3245 
3246 	switch (how) {
3247 	case SIG_BLOCK:
3248 		sigorsets(&newset, &tsk->blocked, set);
3249 		break;
3250 	case SIG_UNBLOCK:
3251 		sigandnsets(&newset, &tsk->blocked, set);
3252 		break;
3253 	case SIG_SETMASK:
3254 		newset = *set;
3255 		break;
3256 	default:
3257 		return -EINVAL;
3258 	}
3259 
3260 	__set_current_blocked(&newset);
3261 	return 0;
3262 }
3263 EXPORT_SYMBOL(sigprocmask);
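
/*
 * Sketch of a kernel thread that wants to receive SIGTERM (hypothetical;
 * most kthreads leave all signals blocked):
 *
 *	sigset_t set;
 *
 *	siginitsetinv(&set, sigmask(SIGTERM));
 *	sigprocmask(SIG_SETMASK, &set, NULL);
 *
 * This blocks everything except SIGTERM; as noted above, this interface
 * would just as happily block SIGKILL.
 */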
3264 
3265 /*
3266  * The api helps set app-provided sigmasks.
3267  *
3268  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3269  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3270  *
3271  * Note that it does set_restore_sigmask() in advance, so it must be always
3272  * paired with restore_saved_sigmask_unless() before return from syscall.
3273  */
3274 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3275 {
3276 	sigset_t kmask;
3277 
3278 	if (!umask)
3279 		return 0;
3280 	if (sigsetsize != sizeof(sigset_t))
3281 		return -EINVAL;
3282 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3283 		return -EFAULT;
3284 
3285 	set_restore_sigmask();
3286 	current->saved_sigmask = current->blocked;
3287 	set_current_blocked(&kmask);
3288 
3289 	return 0;
3290 }
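
/*
 * Sketch of the required pairing in a syscall such as ppoll (assumed
 * shape of the caller):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_sys_poll(ufds, nfds, to);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */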
3291 
3292 #ifdef CONFIG_COMPAT
3293 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3294 			    size_t sigsetsize)
3295 {
3296 	sigset_t kmask;
3297 
3298 	if (!umask)
3299 		return 0;
3300 	if (sigsetsize != sizeof(compat_sigset_t))
3301 		return -EINVAL;
3302 	if (get_compat_sigset(&kmask, umask))
3303 		return -EFAULT;
3304 
3305 	set_restore_sigmask();
3306 	current->saved_sigmask = current->blocked;
3307 	set_current_blocked(&kmask);
3308 
3309 	return 0;
3310 }
3311 #endif
3312 
3313 /**
3314  *  sys_rt_sigprocmask - change the list of currently blocked signals
3315  *  @how: whether to add, remove, or set signals
3316  *  @nset: the new signal mask, or NULL to leave the mask unchanged
3317  *  @oset: where the previous signal mask is stored, if non-null
3318  *  @sigsetsize: size of sigset_t type
3319  */
3320 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3321 		sigset_t __user *, oset, size_t, sigsetsize)
3322 {
3323 	sigset_t old_set, new_set;
3324 	int error;
3325 
3326 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3327 	if (sigsetsize != sizeof(sigset_t))
3328 		return -EINVAL;
3329 
3330 	old_set = current->blocked;
3331 
3332 	if (nset) {
3333 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3334 			return -EFAULT;
3335 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3336 
3337 		error = sigprocmask(how, &new_set, NULL);
3338 		if (error)
3339 			return error;
3340 	}
3341 
3342 	if (oset) {
3343 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3344 			return -EFAULT;
3345 	}
3346 
3347 	return 0;
3348 }
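/*
 * Example: hypothetical userspace usage of this syscall through the
 * glibc sigprocmask() wrapper (a sketch, not part of the kernel):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTERM);
 *	if (sigprocmask(SIG_BLOCK, &set, &old) == -1)
 *		perror("sigprocmask");
 *
 * Attempts to block SIGKILL/SIGSTOP are silently stripped by the
 * sigdelsetmask() above rather than reported as an error.
 */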
3349 
3350 #ifdef CONFIG_COMPAT
3351 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3352 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3353 {
3354 	sigset_t old_set = current->blocked;
3355 
3356 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3357 	if (sigsetsize != sizeof(sigset_t))
3358 		return -EINVAL;
3359 
3360 	if (nset) {
3361 		sigset_t new_set;
3362 		int error;
3363 		if (get_compat_sigset(&new_set, nset))
3364 			return -EFAULT;
3365 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3366 
3367 		error = sigprocmask(how, &new_set, NULL);
3368 		if (error)
3369 			return error;
3370 	}
3371 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3372 }
3373 #endif
3374 
3375 static void do_sigpending(sigset_t *set)
3376 {
3377 	spin_lock_irq(&current->sighand->siglock);
3378 	sigorsets(set, &current->pending.signal,
3379 		  &current->signal->shared_pending.signal);
3380 	spin_unlock_irq(&current->sighand->siglock);
3381 
3382 	/* Outside the lock because only this thread touches it.  */
3383 	sigandsets(set, &current->blocked, set);
3384 }
3385 
3386 /**
3387  *  sys_rt_sigpending - examine a pending signal that has been raised
3388  *			while blocked
3389  *  @uset: stores pending signals
3390  *  @sigsetsize: size of sigset_t type or smaller
3391  */
3392 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3393 {
3394 	sigset_t set;
3395 
3396 	if (sigsetsize > sizeof(*uset))
3397 		return -EINVAL;
3398 
3399 	do_sigpending(&set);
3400 
3401 	if (copy_to_user(uset, &set, sigsetsize))
3402 		return -EFAULT;
3403 
3404 	return 0;
3405 }
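/*
 * Example: hypothetical userspace check for a signal that arrived while
 * blocked, using the glibc sigpending() wrapper (a sketch;
 * handle_deferred_sigterm() is a made-up helper):
 *
 *	#include <signal.h>
 *
 *	sigset_t pending;
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
 *		handle_deferred_sigterm();
 */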
3406 
3407 #ifdef CONFIG_COMPAT
3408 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3409 		compat_size_t, sigsetsize)
3410 {
3411 	sigset_t set;
3412 
3413 	if (sigsetsize > sizeof(*uset))
3414 		return -EINVAL;
3415 
3416 	do_sigpending(&set);
3417 
3418 	return put_compat_sigset(uset, &set, sigsetsize);
3419 }
3420 #endif
3421 
3422 static const struct {
3423 	unsigned char limit, layout;
3424 } sig_sicodes[] = {
3425 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3426 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3427 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3428 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3429 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3430 #if defined(SIGEMT)
3431 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3432 #endif
3433 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3434 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3435 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3436 };
3437 
3438 static bool known_siginfo_layout(unsigned sig, int si_code)
3439 {
3440 	if (si_code == SI_KERNEL)
3441 		return true;
3442 	else if (si_code > SI_USER) {
3443 		if (sig_specific_sicodes(sig)) {
3444 			if (si_code <= sig_sicodes[sig].limit)
3445 				return true;
3446 		}
3447 		else if (si_code <= NSIGPOLL)
3448 			return true;
3449 	}
3450 	else if (si_code >= SI_DETHREAD)
3451 		return true;
3452 	else if (si_code == SI_ASYNCNL)
3453 		return true;
3454 	return false;
3455 }
3456 
3457 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3458 {
3459 	enum siginfo_layout layout = SIL_KILL;
3460 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3461 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3462 		    (si_code <= sig_sicodes[sig].limit)) {
3463 			layout = sig_sicodes[sig].layout;
3464 			/* Handle the exceptions */
3465 			if ((sig == SIGBUS) &&
3466 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3467 				layout = SIL_FAULT_MCEERR;
3468 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3469 				layout = SIL_FAULT_BNDERR;
3470 #ifdef SEGV_PKUERR
3471 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3472 				layout = SIL_FAULT_PKUERR;
3473 #endif
3474 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3475 				layout = SIL_FAULT_PERF_EVENT;
3476 			else if (IS_ENABLED(CONFIG_SPARC) &&
3477 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3478 				layout = SIL_FAULT_TRAPNO;
3479 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3480 				 ((sig == SIGFPE) ||
3481 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3482 				layout = SIL_FAULT_TRAPNO;
3483 		}
3484 		else if (si_code <= NSIGPOLL)
3485 			layout = SIL_POLL;
3486 	} else {
3487 		if (si_code == SI_TIMER)
3488 			layout = SIL_TIMER;
3489 		else if (si_code == SI_SIGIO)
3490 			layout = SIL_POLL;
3491 		else if (si_code < 0)
3492 			layout = SIL_RT;
3493 	}
3494 	return layout;
3495 }
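/*
 * Example: how a caller typically consumes the returned layout; a
 * minimal sketch modeled on the copy routines below, where touch() is
 * a stand-in for whatever per-field handling is needed:
 *
 *	switch (siginfo_layout(info->si_signo, info->si_code)) {
 *	case SIL_FAULT:
 *		touch(info->si_addr);	// fault layouts carry an address
 *		break;
 *	case SIL_CHLD:
 *		touch(info->si_pid);	// child layouts carry pid/status
 *		break;
 *	default:
 *		break;
 *	}
 */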
3496 
3497 static inline char __user *si_expansion(const siginfo_t __user *info)
3498 {
3499 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3500 }
3501 
3502 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3503 {
3504 	char __user *expansion = si_expansion(to);
3505 	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3506 		return -EFAULT;
3507 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3508 		return -EFAULT;
3509 	return 0;
3510 }
3511 
3512 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3513 				       const siginfo_t __user *from)
3514 {
3515 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3516 		char __user *expansion = si_expansion(from);
3517 		char buf[SI_EXPANSION_SIZE];
3518 		int i;
3519 		/*
3520 		 * An unknown si_code might need more than
3521 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3522 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3523 		 * will return this data to userspace exactly.
3524 		 */
3525 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3526 			return -EFAULT;
3527 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3528 			if (buf[i] != 0)
3529 				return -E2BIG;
3530 		}
3531 	}
3532 	return 0;
3533 }
3534 
3535 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3536 				    const siginfo_t __user *from)
3537 {
3538 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3539 		return -EFAULT;
3540 	to->si_signo = signo;
3541 	return post_copy_siginfo_from_user(to, from);
3542 }
3543 
3544 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3545 {
3546 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3547 		return -EFAULT;
3548 	return post_copy_siginfo_from_user(to, from);
3549 }
3550 
3551 #ifdef CONFIG_COMPAT
3552 /**
3553  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3554  * @to: compat siginfo destination
3555  * @from: kernel siginfo source
3556  *
3557  * Note: This function does not work properly for SIGCHLD on x32, but
3558  * fortunately it doesn't have to.  The only valid callers of this function are
3559  * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3560  * The latter does not care because SIGCHLD will never cause a coredump.
3561  */
3562 void copy_siginfo_to_external32(struct compat_siginfo *to,
3563 		const struct kernel_siginfo *from)
3564 {
3565 	memset(to, 0, sizeof(*to));
3566 
3567 	to->si_signo = from->si_signo;
3568 	to->si_errno = from->si_errno;
3569 	to->si_code  = from->si_code;
3570 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3571 	case SIL_KILL:
3572 		to->si_pid = from->si_pid;
3573 		to->si_uid = from->si_uid;
3574 		break;
3575 	case SIL_TIMER:
3576 		to->si_tid     = from->si_tid;
3577 		to->si_overrun = from->si_overrun;
3578 		to->si_int     = from->si_int;
3579 		break;
3580 	case SIL_POLL:
3581 		to->si_band = from->si_band;
3582 		to->si_fd   = from->si_fd;
3583 		break;
3584 	case SIL_FAULT:
3585 		to->si_addr = ptr_to_compat(from->si_addr);
3586 		break;
3587 	case SIL_FAULT_TRAPNO:
3588 		to->si_addr = ptr_to_compat(from->si_addr);
3589 		to->si_trapno = from->si_trapno;
3590 		break;
3591 	case SIL_FAULT_MCEERR:
3592 		to->si_addr = ptr_to_compat(from->si_addr);
3593 		to->si_addr_lsb = from->si_addr_lsb;
3594 		break;
3595 	case SIL_FAULT_BNDERR:
3596 		to->si_addr = ptr_to_compat(from->si_addr);
3597 		to->si_lower = ptr_to_compat(from->si_lower);
3598 		to->si_upper = ptr_to_compat(from->si_upper);
3599 		break;
3600 	case SIL_FAULT_PKUERR:
3601 		to->si_addr = ptr_to_compat(from->si_addr);
3602 		to->si_pkey = from->si_pkey;
3603 		break;
3604 	case SIL_FAULT_PERF_EVENT:
3605 		to->si_addr = ptr_to_compat(from->si_addr);
3606 		to->si_perf_data = from->si_perf_data;
3607 		to->si_perf_type = from->si_perf_type;
3608 		to->si_perf_flags = from->si_perf_flags;
3609 		break;
3610 	case SIL_CHLD:
3611 		to->si_pid = from->si_pid;
3612 		to->si_uid = from->si_uid;
3613 		to->si_status = from->si_status;
3614 		to->si_utime = from->si_utime;
3615 		to->si_stime = from->si_stime;
3616 		break;
3617 	case SIL_RT:
3618 		to->si_pid = from->si_pid;
3619 		to->si_uid = from->si_uid;
3620 		to->si_int = from->si_int;
3621 		break;
3622 	case SIL_SYS:
3623 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3624 		to->si_syscall   = from->si_syscall;
3625 		to->si_arch      = from->si_arch;
3626 		break;
3627 	}
3628 }
3629 
3630 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3631 			   const struct kernel_siginfo *from)
3632 {
3633 	struct compat_siginfo new;
3634 
3635 	copy_siginfo_to_external32(&new, from);
3636 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3637 		return -EFAULT;
3638 	return 0;
3639 }
3640 
3641 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3642 					 const struct compat_siginfo *from)
3643 {
3644 	clear_siginfo(to);
3645 	to->si_signo = from->si_signo;
3646 	to->si_errno = from->si_errno;
3647 	to->si_code  = from->si_code;
3648 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3649 	case SIL_KILL:
3650 		to->si_pid = from->si_pid;
3651 		to->si_uid = from->si_uid;
3652 		break;
3653 	case SIL_TIMER:
3654 		to->si_tid     = from->si_tid;
3655 		to->si_overrun = from->si_overrun;
3656 		to->si_int     = from->si_int;
3657 		break;
3658 	case SIL_POLL:
3659 		to->si_band = from->si_band;
3660 		to->si_fd   = from->si_fd;
3661 		break;
3662 	case SIL_FAULT:
3663 		to->si_addr = compat_ptr(from->si_addr);
3664 		break;
3665 	case SIL_FAULT_TRAPNO:
3666 		to->si_addr = compat_ptr(from->si_addr);
3667 		to->si_trapno = from->si_trapno;
3668 		break;
3669 	case SIL_FAULT_MCEERR:
3670 		to->si_addr = compat_ptr(from->si_addr);
3671 		to->si_addr_lsb = from->si_addr_lsb;
3672 		break;
3673 	case SIL_FAULT_BNDERR:
3674 		to->si_addr = compat_ptr(from->si_addr);
3675 		to->si_lower = compat_ptr(from->si_lower);
3676 		to->si_upper = compat_ptr(from->si_upper);
3677 		break;
3678 	case SIL_FAULT_PKUERR:
3679 		to->si_addr = compat_ptr(from->si_addr);
3680 		to->si_pkey = from->si_pkey;
3681 		break;
3682 	case SIL_FAULT_PERF_EVENT:
3683 		to->si_addr = compat_ptr(from->si_addr);
3684 		to->si_perf_data = from->si_perf_data;
3685 		to->si_perf_type = from->si_perf_type;
3686 		to->si_perf_flags = from->si_perf_flags;
3687 		break;
3688 	case SIL_CHLD:
3689 		to->si_pid    = from->si_pid;
3690 		to->si_uid    = from->si_uid;
3691 		to->si_status = from->si_status;
3692 #ifdef CONFIG_X86_X32_ABI
3693 		if (in_x32_syscall()) {
3694 			to->si_utime = from->_sifields._sigchld_x32._utime;
3695 			to->si_stime = from->_sifields._sigchld_x32._stime;
3696 		} else
3697 #endif
3698 		{
3699 			to->si_utime = from->si_utime;
3700 			to->si_stime = from->si_stime;
3701 		}
3702 		break;
3703 	case SIL_RT:
3704 		to->si_pid = from->si_pid;
3705 		to->si_uid = from->si_uid;
3706 		to->si_int = from->si_int;
3707 		break;
3708 	case SIL_SYS:
3709 		to->si_call_addr = compat_ptr(from->si_call_addr);
3710 		to->si_syscall   = from->si_syscall;
3711 		to->si_arch      = from->si_arch;
3712 		break;
3713 	}
3714 	return 0;
3715 }
3716 
3717 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3718 				      const struct compat_siginfo __user *ufrom)
3719 {
3720 	struct compat_siginfo from;
3721 
3722 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3723 		return -EFAULT;
3724 
3725 	from.si_signo = signo;
3726 	return post_copy_siginfo_from_user32(to, &from);
3727 }
3728 
3729 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3730 			     const struct compat_siginfo __user *ufrom)
3731 {
3732 	struct compat_siginfo from;
3733 
3734 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3735 		return -EFAULT;
3736 
3737 	return post_copy_siginfo_from_user32(to, &from);
3738 }
3739 #endif /* CONFIG_COMPAT */
3740 
3741 /**
3742  *  do_sigtimedwait - wait for queued signals specified in @which
3743  *  @which: queued signals to wait for
3744  *  @info: if non-null, the signal's siginfo is returned here
3745  *  @ts: upper bound on process time suspension
3746  */
3747 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3748 		    const struct timespec64 *ts)
3749 {
3750 	ktime_t *to = NULL, timeout = KTIME_MAX;
3751 	struct task_struct *tsk = current;
3752 	sigset_t mask = *which;
3753 	enum pid_type type;
3754 	int sig, ret = 0;
3755 
3756 	if (ts) {
3757 		if (!timespec64_valid(ts))
3758 			return -EINVAL;
3759 		timeout = timespec64_to_ktime(*ts);
3760 		to = &timeout;
3761 	}
3762 
3763 	/*
3764 	 * Invert the set of allowed signals to get those we want to block.
3765 	 */
3766 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3767 	signotset(&mask);
3768 
3769 	spin_lock_irq(&tsk->sighand->siglock);
3770 	sig = dequeue_signal(&mask, info, &type);
3771 	if (!sig && timeout) {
3772 		/*
3773 		 * None ready, temporarily unblock the signals we're interested
3774 		 * in while we sleep, so that we'll be awakened when they
3775 		 * arrive. Unblocking is always fine; we can avoid
3776 		 * set_current_blocked().
3777 		 */
3778 		tsk->real_blocked = tsk->blocked;
3779 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3780 		recalc_sigpending();
3781 		spin_unlock_irq(&tsk->sighand->siglock);
3782 
3783 		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3784 		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3785 					       HRTIMER_MODE_REL);
3786 		spin_lock_irq(&tsk->sighand->siglock);
3787 		__set_task_blocked(tsk, &tsk->real_blocked);
3788 		sigemptyset(&tsk->real_blocked);
3789 		sig = dequeue_signal(&mask, info, &type);
3790 	}
3791 	spin_unlock_irq(&tsk->sighand->siglock);
3792 
3793 	if (sig)
3794 		return sig;
3795 	return ret ? -EINTR : -EAGAIN;
3796 }
3797 
3798 /**
3799  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3800  *			in @uthese
3801  *  @uthese: queued signals to wait for
3802  *  @uinfo: if non-null, the signal's siginfo is returned here
3803  *  @uts: upper bound on process time suspension
3804  *  @sigsetsize: size of sigset_t type
3805  */
3806 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3807 		siginfo_t __user *, uinfo,
3808 		const struct __kernel_timespec __user *, uts,
3809 		size_t, sigsetsize)
3810 {
3811 	sigset_t these;
3812 	struct timespec64 ts;
3813 	kernel_siginfo_t info;
3814 	int ret;
3815 
3816 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3817 	if (sigsetsize != sizeof(sigset_t))
3818 		return -EINVAL;
3819 
3820 	if (copy_from_user(&these, uthese, sizeof(these)))
3821 		return -EFAULT;
3822 
3823 	if (uts) {
3824 		if (get_timespec64(&ts, uts))
3825 			return -EFAULT;
3826 	}
3827 
3828 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3829 
3830 	if (ret > 0 && uinfo) {
3831 		if (copy_siginfo_to_user(uinfo, &info))
3832 			ret = -EFAULT;
3833 	}
3834 
3835 	return ret;
3836 }
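/*
 * Example: hypothetical userspace usage through the glibc sigtimedwait()
 * wrapper (a sketch; the signal must be blocked first, or it may be
 * delivered to a handler instead of being dequeued here):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", si.si_pid);
 */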
3837 
3838 #ifdef CONFIG_COMPAT_32BIT_TIME
3839 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3840 		siginfo_t __user *, uinfo,
3841 		const struct old_timespec32 __user *, uts,
3842 		size_t, sigsetsize)
3843 {
3844 	sigset_t these;
3845 	struct timespec64 ts;
3846 	kernel_siginfo_t info;
3847 	int ret;
3848 
3849 	if (sigsetsize != sizeof(sigset_t))
3850 		return -EINVAL;
3851 
3852 	if (copy_from_user(&these, uthese, sizeof(these)))
3853 		return -EFAULT;
3854 
3855 	if (uts) {
3856 		if (get_old_timespec32(&ts, uts))
3857 			return -EFAULT;
3858 	}
3859 
3860 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3861 
3862 	if (ret > 0 && uinfo) {
3863 		if (copy_siginfo_to_user(uinfo, &info))
3864 			ret = -EFAULT;
3865 	}
3866 
3867 	return ret;
3868 }
3869 #endif
3870 
3871 #ifdef CONFIG_COMPAT
3872 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3873 		struct compat_siginfo __user *, uinfo,
3874 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3875 {
3876 	sigset_t s;
3877 	struct timespec64 t;
3878 	kernel_siginfo_t info;
3879 	long ret;
3880 
3881 	if (sigsetsize != sizeof(sigset_t))
3882 		return -EINVAL;
3883 
3884 	if (get_compat_sigset(&s, uthese))
3885 		return -EFAULT;
3886 
3887 	if (uts) {
3888 		if (get_timespec64(&t, uts))
3889 			return -EFAULT;
3890 	}
3891 
3892 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3893 
3894 	if (ret > 0 && uinfo) {
3895 		if (copy_siginfo_to_user32(uinfo, &info))
3896 			ret = -EFAULT;
3897 	}
3898 
3899 	return ret;
3900 }
3901 
3902 #ifdef CONFIG_COMPAT_32BIT_TIME
3903 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3904 		struct compat_siginfo __user *, uinfo,
3905 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3906 {
3907 	sigset_t s;
3908 	struct timespec64 t;
3909 	kernel_siginfo_t info;
3910 	long ret;
3911 
3912 	if (sigsetsize != sizeof(sigset_t))
3913 		return -EINVAL;
3914 
3915 	if (get_compat_sigset(&s, uthese))
3916 		return -EFAULT;
3917 
3918 	if (uts) {
3919 		if (get_old_timespec32(&t, uts))
3920 			return -EFAULT;
3921 	}
3922 
3923 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3924 
3925 	if (ret > 0 && uinfo) {
3926 		if (copy_siginfo_to_user32(uinfo, &info))
3927 			ret = -EFAULT;
3928 	}
3929 
3930 	return ret;
3931 }
3932 #endif
3933 #endif
3934 
3935 static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
3936 				 enum pid_type type)
3937 {
3938 	clear_siginfo(info);
3939 	info->si_signo = sig;
3940 	info->si_errno = 0;
3941 	info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
3942 	info->si_pid = task_tgid_vnr(current);
3943 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3944 }
3945 
3946 /**
3947  *  sys_kill - send a signal to a process
3948  *  @pid: the PID of the process
3949  *  @sig: signal to be sent
3950  */
3951 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3952 {
3953 	struct kernel_siginfo info;
3954 
3955 	prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
3956 
3957 	return kill_something_info(sig, &info, pid);
3958 }
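/*
 * Example: hypothetical userspace usage; kill_something_info() gives
 * @pid its POSIX meanings (a sketch, error handling elided):
 *
 *	kill(1234, SIGTERM);	// pid > 0: signal that process
 *	kill(0, SIGTERM);	// pid == 0: signal the caller's process group
 *	kill(-5678, SIGTERM);	// pid < -1: signal process group 5678
 *	kill(1234, 0);		// sig == 0: existence/permission probe only
 */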
3959 
3960 /*
3961  * Verify that the signaler and signalee either are in the same pid namespace
3962  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3963  * namespace.
3964  */
3965 static bool access_pidfd_pidns(struct pid *pid)
3966 {
3967 	struct pid_namespace *active = task_active_pid_ns(current);
3968 	struct pid_namespace *p = ns_of_pid(pid);
3969 
3970 	for (;;) {
3971 		if (!p)
3972 			return false;
3973 		if (p == active)
3974 			break;
3975 		p = p->parent;
3976 	}
3977 
3978 	return true;
3979 }
3980 
3981 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3982 		siginfo_t __user *info)
3983 {
3984 #ifdef CONFIG_COMPAT
3985 	/*
3986 	 * Avoid hooking up compat syscalls and instead handle necessary
3987 	 * conversions here. Note, this is a stop-gap measure and should not be
3988 	 * considered a generic solution.
3989 	 */
3990 	if (in_compat_syscall())
3991 		return copy_siginfo_from_user32(
3992 			kinfo, (struct compat_siginfo __user *)info);
3993 #endif
3994 	return copy_siginfo_from_user(kinfo, info);
3995 }
3996 
3997 static struct pid *pidfd_to_pid(const struct file *file)
3998 {
3999 	struct pid *pid;
4000 
4001 	pid = pidfd_pid(file);
4002 	if (!IS_ERR(pid))
4003 		return pid;
4004 
4005 	return tgid_pidfd_to_pid(file);
4006 }
4007 
4008 #define PIDFD_SEND_SIGNAL_FLAGS                            \
4009 	(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
4010 	 PIDFD_SIGNAL_PROCESS_GROUP)
4011 
4012 static int do_pidfd_send_signal(struct pid *pid, int sig, enum pid_type type,
4013 				siginfo_t __user *info, unsigned int flags)
4014 {
4015 	kernel_siginfo_t kinfo;
4016 
4017 	switch (flags) {
4018 	case PIDFD_SIGNAL_THREAD:
4019 		type = PIDTYPE_PID;
4020 		break;
4021 	case PIDFD_SIGNAL_THREAD_GROUP:
4022 		type = PIDTYPE_TGID;
4023 		break;
4024 	case PIDFD_SIGNAL_PROCESS_GROUP:
4025 		type = PIDTYPE_PGID;
4026 		break;
4027 	}
4028 
4029 	if (info) {
4030 		int ret;
4031 
4032 		ret = copy_siginfo_from_user_any(&kinfo, info);
4033 		if (unlikely(ret))
4034 			return ret;
4035 
4036 		if (unlikely(sig != kinfo.si_signo))
4037 			return -EINVAL;
4038 
4039 		/* Only allow sending arbitrary signals to yourself. */
4040 		if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
4041 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
4042 			return -EPERM;
4043 	} else {
4044 		prepare_kill_siginfo(sig, &kinfo, type);
4045 	}
4046 
4047 	if (type == PIDTYPE_PGID)
4048 		return kill_pgrp_info(sig, &kinfo, pid);
4049 
4050 	return kill_pid_info_type(sig, &kinfo, pid, type);
4051 }
4052 
4053 /**
4054  * sys_pidfd_send_signal - Signal a process through a pidfd
4055  * @pidfd:  file descriptor of the process
4056  * @sig:    signal to send
4057  * @info:   signal info
4058  * @flags:  flags selecting the signal scope (see PIDFD_SEND_SIGNAL_FLAGS), or 0
4059  *
4060  * Send the signal to the thread group or to the individual thread depending
4061  * on PIDFD_THREAD.
4062  * In the future extension to @flags may be used to override the default scope
4063  * In the future, an extension to @flags may be used to override the default scope
4064  *
4065  * Return: 0 on success, negative errno on failure
4066  */
4067 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
4068 		siginfo_t __user *, info, unsigned int, flags)
4069 {
4070 	struct pid *pid;
4071 	enum pid_type type;
4072 	int ret;
4073 
4074 	/* Reject any flag bits outside the supported signal-scope flags. */
4075 	if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
4076 		return -EINVAL;
4077 
4078 	/* Ensure that only a single signal scope determining flag is set. */
4079 	if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
4080 		return -EINVAL;
4081 
4082 	switch (pidfd) {
4083 	case PIDFD_SELF_THREAD:
4084 		pid = get_task_pid(current, PIDTYPE_PID);
4085 		type = PIDTYPE_PID;
4086 		break;
4087 	case PIDFD_SELF_THREAD_GROUP:
4088 		pid = get_task_pid(current, PIDTYPE_TGID);
4089 		type = PIDTYPE_TGID;
4090 		break;
4091 	default: {
4092 		CLASS(fd, f)(pidfd);
4093 		if (fd_empty(f))
4094 			return -EBADF;
4095 
4096 		/* Is this a pidfd? */
4097 		pid = pidfd_to_pid(fd_file(f));
4098 		if (IS_ERR(pid))
4099 			return PTR_ERR(pid);
4100 
4101 		if (!access_pidfd_pidns(pid))
4102 			return -EINVAL;
4103 
4104 		/* Infer scope from the type of pidfd. */
4105 		if (fd_file(f)->f_flags & PIDFD_THREAD)
4106 			type = PIDTYPE_PID;
4107 		else
4108 			type = PIDTYPE_TGID;
4109 
4110 		return do_pidfd_send_signal(pid, sig, type, info, flags);
4111 	}
4112 	}
4113 
4114 	ret = do_pidfd_send_signal(pid, sig, type, info, flags);
4115 	put_pid(pid);
4116 
4117 	return ret;
4118 }
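/*
 * Example: hypothetical userspace usage via syscall(2), since libc
 * wrappers may be absent (a sketch; error handling elided):
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *
 * Unlike kill(), the pidfd pins the struct pid, so the signal cannot be
 * misdirected if the numeric PID is recycled after the open.
 */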
4119 
4120 static int
4121 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
4122 {
4123 	struct task_struct *p;
4124 	int error = -ESRCH;
4125 
4126 	rcu_read_lock();
4127 	p = find_task_by_vpid(pid);
4128 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
4129 		error = check_kill_permission(sig, info, p);
4130 		/*
4131 		 * The null signal is a permissions and process existence
4132 		 * probe.  No signal is actually delivered.
4133 		 */
4134 		if (!error && sig) {
4135 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4136 			/*
4137 			 * If lock_task_sighand() failed we pretend the task
4138 			 * dies after receiving the signal. The window is tiny,
4139 			 * and the signal is private anyway.
4140 			 */
4141 			if (unlikely(error == -ESRCH))
4142 				error = 0;
4143 		}
4144 	}
4145 	rcu_read_unlock();
4146 
4147 	return error;
4148 }
4149 
4150 static int do_tkill(pid_t tgid, pid_t pid, int sig)
4151 {
4152 	struct kernel_siginfo info;
4153 
4154 	prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
4155 
4156 	return do_send_specific(tgid, pid, sig, &info);
4157 }
4158 
4159 /**
4160  *  sys_tgkill - send signal to one specific thread
4161  *  @tgid: the thread group ID of the thread
4162  *  @pid: the PID of the thread
4163  *  @sig: signal to be sent
4164  *
4165  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
4166  *  exists but no longer belongs to the target process. This
4167  *  method solves the problem of threads exiting and PIDs getting reused.
4168  */
4169 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4170 {
4171 	/* This is only valid for single tasks */
4172 	if (pid <= 0 || tgid <= 0)
4173 		return -EINVAL;
4174 
4175 	return do_tkill(tgid, pid, sig);
4176 }
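/*
 * Example: hypothetical userspace usage (a sketch); pairing the tgid
 * with the tid is what avoids signalling a recycled thread ID:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = worker_tid;		// saved from gettid() in the thread
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */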
4177 
4178 /**
4179  *  sys_tkill - send signal to one specific task
4180  *  @pid: the PID of the task
4181  *  @sig: signal to be sent
4182  *
4183  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4184  */
4185 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4186 {
4187 	/* This is only valid for single tasks */
4188 	if (pid <= 0)
4189 		return -EINVAL;
4190 
4191 	return do_tkill(0, pid, sig);
4192 }
4193 
4194 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4195 {
4196 	/* Not even root can pretend to send signals from the kernel.
4197 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4198 	 */
4199 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4200 	    (task_pid_vnr(current) != pid))
4201 		return -EPERM;
4202 
4203 	/* POSIX.1b doesn't mention process groups.  */
4204 	return kill_proc_info(sig, info, pid);
4205 }
4206 
4207 /**
4208  *  sys_rt_sigqueueinfo - send signal information to a process
4209  *  @pid: the PID of the process
4210  *  @sig: signal to be sent
4211  *  @uinfo: signal info to be sent
4212  */
4213 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4214 		siginfo_t __user *, uinfo)
4215 {
4216 	kernel_siginfo_t info;
4217 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4218 	if (unlikely(ret))
4219 		return ret;
4220 	return do_rt_sigqueueinfo(pid, sig, &info);
4221 }
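/*
 * Example: hypothetical userspace usage through the glibc sigqueue()
 * wrapper, which fills in a siginfo with si_code = SI_QUEUE (a sketch):
 *
 *	#include <signal.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGUSR1, v);	// receiver sees si_value/si_int
 *
 * Hand-crafting a siginfo with si_code >= 0 for another process is
 * rejected with -EPERM by the check above.
 */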
4222 
4223 #ifdef CONFIG_COMPAT
4224 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4225 			compat_pid_t, pid,
4226 			int, sig,
4227 			struct compat_siginfo __user *, uinfo)
4228 {
4229 	kernel_siginfo_t info;
4230 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4231 	if (unlikely(ret))
4232 		return ret;
4233 	return do_rt_sigqueueinfo(pid, sig, &info);
4234 }
4235 #endif
4236 
4237 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4238 {
4239 	/* This is only valid for single tasks */
4240 	if (pid <= 0 || tgid <= 0)
4241 		return -EINVAL;
4242 
4243 	/* Not even root can pretend to send signals from the kernel.
4244 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4245 	 */
4246 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4247 	    (task_pid_vnr(current) != pid))
4248 		return -EPERM;
4249 
4250 	return do_send_specific(tgid, pid, sig, info);
4251 }
4252 
4253 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4254 		siginfo_t __user *, uinfo)
4255 {
4256 	kernel_siginfo_t info;
4257 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4258 	if (unlikely(ret))
4259 		return ret;
4260 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4261 }
4262 
4263 #ifdef CONFIG_COMPAT
4264 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4265 			compat_pid_t, tgid,
4266 			compat_pid_t, pid,
4267 			int, sig,
4268 			struct compat_siginfo __user *, uinfo)
4269 {
4270 	kernel_siginfo_t info;
4271 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4272 	if (unlikely(ret))
4273 		return ret;
4274 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4275 }
4276 #endif
4277 
4278 /*
4279  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4280  */
4281 void kernel_sigaction(int sig, __sighandler_t action)
4282 {
4283 	spin_lock_irq(&current->sighand->siglock);
4284 	current->sighand->action[sig - 1].sa.sa_handler = action;
4285 	if (action == SIG_IGN) {
4286 		sigset_t mask;
4287 
4288 		sigemptyset(&mask);
4289 		sigaddset(&mask, sig);
4290 
4291 		flush_sigqueue_mask(current, &mask, &current->signal->shared_pending);
4292 		flush_sigqueue_mask(current, &mask, &current->pending);
4293 		recalc_sigpending();
4294 	}
4295 	spin_unlock_irq(&current->sighand->siglock);
4296 }
4297 EXPORT_SYMBOL(kernel_sigaction);
4298 
4299 void __weak sigaction_compat_abi(struct k_sigaction *act,
4300 		struct k_sigaction *oact)
4301 {
4302 }
4303 
4304 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4305 {
4306 	struct task_struct *p = current, *t;
4307 	struct k_sigaction *k;
4308 	sigset_t mask;
4309 
4310 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4311 		return -EINVAL;
4312 
4313 	k = &p->sighand->action[sig-1];
4314 
4315 	spin_lock_irq(&p->sighand->siglock);
4316 	if (k->sa.sa_flags & SA_IMMUTABLE) {
4317 		spin_unlock_irq(&p->sighand->siglock);
4318 		return -EINVAL;
4319 	}
4320 	if (oact)
4321 		*oact = *k;
4322 
4323 	/*
4324 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4325 	 * e.g. by having an architecture use the bit in their uapi.
4326 	 */
4327 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4328 
4329 	/*
4330 	 * Clear unknown flag bits in order to allow userspace to detect missing
4331 	 * support for flag bits and to allow the kernel to use non-uapi bits
4332 	 * internally.
4333 	 */
4334 	if (act)
4335 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4336 	if (oact)
4337 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4338 
4339 	sigaction_compat_abi(act, oact);
4340 
4341 	if (act) {
4342 		bool was_ignored = k->sa.sa_handler == SIG_IGN;
4343 
4344 		sigdelsetmask(&act->sa.sa_mask,
4345 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4346 		*k = *act;
4347 		/*
4348 		 * POSIX 3.3.1.3:
4349 		 *  "Setting a signal action to SIG_IGN for a signal that is
4350 		 *   pending shall cause the pending signal to be discarded,
4351 		 *   whether or not it is blocked."
4352 		 *
4353 		 *  "Setting a signal action to SIG_DFL for a signal that is
4354 		 *   pending and whose default action is to ignore the signal
4355 		 *   (for example, SIGCHLD), shall cause the pending signal to
4356 		 *   be discarded, whether or not it is blocked"
4357 		 */
4358 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4359 			sigemptyset(&mask);
4360 			sigaddset(&mask, sig);
4361 			flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
4362 			for_each_thread(p, t)
4363 				flush_sigqueue_mask(p, &mask, &t->pending);
4364 		} else if (was_ignored) {
4365 			posixtimer_sig_unignore(p, sig);
4366 		}
4367 	}
4368 
4369 	spin_unlock_irq(&p->sighand->siglock);
4370 	return 0;
4371 }
4372 
4373 #ifdef CONFIG_DYNAMIC_SIGFRAME
4374 static inline void sigaltstack_lock(void)
4375 	__acquires(&current->sighand->siglock)
4376 {
4377 	spin_lock_irq(&current->sighand->siglock);
4378 }
4379 
4380 static inline void sigaltstack_unlock(void)
4381 	__releases(&current->sighand->siglock)
4382 {
4383 	spin_unlock_irq(&current->sighand->siglock);
4384 }
4385 #else
4386 static inline void sigaltstack_lock(void) { }
4387 static inline void sigaltstack_unlock(void) { }
4388 #endif
4389 
4390 static int
do_sigaltstack(const stack_t * ss,stack_t * oss,unsigned long sp,size_t min_ss_size)4391 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4392 		size_t min_ss_size)
4393 {
4394 	struct task_struct *t = current;
4395 	int ret = 0;
4396 
4397 	if (oss) {
4398 		memset(oss, 0, sizeof(stack_t));
4399 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4400 		oss->ss_size = t->sas_ss_size;
4401 		oss->ss_flags = sas_ss_flags(sp) |
4402 			(current->sas_ss_flags & SS_FLAG_BITS);
4403 	}
4404 
4405 	if (ss) {
4406 		void __user *ss_sp = ss->ss_sp;
4407 		size_t ss_size = ss->ss_size;
4408 		unsigned ss_flags = ss->ss_flags;
4409 		int ss_mode;
4410 
4411 		if (unlikely(on_sig_stack(sp)))
4412 			return -EPERM;
4413 
4414 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4415 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4416 				ss_mode != 0))
4417 			return -EINVAL;
4418 
4419 		/*
4420 		 * Return before taking any locks if no actual
4421 		 * sigaltstack changes were requested.
4422 		 */
4423 		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4424 		    t->sas_ss_size == ss_size &&
4425 		    t->sas_ss_flags == ss_flags)
4426 			return 0;
4427 
4428 		sigaltstack_lock();
4429 		if (ss_mode == SS_DISABLE) {
4430 			ss_size = 0;
4431 			ss_sp = NULL;
4432 		} else {
4433 			if (unlikely(ss_size < min_ss_size))
4434 				ret = -ENOMEM;
4435 			if (!sigaltstack_size_valid(ss_size))
4436 				ret = -ENOMEM;
4437 		}
4438 		if (!ret) {
4439 			t->sas_ss_sp = (unsigned long) ss_sp;
4440 			t->sas_ss_size = ss_size;
4441 			t->sas_ss_flags = ss_flags;
4442 		}
4443 		sigaltstack_unlock();
4444 	}
4445 	return ret;
4446 }
4447 
4448 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4449 {
4450 	stack_t new, old;
4451 	int err;
4452 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4453 		return -EFAULT;
4454 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4455 			      current_user_stack_pointer(),
4456 			      MINSIGSTKSZ);
4457 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4458 		err = -EFAULT;
4459 	return err;
4460 }
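/*
 * Example: hypothetical userspace setup of an alternate stack so a
 * SIGSEGV handler can still run after a stack overflow (a sketch):
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 * A handler only runs on this stack if installed with SA_ONSTACK.
 */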
4461 
4462 int restore_altstack(const stack_t __user *uss)
4463 {
4464 	stack_t new;
4465 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4466 		return -EFAULT;
4467 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4468 			     MINSIGSTKSZ);
4469 	/* squash all but EFAULT for now */
4470 	return 0;
4471 }
4472 
4473 int __save_altstack(stack_t __user *uss, unsigned long sp)
4474 {
4475 	struct task_struct *t = current;
4476 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4477 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4478 		__put_user(t->sas_ss_size, &uss->ss_size);
4479 	return err;
4480 }
4481 
4482 #ifdef CONFIG_COMPAT
4483 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4484 				 compat_stack_t __user *uoss_ptr)
4485 {
4486 	stack_t uss, uoss;
4487 	int ret;
4488 
4489 	if (uss_ptr) {
4490 		compat_stack_t uss32;
4491 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4492 			return -EFAULT;
4493 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4494 		uss.ss_flags = uss32.ss_flags;
4495 		uss.ss_size = uss32.ss_size;
4496 	}
4497 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4498 			     compat_user_stack_pointer(),
4499 			     COMPAT_MINSIGSTKSZ);
4500 	if (ret >= 0 && uoss_ptr)  {
4501 		compat_stack_t old;
4502 		memset(&old, 0, sizeof(old));
4503 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4504 		old.ss_flags = uoss.ss_flags;
4505 		old.ss_size = uoss.ss_size;
4506 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4507 			ret = -EFAULT;
4508 	}
4509 	return ret;
4510 }
4511 
4512 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4513 			const compat_stack_t __user *, uss_ptr,
4514 			compat_stack_t __user *, uoss_ptr)
4515 {
4516 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4517 }
4518 
4519 int compat_restore_altstack(const compat_stack_t __user *uss)
4520 {
4521 	int err = do_compat_sigaltstack(uss, NULL);
4522 	/* squash all but -EFAULT for now */
4523 	return err == -EFAULT ? err : 0;
4524 }
4525 
4526 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4527 {
4528 	int err;
4529 	struct task_struct *t = current;
4530 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4531 			 &uss->ss_sp) |
4532 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4533 		__put_user(t->sas_ss_size, &uss->ss_size);
4534 	return err;
4535 }
4536 #endif
4537 
4538 #ifdef __ARCH_WANT_SYS_SIGPENDING
4539 
4540 /**
4541  *  sys_sigpending - examine pending signals
4542  *  @uset: where the mask of pending signals is returned
4543  */
4544 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4545 {
4546 	sigset_t set;
4547 
4548 	if (sizeof(old_sigset_t) > sizeof(*uset))
4549 		return -EINVAL;
4550 
4551 	do_sigpending(&set);
4552 
4553 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4554 		return -EFAULT;
4555 
4556 	return 0;
4557 }
4558 
4559 #ifdef CONFIG_COMPAT
4560 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4561 {
4562 	sigset_t set;
4563 
4564 	do_sigpending(&set);
4565 
4566 	return put_user(set.sig[0], set32);
4567 }
4568 #endif
4569 
4570 #endif
4571 
4572 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4573 /**
4574  *  sys_sigprocmask - examine and change blocked signals
4575  *  @how: whether to add, remove, or set signals
4576  *  @nset: signals to add or remove (if non-null)
4577  *  @oset: previous value of signal mask if non-null
4578  *
4579  * Some platforms have their own version with special arguments;
4580  * others support only sys_rt_sigprocmask.
4581  */
4582 
4583 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4584 		old_sigset_t __user *, oset)
4585 {
4586 	old_sigset_t old_set, new_set;
4587 	sigset_t new_blocked;
4588 
4589 	old_set = current->blocked.sig[0];
4590 
4591 	if (nset) {
4592 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4593 			return -EFAULT;
4594 
4595 		new_blocked = current->blocked;
4596 
4597 		switch (how) {
4598 		case SIG_BLOCK:
4599 			sigaddsetmask(&new_blocked, new_set);
4600 			break;
4601 		case SIG_UNBLOCK:
4602 			sigdelsetmask(&new_blocked, new_set);
4603 			break;
4604 		case SIG_SETMASK:
4605 			new_blocked.sig[0] = new_set;
4606 			break;
4607 		default:
4608 			return -EINVAL;
4609 		}
4610 
4611 		set_current_blocked(&new_blocked);
4612 	}
4613 
4614 	if (oset) {
4615 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4616 			return -EFAULT;
4617 	}
4618 
4619 	return 0;
4620 }
4621 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4622 
4623 #ifndef CONFIG_ODD_RT_SIGACTION
4624 /**
4625  *  sys_rt_sigaction - alter an action taken by a process
4626  *  @sig: signal to be sent
4627  *  @act: new sigaction
4628  *  @oact: used to save the previous sigaction
4629  *  @sigsetsize: size of sigset_t type
4630  */
4631 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4632 		const struct sigaction __user *, act,
4633 		struct sigaction __user *, oact,
4634 		size_t, sigsetsize)
4635 {
4636 	struct k_sigaction new_sa, old_sa;
4637 	int ret;
4638 
4639 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4640 	if (sigsetsize != sizeof(sigset_t))
4641 		return -EINVAL;
4642 
4643 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4644 		return -EFAULT;
4645 
4646 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4647 	if (ret)
4648 		return ret;
4649 
4650 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4651 		return -EFAULT;
4652 
4653 	return 0;
4654 }
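/*
 * Example: hypothetical userspace usage through the glibc sigaction()
 * wrapper, which is implemented on top of this syscall (a sketch):
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_term;
 *	static void on_term(int sig) { got_term = 1; }
 *
 *	struct sigaction sa = { .sa_handler = on_term };
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGTERM, &sa, NULL);
 */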
4655 #ifdef CONFIG_COMPAT
4656 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4657 		const struct compat_sigaction __user *, act,
4658 		struct compat_sigaction __user *, oact,
4659 		compat_size_t, sigsetsize)
4660 {
4661 	struct k_sigaction new_ka, old_ka;
4662 #ifdef __ARCH_HAS_SA_RESTORER
4663 	compat_uptr_t restorer;
4664 #endif
4665 	int ret;
4666 
4667 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4668 	if (sigsetsize != sizeof(compat_sigset_t))
4669 		return -EINVAL;
4670 
4671 	if (act) {
4672 		compat_uptr_t handler;
4673 		ret = get_user(handler, &act->sa_handler);
4674 		new_ka.sa.sa_handler = compat_ptr(handler);
4675 #ifdef __ARCH_HAS_SA_RESTORER
4676 		ret |= get_user(restorer, &act->sa_restorer);
4677 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4678 #endif
4679 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4680 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4681 		if (ret)
4682 			return -EFAULT;
4683 	}
4684 
4685 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4686 	if (!ret && oact) {
4687 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4688 			       &oact->sa_handler);
4689 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4690 					 sizeof(oact->sa_mask));
4691 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4692 #ifdef __ARCH_HAS_SA_RESTORER
4693 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4694 				&oact->sa_restorer);
4695 #endif
4696 	}
4697 	return ret;
4698 }
4699 #endif
4700 #endif /* !CONFIG_ODD_RT_SIGACTION */
4701 
4702 #ifdef CONFIG_OLD_SIGACTION
4703 SYSCALL_DEFINE3(sigaction, int, sig,
4704 		const struct old_sigaction __user *, act,
4705 	        struct old_sigaction __user *, oact)
4706 {
4707 	struct k_sigaction new_ka, old_ka;
4708 	int ret;
4709 
4710 	if (act) {
4711 		old_sigset_t mask;
4712 		if (!access_ok(act, sizeof(*act)) ||
4713 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4714 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4715 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4716 		    __get_user(mask, &act->sa_mask))
4717 			return -EFAULT;
4718 #ifdef __ARCH_HAS_KA_RESTORER
4719 		new_ka.ka_restorer = NULL;
4720 #endif
4721 		siginitset(&new_ka.sa.sa_mask, mask);
4722 	}
4723 
4724 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4725 
4726 	if (!ret && oact) {
4727 		if (!access_ok(oact, sizeof(*oact)) ||
4728 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4729 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4730 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4731 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4732 			return -EFAULT;
4733 	}
4734 
4735 	return ret;
4736 }
4737 #endif
4738 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4739 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4740 		const struct compat_old_sigaction __user *, act,
4741 	        struct compat_old_sigaction __user *, oact)
4742 {
4743 	struct k_sigaction new_ka, old_ka;
4744 	int ret;
4745 	compat_old_sigset_t mask;
4746 	compat_uptr_t handler, restorer;
4747 
4748 	if (act) {
4749 		if (!access_ok(act, sizeof(*act)) ||
4750 		    __get_user(handler, &act->sa_handler) ||
4751 		    __get_user(restorer, &act->sa_restorer) ||
4752 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4753 		    __get_user(mask, &act->sa_mask))
4754 			return -EFAULT;
4755 
4756 #ifdef __ARCH_HAS_KA_RESTORER
4757 		new_ka.ka_restorer = NULL;
4758 #endif
4759 		new_ka.sa.sa_handler = compat_ptr(handler);
4760 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4761 		siginitset(&new_ka.sa.sa_mask, mask);
4762 	}
4763 
4764 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4765 
4766 	if (!ret && oact) {
4767 		if (!access_ok(oact, sizeof(*oact)) ||
4768 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4769 			       &oact->sa_handler) ||
4770 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4771 			       &oact->sa_restorer) ||
4772 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4773 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4774 			return -EFAULT;
4775 	}
4776 	return ret;
4777 }
4778 #endif
4779 
4780 #ifdef CONFIG_SGETMASK_SYSCALL
4781 
4782 /*
4783  * For backwards compatibility.  Functionality superseded by sigprocmask.
4784  */
4785 SYSCALL_DEFINE0(sgetmask)
4786 {
4787 	/* SMP safe */
4788 	return current->blocked.sig[0];
4789 }
4790 
4791 SYSCALL_DEFINE1(ssetmask, int, newmask)
4792 {
4793 	int old = current->blocked.sig[0];
4794 	sigset_t newset;
4795 
4796 	siginitset(&newset, newmask);
4797 	set_current_blocked(&newset);
4798 
4799 	return old;
4800 }
4801 #endif /* CONFIG_SGETMASK_SYSCALL */
4802 
4803 #ifdef __ARCH_WANT_SYS_SIGNAL
4804 /*
4805  * For backwards compatibility.  Functionality superseded by sigaction.
4806  */
4807 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4808 {
4809 	struct k_sigaction new_sa, old_sa;
4810 	int ret;
4811 
4812 	new_sa.sa.sa_handler = handler;
4813 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4814 	sigemptyset(&new_sa.sa.sa_mask);
4815 
4816 	ret = do_sigaction(sig, &new_sa, &old_sa);
4817 
4818 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4819 }
4820 #endif /* __ARCH_WANT_SYS_SIGNAL */
4821 
4822 #ifdef __ARCH_WANT_SYS_PAUSE
4823 
4824 SYSCALL_DEFINE0(pause)
4825 {
4826 	while (!signal_pending(current)) {
4827 		__set_current_state(TASK_INTERRUPTIBLE);
4828 		schedule();
4829 	}
4830 	return -ERESTARTNOHAND;
4831 }
4832 
4833 #endif
4834 
4835 static int sigsuspend(sigset_t *set)
4836 {
4837 	current->saved_sigmask = current->blocked;
4838 	set_current_blocked(set);
4839 
4840 	while (!signal_pending(current)) {
4841 		__set_current_state(TASK_INTERRUPTIBLE);
4842 		schedule();
4843 	}
4844 	set_restore_sigmask();
4845 	return -ERESTARTNOHAND;
4846 }
4847 
4848 /**
4849  *  sys_rt_sigsuspend - replace the signal mask for a value with the
4850  *  sys_rt_sigsuspend - replace the signal mask with the @unewset
4851  *	value until a signal is received
4852  *  @sigsetsize: size of sigset_t type
4853  */
4854 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4855 {
4856 	sigset_t newset;
4857 
4858 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4859 	if (sigsetsize != sizeof(sigset_t))
4860 		return -EINVAL;
4861 
4862 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4863 		return -EFAULT;
4864 	return sigsuspend(&newset);
4865 }
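/*
 * Example: the classic race-free wait, hypothetically in userspace:
 * block the signal, test the flag, then atomically unblock and sleep
 * in sigsuspend() (a sketch; flag_set_by_handler is a made-up flag):
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&old);	// returns -1 with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */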
4866 
4867 #ifdef CONFIG_COMPAT
4868 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4869 {
4870 	sigset_t newset;
4871 
4872 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4873 	if (sigsetsize != sizeof(sigset_t))
4874 		return -EINVAL;
4875 
4876 	if (get_compat_sigset(&newset, unewset))
4877 		return -EFAULT;
4878 	return sigsuspend(&newset);
4879 }
4880 #endif
4881 
4882 #ifdef CONFIG_OLD_SIGSUSPEND
4883 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4884 {
4885 	sigset_t blocked;
4886 	siginitset(&blocked, mask);
4887 	return sigsuspend(&blocked);
4888 }
4889 #endif
4890 #ifdef CONFIG_OLD_SIGSUSPEND3
4891 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4892 {
4893 	sigset_t blocked;
4894 	siginitset(&blocked, mask);
4895 	return sigsuspend(&blocked);
4896 }
4897 #endif
4898 
4899 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4900 {
4901 	return NULL;
4902 }
4903 
4904 static inline void siginfo_buildtime_checks(void)
4905 {
4906 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4907 
4908 	/* Verify the offsets in the two siginfos match */
4909 #define CHECK_OFFSET(field) \
4910 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4911 
4912 	/* kill */
4913 	CHECK_OFFSET(si_pid);
4914 	CHECK_OFFSET(si_uid);
4915 
4916 	/* timer */
4917 	CHECK_OFFSET(si_tid);
4918 	CHECK_OFFSET(si_overrun);
4919 	CHECK_OFFSET(si_value);
4920 
4921 	/* rt */
4922 	CHECK_OFFSET(si_pid);
4923 	CHECK_OFFSET(si_uid);
4924 	CHECK_OFFSET(si_value);
4925 
4926 	/* sigchld */
4927 	CHECK_OFFSET(si_pid);
4928 	CHECK_OFFSET(si_uid);
4929 	CHECK_OFFSET(si_status);
4930 	CHECK_OFFSET(si_utime);
4931 	CHECK_OFFSET(si_stime);
4932 
4933 	/* sigfault */
4934 	CHECK_OFFSET(si_addr);
4935 	CHECK_OFFSET(si_trapno);
4936 	CHECK_OFFSET(si_addr_lsb);
4937 	CHECK_OFFSET(si_lower);
4938 	CHECK_OFFSET(si_upper);
4939 	CHECK_OFFSET(si_pkey);
4940 	CHECK_OFFSET(si_perf_data);
4941 	CHECK_OFFSET(si_perf_type);
4942 	CHECK_OFFSET(si_perf_flags);
4943 
4944 	/* sigpoll */
4945 	CHECK_OFFSET(si_band);
4946 	CHECK_OFFSET(si_fd);
4947 
4948 	/* sigsys */
4949 	CHECK_OFFSET(si_call_addr);
4950 	CHECK_OFFSET(si_syscall);
4951 	CHECK_OFFSET(si_arch);
4952 #undef CHECK_OFFSET
4953 
4954 	/* usb asyncio */
4955 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4956 		     offsetof(struct siginfo, si_addr));
4957 	if (sizeof(int) == sizeof(void __user *)) {
4958 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4959 			     sizeof(void __user *));
4960 	} else {
4961 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4962 			      sizeof_field(struct siginfo, si_uid)) !=
4963 			     sizeof(void __user *));
4964 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4965 			     offsetof(struct siginfo, si_uid));
4966 	}
4967 #ifdef CONFIG_COMPAT
4968 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4969 		     offsetof(struct compat_siginfo, si_addr));
4970 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4971 		     sizeof(compat_uptr_t));
4972 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4973 		     sizeof_field(struct siginfo, si_pid));
4974 #endif
4975 }
4976 
4977 #if defined(CONFIG_SYSCTL)
4978 static const struct ctl_table signal_debug_table[] = {
4979 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4980 	{
4981 		.procname	= "exception-trace",
4982 		.data		= &show_unhandled_signals,
4983 		.maxlen		= sizeof(int),
4984 		.mode		= 0644,
4985 		.proc_handler	= proc_dointvec
4986 	},
4987 #endif
4988 };
4989 
4990 static const struct ctl_table signal_table[] = {
4991 	{
4992 		.procname	= "print-fatal-signals",
4993 		.data		= &print_fatal_signals,
4994 		.maxlen		= sizeof(int),
4995 		.mode		= 0644,
4996 		.proc_handler	= proc_dointvec,
4997 	},
4998 };
4999 
5000 static int __init init_signal_sysctls(void)
5001 {
5002 	register_sysctl_init("debug", signal_debug_table);
5003 	register_sysctl_init("kernel", signal_table);
5004 	return 0;
5005 }
5006 early_initcall(init_signal_sysctls);
5007 #endif /* CONFIG_SYSCTL */
5008 
5009 void __init signals_init(void)
5010 {
5011 	siginfo_buildtime_checks();
5012 
5013 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
5014 }
5015 
5016 #ifdef CONFIG_KGDB_KDB
5017 #include <linux/kdb.h>
5018 /*
5019  * kdb_send_sig - Allows kdb to send signals without exposing
5020  * signal internals.  This function checks if the required locks are
5021  * available before calling the main signal code, to avoid kdb
5022  * deadlocks.
5023  */
5024 void kdb_send_sig(struct task_struct *t, int sig)
5025 {
5026 	static struct task_struct *kdb_prev_t;
5027 	int new_t, ret;
5028 	if (!spin_trylock(&t->sighand->siglock)) {
5029 		kdb_printf("Can't do kill command now.\n"
5030 			   "The sigmask lock is held somewhere else in "
5031 			   "the kernel, try again later\n");
5032 		return;
5033 	}
5034 	new_t = kdb_prev_t != t;
5035 	kdb_prev_t = t;
5036 	if (!task_is_running(t) && new_t) {
5037 		spin_unlock(&t->sighand->siglock);
5038 		kdb_printf("Process is not RUNNING, sending a signal from "
5039 			   "kdb risks deadlock\n"
5040 			   "on the run queue locks. "
5041 			   "The signal has _not_ been sent.\n"
5042 			   "Reissue the kill command if you want to risk "
5043 			   "the deadlock.\n");
5044 		return;
5045 	}
5046 	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
5047 	spin_unlock(&t->sighand->siglock);
5048 	if (ret)
5049 		kdb_printf("Failed to deliver signal %d to process %d.\n",
5050 			   sig, t->pid);
5051 	else
5052 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
5053 }
5054 #endif	/* CONFIG_KGDB_KDB */
5055