xref: /linux/kernel/signal.c (revision 5c0f43e8535d619ff32400e2e916075109fc7a56)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/proc_fs.h>
27 #include <linux/tty.h>
28 #include <linux/binfmts.h>
29 #include <linux/coredump.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/ptrace.h>
33 #include <linux/signal.h>
34 #include <linux/signalfd.h>
35 #include <linux/ratelimit.h>
36 #include <linux/task_work.h>
37 #include <linux/capability.h>
38 #include <linux/freezer.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/nsproxy.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uprobes.h>
43 #include <linux/compat.h>
44 #include <linux/cn_proc.h>
45 #include <linux/compiler.h>
46 #include <linux/posix-timers.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49 #include <linux/sysctl.h>
50 #include <uapi/linux/pidfd.h>
51 
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/signal.h>
54 
55 #include <asm/param.h>
56 #include <linux/uaccess.h>
57 #include <asm/unistd.h>
58 #include <asm/siginfo.h>
59 #include <asm/cacheflush.h>
60 #include <asm/syscall.h>	/* for syscall_get_* */
61 
62 #include "time/posix-timers.h"
63 
64 /*
65  * SLAB caches for signal bits.
66  */
67 
68 static struct kmem_cache *sigqueue_cachep;
69 
70 int print_fatal_signals __read_mostly;
71 
72 static void __user *sig_handler(struct task_struct *t, int sig)
73 {
74 	return t->sighand->action[sig - 1].sa.sa_handler;
75 }
76 
77 static inline bool sig_handler_ignored(void __user *handler, int sig)
78 {
79 	/* Is it explicitly or implicitly ignored? */
80 	return handler == SIG_IGN ||
81 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
82 }
83 
84 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
85 {
86 	void __user *handler;
87 
88 	handler = sig_handler(t, sig);
89 
90 	/* SIGKILL and SIGSTOP may not be sent to the global init */
91 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
92 		return true;
93 
94 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
95 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
96 		return true;
97 
98 	/* Only allow kernel generated signals to this kthread */
99 	if (unlikely((t->flags & PF_KTHREAD) &&
100 		     (handler == SIG_KTHREAD_KERNEL) && !force))
101 		return true;
102 
103 	return sig_handler_ignored(handler, sig);
104 }
105 
106 static bool sig_ignored(struct task_struct *t, int sig, bool force)
107 {
108 	/*
109 	 * Blocked signals are never ignored, since the
110 	 * signal handler may change by the time it is
111 	 * unblocked.
112 	 */
113 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
114 		return false;
115 
116 	/*
117 	 * Tracers may want to know about even ignored signals, unless it
118 	 * is SIGKILL, which can't be reported anyway but can be ignored
119 	 * by a SIGNAL_UNKILLABLE task.
120 	 */
121 	if (t->ptrace && sig != SIGKILL)
122 		return false;
123 
124 	return sig_task_ignored(t, sig, force);
125 }
126 
127 /*
128  * Re-calculate pending state from the set of locally pending
129  * signals, globally pending signals, and blocked signals.
130  */
131 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
132 {
133 	unsigned long ready;
134 	long i;
135 
136 	switch (_NSIG_WORDS) {
137 	default:
138 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
139 			ready |= signal->sig[i] &~ blocked->sig[i];
140 		break;
141 
142 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
143 		ready |= signal->sig[2] &~ blocked->sig[2];
144 		ready |= signal->sig[1] &~ blocked->sig[1];
145 		ready |= signal->sig[0] &~ blocked->sig[0];
146 		break;
147 
148 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
149 		ready |= signal->sig[0] &~ blocked->sig[0];
150 		break;
151 
152 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
153 	}
154 	return ready != 0;
155 }
156 
157 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
158 
159 static bool recalc_sigpending_tsk(struct task_struct *t)
160 {
161 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
162 	    PENDING(&t->pending, &t->blocked) ||
163 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
164 	    cgroup_task_frozen(t)) {
165 		set_tsk_thread_flag(t, TIF_SIGPENDING);
166 		return true;
167 	}
168 
169 	/*
170 	 * We must never clear the flag in another thread, or in current
171 	 * when it's possible the current syscall is returning -ERESTART*.
172 	 * So we don't clear it here; clearing is left to callers who know it is safe.
173 	 */
174 	return false;
175 }
176 
177 void recalc_sigpending(void)
178 {
179 	if (!recalc_sigpending_tsk(current) && !freezing(current)) {
180 		if (unlikely(test_thread_flag(TIF_SIGPENDING)))
181 			clear_thread_flag(TIF_SIGPENDING);
182 	}
183 }
184 EXPORT_SYMBOL(recalc_sigpending);
185 
186 void calculate_sigpending(void)
187 {
188 	/* Have any signals or users of TIF_SIGPENDING been delayed
189 	 * until after fork?
190 	 */
191 	spin_lock_irq(&current->sighand->siglock);
192 	set_tsk_thread_flag(current, TIF_SIGPENDING);
193 	recalc_sigpending();
194 	spin_unlock_irq(&current->sighand->siglock);
195 }
196 
197 /* Given the mask, find the first available signal that should be serviced. */
198 
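/*
 * The synchronous signals are those generated by a fault in the running
 * thread itself (e.g. SIGSEGV from a bad dereference); they are dequeued
 * ahead of any other pending signal so they are handled in the context
 * that caused them.
 */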
199 #define SYNCHRONOUS_MASK \
200 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
201 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
202 
203 int next_signal(struct sigpending *pending, sigset_t *mask)
204 {
205 	unsigned long i, *s, *m, x;
206 	int sig = 0;
207 
208 	s = pending->signal.sig;
209 	m = mask->sig;
210 
211 	/*
212 	 * Handle the first word specially: it contains the
213 	 * synchronous signals that need to be dequeued first.
214 	 */
215 	x = *s &~ *m;
216 	if (x) {
217 		if (x & SYNCHRONOUS_MASK)
218 			x &= SYNCHRONOUS_MASK;
219 		sig = ffz(~x) + 1;
220 		return sig;
221 	}
222 
223 	switch (_NSIG_WORDS) {
224 	default:
225 		for (i = 1; i < _NSIG_WORDS; ++i) {
226 			x = *++s &~ *++m;
227 			if (!x)
228 				continue;
229 			sig = ffz(~x) + i*_NSIG_BPW + 1;
230 			break;
231 		}
232 		break;
233 
234 	case 2:
235 		x = s[1] &~ m[1];
236 		if (!x)
237 			break;
238 		sig = ffz(~x) + _NSIG_BPW + 1;
239 		break;
240 
241 	case 1:
242 		/* Nothing to do */
243 		break;
244 	}
245 
246 	return sig;
247 }
248 
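/*
 * Rate-limited note that a signal was dropped because the per-user
 * RLIMIT_SIGPENDING ceiling was hit; gated on print_fatal_signals.
 */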
249 static inline void print_dropped_signal(int sig)
250 {
251 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
252 
253 	if (!print_fatal_signals)
254 		return;
255 
256 	if (!__ratelimit(&ratelimit_state))
257 		return;
258 
259 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
260 				current->comm, current->pid, sig);
261 }
262 
263 /**
264  * task_set_jobctl_pending - set jobctl pending bits
265  * @task: target task
266  * @mask: pending bits to set
267  *
268  * Set @mask in @task->jobctl.  @mask must be a subset of
269  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
270  * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
271  * cleared.  If @task is already being killed or exiting, this function
272  * becomes a no-op.
273  *
274  * CONTEXT:
275  * Must be called with @task->sighand->siglock held.
276  *
277  * RETURNS:
278  * %true if @mask is set, %false if it was a no-op because @task was dying.
279  */
280 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
281 {
282 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
283 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
284 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
285 
286 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
287 		return false;
288 
289 	if (mask & JOBCTL_STOP_SIGMASK)
290 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
291 
292 	task->jobctl |= mask;
293 	return true;
294 }
295 
296 /**
297  * task_clear_jobctl_trapping - clear jobctl trapping bit
298  * @task: target task
299  *
300  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
301  * Clear it and wake up the ptracer.  Note that we don't need any further
302  * locking.  @task->siglock guarantees that @task->parent points to the
303  * ptracer.
304  *
305  * CONTEXT:
306  * Must be called with @task->sighand->siglock held.
307  */
308 void task_clear_jobctl_trapping(struct task_struct *task)
309 {
310 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
311 		task->jobctl &= ~JOBCTL_TRAPPING;
312 		smp_mb();	/* advised by wake_up_bit() */
313 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
314 	}
315 }
316 
317 /**
318  * task_clear_jobctl_pending - clear jobctl pending bits
319  * @task: target task
320  * @mask: pending bits to clear
321  *
322  * Clear @mask from @task->jobctl.  @mask must be a subset of
323  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
324  * STOP bits are cleared together.
325  *
326  * If clearing of @mask leaves no stop or trap pending, this function calls
327  * task_clear_jobctl_trapping().
328  *
329  * CONTEXT:
330  * Must be called with @task->sighand->siglock held.
331  */
332 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
333 {
334 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
335 
336 	if (mask & JOBCTL_STOP_PENDING)
337 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
338 
339 	task->jobctl &= ~mask;
340 
341 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
342 		task_clear_jobctl_trapping(task);
343 }
344 
345 /**
346  * task_participate_group_stop - participate in a group stop
347  * @task: task participating in a group stop
348  *
349  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
350  * Group stop states are cleared and the group stop count is consumed if
351  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
352  * stop, the appropriate `SIGNAL_*` flags are set.
353  *
354  * CONTEXT:
355  * Must be called with @task->sighand->siglock held.
356  *
357  * RETURNS:
358  * %true if group stop completion should be notified to the parent, %false
359  * otherwise.
360  */
361 static bool task_participate_group_stop(struct task_struct *task)
362 {
363 	struct signal_struct *sig = task->signal;
364 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
365 
366 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
367 
368 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
369 
370 	if (!consume)
371 		return false;
372 
373 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
374 		sig->group_stop_count--;
375 
376 	/*
377 	 * Tell the caller to notify completion iff we are entering into a
378 	 * fresh group stop.  Read comment in do_signal_stop() for details.
379 	 */
380 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
381 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
382 		return true;
383 	}
384 	return false;
385 }
386 
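/*
 * Have a newly cloned thread join a group stop that is in progress (or,
 * if the group is already stopped, enter the stopped state too),
 * inheriting the stop signal number from current via
 * JOBCTL_STOP_SIGMASK.  Called on thread creation with siglock held.
 */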
387 void task_join_group_stop(struct task_struct *task)
388 {
389 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
390 	struct signal_struct *sig = current->signal;
391 
392 	if (sig->group_stop_count) {
393 		sig->group_stop_count++;
394 		mask |= JOBCTL_STOP_CONSUME;
395 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
396 		return;
397 
398 	/* Have the new thread join an on-going signal group stop */
399 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
400 }
401 
402 static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
403 				       int override_rlimit)
404 {
405 	struct ucounts *ucounts;
406 	long sigpending;
407 
408 	/*
409 	 * Protect access to @t credentials. This can go away when all
410 	 * callers hold rcu read lock.
411 	 *
412 	 * NOTE! A pending signal will hold on to the user refcount,
413 	 * and we get/put the refcount only when the sigpending count
414 	 * changes from/to zero.
415 	 */
416 	rcu_read_lock();
417 	ucounts = task_ucounts(t);
418 	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
419 					    override_rlimit);
420 	rcu_read_unlock();
421 	if (!sigpending)
422 		return NULL;
423 
424 	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
425 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
426 		print_dropped_signal(sig);
427 		return NULL;
428 	}
429 
430 	return ucounts;
431 }
432 
433 static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
434 			    const unsigned int sigqueue_flags)
435 {
436 	INIT_LIST_HEAD(&q->list);
437 	q->flags = sigqueue_flags;
438 	q->ucounts = ucounts;
439 }
440 
441 /*
442  * allocate a new signal queue record
443  * - this may be called without locks if and only if t == current, otherwise an
444  *   appropriate lock must be held to stop the target task from exiting
445  */
446 static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
447 				       int override_rlimit)
448 {
449 	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
450 	struct sigqueue *q;
451 
452 	if (!ucounts)
453 		return NULL;
454 
455 	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
456 	if (!q) {
457 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
458 		return NULL;
459 	}
460 
461 	__sigqueue_init(q, ucounts, 0);
462 	return q;
463 }
464 
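/*
 * Release a sigqueue entry: preallocated posix-timer entries are
 * refcounted and only dropped here, everything else returns its
 * RLIMIT_SIGPENDING accounting slot and goes back to the slab cache.
 */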
465 static void __sigqueue_free(struct sigqueue *q)
466 {
467 	if (q->flags & SIGQUEUE_PREALLOC) {
468 		posixtimer_sigqueue_putref(q);
469 		return;
470 	}
471 	if (q->ucounts) {
472 		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
473 		q->ucounts = NULL;
474 	}
475 	kmem_cache_free(sigqueue_cachep, q);
476 }
477 
478 void flush_sigqueue(struct sigpending *queue)
479 {
480 	struct sigqueue *q;
481 
482 	sigemptyset(&queue->signal);
483 	while (!list_empty(&queue->list)) {
484 		q = list_entry(queue->list.next, struct sigqueue, list);
485 		list_del_init(&q->list);
486 		__sigqueue_free(q);
487 	}
488 }
489 
490 /*
491  * Flush all pending signals for this kthread.
492  */
493 void flush_signals(struct task_struct *t)
494 {
495 	unsigned long flags;
496 
497 	spin_lock_irqsave(&t->sighand->siglock, flags);
498 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
499 	flush_sigqueue(&t->pending);
500 	flush_sigqueue(&t->signal->shared_pending);
501 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
502 }
503 EXPORT_SYMBOL(flush_signals);
504 
505 void ignore_signals(struct task_struct *t)
506 {
507 	int i;
508 
509 	for (i = 0; i < _NSIG; ++i)
510 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
511 
512 	flush_signals(t);
513 }
514 
515 /*
516  * Flush all handlers for a task.
517  */
518 
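/*
 * With @force_default unset (the exec path), handlers set to SIG_IGN are
 * preserved, matching the POSIX rule that ignored dispositions survive
 * execve() while caught signals are reset to the default.
 */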
519 void
520 flush_signal_handlers(struct task_struct *t, int force_default)
521 {
522 	int i;
523 	struct k_sigaction *ka = &t->sighand->action[0];
524 	for (i = _NSIG ; i != 0 ; i--) {
525 		if (force_default || ka->sa.sa_handler != SIG_IGN)
526 			ka->sa.sa_handler = SIG_DFL;
527 		ka->sa.sa_flags = 0;
528 #ifdef __ARCH_HAS_SA_RESTORER
529 		ka->sa.sa_restorer = NULL;
530 #endif
531 		sigemptyset(&ka->sa.sa_mask);
532 		ka++;
533 	}
534 }
535 
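/*
 * True when nothing will consume @sig: the task is the global init, or
 * it has no handler installed, is not already dying and is not ptraced.
 * Arch fault code uses this to decide whether to log unhandled faults.
 */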
536 bool unhandled_signal(struct task_struct *tsk, int sig)
537 {
538 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
539 	if (is_global_init(tsk))
540 		return true;
541 
542 	if (handler != SIG_IGN && handler != SIG_DFL)
543 		return false;
544 
545 	/* If dying, we handle all new signals by ignoring them */
546 	if (fatal_signal_pending(tsk))
547 		return false;
548 
549 	/* if ptraced, let the tracer determine */
550 	return !tsk->ptrace;
551 }
552 
553 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
554 			   struct sigqueue **timer_sigq)
555 {
556 	struct sigqueue *q, *first = NULL;
557 
558 	/*
559 	 * Collect the siginfo appropriate to this signal.  Check if
560 	 * there is another siginfo for the same signal.
561 	 */
562 	list_for_each_entry(q, &list->list, list) {
563 		if (q->info.si_signo == sig) {
564 			if (first)
565 				goto still_pending;
566 			first = q;
567 		}
568 	}
569 
570 	sigdelset(&list->signal, sig);
571 
572 	if (first) {
573 still_pending:
574 		list_del_init(&first->list);
575 		copy_siginfo(info, &first->info);
576 
577 		/*
578 		 * posix-timer signals are preallocated and freed when the last
579 		 * reference count is dropped in posixtimer_deliver_signal() or
580 		 * immediately on timer deletion when the signal is not pending.
581 		 * Spare the extra round through __sigqueue_free() which is
582 		 * ignoring preallocated signals.
583 		 */
584 		if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
585 			*timer_sigq = first;
586 		else
587 			__sigqueue_free(first);
588 	} else {
589 		/*
590 		 * Ok, it wasn't in the queue.  This must be
591 		 * a fast-pathed signal or we must have been
592 		 * out of queue space.  So zero out the info.
593 		 */
594 		clear_siginfo(info);
595 		info->si_signo = sig;
596 		info->si_errno = 0;
597 		info->si_code = SI_USER;
598 		info->si_pid = 0;
599 		info->si_uid = 0;
600 	}
601 }
602 
603 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
604 			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
605 {
606 	int sig = next_signal(pending, mask);
607 
608 	if (sig)
609 		collect_signal(sig, pending, info, timer_sigq);
610 	return sig;
611 }
612 
613 /*
614  * Try to dequeue a signal. If a deliverable signal is found fill in the
615  * caller provided siginfo and return the signal number. Otherwise return
616  * 0.
617  */
618 int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
619 {
620 	struct task_struct *tsk = current;
621 	struct sigqueue *timer_sigq;
622 	int signr;
623 
624 	lockdep_assert_held(&tsk->sighand->siglock);
625 
626 again:
627 	*type = PIDTYPE_PID;
628 	timer_sigq = NULL;
629 	signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
630 	if (!signr) {
631 		*type = PIDTYPE_TGID;
632 		signr = __dequeue_signal(&tsk->signal->shared_pending,
633 					 mask, info, &timer_sigq);
634 
635 		if (unlikely(signr == SIGALRM))
636 			posixtimer_rearm_itimer(tsk);
637 	}
638 
639 	recalc_sigpending();
640 	if (!signr)
641 		return 0;
642 
643 	if (unlikely(sig_kernel_stop(signr))) {
644 		/*
645 		 * Set a marker that we have dequeued a stop signal.  Our
646 		 * caller might release the siglock and then the pending
647 		 * stop signal it is about to process is no longer in the
648 		 * pending bitmasks, but must still be cleared by a SIGCONT
649 		 * (and overruled by a SIGKILL).  So those cases clear this
650 		 * shared flag after we've set it.  Note that this flag may
651 		 * remain set after the signal we return is ignored or
652 		 * handled.  That doesn't matter because its only purpose
653 		 * is to alert stop-signal processing code when another
654 		 * processor has come along and cleared the flag.
655 		 */
656 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
657 	}
658 
659 	if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
660 		if (!posixtimer_deliver_signal(info, timer_sigq))
661 			goto again;
662 	}
663 
664 	return signr;
665 }
666 EXPORT_SYMBOL_GPL(dequeue_signal);
667 
668 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
669 {
670 	struct task_struct *tsk = current;
671 	struct sigpending *pending = &tsk->pending;
672 	struct sigqueue *q, *sync = NULL;
673 
674 	/*
675 	 * Might a synchronous signal be in the queue?
676 	 */
677 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
678 		return 0;
679 
680 	/*
681 	 * Return the first synchronous signal in the queue.
682 	 */
683 	list_for_each_entry(q, &pending->list, list) {
684 		/* Synchronous signals have a positive si_code */
685 		if ((q->info.si_code > SI_USER) &&
686 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
687 			sync = q;
688 			goto next;
689 		}
690 	}
691 	return 0;
692 next:
693 	/*
694 	 * Check if there is another siginfo for the same signal.
695 	 */
696 	list_for_each_entry_continue(q, &pending->list, list) {
697 		if (q->info.si_signo == sync->info.si_signo)
698 			goto still_pending;
699 	}
700 
701 	sigdelset(&pending->signal, sync->info.si_signo);
702 	recalc_sigpending();
703 still_pending:
704 	list_del_init(&sync->list);
705 	copy_siginfo(info, &sync->info);
706 	__sigqueue_free(sync);
707 	return info->si_signo;
708 }
709 
710 /*
711  * Tell a process that it has a new active signal.
712  *
713  * NOTE! We rely on the previous spin_lock to
714  * lock interrupts for us! We can only be called with
715  * "siglock" held, and local interrupts must
716  * have been disabled when that got acquired!
717  *
718  * No need to set need_resched since signal event passing
719  * goes through ->blocked
720  */
721 void signal_wake_up_state(struct task_struct *t, unsigned int state)
722 {
723 	lockdep_assert_held(&t->sighand->siglock);
724 
725 	set_tsk_thread_flag(t, TIF_SIGPENDING);
726 
727 	/*
728 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
729 	 * case. We don't check t->state here because there is a race with it
730 	 * executing on another processor and just now entering stopped state.
731 	 * By using wake_up_state, we ensure the process will wake up and
732 	 * handle its death signal.
733 	 */
734 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
735 		kick_process(t);
736 }
737 
738 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);
739 
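/*
 * Preallocated posix-timer entries that are flushed because the signal
 * became ignored need their timer bookkeeping updated; everything else
 * is freed normally.
 */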
740 static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
741 {
742 	if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
743 		__sigqueue_free(q);
744 	else
745 		posixtimer_sig_ignore(tsk, q);
746 }
747 
748 /* Remove signals in mask from the pending set and queue. */
749 static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
750 {
751 	struct sigqueue *q, *n;
752 	sigset_t m;
753 
754 	lockdep_assert_held(&p->sighand->siglock);
755 
756 	sigandsets(&m, mask, &s->signal);
757 	if (sigisemptyset(&m))
758 		return;
759 
760 	sigandnsets(&s->signal, &s->signal, mask);
761 	list_for_each_entry_safe(q, n, &s->list, list) {
762 		if (sigismember(mask, q->info.si_signo)) {
763 			list_del_init(&q->list);
764 			sigqueue_free_ignored(p, q);
765 		}
766 	}
767 }
768 
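/*
 * SEND_SIG_NOINFO and SEND_SIG_PRIV are special siginfo pointer values
 * (0 and 1 respectively), so a plain pointer comparison identifies them
 * before the struct is ever dereferenced.
 */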
769 static inline int is_si_special(const struct kernel_siginfo *info)
770 {
771 	return info <= SEND_SIG_PRIV;
772 }
773 
774 static inline bool si_fromuser(const struct kernel_siginfo *info)
775 {
776 	return info == SEND_SIG_NOINFO ||
777 		(!is_si_special(info) && SI_FROMUSER(info));
778 }
779 
780 /*
781  * called with RCU read lock from check_kill_permission()
782  */
783 static bool kill_ok_by_cred(struct task_struct *t)
784 {
785 	const struct cred *cred = current_cred();
786 	const struct cred *tcred = __task_cred(t);
787 
788 	return uid_eq(cred->euid, tcred->suid) ||
789 	       uid_eq(cred->euid, tcred->uid) ||
790 	       uid_eq(cred->uid, tcred->suid) ||
791 	       uid_eq(cred->uid, tcred->uid) ||
792 	       ns_capable(tcred->user_ns, CAP_KILL);
793 }
794 
795 /*
796  * Bad permissions for sending the signal
797  * - the caller must hold the RCU read lock
798  */
799 static int check_kill_permission(int sig, struct kernel_siginfo *info,
800 				 struct task_struct *t)
801 {
802 	struct pid *sid;
803 	int error;
804 
805 	if (!valid_signal(sig))
806 		return -EINVAL;
807 
808 	if (!si_fromuser(info))
809 		return 0;
810 
811 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
812 	if (error)
813 		return error;
814 
815 	if (!same_thread_group(current, t) &&
816 	    !kill_ok_by_cred(t)) {
817 		switch (sig) {
818 		case SIGCONT:
819 			sid = task_session(t);
820 			/*
821 			 * We don't return the error if sid == NULL. The
822 			 * task was unhashed, the caller must notice this.
823 			 */
824 			if (!sid || sid == task_session(current))
825 				break;
826 			fallthrough;
827 		default:
828 			return -EPERM;
829 		}
830 	}
831 
832 	return security_task_kill(t, info, sig, NULL);
833 }
834 
835 /**
836  * ptrace_trap_notify - schedule trap to notify ptracer
837  * @t: tracee wanting to notify tracer
838  *
839  * This function schedules sticky ptrace trap which is cleared on the next
840  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
841  * ptracer.
842  *
843  * If @t is running, STOP trap will be taken.  If trapped for STOP and
844  * ptracer is listening for events, tracee is woken up so that it can
845  * re-trap for the new event.  If trapped otherwise, STOP trap will be
846  * eventually taken without returning to userland after the existing traps
847  * are finished by PTRACE_CONT.
848  *
849  * CONTEXT:
850  * Must be called with @t->sighand->siglock held.
851  */
852 static void ptrace_trap_notify(struct task_struct *t)
853 {
854 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
855 	lockdep_assert_held(&t->sighand->siglock);
856 
857 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
858 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
859 }
860 
861 /*
862  * Handle magic process-wide effects of stop/continue signals. Unlike
863  * the signal actions, these happen immediately at signal-generation
864  * time regardless of blocking, ignoring, or handling.  This does the
865  * actual continuing for SIGCONT, but not the actual stopping for stop
866  * signals. The process stop is done as a signal action for SIG_DFL.
867  *
868  * Returns true if the signal should be actually delivered, otherwise
869  * it should be dropped.
870  */
871 static bool prepare_signal(int sig, struct task_struct *p, bool force)
872 {
873 	struct signal_struct *signal = p->signal;
874 	struct task_struct *t;
875 	sigset_t flush;
876 
877 	if (signal->flags & SIGNAL_GROUP_EXIT) {
878 		if (signal->core_state)
879 			return sig == SIGKILL;
880 		/*
881 		 * The process is in the middle of dying, drop the signal.
882 		 */
883 		return false;
884 	} else if (sig_kernel_stop(sig)) {
885 		/*
886 		 * This is a stop signal.  Remove SIGCONT from all queues.
887 		 */
888 		siginitset(&flush, sigmask(SIGCONT));
889 		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
890 		for_each_thread(p, t)
891 			flush_sigqueue_mask(p, &flush, &t->pending);
892 	} else if (sig == SIGCONT) {
893 		unsigned int why;
894 		/*
895 		 * Remove all stop signals from all queues, wake all threads.
896 		 */
897 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
898 		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
899 		for_each_thread(p, t) {
900 			flush_sigqueue_mask(p, &flush, &t->pending);
901 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
902 			if (likely(!(t->ptrace & PT_SEIZED))) {
903 				t->jobctl &= ~JOBCTL_STOPPED;
904 				wake_up_state(t, __TASK_STOPPED);
905 			} else
906 				ptrace_trap_notify(t);
907 		}
908 
909 		/*
910 		 * Notify the parent with CLD_CONTINUED if we were stopped.
911 		 *
912 		 * If we were in the middle of a group stop, we pretend it
913 		 * was already finished, and then continued. Since SIGCHLD
914 		 * doesn't queue we report only CLD_STOPPED, as if the next
915 		 * CLD_CONTINUED was dropped.
916 		 */
917 		why = 0;
918 		if (signal->flags & SIGNAL_STOP_STOPPED)
919 			why |= SIGNAL_CLD_CONTINUED;
920 		else if (signal->group_stop_count)
921 			why |= SIGNAL_CLD_STOPPED;
922 
923 		if (why) {
924 			/*
925 			 * The first thread which returns from do_signal_stop()
926 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
927 			 * notify its parent. See get_signal().
928 			 */
929 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
930 			signal->group_stop_count = 0;
931 			signal->group_exit_code = 0;
932 		}
933 	}
934 
935 	return !sig_ignored(p, sig, force);
936 }
937 
938 /*
939  * Test if P wants to take SIG.  After we've checked all threads with this,
940  * it's equivalent to finding no threads not blocking SIG.  Any threads not
941  * blocking SIG were ruled out because they are not running and already
942  * have pending signals.  Such threads will dequeue from the shared queue
943  * as soon as they're available, so putting the signal on the shared queue
944  * will be equivalent to sending it to one such thread.
945  */
946 static inline bool wants_signal(int sig, struct task_struct *p)
947 {
948 	if (sigismember(&p->blocked, sig))
949 		return false;
950 
951 	if (p->flags & PF_EXITING)
952 		return false;
953 
954 	if (sig == SIGKILL)
955 		return true;
956 
957 	if (task_is_stopped_or_traced(p))
958 		return false;
959 
960 	return task_curr(p) || !task_sigpending(p);
961 }
962 
963 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
964 {
965 	struct signal_struct *signal = p->signal;
966 	struct task_struct *t;
967 
968 	/*
969 	 * Now find a thread we can wake up to take the signal off the queue.
970 	 *
971 	 * Try the suggested task first (may or may not be the main thread).
972 	 */
973 	if (wants_signal(sig, p))
974 		t = p;
975 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
976 		/*
977 		 * There is just one thread and it does not need to be woken.
978 		 * It will dequeue unblocked signals before it runs again.
979 		 */
980 		return;
981 	else {
982 		/*
983 		 * Otherwise try to find a suitable thread.
984 		 */
985 		t = signal->curr_target;
986 		while (!wants_signal(sig, t)) {
987 			t = next_thread(t);
988 			if (t == signal->curr_target)
989 				/*
990 				 * No thread needs to be woken.
991 				 * Any eligible threads will see
992 				 * the signal in the queue soon.
993 				 */
994 				return;
995 		}
996 		signal->curr_target = t;
997 	}
998 
999 	/*
1000 	 * Found a killable thread.  If the signal will be fatal,
1001 	 * then start taking the whole group down immediately.
1002 	 */
1003 	if (sig_fatal(p, sig) &&
1004 	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1005 	    !sigismember(&t->real_blocked, sig) &&
1006 	    (sig == SIGKILL || !p->ptrace)) {
1007 		/*
1008 		 * This signal will be fatal to the whole group.
1009 		 */
1010 		if (!sig_kernel_coredump(sig)) {
1011 			/*
1012 			 * Start a group exit and wake everybody up.
1013 			 * This way we don't have other threads
1014 			 * running and doing things after a slower
1015 			 * thread has the fatal signal pending.
1016 			 */
1017 			signal->flags = SIGNAL_GROUP_EXIT;
1018 			signal->group_exit_code = sig;
1019 			signal->group_stop_count = 0;
1020 			__for_each_thread(signal, t) {
1021 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1022 				sigaddset(&t->pending.signal, SIGKILL);
1023 				signal_wake_up(t, 1);
1024 			}
1025 			return;
1026 		}
1027 	}
1028 
1029 	/*
1030 	 * The signal is already in the shared-pending queue.
1031 	 * Tell the chosen thread to wake up and dequeue it.
1032 	 */
1033 	signal_wake_up(t, sig == SIGKILL);
1034 	return;
1035 }
1036 
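/*
 * Legacy (non real-time) signals coalesce: a signal below SIGRTMIN that
 * is already pending is not queued a second time.
 */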
1037 static inline bool legacy_queue(struct sigpending *signals, int sig)
1038 {
1039 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1040 }
1041 
1042 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1043 				struct task_struct *t, enum pid_type type, bool force)
1044 {
1045 	struct sigpending *pending;
1046 	struct sigqueue *q;
1047 	int override_rlimit;
1048 	int ret = 0, result;
1049 
1050 	lockdep_assert_held(&t->sighand->siglock);
1051 
1052 	result = TRACE_SIGNAL_IGNORED;
1053 	if (!prepare_signal(sig, t, force))
1054 		goto ret;
1055 
1056 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1057 	/*
1058 	 * Short-circuit ignored signals and support queuing
1059 	 * exactly one non-rt signal, so that we can get more
1060 	 * detailed information about the cause of the signal.
1061 	 */
1062 	result = TRACE_SIGNAL_ALREADY_PENDING;
1063 	if (legacy_queue(pending, sig))
1064 		goto ret;
1065 
1066 	result = TRACE_SIGNAL_DELIVERED;
1067 	/*
1068 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1069 	 */
1070 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1071 		goto out_set;
1072 
1073 	/*
1074 	 * Real-time signals must be queued if sent by sigqueue, or
1075 	 * some other real-time mechanism.  It is implementation
1076 	 * defined whether kill() does so.  We attempt to do so, on
1077 	 * the principle of least surprise, but since kill is not
1078 	 * allowed to fail with EAGAIN when low on memory we just
1079 	 * make sure at least one signal gets delivered and don't
1080 	 * pass on the info struct.
1081 	 */
1082 	if (sig < SIGRTMIN)
1083 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1084 	else
1085 		override_rlimit = 0;
1086 
1087 	q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1088 
1089 	if (q) {
1090 		list_add_tail(&q->list, &pending->list);
1091 		switch ((unsigned long) info) {
1092 		case (unsigned long) SEND_SIG_NOINFO:
1093 			clear_siginfo(&q->info);
1094 			q->info.si_signo = sig;
1095 			q->info.si_errno = 0;
1096 			q->info.si_code = SI_USER;
1097 			q->info.si_pid = task_tgid_nr_ns(current,
1098 							task_active_pid_ns(t));
1099 			rcu_read_lock();
1100 			q->info.si_uid =
1101 				from_kuid_munged(task_cred_xxx(t, user_ns),
1102 						 current_uid());
1103 			rcu_read_unlock();
1104 			break;
1105 		case (unsigned long) SEND_SIG_PRIV:
1106 			clear_siginfo(&q->info);
1107 			q->info.si_signo = sig;
1108 			q->info.si_errno = 0;
1109 			q->info.si_code = SI_KERNEL;
1110 			q->info.si_pid = 0;
1111 			q->info.si_uid = 0;
1112 			break;
1113 		default:
1114 			copy_siginfo(&q->info, info);
1115 			break;
1116 		}
1117 	} else if (!is_si_special(info) &&
1118 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1119 		/*
1120 		 * Queue overflow, abort.  We only abort if the
1121 		 * signal was rt and sent by user using something
1122 		 * other than kill().
1123 		 */
1124 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1125 		ret = -EAGAIN;
1126 		goto ret;
1127 	} else {
1128 		/*
1129 		 * This is a silent loss of information.  We still
1130 		 * send the signal, but the *info bits are lost.
1131 		 */
1132 		result = TRACE_SIGNAL_LOSE_INFO;
1133 	}
1134 
1135 out_set:
1136 	signalfd_notify(t, sig);
1137 	sigaddset(&pending->signal, sig);
1138 
1139 	/* Let multiprocess signals appear after on-going forks */
1140 	if (type > PIDTYPE_TGID) {
1141 		struct multiprocess_signals *delayed;
1142 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1143 			sigset_t *signal = &delayed->signal;
1144 			/* Can't queue both a stop and a continue signal */
1145 			if (sig == SIGCONT)
1146 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1147 			else if (sig_kernel_stop(sig))
1148 				sigdelset(signal, SIGCONT);
1149 			sigaddset(signal, sig);
1150 		}
1151 	}
1152 
1153 	complete_signal(sig, t, type);
1154 ret:
1155 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1156 	return ret;
1157 }
1158 
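/*
 * Does the siginfo layout for this signal carry si_pid/si_uid fields
 * that may need translating into the target's pid and user namespaces?
 */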
1159 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1160 {
1161 	bool ret = false;
1162 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1163 	case SIL_KILL:
1164 	case SIL_CHLD:
1165 	case SIL_RT:
1166 		ret = true;
1167 		break;
1168 	case SIL_TIMER:
1169 	case SIL_POLL:
1170 	case SIL_FAULT:
1171 	case SIL_FAULT_TRAPNO:
1172 	case SIL_FAULT_MCEERR:
1173 	case SIL_FAULT_BNDERR:
1174 	case SIL_FAULT_PKUERR:
1175 	case SIL_FAULT_PERF_EVENT:
1176 	case SIL_SYS:
1177 		ret = false;
1178 		break;
1179 	}
1180 	return ret;
1181 }
1182 
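/*
 * Fix up si_pid/si_uid for the target's namespaces and work out whether
 * the signal must be forced past an ignoring or unkillable target
 * before handing off to __send_signal_locked().
 */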
1183 int send_signal_locked(int sig, struct kernel_siginfo *info,
1184 		       struct task_struct *t, enum pid_type type)
1185 {
1186 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1187 	bool force = false;
1188 
1189 	if (info == SEND_SIG_NOINFO) {
1190 		/* Force if sent from an ancestor pid namespace */
1191 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1192 	} else if (info == SEND_SIG_PRIV) {
1193 		/* Don't ignore kernel generated signals */
1194 		force = true;
1195 	} else if (has_si_pid_and_uid(info)) {
1196 		/* SIGKILL and SIGSTOP are special or have ids */
1197 		struct user_namespace *t_user_ns;
1198 
1199 		rcu_read_lock();
1200 		t_user_ns = task_cred_xxx(t, user_ns);
1201 		if (current_user_ns() != t_user_ns) {
1202 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1203 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1204 		}
1205 		rcu_read_unlock();
1206 
1207 		/* A kernel generated signal? */
1208 		force = (info->si_code == SI_KERNEL);
1209 
1210 		/* From an ancestor pid namespace? */
1211 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1212 			info->si_pid = 0;
1213 			force = true;
1214 		}
1215 	}
1216 	return __send_signal_locked(sig, info, t, type, force);
1217 }
1218 
1219 static void print_fatal_signal(int signr)
1220 {
1221 	struct pt_regs *regs = task_pt_regs(current);
1222 	struct file *exe_file;
1223 
1224 	exe_file = get_task_exe_file(current);
1225 	if (exe_file) {
1226 		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1227 			exe_file, current->comm, signr);
1228 		fput(exe_file);
1229 	} else {
1230 		pr_info("%s: potentially unexpected fatal signal %d.\n",
1231 			current->comm, signr);
1232 	}
1233 
1234 #if defined(__i386__) && !defined(__arch_um__)
1235 	pr_info("code at %08lx: ", regs->ip);
1236 	{
1237 		int i;
1238 		for (i = 0; i < 16; i++) {
1239 			unsigned char insn;
1240 
1241 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1242 				break;
1243 			pr_cont("%02x ", insn);
1244 		}
1245 	}
1246 	pr_cont("\n");
1247 #endif
1248 	preempt_disable();
1249 	show_regs(regs);
1250 	preempt_enable();
1251 }
1252 
1253 static int __init setup_print_fatal_signals(char *str)
1254 {
1255 	get_option(&str, &print_fatal_signals);
1256 
1257 	return 1;
1258 }
1259 
1260 __setup("print-fatal-signals=", setup_print_fatal_signals);
1261 
1262 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1263 			enum pid_type type)
1264 {
1265 	unsigned long flags;
1266 	int ret = -ESRCH;
1267 
1268 	if (lock_task_sighand(p, &flags)) {
1269 		ret = send_signal_locked(sig, info, p, type);
1270 		unlock_task_sighand(p, &flags);
1271 	}
1272 
1273 	return ret;
1274 }
1275 
1276 enum sig_handler {
1277 	HANDLER_CURRENT, /* If reachable use the current handler */
1278 	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1279 	HANDLER_EXIT,	 /* Only visible as the process exit code */
1280 };
1281 
1282 /*
1283  * Force a signal that the process can't ignore: if necessary
1284  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1285  *
1286  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1287  * since we do not want to have a signal handler that was blocked
1288  * be invoked when user space had explicitly blocked it.
1289  *
1290  * We don't want to have recursive SIGSEGV's etc, for example,
1291  * that is why we also clear SIGNAL_UNKILLABLE.
1292  */
1293 static int
1294 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1295 	enum sig_handler handler)
1296 {
1297 	unsigned long flags;
1298 	int ret, blocked, ignored;
1299 	struct k_sigaction *action;
1300 	int sig = info->si_signo;
1301 
1302 	spin_lock_irqsave(&t->sighand->siglock, flags);
1303 	action = &t->sighand->action[sig-1];
1304 	ignored = action->sa.sa_handler == SIG_IGN;
1305 	blocked = sigismember(&t->blocked, sig);
1306 	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1307 		action->sa.sa_handler = SIG_DFL;
1308 		if (handler == HANDLER_EXIT)
1309 			action->sa.sa_flags |= SA_IMMUTABLE;
1310 		if (blocked)
1311 			sigdelset(&t->blocked, sig);
1312 	}
1313 	/*
1314 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1315 	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1316 	 */
1317 	if (action->sa.sa_handler == SIG_DFL &&
1318 	    (!t->ptrace || (handler == HANDLER_EXIT)))
1319 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1320 	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1321 	/* This can happen if the signal was already pending and blocked */
1322 	if (!task_sigpending(t))
1323 		signal_wake_up(t, 0);
1324 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1325 
1326 	return ret;
1327 }
1328 
1329 int force_sig_info(struct kernel_siginfo *info)
1330 {
1331 	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1332 }
1333 
1334 /*
1335  * Nuke all other threads in the group.
1336  */
1337 int zap_other_threads(struct task_struct *p)
1338 {
1339 	struct task_struct *t;
1340 	int count = 0;
1341 
1342 	p->signal->group_stop_count = 0;
1343 
1344 	for_other_threads(p, t) {
1345 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1346 		count++;
1347 
1348 		/* Don't bother with already dead threads */
1349 		if (t->exit_state)
1350 			continue;
1351 		sigaddset(&t->pending.signal, SIGKILL);
1352 		signal_wake_up(t, 1);
1353 	}
1354 
1355 	return count;
1356 }
1357 
1358 struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
1359 					 unsigned long *flags)
1360 {
1361 	struct sighand_struct *sighand;
1362 
1363 	rcu_read_lock();
1364 	for (;;) {
1365 		sighand = rcu_dereference(tsk->sighand);
1366 		if (unlikely(sighand == NULL))
1367 			break;
1368 
1369 		/*
1370 		 * This sighand can be already freed and even reused, but
1371 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1372 		 * initializes ->siglock: this slab can't go away, it has
1373 		 * the same object type, ->siglock can't be reinitialized.
1374 		 *
1375 		 * We need to ensure that tsk->sighand is still the same
1376 	 * after we take the lock; we can race with de_thread() or
1377 		 * __exit_signal(). In the latter case the next iteration
1378 		 * must see ->sighand == NULL.
1379 		 */
1380 		spin_lock_irqsave(&sighand->siglock, *flags);
1381 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1382 			break;
1383 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1384 	}
1385 	rcu_read_unlock();
1386 
1387 	return sighand;
1388 }
1389 
1390 #ifdef CONFIG_LOCKDEP
1391 void lockdep_assert_task_sighand_held(struct task_struct *task)
1392 {
1393 	struct sighand_struct *sighand;
1394 
1395 	rcu_read_lock();
1396 	sighand = rcu_dereference(task->sighand);
1397 	if (sighand)
1398 		lockdep_assert_held(&sighand->siglock);
1399 	else
1400 		WARN_ON_ONCE(1);
1401 	rcu_read_unlock();
1402 }
1403 #endif
1404 
1405 /*
1406  * send signal info to all the members of a thread group or to the
1407  * individual thread if type == PIDTYPE_PID.
1408  */
1409 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1410 			struct task_struct *p, enum pid_type type)
1411 {
1412 	int ret;
1413 
1414 	rcu_read_lock();
1415 	ret = check_kill_permission(sig, info, p);
1416 	rcu_read_unlock();
1417 
1418 	if (!ret && sig)
1419 		ret = do_send_sig_info(sig, info, p, type);
1420 
1421 	return ret;
1422 }
1423 
1424 /*
1425  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1426  * control characters do (^C, ^Z etc)
1427  * - the caller must hold at least a readlock on tasklist_lock
1428  */
1429 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1430 {
1431 	struct task_struct *p = NULL;
1432 	int ret = -ESRCH;
1433 
1434 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1435 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1436 		/*
1437 		 * If group_send_sig_info() succeeds at least once ret
1438 		 * becomes 0 and after that the code below has no effect.
1439 		 * Otherwise we return the last err or -ESRCH if this
1440 		 * process group is empty.
1441 		 */
1442 		if (ret)
1443 			ret = err;
1444 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1445 
1446 	return ret;
1447 }
1448 
1449 static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
1450 				struct pid *pid, enum pid_type type)
1451 {
1452 	int error = -ESRCH;
1453 	struct task_struct *p;
1454 
1455 	for (;;) {
1456 		rcu_read_lock();
1457 		p = pid_task(pid, PIDTYPE_PID);
1458 		if (p)
1459 			error = group_send_sig_info(sig, info, p, type);
1460 		rcu_read_unlock();
1461 		if (likely(!p || error != -ESRCH))
1462 			return error;
1463 		/*
1464 		 * The task was unhashed in between, try again.  If it
1465 		 * is dead, pid_task() will return NULL, if we race with
1466 		 * de_thread() it will find the new leader.
1467 		 */
1468 	}
1469 }
1470 
1471 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1472 {
1473 	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
1474 }
1475 
1476 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1477 {
1478 	int error;
1479 	rcu_read_lock();
1480 	error = kill_pid_info(sig, info, find_vpid(pid));
1481 	rcu_read_unlock();
1482 	return error;
1483 }
1484 
1485 static inline bool kill_as_cred_perm(const struct cred *cred,
1486 				     struct task_struct *target)
1487 {
1488 	const struct cred *pcred = __task_cred(target);
1489 
1490 	return uid_eq(cred->euid, pcred->suid) ||
1491 	       uid_eq(cred->euid, pcred->uid) ||
1492 	       uid_eq(cred->uid, pcred->suid) ||
1493 	       uid_eq(cred->uid, pcred->uid);
1494 }
1495 
1496 /*
1497  * The usb asyncio usage of siginfo is wrong.  The glibc support
1498  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1499  * AKA after the generic fields:
1500  *	kernel_pid_t	si_pid;
1501  *	kernel_uid32_t	si_uid;
1502  *	sigval_t	si_value;
1503  *
1504  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1505  * after the generic fields is:
1506  *	void __user 	*si_addr;
1507  *
1508  * This is a practical problem when there is a 64bit big endian kernel
1509  * and a 32bit userspace.  As the 32bit address will be encoded in the low
1510  * 32bits of the pointer, those low 32bits will be stored at a higher
1511  * address than they would appear in a 32bit pointer.  So userspace will
1512  * not see the address it was expecting for its completions.
1513  *
1514  * There is nothing in the encoding that can allow
1515  * copy_siginfo_to_user32 to detect this confusion of formats, so
1516  * handle this by requiring the caller of kill_pid_usb_asyncio to
1517  * notice when this situation takes place and to store the 32bit
1518  * pointer in sival_int, instead of sival_addr of the sigval_t addr
1519  * parameter.
1520  */
1521 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1522 			 struct pid *pid, const struct cred *cred)
1523 {
1524 	struct kernel_siginfo info;
1525 	struct task_struct *p;
1526 	unsigned long flags;
1527 	int ret = -EINVAL;
1528 
1529 	if (!valid_signal(sig))
1530 		return ret;
1531 
1532 	clear_siginfo(&info);
1533 	info.si_signo = sig;
1534 	info.si_errno = errno;
1535 	info.si_code = SI_ASYNCIO;
1536 	*((sigval_t *)&info.si_pid) = addr;
1537 
1538 	rcu_read_lock();
1539 	p = pid_task(pid, PIDTYPE_PID);
1540 	if (!p) {
1541 		ret = -ESRCH;
1542 		goto out_unlock;
1543 	}
1544 	if (!kill_as_cred_perm(cred, p)) {
1545 		ret = -EPERM;
1546 		goto out_unlock;
1547 	}
1548 	ret = security_task_kill(p, &info, sig, cred);
1549 	if (ret)
1550 		goto out_unlock;
1551 
1552 	if (sig) {
1553 		if (lock_task_sighand(p, &flags)) {
1554 			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1555 			unlock_task_sighand(p, &flags);
1556 		} else
1557 			ret = -ESRCH;
1558 	}
1559 out_unlock:
1560 	rcu_read_unlock();
1561 	return ret;
1562 }
1563 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1564 
1565 /*
1566  * kill_something_info() interprets pid in interesting ways just like kill(2).
1567  *
1568  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1569  * is probably wrong.  Should make it like BSD or SYSV.
1570  */
1571 
1572 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1573 {
1574 	int ret;
1575 
1576 	if (pid > 0)
1577 		return kill_proc_info(sig, info, pid);
1578 
1579 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1580 	if (pid == INT_MIN)
1581 		return -ESRCH;
1582 
1583 	read_lock(&tasklist_lock);
1584 	if (pid != -1) {
1585 		ret = __kill_pgrp_info(sig, info,
1586 				pid ? find_vpid(-pid) : task_pgrp(current));
1587 	} else {
1588 		int retval = 0, count = 0;
1589 		struct task_struct *p;
1590 
1591 		for_each_process(p) {
1592 			if (task_pid_vnr(p) > 1 &&
1593 					!same_thread_group(p, current)) {
1594 				int err = group_send_sig_info(sig, info, p,
1595 							      PIDTYPE_MAX);
1596 				++count;
1597 				if (err != -EPERM)
1598 					retval = err;
1599 			}
1600 		}
1601 		ret = count ? retval : -ESRCH;
1602 	}
1603 	read_unlock(&tasklist_lock);
1604 
1605 	return ret;
1606 }
1607 
1608 /*
1609  * These are for backward compatibility with the rest of the kernel source.
1610  */
1611 
1612 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1613 {
1614 	/*
1615 	 * Make sure legacy kernel users don't send in bad values
1616 	 * (normal paths check this in check_kill_permission).
1617 	 */
1618 	if (!valid_signal(sig))
1619 		return -EINVAL;
1620 
1621 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1622 }
1623 EXPORT_SYMBOL(send_sig_info);
1624 
1625 #define __si_special(priv) \
1626 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1627 
1628 int
1629 send_sig(int sig, struct task_struct *p, int priv)
1630 {
1631 	return send_sig_info(sig, __si_special(priv), p);
1632 }
1633 EXPORT_SYMBOL(send_sig);
1634 
1635 void force_sig(int sig)
1636 {
1637 	struct kernel_siginfo info;
1638 
1639 	clear_siginfo(&info);
1640 	info.si_signo = sig;
1641 	info.si_errno = 0;
1642 	info.si_code = SI_KERNEL;
1643 	info.si_pid = 0;
1644 	info.si_uid = 0;
1645 	force_sig_info(&info);
1646 }
1647 EXPORT_SYMBOL(force_sig);
1648 
1649 void force_fatal_sig(int sig)
1650 {
1651 	struct kernel_siginfo info;
1652 
1653 	clear_siginfo(&info);
1654 	info.si_signo = sig;
1655 	info.si_errno = 0;
1656 	info.si_code = SI_KERNEL;
1657 	info.si_pid = 0;
1658 	info.si_uid = 0;
1659 	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1660 }
1661 
1662 void force_exit_sig(int sig)
1663 {
1664 	struct kernel_siginfo info;
1665 
1666 	clear_siginfo(&info);
1667 	info.si_signo = sig;
1668 	info.si_errno = 0;
1669 	info.si_code = SI_KERNEL;
1670 	info.si_pid = 0;
1671 	info.si_uid = 0;
1672 	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1673 }
1674 
1675 /*
1676  * When things go south during signal handling, we
1677  * will force a SIGSEGV. And if the signal that caused
1678  * the problem was already a SIGSEGV, we'll want to
1679  * make sure we don't even try to deliver the signal..
1680  */
1681 void force_sigsegv(int sig)
1682 {
1683 	if (sig == SIGSEGV)
1684 		force_fatal_sig(SIGSEGV);
1685 	else
1686 		force_sig(SIGSEGV);
1687 }
1688 
1689 int force_sig_fault_to_task(int sig, int code, void __user *addr,
1690 			    struct task_struct *t)
1691 {
1692 	struct kernel_siginfo info;
1693 
1694 	clear_siginfo(&info);
1695 	info.si_signo = sig;
1696 	info.si_errno = 0;
1697 	info.si_code  = code;
1698 	info.si_addr  = addr;
1699 	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1700 }
1701 
1702 int force_sig_fault(int sig, int code, void __user *addr)
1703 {
1704 	return force_sig_fault_to_task(sig, code, addr, current);
1705 }
1706 
1707 int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1708 {
1709 	struct kernel_siginfo info;
1710 
1711 	clear_siginfo(&info);
1712 	info.si_signo = sig;
1713 	info.si_errno = 0;
1714 	info.si_code  = code;
1715 	info.si_addr  = addr;
1716 	return send_sig_info(info.si_signo, &info, t);
1717 }
1718 
1719 int force_sig_mceerr(int code, void __user *addr, short lsb)
1720 {
1721 	struct kernel_siginfo info;
1722 
1723 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1724 	clear_siginfo(&info);
1725 	info.si_signo = SIGBUS;
1726 	info.si_errno = 0;
1727 	info.si_code = code;
1728 	info.si_addr = addr;
1729 	info.si_addr_lsb = lsb;
1730 	return force_sig_info(&info);
1731 }
1732 
1733 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1734 {
1735 	struct kernel_siginfo info;
1736 
1737 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1738 	clear_siginfo(&info);
1739 	info.si_signo = SIGBUS;
1740 	info.si_errno = 0;
1741 	info.si_code = code;
1742 	info.si_addr = addr;
1743 	info.si_addr_lsb = lsb;
1744 	return send_sig_info(info.si_signo, &info, t);
1745 }
1746 EXPORT_SYMBOL(send_sig_mceerr);
1747 
1748 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1749 {
1750 	struct kernel_siginfo info;
1751 
1752 	clear_siginfo(&info);
1753 	info.si_signo = SIGSEGV;
1754 	info.si_errno = 0;
1755 	info.si_code  = SEGV_BNDERR;
1756 	info.si_addr  = addr;
1757 	info.si_lower = lower;
1758 	info.si_upper = upper;
1759 	return force_sig_info(&info);
1760 }
1761 
1762 #ifdef SEGV_PKUERR
1763 int force_sig_pkuerr(void __user *addr, u32 pkey)
1764 {
1765 	struct kernel_siginfo info;
1766 
1767 	clear_siginfo(&info);
1768 	info.si_signo = SIGSEGV;
1769 	info.si_errno = 0;
1770 	info.si_code  = SEGV_PKUERR;
1771 	info.si_addr  = addr;
1772 	info.si_pkey  = pkey;
1773 	return force_sig_info(&info);
1774 }
1775 #endif
1776 
1777 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1778 {
1779 	struct kernel_siginfo info;
1780 
1781 	clear_siginfo(&info);
1782 	info.si_signo     = SIGTRAP;
1783 	info.si_errno     = 0;
1784 	info.si_code      = TRAP_PERF;
1785 	info.si_addr      = addr;
1786 	info.si_perf_data = sig_data;
1787 	info.si_perf_type = type;
1788 
1789 	/*
1790 	 * Signals generated by perf events should not terminate the whole
1791 	 * process if SIGTRAP is blocked; however, delivering the signal
1792 	 * asynchronously is better than not delivering at all. But tell user
1793 	 * space if the signal was asynchronous, so it can clearly be
1794 	 * distinguished from normal synchronous ones.
1795 	 */
1796 	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1797 				     TRAP_PERF_FLAG_ASYNC :
1798 				     0;
1799 
1800 	return send_sig_info(info.si_signo, &info, current);
1801 }
1802 
1803 /**
1804  * force_sig_seccomp - signals the task to allow in-process syscall emulation
1805  * @syscall: syscall number to send to userland
1806  * @reason: filter-supplied reason code to send to userland (via si_errno)
1807  * @force_coredump: true to trigger a coredump
1808  *
1809  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1810  */
1811 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1812 {
1813 	struct kernel_siginfo info;
1814 
1815 	clear_siginfo(&info);
1816 	info.si_signo = SIGSYS;
1817 	info.si_code = SYS_SECCOMP;
1818 	info.si_call_addr = (void __user *)KSTK_EIP(current);
1819 	info.si_errno = reason;
1820 	info.si_arch = syscall_get_arch(current);
1821 	info.si_syscall = syscall;
1822 	return force_sig_info_to_task(&info, current,
1823 		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1824 }
1825 
1826 /* For the crazy architectures that include trap information in
1827  * the errno field, instead of an actual errno value.
1828  */
1829 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1830 {
1831 	struct kernel_siginfo info;
1832 
1833 	clear_siginfo(&info);
1834 	info.si_signo = SIGTRAP;
1835 	info.si_errno = errno;
1836 	info.si_code  = TRAP_HWBKPT;
1837 	info.si_addr  = addr;
1838 	return force_sig_info(&info);
1839 }
1840 
1841 /* For the rare architectures that include trap information using
1842  * si_trapno.
1843  */
1844 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1845 {
1846 	struct kernel_siginfo info;
1847 
1848 	clear_siginfo(&info);
1849 	info.si_signo = sig;
1850 	info.si_errno = 0;
1851 	info.si_code  = code;
1852 	info.si_addr  = addr;
1853 	info.si_trapno = trapno;
1854 	return force_sig_info(&info);
1855 }
1856 
1857 /* For the rare architectures that include trap information using
1858  * si_trapno.
1859  */
1860 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1861 			  struct task_struct *t)
1862 {
1863 	struct kernel_siginfo info;
1864 
1865 	clear_siginfo(&info);
1866 	info.si_signo = sig;
1867 	info.si_errno = 0;
1868 	info.si_code  = code;
1869 	info.si_addr  = addr;
1870 	info.si_trapno = trapno;
1871 	return send_sig_info(info.si_signo, &info, t);
1872 }
1873 
1874 static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1875 {
1876 	int ret;
1877 	read_lock(&tasklist_lock);
1878 	ret = __kill_pgrp_info(sig, info, pgrp);
1879 	read_unlock(&tasklist_lock);
1880 	return ret;
1881 }
1882 
1883 int kill_pgrp(struct pid *pid, int sig, int priv)
1884 {
1885 	return kill_pgrp_info(sig, __si_special(priv), pid);
1886 }
1887 EXPORT_SYMBOL(kill_pgrp);
1888 
1889 int kill_pid(struct pid *pid, int sig, int priv)
1890 {
1891 	return kill_pid_info(sig, __si_special(priv), pid);
1892 }
1893 EXPORT_SYMBOL(kill_pid);
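
/*
 * Usage sketch (illustrative, modeled on the tty hangup path; the
 * exact field holding the session's pgrp varies by subsystem and
 * kernel version).  Holding a struct pid reference lets a driver
 * signal a whole process group without touching task_struct pointers:
 *
 *	// priv = 1: kernel-internal signal, skips permission checks
 *	kill_pgrp(tty->ctrl.pgrp, SIGHUP, 1);
 *	kill_pgrp(tty->ctrl.pgrp, SIGCONT, 1);
 */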
1894 
1895 #ifdef CONFIG_POSIX_TIMERS
1896 /*
1897  * These functions handle POSIX timer signals. POSIX timers use
1898  * preallocated sigqueue structs for sending signals.
1899  */
1900 static void __flush_itimer_signals(struct sigpending *pending)
1901 {
1902 	sigset_t signal, retain;
1903 	struct sigqueue *q, *n;
1904 
1905 	signal = pending->signal;
1906 	sigemptyset(&retain);
1907 
1908 	list_for_each_entry_safe(q, n, &pending->list, list) {
1909 		int sig = q->info.si_signo;
1910 
1911 		if (likely(q->info.si_code != SI_TIMER)) {
1912 			sigaddset(&retain, sig);
1913 		} else {
1914 			sigdelset(&signal, sig);
1915 			list_del_init(&q->list);
1916 			__sigqueue_free(q);
1917 		}
1918 	}
1919 
1920 	sigorsets(&pending->signal, &signal, &retain);
1921 }
1922 
1923 void flush_itimer_signals(void)
1924 {
1925 	struct task_struct *tsk = current;
1926 
1927 	guard(spinlock_irqsave)(&tsk->sighand->siglock);
1928 	__flush_itimer_signals(&tsk->pending);
1929 	__flush_itimer_signals(&tsk->signal->shared_pending);
1930 }
1931 
1932 bool posixtimer_init_sigqueue(struct sigqueue *q)
1933 {
1934 	struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);
1935 
1936 	if (!ucounts)
1937 		return false;
1938 	clear_siginfo(&q->info);
1939 	__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
1940 	return true;
1941 }
1942 
1943 static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
1944 {
1945 	struct sigpending *pending;
1946 	int sig = q->info.si_signo;
1947 
1948 	signalfd_notify(t, sig);
1949 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1950 	list_add_tail(&q->list, &pending->list);
1951 	sigaddset(&pending->signal, sig);
1952 	complete_signal(sig, t, type);
1953 }
1954 
1955 /*
1956  * This function selects the target task for a POSIX timer signal.
1957  * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1958  * set), the signal must be delivered to the specific thread (queues
1959  * into t->pending).
1960  *
1961  * Where type is not PIDTYPE_PID, signals must be delivered to the
1962  * process. In this case, prefer to deliver to current if it is in
1963  * the same thread group as the target process and its sighand is
1964  * stable, which avoids unnecessarily waking up a potentially idle task.
1965  */
1966 static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
1967 {
1968 	struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
1969 
1970 	if (t && tmr->it_pid_type != PIDTYPE_PID &&
1971 	    same_thread_group(t, current) && !current->exit_state)
1972 		t = current;
1973 	return t;
1974 }
1975 
1976 void posixtimer_send_sigqueue(struct k_itimer *tmr)
1977 {
1978 	struct sigqueue *q = &tmr->sigq;
1979 	int sig = q->info.si_signo;
1980 	struct task_struct *t;
1981 	unsigned long flags;
1982 	int result;
1983 
1984 	guard(rcu)();
1985 
1986 	t = posixtimer_get_target(tmr);
1987 	if (!t)
1988 		return;
1989 
1990 	if (!likely(lock_task_sighand(t, &flags)))
1991 		return;
1992 
1993 	/*
1994 	 * Update @tmr::it_sigqueue_seq for posix timer signals with sighand
1995 	 * locked to prevent a race against dequeue_signal().
1996 	 */
1997 	tmr->it_sigqueue_seq = tmr->it_signal_seq;
1998 
1999 	/*
2000 	 * Set the signal delivery status under sighand lock, so that the
2001 	 * ignored signal handling can distinguish between a periodic and a
2002 	 * non-periodic timer.
2003 	 */
2004 	tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
2005 
2006 	if (!prepare_signal(sig, t, false)) {
2007 		result = TRACE_SIGNAL_IGNORED;
2008 
2009 		if (!list_empty(&q->list)) {
2010 			/*
2011 			 * The signal was ignored and blocked. The timer
2012 			 * expiry queued it because blocked signals are
2013 			 * queued independent of the ignored state.
2014 			 *
2015 			 * The unblocking set SIGPENDING, but the signal
2016 			 * was not yet dequeued from the pending list.
2017 			 * So prepare_signal() sees unblocked and ignored,
2018 			 * which ends up here. Leave it queued like a
2019 			 * regular signal.
2020 			 *
2021 			 * The same happens when the task group is exiting
2022 			 * and the signal is already queued.
2023 			 * prepare_signal() treats SIGNAL_GROUP_EXIT as
2024 			 * ignored independent of its queued state. This
2025 			 * gets cleaned up in __exit_signal().
2026 			 */
2027 			goto out;
2028 		}
2029 
2030 		/* Periodic timers with SIG_IGN are queued on the ignored list */
2031 		if (tmr->it_sig_periodic) {
2032 			/*
2033 			 * Already queued means the timer was rearmed after
2034 			 * the previous expiry got it on the ignore list.
2035 			 * Nothing to do for that case.
2036 			 */
2037 			if (hlist_unhashed(&tmr->ignored_list)) {
2038 				/*
2039 				 * Take a signal reference and queue it on
2040 				 * the ignored list.
2041 				 */
2042 				posixtimer_sigqueue_getref(q);
2043 				posixtimer_sig_ignore(t, q);
2044 			}
2045 		} else if (!hlist_unhashed(&tmr->ignored_list)) {
2046 			/*
2047 			 * Covers the case where a timer was periodic and
2048 			 * then the signal was ignored. Later it was rearmed
2049 			 * as a one-shot timer. The previous signal is invalid
2050 			 * now, and this one-shot signal has to be dropped.
2051 			 * Remove it from the ignored list and drop the
2052 			 * reference count as the signal is no longer
2053 			 * queued.
2054 			 */
2055 			hlist_del_init(&tmr->ignored_list);
2056 			posixtimer_putref(tmr);
2057 		}
2058 		goto out;
2059 	}
2060 
2061 	if (unlikely(!list_empty(&q->list))) {
2062 		/* This holds a reference count already */
2063 		result = TRACE_SIGNAL_ALREADY_PENDING;
2064 		goto out;
2065 	}
2066 
2067 	/*
2068 	 * If the signal is on the ignore list, it got blocked after it was
2069 	 * ignored earlier. But nothing lifted the ignore. Move it back to
2070 	 * the pending list to be consistent with the regular signal
2071 	 * handling. This already holds a reference count.
2072 	 *
2073 	 * If it's not on the ignore list acquire a reference count.
2074 	 */
2075 	if (likely(hlist_unhashed(&tmr->ignored_list)))
2076 		posixtimer_sigqueue_getref(q);
2077 	else
2078 		hlist_del_init(&tmr->ignored_list);
2079 
2080 	posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
2081 	result = TRACE_SIGNAL_DELIVERED;
2082 out:
2083 	trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
2084 	unlock_task_sighand(t, &flags);
2085 }
2086 
2087 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
2088 {
2089 	struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
2090 
2091 	/*
2092 	 * If the timer is marked deleted already or the signal originates
2093 	 * from a non-periodic timer, then just drop the reference
2094 	 * count. Otherwise queue it on the ignored list.
2095 	 */
2096 	if (posixtimer_valid(tmr) && tmr->it_sig_periodic)
2097 		hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
2098 	else
2099 		posixtimer_putref(tmr);
2100 }
2101 
2102 static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
2103 {
2104 	struct hlist_head *head = &tsk->signal->ignored_posix_timers;
2105 	struct hlist_node *tmp;
2106 	struct k_itimer *tmr;
2107 
2108 	if (likely(hlist_empty(head)))
2109 		return;
2110 
2111 	/*
2112 	 * Rearming a timer with sighand lock held is not possible due to
2113 	 * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
2114 	 * let the signal delivery path decide whether it needs to be
2115 	 * rearmed or not. This cannot be decided here without dropping
2116 	 * sighand lock and creating a loop retry horror show.
2117 	 */
2118 	hlist_for_each_entry_safe(tmr, tmp, head, ignored_list) {
2119 		struct task_struct *target;
2120 
2121 		/*
2122 		 * tmr::sigq.info.si_signo is immutable, so accessing it
2123 		 * without holding tmr::it_lock is safe.
2124 		 */
2125 		if (tmr->sigq.info.si_signo != sig)
2126 			continue;
2127 
2128 		hlist_del_init(&tmr->ignored_list);
2129 
2130 		/* This should never happen and leaks a reference count */
2131 		if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
2132 			continue;
2133 
2134 		/*
2135 		 * Get the target for the signal. If target is a thread and
2136 		 * has exited by now, drop the reference count.
2137 		 */
2138 		guard(rcu)();
2139 		target = posixtimer_get_target(tmr);
2140 		if (target)
2141 			posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
2142 		else
2143 			posixtimer_putref(tmr);
2144 	}
2145 }
2146 #else /* CONFIG_POSIX_TIMERS */
2147 static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
2148 static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
2149 #endif /* !CONFIG_POSIX_TIMERS */
2150 
2151 void do_notify_pidfd(struct task_struct *task)
2152 {
2153 	struct pid *pid = task_pid(task);
2154 
2155 	WARN_ON(task->exit_state == 0);
2156 
2157 	__wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2158 			poll_to_key(EPOLLIN | EPOLLRDNORM));
2159 }
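
/*
 * Illustrative sketch (user space): the wait_pidfd waitqueue woken
 * above is what makes a pidfd poll()able, so a watcher can wait for
 * process exit without SIGCHLD or wait(2):
 *
 *	#include <poll.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	// POLLIN becomes ready once the target task has exited
 *	poll(&pfd, 1, -1);
 */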
2160 
2161 /*
2162  * Let a parent know about the death of a child.
2163  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2164  *
2165  * Returns true if our parent ignored us and so we've switched to
2166  * self-reaping.
2167  */
2168 bool do_notify_parent(struct task_struct *tsk, int sig)
2169 {
2170 	struct kernel_siginfo info;
2171 	unsigned long flags;
2172 	struct sighand_struct *psig;
2173 	bool autoreap = false;
2174 	u64 utime, stime;
2175 
2176 	WARN_ON_ONCE(sig == -1);
2177 
2178 	/* do_notify_parent_cldstop should have been called instead.  */
2179 	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2180 
2181 	WARN_ON_ONCE(!tsk->ptrace &&
2182 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2183 
2184 	/* ptraced, or group-leader without sub-threads */
2185 	do_notify_pidfd(tsk);
2186 
2187 	if (sig != SIGCHLD) {
2188 		/*
2189 		 * This is only possible if parent == real_parent.
2190 		 * Check if it has changed security domain.
2191 		 */
2192 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2193 			sig = SIGCHLD;
2194 	}
2195 
2196 	clear_siginfo(&info);
2197 	info.si_signo = sig;
2198 	info.si_errno = 0;
2199 	/*
2200 	 * We are under tasklist_lock here so our parent is tied to
2201 	 * us and cannot change.
2202 	 *
2203 	 * task_active_pid_ns will always return the same pid namespace
2204 	 * until a task passes through release_task.
2205 	 *
2206 	 * write_lock() currently calls preempt_disable() which is the
2207 	 * same as rcu_read_lock(), but according to Oleg it is not
2208 	 * correct to rely on this.
2209 	 */
2210 	rcu_read_lock();
2211 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2212 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2213 				       task_uid(tsk));
2214 	rcu_read_unlock();
2215 
2216 	task_cputime(tsk, &utime, &stime);
2217 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2218 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2219 
2220 	info.si_status = tsk->exit_code & 0x7f;
2221 	if (tsk->exit_code & 0x80)
2222 		info.si_code = CLD_DUMPED;
2223 	else if (tsk->exit_code & 0x7f)
2224 		info.si_code = CLD_KILLED;
2225 	else {
2226 		info.si_code = CLD_EXITED;
2227 		info.si_status = tsk->exit_code >> 8;
2228 	}
2229 
2230 	psig = tsk->parent->sighand;
2231 	spin_lock_irqsave(&psig->siglock, flags);
2232 	if (!tsk->ptrace && sig == SIGCHLD &&
2233 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2234 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2235 		/*
2236 		 * We are exiting and our parent doesn't care.  POSIX.1
2237 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2238 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2239 		 * automatically and not left for our parent's wait4 call.
2240 		 * Rather than having the parent do it as a magic kind of
2241 		 * signal handler, we just set this to tell do_exit that we
2242 		 * can be cleaned up without becoming a zombie.  Note that
2243 		 * we still call __wake_up_parent in this case, because a
2244 		 * blocked sys_wait4 might now return -ECHILD.
2245 		 *
2246 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2247 		 * is implementation-defined: we do (if you don't want
2248 		 * it, just use SIG_IGN instead).
2249 		 */
2250 		autoreap = true;
2251 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2252 			sig = 0;
2253 	}
2254 	if (!tsk->ptrace && tsk->signal->autoreap) {
2255 		autoreap = true;
2256 		sig = 0;
2257 	}
2258 	/*
2259 	 * Send with __send_signal_locked() as si_pid and si_uid are in the
2260 	 * parent's namespaces.
2261 	 */
2262 	if (valid_signal(sig) && sig)
2263 		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2264 	__wake_up_parent(tsk, tsk->parent);
2265 	spin_unlock_irqrestore(&psig->siglock, flags);
2266 
2267 	return autoreap;
2268 }
2269 
2270 /**
2271  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2272  * @tsk: task reporting the state change
2273  * @for_ptracer: the notification is for ptracer
2274  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2275  *
2276  * Notify @tsk's parent that the stopped/continued state has changed.  If
2277  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2278  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2279  *
2280  * CONTEXT:
2281  * Must be called with tasklist_lock at least read locked.
2282  */
2283 static void do_notify_parent_cldstop(struct task_struct *tsk,
2284 				     bool for_ptracer, int why)
2285 {
2286 	struct kernel_siginfo info;
2287 	unsigned long flags;
2288 	struct task_struct *parent;
2289 	struct sighand_struct *sighand;
2290 	u64 utime, stime;
2291 
2292 	if (for_ptracer) {
2293 		parent = tsk->parent;
2294 	} else {
2295 		tsk = tsk->group_leader;
2296 		parent = tsk->real_parent;
2297 	}
2298 
2299 	clear_siginfo(&info);
2300 	info.si_signo = SIGCHLD;
2301 	info.si_errno = 0;
2302 	/*
2303 	 * see comment in do_notify_parent() about the following 4 lines
2304 	 */
2305 	rcu_read_lock();
2306 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2307 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2308 	rcu_read_unlock();
2309 
2310 	task_cputime(tsk, &utime, &stime);
2311 	info.si_utime = nsec_to_clock_t(utime);
2312 	info.si_stime = nsec_to_clock_t(stime);
2313 
2314 	info.si_code = why;
2315 	switch (why) {
2316 	case CLD_CONTINUED:
2317 		info.si_status = SIGCONT;
2318 		break;
2319 	case CLD_STOPPED:
2320 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2321 		break;
2322 	case CLD_TRAPPED:
2323 		info.si_status = tsk->exit_code & 0x7f;
2324 		break;
2325 	default:
2326 		BUG();
2327 	}
2328 
2329 	sighand = parent->sighand;
2330 	spin_lock_irqsave(&sighand->siglock, flags);
2331 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2332 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2333 		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2334 	/*
2335 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2336 	 */
2337 	__wake_up_parent(tsk, parent);
2338 	spin_unlock_irqrestore(&sighand->siglock, flags);
2339 }
2340 
2341 /*
2342  * This must be called with current->sighand->siglock held.
2343  *
2344  * This should be the path for all ptrace stops.
2345  * We always set current->last_siginfo while stopped here.
2346  * That makes it a way to test a stopped process for
2347  * being ptrace-stopped vs being job-control-stopped.
2348  *
2349  * Returns the signal with which the ptracer requested the tracee
2350  * resume.  If the tracee did not stop because the tracer is gone,
2351  * the stop signal is returned unchanged.
2352  */
2353 static int ptrace_stop(int exit_code, int why, unsigned long message,
2354 		       kernel_siginfo_t *info)
2355 	__releases(&current->sighand->siglock)
2356 	__acquires(&current->sighand->siglock)
2357 {
2358 	bool gstop_done = false;
2359 
2360 	if (arch_ptrace_stop_needed()) {
2361 		/*
2362 		 * The arch code has something special to do before a
2363 		 * ptrace stop.  This is allowed to block, e.g. for faults
2364 		 * on user stack pages.  We can't keep the siglock while
2365 		 * calling arch_ptrace_stop, so we must release it now.
2366 		 * To preserve proper semantics, we must do this before
2367 		 * any signal bookkeeping like checking group_stop_count.
2368 		 */
2369 		spin_unlock_irq(&current->sighand->siglock);
2370 		arch_ptrace_stop();
2371 		spin_lock_irq(&current->sighand->siglock);
2372 	}
2373 
2374 	/*
2375 	 * After this point ptrace_signal_wake_up or signal_wake_up
2376 	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2377 	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2378 	 * signals here to prevent ptrace_stop sleeping in schedule.
2379 	 */
2380 	if (!current->ptrace || __fatal_signal_pending(current))
2381 		return exit_code;
2382 
2383 	set_special_state(TASK_TRACED);
2384 	current->jobctl |= JOBCTL_TRACED;
2385 
2386 	/*
2387 	 * We're committing to trapping.  TRACED should be visible before
2388 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2389 	 * Also, transition to TRACED and updates to ->jobctl should be
2390 	 * atomic with respect to siglock and should be done after the arch
2391 	 * hook as siglock is released and regrabbed across it.
2392 	 *
2393 	 *     TRACER				    TRACEE
2394 	 *
2395 	 *     ptrace_attach()
2396 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2397 	 *     do_wait()
2398 	 *       set_current_state()                smp_wmb();
2399 	 *       ptrace_do_wait()
2400 	 *         wait_task_stopped()
2401 	 *           task_stopped_code()
2402 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2403 	 */
2404 	smp_wmb();
2405 
2406 	current->ptrace_message = message;
2407 	current->last_siginfo = info;
2408 	current->exit_code = exit_code;
2409 
2410 	/*
2411 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2412 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2413 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2414 	 * could be clear now.  We act as if SIGCONT is received after
2415 	 * TASK_TRACED is entered - ignore it.
2416 	 */
2417 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2418 		gstop_done = task_participate_group_stop(current);
2419 
2420 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2421 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2422 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2423 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2424 
2425 	/* entering a trap, clear TRAPPING */
2426 	task_clear_jobctl_trapping(current);
2427 
2428 	spin_unlock_irq(&current->sighand->siglock);
2429 	read_lock(&tasklist_lock);
2430 	/*
2431 	 * Notify parents of the stop.
2432 	 *
2433 	 * While ptraced, there are two parents - the ptracer and
2434 	 * the real_parent of the group_leader.  The ptracer should
2435 	 * know about every stop while the real parent is only
2436 	 * interested in the completion of group stop.  The states
2437 	 * for the two don't interact with each other.  Notify
2438 	 * separately unless they would be duplicates.
2439 	 */
2440 	if (current->ptrace)
2441 		do_notify_parent_cldstop(current, true, why);
2442 	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2443 		do_notify_parent_cldstop(current, false, why);
2444 
2445 	/*
2446 	 * The previous do_notify_parent_cldstop() invocation woke the ptracer.
2447 	 * On a PREEMPTION kernel this can result in a preemption requirement
2448 	 * which will be fulfilled after read_unlock() when the ptracer is
2449 	 * put on the CPU.
2450 	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2451 	 * this task to wait in schedule(). If this task gets preempted then it
2452 	 * remains enqueued on the runqueue. The ptracer will observe this and
2453 	 * then sleep for a delay of one HZ tick. In the meantime this task
2454 	 * gets scheduled, enters schedule() and will wait for the ptracer.
2455 	 *
2456 	 * This preemption point is not bad from a correctness point of
2457 	 * view but extends the runtime by one HZ tick due to the
2458 	 * ptracer's sleep.  The preempt-disable section ensures that there
2459 	 * will be no preemption between unlock and schedule(), which
2460 	 * improves performance since the ptracer will observe that
2461 	 * the tracee is scheduled out once it gets on the CPU.
2462 	 *
2463 	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2464 	 * Therefore the task can be preempted after do_notify_parent_cldstop()
2465 	 * before unlocking tasklist_lock so there is no benefit in doing this.
2466 	 *
2467 	 * In fact disabling preemption is harmful on PREEMPT_RT because
2468 	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2469 	 * with preemption disabled due to the 'sleeping' spinlock
2470 	 * substitution of RT.
2471 	 */
2472 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2473 		preempt_disable();
2474 	read_unlock(&tasklist_lock);
2475 	cgroup_enter_frozen();
2476 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2477 		preempt_enable_no_resched();
2478 	schedule();
2479 	cgroup_leave_frozen(true);
2480 
2481 	/*
2482 	 * We are back.  Now reacquire the siglock before touching
2483 	 * last_siginfo, so that we are sure to have synchronized with
2484 	 * any signal-sending on another CPU that wants to examine it.
2485 	 */
2486 	spin_lock_irq(&current->sighand->siglock);
2487 	exit_code = current->exit_code;
2488 	current->last_siginfo = NULL;
2489 	current->ptrace_message = 0;
2490 	current->exit_code = 0;
2491 
2492 	/* LISTENING can be set only during STOP traps, clear it */
2493 	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2494 
2495 	/*
2496 	 * Queued signals ignored us while we were stopped for tracing.
2497 	 * So check for any that we should take before resuming user mode.
2498 	 * This sets TIF_SIGPENDING, but never clears it.
2499 	 */
2500 	recalc_sigpending_tsk(current);
2501 	return exit_code;
2502 }
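
/*
 * Illustrative sketch (user space): the tracer side that this stop
 * synchronizes with.  waitpid() observes the trap; the signal argument
 * of PTRACE_CONT becomes the value ptrace_stop() returns in the
 * tracee (0 suppresses the signal):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);          // tracee is now in TASK_TRACED
 *	ptrace(PTRACE_CONT, pid, NULL, WSTOPSIG(status));
 */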
2503 
2504 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2505 {
2506 	kernel_siginfo_t info;
2507 
2508 	clear_siginfo(&info);
2509 	info.si_signo = signr;
2510 	info.si_code = exit_code;
2511 	info.si_pid = task_pid_vnr(current);
2512 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2513 
2514 	/* Let the debugger run.  */
2515 	return ptrace_stop(exit_code, why, message, &info);
2516 }
2517 
2518 int ptrace_notify(int exit_code, unsigned long message)
2519 {
2520 	int signr;
2521 
2522 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2523 	if (unlikely(task_work_pending(current)))
2524 		task_work_run();
2525 
2526 	spin_lock_irq(&current->sighand->siglock);
2527 	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2528 	spin_unlock_irq(&current->sighand->siglock);
2529 	return signr;
2530 }
2531 
2532 /**
2533  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2534  * @signr: signr causing group stop if initiating
2535  *
2536  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2537  * and participate in it.  If already set, participate in the existing
2538  * group stop.  If participated in a group stop (and thus slept), %true is
2539  * returned with siglock released.
2540  *
2541  * If ptraced, this function doesn't handle stop itself.  Instead,
2542  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2543  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2544  * place afterwards.
2545  *
2546  * CONTEXT:
2547  * Must be called with @current->sighand->siglock held, which is released
2548  * on %true return.
2549  *
2550  * RETURNS:
2551  * %false if group stop is already cancelled or ptrace trap is scheduled.
2552  * %true if participated in group stop.
2553  */
2554 static bool do_signal_stop(int signr)
2555 	__releases(&current->sighand->siglock)
2556 {
2557 	struct signal_struct *sig = current->signal;
2558 
2559 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2560 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2561 		struct task_struct *t;
2562 
2563 		/* signr will be recorded in task->jobctl for retries */
2564 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2565 
2566 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2567 		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2568 		    unlikely(sig->group_exec_task))
2569 			return false;
2570 		/*
2571 		 * There is no group stop already in progress.  We must
2572 		 * initiate one now.
2573 		 *
2574 		 * While ptraced, a task may be resumed while group stop is
2575 		 * still in effect and then receive a stop signal and
2576 		 * initiate another group stop.  This deviates from the
2577 		 * usual behavior as two consecutive stop signals can't
2578 		 * cause two group stops when !ptraced.  That is why we
2579 		 * also check !task_is_stopped(t) below.
2580 		 *
2581 		 * The condition can be distinguished by testing whether
2582 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2583 		 * group_exit_code in that case.
2584 		 *
2585 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2586 		 * an intervening stop signal is required to cause two
2587 		 * continued events regardless of ptrace.
2588 		 */
2589 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2590 			sig->group_exit_code = signr;
2591 
2592 		sig->group_stop_count = 0;
2593 		if (task_set_jobctl_pending(current, signr | gstop))
2594 			sig->group_stop_count++;
2595 
2596 		for_other_threads(current, t) {
2597 			/*
2598 			 * Setting state to TASK_STOPPED for a group
2599 			 * stop is always done with the siglock held,
2600 			 * so this check has no races.
2601 			 */
2602 			if (!task_is_stopped(t) &&
2603 			    task_set_jobctl_pending(t, signr | gstop)) {
2604 				sig->group_stop_count++;
2605 				if (likely(!(t->ptrace & PT_SEIZED)))
2606 					signal_wake_up(t, 0);
2607 				else
2608 					ptrace_trap_notify(t);
2609 			}
2610 		}
2611 	}
2612 
2613 	if (likely(!current->ptrace)) {
2614 		int notify = 0;
2615 
2616 		/*
2617 		 * If there are no other threads in the group, or if there
2618 		 * is a group stop in progress and we are the last to stop,
2619 		 * report to the parent.
2620 		 */
2621 		if (task_participate_group_stop(current))
2622 			notify = CLD_STOPPED;
2623 
2624 		current->jobctl |= JOBCTL_STOPPED;
2625 		set_special_state(TASK_STOPPED);
2626 		spin_unlock_irq(&current->sighand->siglock);
2627 
2628 		/*
2629 		 * Notify the parent of the group stop completion.  Because
2630 		 * we're not holding either the siglock or tasklist_lock
2631 		 * here, a ptracer may attach in between; however, this is for
2632 		 * group stop and should always be delivered to the real
2633 		 * parent of the group leader.  The new ptracer will get
2634 		 * its notification when this task transitions into
2635 		 * TASK_TRACED.
2636 		 */
2637 		if (notify) {
2638 			read_lock(&tasklist_lock);
2639 			do_notify_parent_cldstop(current, false, notify);
2640 			read_unlock(&tasklist_lock);
2641 		}
2642 
2643 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2644 		cgroup_enter_frozen();
2645 		schedule();
2646 		return true;
2647 	} else {
2648 		/*
2649 		 * While ptraced, group stop is handled by STOP trap.
2650 		 * Schedule it and let the caller deal with it.
2651 		 */
2652 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2653 		return false;
2654 	}
2655 }
2656 
2657 /**
2658  * do_jobctl_trap - take care of ptrace jobctl traps
2659  *
2660  * When PT_SEIZED, it's used for both group stop and explicit
2661  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2662  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2663  * the stop signal; otherwise, %SIGTRAP.
2664  *
2665  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2666  * number as exit_code and no siginfo.
2667  *
2668  * CONTEXT:
2669  * Must be called with @current->sighand->siglock held, which may be
2670  * released and re-acquired before returning with intervening sleep.
2671  */
2672 static void do_jobctl_trap(void)
2673 {
2674 	struct signal_struct *signal = current->signal;
2675 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2676 
2677 	if (current->ptrace & PT_SEIZED) {
2678 		if (!signal->group_stop_count &&
2679 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2680 			signr = SIGTRAP;
2681 		WARN_ON_ONCE(!signr);
2682 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2683 				 CLD_STOPPED, 0);
2684 	} else {
2685 		WARN_ON_ONCE(!signr);
2686 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2687 	}
2688 }
2689 
2690 /**
2691  * do_freezer_trap - handle the freezer jobctl trap
2692  *
2693  * Puts the task into the frozen state, unless the task is about to quit;
2694  * in that case it drops JOBCTL_TRAP_FREEZE.
2695  *
2696  * CONTEXT:
2697  * Must be called with @current->sighand->siglock held,
2698  * which is always released before returning.
2699  */
2700 static void do_freezer_trap(void)
2701 	__releases(&current->sighand->siglock)
2702 {
2703 	/*
2704 	 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2705 	 * loop once more to give them a chance to be handled.
2706 	 * In any case, we'll return.
2707 	 */
2708 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2709 	     JOBCTL_TRAP_FREEZE) {
2710 		spin_unlock_irq(&current->sighand->siglock);
2711 		return;
2712 	}
2713 
2714 	/*
2715 	 * Now we're sure that there is no pending fatal signal and no
2716 	 * pending traps. Clear TIF_SIGPENDING so that schedule() does not
2717 	 * return immediately (if there is a non-fatal signal pending), and
2718 	 * put the task to sleep.
2719 	 */
2720 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2721 	clear_thread_flag(TIF_SIGPENDING);
2722 	spin_unlock_irq(&current->sighand->siglock);
2723 	cgroup_enter_frozen();
2724 	schedule();
2725 
2726 	/*
2727 	 * We could've been woken by task_work, run it to clear
2728 	 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2729 	 */
2730 	clear_notify_signal();
2731 	if (unlikely(task_work_pending(current)))
2732 		task_work_run();
2733 }
2734 
2735 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2736 {
2737 	/*
2738 	 * We do not check sig_kernel_stop(signr) but set this marker
2739 	 * unconditionally because we do not know whether debugger will
2740 	 * change signr. This flag has no meaning unless we are going
2741 	 * to stop after return from ptrace_stop(). In this case it will
2742 	 * be checked in do_signal_stop(), we should only stop if it was
2743 	 * not cleared by SIGCONT while we were sleeping. See also the
2744 	 * comment in dequeue_signal().
2745 	 */
2746 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2747 	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2748 
2749 	/* We're back.  Did the debugger cancel the sig?  */
2750 	if (signr == 0)
2751 		return signr;
2752 
2753 	/*
2754 	 * Update the siginfo structure if the signal has
2755 	 * changed.  If the debugger wanted something
2756 	 * specific in the siginfo structure then it should
2757 	 * have updated *info via PTRACE_SETSIGINFO.
2758 	 */
2759 	if (signr != info->si_signo) {
2760 		clear_siginfo(info);
2761 		info->si_signo = signr;
2762 		info->si_errno = 0;
2763 		info->si_code = SI_USER;
2764 		rcu_read_lock();
2765 		info->si_pid = task_pid_vnr(current->parent);
2766 		info->si_uid = from_kuid_munged(current_user_ns(),
2767 						task_uid(current->parent));
2768 		rcu_read_unlock();
2769 	}
2770 
2771 	/* If the (new) signal is now blocked, requeue it.  */
2772 	if (sigismember(&current->blocked, signr) ||
2773 	    fatal_signal_pending(current)) {
2774 		send_signal_locked(signr, info, current, type);
2775 		signr = 0;
2776 	}
2777 
2778 	return signr;
2779 }
2780 
2781 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2782 {
2783 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2784 	case SIL_FAULT:
2785 	case SIL_FAULT_TRAPNO:
2786 	case SIL_FAULT_MCEERR:
2787 	case SIL_FAULT_BNDERR:
2788 	case SIL_FAULT_PKUERR:
2789 	case SIL_FAULT_PERF_EVENT:
2790 		ksig->info.si_addr = arch_untagged_si_addr(
2791 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2792 		break;
2793 	case SIL_KILL:
2794 	case SIL_TIMER:
2795 	case SIL_POLL:
2796 	case SIL_CHLD:
2797 	case SIL_RT:
2798 	case SIL_SYS:
2799 		break;
2800 	}
2801 }
2802 
2803 bool get_signal(struct ksignal *ksig)
2804 {
2805 	struct sighand_struct *sighand = current->sighand;
2806 	struct signal_struct *signal = current->signal;
2807 	int signr;
2808 
2809 	clear_notify_signal();
2810 	if (unlikely(task_work_pending(current)))
2811 		task_work_run();
2812 
2813 	if (!task_sigpending(current))
2814 		return false;
2815 
2816 	if (unlikely(uprobe_deny_signal()))
2817 		return false;
2818 
2819 	/*
2820 	 * Do this once, we can't return to user-mode if freezing() == T.
2821 	 * do_signal_stop() and ptrace_stop() set TASK_STOPPED/TASK_TRACED
2822 	 * and the freezer handles those states via TASK_FROZEN, thus they
2823 	 * do not need another check after return.
2824 	 */
2825 	try_to_freeze();
2826 
2827 relock:
2828 	spin_lock_irq(&sighand->siglock);
2829 
2830 	/*
2831 	 * Every stopped thread goes here after wakeup. Check to see if
2832 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2833 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2834 	 */
2835 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2836 		int why;
2837 
2838 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2839 			why = CLD_CONTINUED;
2840 		else
2841 			why = CLD_STOPPED;
2842 
2843 		signal->flags &= ~SIGNAL_CLD_MASK;
2844 
2845 		spin_unlock_irq(&sighand->siglock);
2846 
2847 		/*
2848 		 * Notify the parent that we're continuing.  This event is
2849 		 * always per-process and doesn't make a whole lot of sense
2850 		 * for ptracers, who shouldn't consume the state via
2851 		 * wait(2) either, but, for backward compatibility, notify
2852 		 * the ptracer of the group leader too unless it would be
2853 		 * a duplicate.
2854 		 */
2855 		read_lock(&tasklist_lock);
2856 		do_notify_parent_cldstop(current, false, why);
2857 
2858 		if (ptrace_reparented(current->group_leader))
2859 			do_notify_parent_cldstop(current->group_leader,
2860 						true, why);
2861 		read_unlock(&tasklist_lock);
2862 
2863 		goto relock;
2864 	}
2865 
2866 	for (;;) {
2867 		struct k_sigaction *ka;
2868 		enum pid_type type;
2869 
2870 		/* Has this task already been marked for death? */
2871 		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2872 		     signal->group_exec_task) {
2873 			signr = SIGKILL;
2874 			sigdelset(&current->pending.signal, SIGKILL);
2875 			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2876 					     &sighand->action[SIGKILL-1]);
2877 			recalc_sigpending();
2878 			/*
2879 			 * implies do_group_exit() or return to PF_USER_WORKER,
2880 			 * no need to initialize ksig->info/etc.
2881 			 */
2882 			goto fatal;
2883 		}
2884 
2885 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2886 		    do_signal_stop(0))
2887 			goto relock;
2888 
2889 		if (unlikely(current->jobctl &
2890 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2891 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2892 				do_jobctl_trap();
2893 				spin_unlock_irq(&sighand->siglock);
2894 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2895 				do_freezer_trap();
2896 
2897 			goto relock;
2898 		}
2899 
2900 		/*
2901 		 * If the task is leaving the frozen state, let's update
2902 		 * cgroup counters and reset the frozen bit.
2903 		 */
2904 		if (unlikely(cgroup_task_frozen(current))) {
2905 			spin_unlock_irq(&sighand->siglock);
2906 			cgroup_leave_frozen(false);
2907 			goto relock;
2908 		}
2909 
2910 		/*
2911 		 * Signals generated by the execution of an instruction
2912 		 * need to be delivered before any other pending signals
2913 		 * so that the instruction pointer in the signal stack
2914 		 * frame points to the faulting instruction.
2915 		 */
2916 		type = PIDTYPE_PID;
2917 		signr = dequeue_synchronous_signal(&ksig->info);
2918 		if (!signr)
2919 			signr = dequeue_signal(&current->blocked, &ksig->info, &type);
2920 
2921 		if (!signr)
2922 			break; /* will return 0 */
2923 
2924 		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2925 		    !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
2926 			signr = ptrace_signal(signr, &ksig->info, type);
2927 			if (!signr)
2928 				continue;
2929 		}
2930 
2931 		ka = &sighand->action[signr-1];
2932 
2933 		/* Trace actually delivered signals. */
2934 		trace_signal_deliver(signr, &ksig->info, ka);
2935 
2936 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2937 			continue;
2938 		if (ka->sa.sa_handler != SIG_DFL) {
2939 			/* Run the handler.  */
2940 			ksig->ka = *ka;
2941 
2942 			if (ka->sa.sa_flags & SA_ONESHOT)
2943 				ka->sa.sa_handler = SIG_DFL;
2944 
2945 			break; /* will return non-zero "signr" value */
2946 		}
2947 
2948 		/*
2949 		 * Now we are doing the default action for this signal.
2950 		 */
2951 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2952 			continue;
2953 
2954 		/*
2955 		 * Global init gets no signals it doesn't want.
2956 		 * Container-init gets no signals it doesn't want from same
2957 		 * container.
2958 		 *
2959 		 * Note that if global/container-init sees a sig_kernel_only()
2960 		 * signal here, the signal must have been generated internally
2961 		 * or must have come from an ancestor namespace. In either
2962 		 * case, the signal cannot be dropped.
2963 		 */
2964 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2965 				!sig_kernel_only(signr))
2966 			continue;
2967 
2968 		if (sig_kernel_stop(signr)) {
2969 			/*
2970 			 * The default action is to stop all threads in
2971 			 * the thread group.  The job control signals
2972 			 * do nothing in an orphaned pgrp, but SIGSTOP
2973 			 * always works.  Note that siglock needs to be
2974 			 * dropped during the call to is_orphaned_pgrp()
2975 			 * because of lock ordering with tasklist_lock.
2976 			 * This allows an intervening SIGCONT to be posted.
2977 			 * We need to check for that and bail out if necessary.
2978 			 */
2979 			if (signr != SIGSTOP) {
2980 				spin_unlock_irq(&sighand->siglock);
2981 
2982 				/* signals can be posted during this window */
2983 
2984 				if (is_current_pgrp_orphaned())
2985 					goto relock;
2986 
2987 				spin_lock_irq(&sighand->siglock);
2988 			}
2989 
2990 			if (likely(do_signal_stop(signr))) {
2991 				/* It released the siglock.  */
2992 				goto relock;
2993 			}
2994 
2995 			/*
2996 			 * We didn't actually stop, due to a race
2997 			 * with SIGCONT or something like that.
2998 			 */
2999 			continue;
3000 		}
3001 
3002 	fatal:
3003 		spin_unlock_irq(&sighand->siglock);
3004 		if (unlikely(cgroup_task_frozen(current)))
3005 			cgroup_leave_frozen(true);
3006 
3007 		/*
3008 		 * Anything else is fatal, maybe with a core dump.
3009 		 */
3010 		current->flags |= PF_SIGNALED;
3011 
3012 		if (sig_kernel_coredump(signr)) {
3013 			if (print_fatal_signals)
3014 				print_fatal_signal(signr);
3015 			proc_coredump_connector(current);
3016 			/*
3017 			 * If it was able to dump core, this kills all
3018 			 * other threads in the group and synchronizes with
3019 			 * their demise.  If we lost the race with another
3020 			 * thread getting here, it set group_exit_code
3021 			 * first and our do_group_exit call below will use
3022 			 * that value and ignore the one we pass it.
3023 			 */
3024 			vfs_coredump(&ksig->info);
3025 		}
3026 
3027 		/*
3028 		 * PF_USER_WORKER threads will catch and exit on fatal signals
3029 		 * themselves. They have cleanup that must be performed, so we
3030 		 * cannot call do_exit() on their behalf. Note that ksig won't
3031 		 * be properly initialized, so PF_USER_WORKER threads shouldn't use it.
3032 		 */
3033 		if (current->flags & PF_USER_WORKER)
3034 			goto out;
3035 
3036 		/*
3037 		 * Death signals, no core dump.
3038 		 */
3039 		do_group_exit(signr);
3040 		/* NOTREACHED */
3041 	}
3042 	spin_unlock_irq(&sighand->siglock);
3043 
3044 	ksig->sig = signr;
3045 
3046 	if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
3047 		hide_si_addr_tag_bits(ksig);
3048 out:
3049 	return signr > 0;
3050 }
3051 
3052 /**
3053  * signal_delivered - called after signal delivery to update blocked signals
3054  * @ksig:		kernel signal struct
3055  * @stepping:		nonzero if debugger single-step or block-step in use
3056  *
3057  * This function should be called when a signal has successfully been
3058  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
3059  * is always blocked), and the signal itself is blocked unless %SA_NODEFER
3060  * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
3061  */
3062 static void signal_delivered(struct ksignal *ksig, int stepping)
3063 {
3064 	sigset_t blocked;
3065 
3066 	/* A signal was successfully delivered, and the saved sigmask
3067 	 * was stored on the signal frame and will be restored by
3068 	 * sigreturn.  So we can simply clear the restore sigmask flag.
3069 	 */
3070 	clear_restore_sigmask();
3071 
3072 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
3073 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
3074 		sigaddset(&blocked, ksig->sig);
3075 	set_current_blocked(&blocked);
3076 	if (current->sas_ss_flags & SS_AUTODISARM)
3077 		sas_ss_reset(current);
3078 	if (stepping)
3079 		ptrace_notify(SIGTRAP, 0);
3080 }
3081 
3082 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
3083 {
3084 	if (failed)
3085 		force_sigsegv(ksig->sig);
3086 	else
3087 		signal_delivered(ksig, stepping);
3088 }
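
/*
 * Simplified sketch of the arch-side consumer of get_signal() and
 * signal_setup_done().  Details vary per architecture; setup_rt_frame()
 * stands in for the arch's real frame-setup routine, and syscall
 * restart handling is omitted:
 *
 *	void arch_do_signal_or_restart(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// Build the user-space signal frame, then update
 *			// the blocked mask and notify ptrace if stepping.
 *			int failed = setup_rt_frame(&ksig, regs);
 *
 *			signal_setup_done(failed, &ksig,
 *					  test_thread_flag(TIF_SINGLESTEP));
 *			return;
 *		}
 *
 *		// No handler to run: put back the mask a syscall such as
 *		// ppoll() may have temporarily installed.
 *		restore_saved_sigmask();
 *	}
 */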
3089 
3090 /*
3091  * It could be that complete_signal() picked us to notify about the
3092  * group-wide signal. Other threads should be notified now to take
3093  * the shared signals in @which since we will not.
3094  */
3095 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
3096 {
3097 	sigset_t retarget;
3098 	struct task_struct *t;
3099 
3100 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
3101 	if (sigisemptyset(&retarget))
3102 		return;
3103 
3104 	for_other_threads(tsk, t) {
3105 		if (t->flags & PF_EXITING)
3106 			continue;
3107 
3108 		if (!has_pending_signals(&retarget, &t->blocked))
3109 			continue;
3110 		/* Remove the signals this thread can handle. */
3111 		sigandsets(&retarget, &retarget, &t->blocked);
3112 
3113 		if (!task_sigpending(t))
3114 			signal_wake_up(t, 0);
3115 
3116 		if (sigisemptyset(&retarget))
3117 			break;
3118 	}
3119 }
3120 
3121 void exit_signals(struct task_struct *tsk)
3122 {
3123 	int group_stop = 0;
3124 	sigset_t unblocked;
3125 
3126 	/*
3127 	 * @tsk is about to have PF_EXITING set - lock out users which
3128 	 * expect stable threadgroup.
3129 	 */
3130 	cgroup_threadgroup_change_begin(tsk);
3131 
3132 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3133 		tsk->flags |= PF_EXITING;
3134 		cgroup_threadgroup_change_end(tsk);
3135 		return;
3136 	}
3137 
3138 	spin_lock_irq(&tsk->sighand->siglock);
3139 	/*
3140 	 * From now this task is not visible for group-wide signals,
3141 	 * see wants_signal(), do_signal_stop().
3142 	 */
3143 	tsk->flags |= PF_EXITING;
3144 
3145 	cgroup_threadgroup_change_end(tsk);
3146 
3147 	if (!task_sigpending(tsk))
3148 		goto out;
3149 
3150 	unblocked = tsk->blocked;
3151 	signotset(&unblocked);
3152 	retarget_shared_pending(tsk, &unblocked);
3153 
3154 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3155 	    task_participate_group_stop(tsk))
3156 		group_stop = CLD_STOPPED;
3157 out:
3158 	spin_unlock_irq(&tsk->sighand->siglock);
3159 
3160 	/*
3161 	 * If group stop has completed, deliver the notification.  This
3162 	 * should always go to the real parent of the group leader.
3163 	 */
3164 	if (unlikely(group_stop)) {
3165 		read_lock(&tasklist_lock);
3166 		do_notify_parent_cldstop(tsk, false, group_stop);
3167 		read_unlock(&tasklist_lock);
3168 	}
3169 }
3170 
3171 /*
3172  * System call entry points.
3173  */
3174 
3175 /**
3176  *  sys_restart_syscall - restart a system call
3177  */
3178 SYSCALL_DEFINE0(restart_syscall)
3179 {
3180 	struct restart_block *restart = &current->restart_block;
3181 	return restart->fn(restart);
3182 }
3183 
3184 long do_no_restart_syscall(struct restart_block *param)
3185 {
3186 	return -EINTR;
3187 }
3188 
3189 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3190 {
3191 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3192 		sigset_t newblocked;
3193 		/* A set of now blocked but previously unblocked signals. */
3194 		sigandnsets(&newblocked, newset, &current->blocked);
3195 		retarget_shared_pending(tsk, &newblocked);
3196 	}
3197 	tsk->blocked = *newset;
3198 	recalc_sigpending();
3199 }
3200 
3201 /**
3202  * set_current_blocked - change current->blocked mask
3203  * @newset: new mask
3204  *
3205  * It is wrong to change ->blocked directly; this helper should be used
3206  * to ensure the process can't miss a shared signal we are going to block.
3207  */
3208 void set_current_blocked(sigset_t *newset)
3209 {
3210 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3211 	__set_current_blocked(newset);
3212 }
3213 
3214 void __set_current_blocked(const sigset_t *newset)
3215 {
3216 	struct task_struct *tsk = current;
3217 
3218 	/*
3219 	 * If the signal mask hasn't changed, there is nothing we need
3220 	 * to do. current->blocked shouldn't be modified by other tasks.
3221 	 */
3222 	if (sigequalsets(&tsk->blocked, newset))
3223 		return;
3224 
3225 	spin_lock_irq(&tsk->sighand->siglock);
3226 	__set_task_blocked(tsk, newset);
3227 	spin_unlock_irq(&tsk->sighand->siglock);
3228 }
3229 
3230 /*
3231  * This is also useful for kernel threads that want to temporarily
3232  * (or permanently) block certain signals.
3233  *
3234  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3235  * interface happily blocks "unblockable" signals like SIGKILL
3236  * and friends.
3237  */
3238 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3239 {
3240 	struct task_struct *tsk = current;
3241 	sigset_t newset;
3242 
3243 	/* Lockless, only current can change ->blocked, never from irq */
3244 	if (oldset)
3245 		*oldset = tsk->blocked;
3246 
3247 	switch (how) {
3248 	case SIG_BLOCK:
3249 		sigorsets(&newset, &tsk->blocked, set);
3250 		break;
3251 	case SIG_UNBLOCK:
3252 		sigandnsets(&newset, &tsk->blocked, set);
3253 		break;
3254 	case SIG_SETMASK:
3255 		newset = *set;
3256 		break;
3257 	default:
3258 		return -EINVAL;
3259 	}
3260 
3261 	__set_current_blocked(&newset);
3262 	return 0;
3263 }
3264 EXPORT_SYMBOL(sigprocmask);
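
/*
 * In-kernel usage sketch: a kernel thread that must not be disturbed
 * by signals can block everything up front.  Note that, per the
 * comment above, this also blocks SIGKILL and SIGSTOP:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */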
3265 
3266 /*
3267  * This API helps syscalls install application-provided sigmasks.
3268  *
3269  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3270  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3271  *
3272  * Note that it does set_restore_sigmask() in advance, so it must always be
3273  * paired with restore_saved_sigmask_unless() before return from syscall.
3274  */
3275 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3276 {
3277 	sigset_t kmask;
3278 
3279 	if (!umask)
3280 		return 0;
3281 	if (sigsetsize != sizeof(sigset_t))
3282 		return -EINVAL;
3283 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3284 		return -EFAULT;
3285 
3286 	set_restore_sigmask();
3287 	current->saved_sigmask = current->blocked;
3288 	set_current_blocked(&kmask);
3289 
3290 	return 0;
3291 }
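
/*
 * Pairing sketch, simplified from the ppoll() flow in fs/select.c
 * (error paths and timeout setup elided):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_sys_poll(ufds, nfds, to);
 *
 *	// If interrupted, signal delivery restores the saved mask after
 *	// saving it on the signal frame; otherwise restore it here.
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */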
3292 
3293 #ifdef CONFIG_COMPAT
3294 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3295 			    size_t sigsetsize)
3296 {
3297 	sigset_t kmask;
3298 
3299 	if (!umask)
3300 		return 0;
3301 	if (sigsetsize != sizeof(compat_sigset_t))
3302 		return -EINVAL;
3303 	if (get_compat_sigset(&kmask, umask))
3304 		return -EFAULT;
3305 
3306 	set_restore_sigmask();
3307 	current->saved_sigmask = current->blocked;
3308 	set_current_blocked(&kmask);
3309 
3310 	return 0;
3311 }
3312 #endif
3313 
3314 /**
3315  *  sys_rt_sigprocmask - change the list of currently blocked signals
3316  *  @how: whether to add, remove, or set signals
3317  *  @nset: stores pending signals
3318  *  @oset: previous value of signal mask if non-null
3319  *  @sigsetsize: size of sigset_t type
3320  */
3321 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3322 		sigset_t __user *, oset, size_t, sigsetsize)
3323 {
3324 	sigset_t old_set, new_set;
3325 	int error;
3326 
3327 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3328 	if (sigsetsize != sizeof(sigset_t))
3329 		return -EINVAL;
3330 
3331 	old_set = current->blocked;
3332 
3333 	if (nset) {
3334 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3335 			return -EFAULT;
3336 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3337 
3338 		error = sigprocmask(how, &new_set, NULL);
3339 		if (error)
3340 			return error;
3341 	}
3342 
3343 	if (oset) {
3344 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3345 			return -EFAULT;
3346 	}
3347 
3348 	return 0;
3349 }
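
/*
 * Illustrative sketch (user space): the libc sigprocmask() wrapper
 * ends up in the syscall above.  Blocking a signal around a critical
 * region; do_critical_work() is hypothetical:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &old);   // SIGINT now held pending
 *	do_critical_work();
 *	sigprocmask(SIG_SETMASK, &old, NULL);   // pending SIGINT delivered
 */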
3350 
3351 #ifdef CONFIG_COMPAT
3352 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3353 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3354 {
3355 	sigset_t old_set = current->blocked;
3356 
3357 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3358 	if (sigsetsize != sizeof(sigset_t))
3359 		return -EINVAL;
3360 
3361 	if (nset) {
3362 		sigset_t new_set;
3363 		int error;
3364 		if (get_compat_sigset(&new_set, nset))
3365 			return -EFAULT;
3366 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3367 
3368 		error = sigprocmask(how, &new_set, NULL);
3369 		if (error)
3370 			return error;
3371 	}
3372 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3373 }
3374 #endif
3375 
3376 static void do_sigpending(sigset_t *set)
3377 {
3378 	spin_lock_irq(&current->sighand->siglock);
3379 	sigorsets(set, &current->pending.signal,
3380 		  &current->signal->shared_pending.signal);
3381 	spin_unlock_irq(&current->sighand->siglock);
3382 
3383 	/* Outside the lock because only this thread touches it.  */
3384 	sigandsets(set, &current->blocked, set);
3385 }
3386 
3387 /**
3388  *  sys_rt_sigpending - examine pending signals that have been raised
3389  *			while blocked
3390  *  @uset: where to store the set of pending signals
3391  *  @sigsetsize: size of sigset_t type or smaller
3392  */
3393 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3394 {
3395 	sigset_t set;
3396 
3397 	if (sigsetsize > sizeof(*uset))
3398 		return -EINVAL;
3399 
3400 	do_sigpending(&set);
3401 
3402 	if (copy_to_user(uset, &set, sigsetsize))
3403 		return -EFAULT;
3404 
3405 	return 0;
3406 }
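
/*
 * Illustrative sketch (user space): checking via the libc sigpending()
 * wrapper whether a blocked SIGINT arrived while it was masked;
 * handle_deferred_interrupt() is hypothetical:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		handle_deferred_interrupt();
 */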
3407 
3408 #ifdef CONFIG_COMPAT
3409 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3410 		compat_size_t, sigsetsize)
3411 {
3412 	sigset_t set;
3413 
3414 	if (sigsetsize > sizeof(*uset))
3415 		return -EINVAL;
3416 
3417 	do_sigpending(&set);
3418 
3419 	return put_compat_sigset(uset, &set, sigsetsize);
3420 }
3421 #endif
3422 
3423 static const struct {
3424 	unsigned char limit, layout;
3425 } sig_sicodes[] = {
3426 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3427 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3428 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3429 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3430 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3431 #if defined(SIGEMT)
3432 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3433 #endif
3434 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3435 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3436 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3437 };
3438 
3439 static bool known_siginfo_layout(unsigned sig, int si_code)
3440 {
3441 	if (si_code == SI_KERNEL)
3442 		return true;
3443 	else if (si_code > SI_USER) {
3444 		if (sig_specific_sicodes(sig)) {
3445 			if (si_code <= sig_sicodes[sig].limit)
3446 				return true;
3447 		}
3448 		else if (si_code <= NSIGPOLL)
3449 			return true;
3450 	}
3451 	else if (si_code >= SI_DETHREAD)
3452 		return true;
3453 	else if (si_code == SI_ASYNCNL)
3454 		return true;
3455 	return false;
3456 }
3457 
3458 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3459 {
3460 	enum siginfo_layout layout = SIL_KILL;
3461 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3462 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3463 		    (si_code <= sig_sicodes[sig].limit)) {
3464 			layout = sig_sicodes[sig].layout;
3465 			/* Handle the exceptions */
3466 			if ((sig == SIGBUS) &&
3467 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3468 				layout = SIL_FAULT_MCEERR;
3469 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3470 				layout = SIL_FAULT_BNDERR;
3471 #ifdef SEGV_PKUERR
3472 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3473 				layout = SIL_FAULT_PKUERR;
3474 #endif
3475 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3476 				layout = SIL_FAULT_PERF_EVENT;
3477 			else if (IS_ENABLED(CONFIG_SPARC) &&
3478 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3479 				layout = SIL_FAULT_TRAPNO;
3480 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3481 				 ((sig == SIGFPE) ||
3482 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3483 				layout = SIL_FAULT_TRAPNO;
3484 		}
3485 		else if (si_code <= NSIGPOLL)
3486 			layout = SIL_POLL;
3487 	} else {
3488 		if (si_code == SI_TIMER)
3489 			layout = SIL_TIMER;
3490 		else if (si_code == SI_SIGIO)
3491 			layout = SIL_POLL;
3492 		else if (si_code < 0)
3493 			layout = SIL_RT;
3494 	}
3495 	return layout;
3496 }
3497 
3498 static inline char __user *si_expansion(const siginfo_t __user *info)
3499 {
3500 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3501 }
3502 
3503 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3504 {
3505 	char __user *expansion = si_expansion(to);
3506 	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3507 		return -EFAULT;
3508 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3509 		return -EFAULT;
3510 	return 0;
3511 }
3512 
3513 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3514 				       const siginfo_t __user *from)
3515 {
3516 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3517 		char __user *expansion = si_expansion(from);
3518 		char buf[SI_EXPANSION_SIZE];
3519 		int i;
3520 		/*
3521 		 * An unknown si_code might need more than
3522 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3523 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3524 		 * will return this data to userspace exactly.
3525 		 */
3526 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3527 			return -EFAULT;
3528 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3529 			if (buf[i] != 0)
3530 				return -E2BIG;
3531 		}
3532 	}
3533 	return 0;
3534 }
3535 
3536 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3537 				    const siginfo_t __user *from)
3538 {
3539 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3540 		return -EFAULT;
3541 	to->si_signo = signo;
3542 	return post_copy_siginfo_from_user(to, from);
3543 }
3544 
3545 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3546 {
3547 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3548 		return -EFAULT;
3549 	return post_copy_siginfo_from_user(to, from);
3550 }
3551 
3552 #ifdef CONFIG_COMPAT
3553 /**
3554  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3555  * @to: compat siginfo destination
3556  * @from: kernel siginfo source
3557  *
3558  * Note: This function does not work properly for SIGCHLD on x32, but
3559  * fortunately it doesn't have to.  The only valid callers for this function
3560  * are copy_siginfo_to_user32, which is overridden for x32, and the coredump
3561  * code.  The latter does not care because SIGCHLD will never cause a coredump.
3562  */
3563 void copy_siginfo_to_external32(struct compat_siginfo *to,
3564 		const struct kernel_siginfo *from)
3565 {
3566 	memset(to, 0, sizeof(*to));
3567 
3568 	to->si_signo = from->si_signo;
3569 	to->si_errno = from->si_errno;
3570 	to->si_code  = from->si_code;
3571 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3572 	case SIL_KILL:
3573 		to->si_pid = from->si_pid;
3574 		to->si_uid = from->si_uid;
3575 		break;
3576 	case SIL_TIMER:
3577 		to->si_tid     = from->si_tid;
3578 		to->si_overrun = from->si_overrun;
3579 		to->si_int     = from->si_int;
3580 		break;
3581 	case SIL_POLL:
3582 		to->si_band = from->si_band;
3583 		to->si_fd   = from->si_fd;
3584 		break;
3585 	case SIL_FAULT:
3586 		to->si_addr = ptr_to_compat(from->si_addr);
3587 		break;
3588 	case SIL_FAULT_TRAPNO:
3589 		to->si_addr = ptr_to_compat(from->si_addr);
3590 		to->si_trapno = from->si_trapno;
3591 		break;
3592 	case SIL_FAULT_MCEERR:
3593 		to->si_addr = ptr_to_compat(from->si_addr);
3594 		to->si_addr_lsb = from->si_addr_lsb;
3595 		break;
3596 	case SIL_FAULT_BNDERR:
3597 		to->si_addr = ptr_to_compat(from->si_addr);
3598 		to->si_lower = ptr_to_compat(from->si_lower);
3599 		to->si_upper = ptr_to_compat(from->si_upper);
3600 		break;
3601 	case SIL_FAULT_PKUERR:
3602 		to->si_addr = ptr_to_compat(from->si_addr);
3603 		to->si_pkey = from->si_pkey;
3604 		break;
3605 	case SIL_FAULT_PERF_EVENT:
3606 		to->si_addr = ptr_to_compat(from->si_addr);
3607 		to->si_perf_data = from->si_perf_data;
3608 		to->si_perf_type = from->si_perf_type;
3609 		to->si_perf_flags = from->si_perf_flags;
3610 		break;
3611 	case SIL_CHLD:
3612 		to->si_pid = from->si_pid;
3613 		to->si_uid = from->si_uid;
3614 		to->si_status = from->si_status;
3615 		to->si_utime = from->si_utime;
3616 		to->si_stime = from->si_stime;
3617 		break;
3618 	case SIL_RT:
3619 		to->si_pid = from->si_pid;
3620 		to->si_uid = from->si_uid;
3621 		to->si_int = from->si_int;
3622 		break;
3623 	case SIL_SYS:
3624 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3625 		to->si_syscall   = from->si_syscall;
3626 		to->si_arch      = from->si_arch;
3627 		break;
3628 	}
3629 }
3630 
3631 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3632 			   const struct kernel_siginfo *from)
3633 {
3634 	struct compat_siginfo new;
3635 
3636 	copy_siginfo_to_external32(&new, from);
3637 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3638 		return -EFAULT;
3639 	return 0;
3640 }
3641 
3642 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3643 					 const struct compat_siginfo *from)
3644 {
3645 	clear_siginfo(to);
3646 	to->si_signo = from->si_signo;
3647 	to->si_errno = from->si_errno;
3648 	to->si_code  = from->si_code;
3649 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3650 	case SIL_KILL:
3651 		to->si_pid = from->si_pid;
3652 		to->si_uid = from->si_uid;
3653 		break;
3654 	case SIL_TIMER:
3655 		to->si_tid     = from->si_tid;
3656 		to->si_overrun = from->si_overrun;
3657 		to->si_int     = from->si_int;
3658 		break;
3659 	case SIL_POLL:
3660 		to->si_band = from->si_band;
3661 		to->si_fd   = from->si_fd;
3662 		break;
3663 	case SIL_FAULT:
3664 		to->si_addr = compat_ptr(from->si_addr);
3665 		break;
3666 	case SIL_FAULT_TRAPNO:
3667 		to->si_addr = compat_ptr(from->si_addr);
3668 		to->si_trapno = from->si_trapno;
3669 		break;
3670 	case SIL_FAULT_MCEERR:
3671 		to->si_addr = compat_ptr(from->si_addr);
3672 		to->si_addr_lsb = from->si_addr_lsb;
3673 		break;
3674 	case SIL_FAULT_BNDERR:
3675 		to->si_addr = compat_ptr(from->si_addr);
3676 		to->si_lower = compat_ptr(from->si_lower);
3677 		to->si_upper = compat_ptr(from->si_upper);
3678 		break;
3679 	case SIL_FAULT_PKUERR:
3680 		to->si_addr = compat_ptr(from->si_addr);
3681 		to->si_pkey = from->si_pkey;
3682 		break;
3683 	case SIL_FAULT_PERF_EVENT:
3684 		to->si_addr = compat_ptr(from->si_addr);
3685 		to->si_perf_data = from->si_perf_data;
3686 		to->si_perf_type = from->si_perf_type;
3687 		to->si_perf_flags = from->si_perf_flags;
3688 		break;
3689 	case SIL_CHLD:
3690 		to->si_pid    = from->si_pid;
3691 		to->si_uid    = from->si_uid;
3692 		to->si_status = from->si_status;
3693 #ifdef CONFIG_X86_X32_ABI
3694 		if (in_x32_syscall()) {
3695 			to->si_utime = from->_sifields._sigchld_x32._utime;
3696 			to->si_stime = from->_sifields._sigchld_x32._stime;
3697 		} else
3698 #endif
3699 		{
3700 			to->si_utime = from->si_utime;
3701 			to->si_stime = from->si_stime;
3702 		}
3703 		break;
3704 	case SIL_RT:
3705 		to->si_pid = from->si_pid;
3706 		to->si_uid = from->si_uid;
3707 		to->si_int = from->si_int;
3708 		break;
3709 	case SIL_SYS:
3710 		to->si_call_addr = compat_ptr(from->si_call_addr);
3711 		to->si_syscall   = from->si_syscall;
3712 		to->si_arch      = from->si_arch;
3713 		break;
3714 	}
3715 	return 0;
3716 }
3717 
3718 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3719 				      const struct compat_siginfo __user *ufrom)
3720 {
3721 	struct compat_siginfo from;
3722 
3723 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3724 		return -EFAULT;
3725 
3726 	from.si_signo = signo;
3727 	return post_copy_siginfo_from_user32(to, &from);
3728 }
3729 
3730 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3731 			     const struct compat_siginfo __user *ufrom)
3732 {
3733 	struct compat_siginfo from;
3734 
3735 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3736 		return -EFAULT;
3737 
3738 	return post_copy_siginfo_from_user32(to, &from);
3739 }
3740 #endif /* CONFIG_COMPAT */
3741 
3742 /**
3743  *  do_sigtimedwait - wait for queued signals specified in @which
3744  *  @which: queued signals to wait for
3745  *  @info: if non-null, the signal's siginfo is returned here
3746  *  @ts: upper bound on process time suspension
3747  */
3748 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3749 		    const struct timespec64 *ts)
3750 {
3751 	ktime_t *to = NULL, timeout = KTIME_MAX;
3752 	struct task_struct *tsk = current;
3753 	sigset_t mask = *which;
3754 	enum pid_type type;
3755 	int sig, ret = 0;
3756 
3757 	if (ts) {
3758 		if (!timespec64_valid(ts))
3759 			return -EINVAL;
3760 		timeout = timespec64_to_ktime(*ts);
3761 		to = &timeout;
3762 	}
3763 
3764 	/*
3765 	 * Invert the set of allowed signals to get those we want to block.
3766 	 */
3767 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3768 	signotset(&mask);
3769 
3770 	spin_lock_irq(&tsk->sighand->siglock);
3771 	sig = dequeue_signal(&mask, info, &type);
3772 	if (!sig && timeout) {
3773 		/*
3774 		 * None ready, temporarily unblock those we're interested in
3775 		 * while we are sleeping, so that we'll be awakened when
3776 		 * they arrive. Unblocking is always fine, we can avoid
3777 		 * set_current_blocked().
3778 		 */
3779 		tsk->real_blocked = tsk->blocked;
3780 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3781 		recalc_sigpending();
3782 		spin_unlock_irq(&tsk->sighand->siglock);
3783 
3784 		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3785 		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3786 					       HRTIMER_MODE_REL);
3787 		spin_lock_irq(&tsk->sighand->siglock);
3788 		__set_task_blocked(tsk, &tsk->real_blocked);
3789 		sigemptyset(&tsk->real_blocked);
3790 		sig = dequeue_signal(&mask, info, &type);
3791 	}
3792 	spin_unlock_irq(&tsk->sighand->siglock);
3793 
3794 	if (sig)
3795 		return sig;
3796 	return ret ? -EINTR : -EAGAIN;
3797 }
3798 
3799 /**
3800  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3801  *			in @uthese
3802  *  @uthese: queued signals to wait for
3803  *  @uinfo: if non-null, the signal's siginfo is returned here
3804  *  @uts: upper bound on process time suspension
3805  *  @sigsetsize: size of sigset_t type
3806  */
3807 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3808 		siginfo_t __user *, uinfo,
3809 		const struct __kernel_timespec __user *, uts,
3810 		size_t, sigsetsize)
3811 {
3812 	sigset_t these;
3813 	struct timespec64 ts;
3814 	kernel_siginfo_t info;
3815 	int ret;
3816 
3817 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3818 	if (sigsetsize != sizeof(sigset_t))
3819 		return -EINVAL;
3820 
3821 	if (copy_from_user(&these, uthese, sizeof(these)))
3822 		return -EFAULT;
3823 
3824 	if (uts) {
3825 		if (get_timespec64(&ts, uts))
3826 			return -EFAULT;
3827 	}
3828 
3829 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3830 
3831 	if (ret > 0 && uinfo) {
3832 		if (copy_siginfo_to_user(uinfo, &info))
3833 			ret = -EFAULT;
3834 	}
3835 
3836 	return ret;
3837 }
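
/*
 * Illustrative userspace sketch (not kernel code; uses glibc's
 * sigtimedwait() wrapper, which issues rt_sigtimedwait): block SIGUSR1,
 * then accept it synchronously with a five second timeout.  A timeout
 * surfaces as -1/EAGAIN, matching the -EAGAIN return above.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *
 *		if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *			printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 *		else
 *			perror("sigtimedwait");		// EAGAIN on timeout
 *		return 0;
 *	}
 */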
3838 
3839 #ifdef CONFIG_COMPAT_32BIT_TIME
3840 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3841 		siginfo_t __user *, uinfo,
3842 		const struct old_timespec32 __user *, uts,
3843 		size_t, sigsetsize)
3844 {
3845 	sigset_t these;
3846 	struct timespec64 ts;
3847 	kernel_siginfo_t info;
3848 	int ret;
3849 
3850 	if (sigsetsize != sizeof(sigset_t))
3851 		return -EINVAL;
3852 
3853 	if (copy_from_user(&these, uthese, sizeof(these)))
3854 		return -EFAULT;
3855 
3856 	if (uts) {
3857 		if (get_old_timespec32(&ts, uts))
3858 			return -EFAULT;
3859 	}
3860 
3861 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3862 
3863 	if (ret > 0 && uinfo) {
3864 		if (copy_siginfo_to_user(uinfo, &info))
3865 			ret = -EFAULT;
3866 	}
3867 
3868 	return ret;
3869 }
3870 #endif
3871 
3872 #ifdef CONFIG_COMPAT
3873 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3874 		struct compat_siginfo __user *, uinfo,
3875 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3876 {
3877 	sigset_t s;
3878 	struct timespec64 t;
3879 	kernel_siginfo_t info;
3880 	long ret;
3881 
3882 	if (sigsetsize != sizeof(sigset_t))
3883 		return -EINVAL;
3884 
3885 	if (get_compat_sigset(&s, uthese))
3886 		return -EFAULT;
3887 
3888 	if (uts) {
3889 		if (get_timespec64(&t, uts))
3890 			return -EFAULT;
3891 	}
3892 
3893 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3894 
3895 	if (ret > 0 && uinfo) {
3896 		if (copy_siginfo_to_user32(uinfo, &info))
3897 			ret = -EFAULT;
3898 	}
3899 
3900 	return ret;
3901 }
3902 
3903 #ifdef CONFIG_COMPAT_32BIT_TIME
3904 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3905 		struct compat_siginfo __user *, uinfo,
3906 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3907 {
3908 	sigset_t s;
3909 	struct timespec64 t;
3910 	kernel_siginfo_t info;
3911 	long ret;
3912 
3913 	if (sigsetsize != sizeof(sigset_t))
3914 		return -EINVAL;
3915 
3916 	if (get_compat_sigset(&s, uthese))
3917 		return -EFAULT;
3918 
3919 	if (uts) {
3920 		if (get_old_timespec32(&t, uts))
3921 			return -EFAULT;
3922 	}
3923 
3924 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3925 
3926 	if (ret > 0 && uinfo) {
3927 		if (copy_siginfo_to_user32(uinfo, &info))
3928 			ret = -EFAULT;
3929 	}
3930 
3931 	return ret;
3932 }
3933 #endif
3934 #endif
3935 
3936 static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
3937 				 enum pid_type type)
3938 {
3939 	clear_siginfo(info);
3940 	info->si_signo = sig;
3941 	info->si_errno = 0;
3942 	info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
3943 	info->si_pid = task_tgid_vnr(current);
3944 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3945 }
3946 
3947 /**
3948  *  sys_kill - send a signal to a process
3949  *  @pid: the PID of the process
3950  *  @sig: signal to be sent
3951  */
3952 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3953 {
3954 	struct kernel_siginfo info;
3955 
3956 	prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
3957 
3958 	return kill_something_info(sig, &info, pid);
3959 }
3960 
3961 /*
3962  * Verify that the signaler and signalee either are in the same pid namespace
3963  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3964  * namespace.
3965  */
3966 static bool access_pidfd_pidns(struct pid *pid)
3967 {
3968 	struct pid_namespace *active = task_active_pid_ns(current);
3969 	struct pid_namespace *p = ns_of_pid(pid);
3970 
3971 	for (;;) {
3972 		if (!p)
3973 			return false;
3974 		if (p == active)
3975 			break;
3976 		p = p->parent;
3977 	}
3978 
3979 	return true;
3980 }
3981 
3982 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3983 		siginfo_t __user *info)
3984 {
3985 #ifdef CONFIG_COMPAT
3986 	/*
3987 	 * Avoid hooking up compat syscalls and instead handle necessary
3988 	 * conversions here. Note, this is a stop-gap measure and should not be
3989 	 * considered a generic solution.
3990 	 */
3991 	if (in_compat_syscall())
3992 		return copy_siginfo_from_user32(
3993 			kinfo, (struct compat_siginfo __user *)info);
3994 #endif
3995 	return copy_siginfo_from_user(kinfo, info);
3996 }
3997 
3998 static struct pid *pidfd_to_pid(const struct file *file)
3999 {
4000 	struct pid *pid;
4001 
4002 	pid = pidfd_pid(file);
4003 	if (!IS_ERR(pid))
4004 		return pid;
4005 
4006 	return tgid_pidfd_to_pid(file);
4007 }
4008 
4009 #define PIDFD_SEND_SIGNAL_FLAGS                            \
4010 	(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
4011 	 PIDFD_SIGNAL_PROCESS_GROUP)
4012 
4013 static int do_pidfd_send_signal(struct pid *pid, int sig, enum pid_type type,
4014 				siginfo_t __user *info, unsigned int flags)
4015 {
4016 	kernel_siginfo_t kinfo;
4017 
4018 	switch (flags) {
4019 	case PIDFD_SIGNAL_THREAD:
4020 		type = PIDTYPE_PID;
4021 		break;
4022 	case PIDFD_SIGNAL_THREAD_GROUP:
4023 		type = PIDTYPE_TGID;
4024 		break;
4025 	case PIDFD_SIGNAL_PROCESS_GROUP:
4026 		type = PIDTYPE_PGID;
4027 		break;
4028 	}
4029 
4030 	if (info) {
4031 		int ret;
4032 
4033 		ret = copy_siginfo_from_user_any(&kinfo, info);
4034 		if (unlikely(ret))
4035 			return ret;
4036 
4037 		if (unlikely(sig != kinfo.si_signo))
4038 			return -EINVAL;
4039 
4040 		/* Only allow sending arbitrary signals to yourself. */
4041 		if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
4042 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
4043 			return -EPERM;
4044 	} else {
4045 		prepare_kill_siginfo(sig, &kinfo, type);
4046 	}
4047 
4048 	if (type == PIDTYPE_PGID)
4049 		return kill_pgrp_info(sig, &kinfo, pid);
4050 
4051 	return kill_pid_info_type(sig, &kinfo, pid, type);
4052 }
4053 
4054 /**
4055  * sys_pidfd_send_signal - Signal a process through a pidfd
4056  * @pidfd:  file descriptor of the process
4057  * @sig:    signal to send
4058  * @info:   signal info
4059  * @flags:  signal scope flags (PIDFD_SIGNAL_*), 0 for the default scope
4060  *
4061  * Send the signal to the thread group or to the individual thread depending
4062  * on PIDFD_THREAD.
4063  * In the future, extensions to @flags may be used to override the default
4064  * scope of @pidfd.
4065  *
4066  * Return: 0 on success, negative errno on failure
4067  */
4068 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
4069 		siginfo_t __user *, info, unsigned int, flags)
4070 {
4071 	struct pid *pid;
4072 	enum pid_type type;
4073 	int ret;
4074 
4075 	/* Reject any flag bits beyond the known signal scope flags. */
4076 	if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
4077 		return -EINVAL;
4078 
4079 	/* Ensure that at most one signal scope determining flag is set. */
4080 	if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
4081 		return -EINVAL;
4082 
4083 	switch (pidfd) {
4084 	case PIDFD_SELF_THREAD:
4085 		pid = get_task_pid(current, PIDTYPE_PID);
4086 		type = PIDTYPE_PID;
4087 		break;
4088 	case PIDFD_SELF_THREAD_GROUP:
4089 		pid = get_task_pid(current, PIDTYPE_TGID);
4090 		type = PIDTYPE_TGID;
4091 		break;
4092 	default: {
4093 		CLASS(fd, f)(pidfd);
4094 		if (fd_empty(f))
4095 			return -EBADF;
4096 
4097 		/* Is this a pidfd? */
4098 		pid = pidfd_to_pid(fd_file(f));
4099 		if (IS_ERR(pid))
4100 			return PTR_ERR(pid);
4101 
4102 		if (!access_pidfd_pidns(pid))
4103 			return -EINVAL;
4104 
4105 		/* Infer scope from the type of pidfd. */
4106 		if (fd_file(f)->f_flags & PIDFD_THREAD)
4107 			type = PIDTYPE_PID;
4108 		else
4109 			type = PIDTYPE_TGID;
4110 
4111 		return do_pidfd_send_signal(pid, sig, type, info, flags);
4112 	}
4113 	}
4114 
4115 	ret = do_pidfd_send_signal(pid, sig, type, info, flags);
4116 	put_pid(pid);
4117 
4118 	return ret;
4119 }
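
/*
 * Illustrative userspace sketch (not kernel code; raw syscalls, since
 * libc wrappers for pidfd_open()/pidfd_send_signal() may be missing):
 * signal a process through a pidfd, immune to PID reuse.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int send_sig_via_pidfd(pid_t pid, int sig)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// info == NULL: the kernel builds SI_USER siginfo, as in
 *		// prepare_kill_siginfo() above
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */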
4120 
4121 static int
4122 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
4123 {
4124 	struct task_struct *p;
4125 	int error = -ESRCH;
4126 
4127 	rcu_read_lock();
4128 	p = find_task_by_vpid(pid);
4129 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
4130 		error = check_kill_permission(sig, info, p);
4131 		/*
4132 		 * The null signal is a permissions and process existence
4133 		 * probe.  No signal is actually delivered.
4134 		 */
4135 		if (!error && sig) {
4136 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4137 			/*
4138 			 * If lock_task_sighand() failed we pretend the task
4139 			 * dies after receiving the signal. The window is tiny,
4140 			 * and the signal is private anyway.
4141 			 */
4142 			if (unlikely(error == -ESRCH))
4143 				error = 0;
4144 		}
4145 	}
4146 	rcu_read_unlock();
4147 
4148 	return error;
4149 }
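
/*
 * Illustrative userspace sketch (not kernel code) of the null-signal
 * probe described above: signal 0 runs the permission and existence
 * checks without delivering anything.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdbool.h>
 *
 *	static bool process_exists(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return true;		// exists and is signalable
 *		return errno == EPERM;		// exists, but not ours
 *	}
 */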
4150 
4151 static int do_tkill(pid_t tgid, pid_t pid, int sig)
4152 {
4153 	struct kernel_siginfo info;
4154 
4155 	prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
4156 
4157 	return do_send_specific(tgid, pid, sig, &info);
4158 }
4159 
4160 /**
4161  *  sys_tgkill - send signal to one specific thread
4162  *  @tgid: the thread group ID of the thread
4163  *  @pid: the PID of the thread
4164  *  @sig: signal to be sent
4165  *
4166  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
4167  *  exists but no longer belongs to the target process. This
4168  *  method solves the problem of threads exiting and PIDs getting reused.
4169  */
4170 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4171 {
4172 	/* This is only valid for single tasks */
4173 	if (pid <= 0 || tgid <= 0)
4174 		return -EINVAL;
4175 
4176 	return do_tkill(tgid, pid, sig);
4177 }
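
/*
 * Illustrative userspace sketch (not kernel code; glibc only gained a
 * tgkill() wrapper in 2.30, so the raw syscall is shown): direct a
 * signal at one thread of the current process.  The tgid argument is
 * what makes this safe against TID reuse, as noted above.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int signal_own_thread(pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, getpid(), tid, sig);
 *	}
 */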
4178 
4179 /**
4180  *  sys_tkill - send signal to one specific task
4181  *  @pid: the PID of the task
4182  *  @sig: signal to be sent
4183  *
4184  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4185  */
4186 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4187 {
4188 	/* This is only valid for single tasks */
4189 	if (pid <= 0)
4190 		return -EINVAL;
4191 
4192 	return do_tkill(0, pid, sig);
4193 }
4194 
4195 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4196 {
4197 	/* Not even root can pretend to send signals from the kernel.
4198 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4199 	 */
4200 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4201 	    (task_pid_vnr(current) != pid))
4202 		return -EPERM;
4203 
4204 	/* POSIX.1b doesn't mention process groups.  */
4205 	return kill_proc_info(sig, info, pid);
4206 }
4207 
4208 /**
4209  *  sys_rt_sigqueueinfo - send signal information to a process
4210  *  @pid: the PID of the process
4211  *  @sig: signal to be sent
4212  *  @uinfo: signal info to be sent
4213  */
4214 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4215 		siginfo_t __user *, uinfo)
4216 {
4217 	kernel_siginfo_t info;
4218 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4219 	if (unlikely(ret))
4220 		return ret;
4221 	return do_rt_sigqueueinfo(pid, sig, &info);
4222 }
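
/*
 * Illustrative userspace sketch (not kernel code): sigqueue(3) is the
 * usual front end for this syscall.  It passes SI_QUEUE (< 0) as
 * si_code, which is why the si_code >= 0 check above still lets
 * ordinary users queue signals to other processes.
 *
 *	#include <signal.h>
 *
 *	static int queue_value(pid_t pid, int sig, int value)
 *	{
 *		union sigval sv = { .sival_int = value };
 *
 *		return sigqueue(pid, sig, sv);	// arrives as info->si_int
 *	}
 */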
4223 
4224 #ifdef CONFIG_COMPAT
4225 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4226 			compat_pid_t, pid,
4227 			int, sig,
4228 			struct compat_siginfo __user *, uinfo)
4229 {
4230 	kernel_siginfo_t info;
4231 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4232 	if (unlikely(ret))
4233 		return ret;
4234 	return do_rt_sigqueueinfo(pid, sig, &info);
4235 }
4236 #endif
4237 
4238 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4239 {
4240 	/* This is only valid for single tasks */
4241 	if (pid <= 0 || tgid <= 0)
4242 		return -EINVAL;
4243 
4244 	/* Not even root can pretend to send signals from the kernel.
4245 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4246 	 */
4247 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4248 	    (task_pid_vnr(current) != pid))
4249 		return -EPERM;
4250 
4251 	return do_send_specific(tgid, pid, sig, info);
4252 }
4253 
4254 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4255 		siginfo_t __user *, uinfo)
4256 {
4257 	kernel_siginfo_t info;
4258 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4259 	if (unlikely(ret))
4260 		return ret;
4261 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4262 }
4263 
4264 #ifdef CONFIG_COMPAT
4265 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4266 			compat_pid_t, tgid,
4267 			compat_pid_t, pid,
4268 			int, sig,
4269 			struct compat_siginfo __user *, uinfo)
4270 {
4271 	kernel_siginfo_t info;
4272 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4273 	if (unlikely(ret))
4274 		return ret;
4275 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4276 }
4277 #endif
4278 
4279 /*
4280  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4281  */
4282 void kernel_sigaction(int sig, __sighandler_t action)
4283 {
4284 	spin_lock_irq(&current->sighand->siglock);
4285 	current->sighand->action[sig - 1].sa.sa_handler = action;
4286 	if (action == SIG_IGN) {
4287 		sigset_t mask;
4288 
4289 		sigemptyset(&mask);
4290 		sigaddset(&mask, sig);
4291 
4292 		flush_sigqueue_mask(current, &mask, &current->signal->shared_pending);
4293 		flush_sigqueue_mask(current, &mask, &current->pending);
4294 		recalc_sigpending();
4295 	}
4296 	spin_unlock_irq(&current->sighand->siglock);
4297 }
4298 EXPORT_SYMBOL(kernel_sigaction);
4299 
4300 void __weak sigaction_compat_abi(struct k_sigaction *act,
4301 		struct k_sigaction *oact)
4302 {
4303 }
4304 
4305 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4306 {
4307 	struct task_struct *p = current, *t;
4308 	struct k_sigaction *k;
4309 	sigset_t mask;
4310 
4311 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4312 		return -EINVAL;
4313 
4314 	k = &p->sighand->action[sig-1];
4315 
4316 	spin_lock_irq(&p->sighand->siglock);
4317 	if (k->sa.sa_flags & SA_IMMUTABLE) {
4318 		spin_unlock_irq(&p->sighand->siglock);
4319 		return -EINVAL;
4320 	}
4321 	if (oact)
4322 		*oact = *k;
4323 
4324 	/*
4325 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4326 	 * e.g. by having an architecture use the bit in their uapi.
4327 	 */
4328 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4329 
4330 	/*
4331 	 * Clear unknown flag bits in order to allow userspace to detect missing
4332 	 * support for flag bits and to allow the kernel to use non-uapi bits
4333 	 * internally.
4334 	 */
4335 	if (act)
4336 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4337 	if (oact)
4338 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4339 
4340 	sigaction_compat_abi(act, oact);
4341 
4342 	if (act) {
4343 		bool was_ignored = k->sa.sa_handler == SIG_IGN;
4344 
4345 		sigdelsetmask(&act->sa.sa_mask,
4346 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4347 		*k = *act;
4348 		/*
4349 		 * POSIX 3.3.1.3:
4350 		 *  "Setting a signal action to SIG_IGN for a signal that is
4351 		 *   pending shall cause the pending signal to be discarded,
4352 		 *   whether or not it is blocked."
4353 		 *
4354 		 *  "Setting a signal action to SIG_DFL for a signal that is
4355 		 *   pending and whose default action is to ignore the signal
4356 		 *   (for example, SIGCHLD), shall cause the pending signal to
4357 		 *   be discarded, whether or not it is blocked"
4358 		 */
4359 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4360 			sigemptyset(&mask);
4361 			sigaddset(&mask, sig);
4362 			flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
4363 			for_each_thread(p, t)
4364 				flush_sigqueue_mask(p, &mask, &t->pending);
4365 		} else if (was_ignored) {
4366 			posixtimer_sig_unignore(p, sig);
4367 		}
4368 	}
4369 
4370 	spin_unlock_irq(&p->sighand->siglock);
4371 	return 0;
4372 }
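
/*
 * Illustrative userspace sketch (not kernel code) of the POSIX discard
 * rule quoted above: a pending, blocked signal vanishes once its action
 * becomes SIG_IGN.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, pending;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);			// now pending and blocked
 *
 *		signal(SIGUSR1, SIG_IGN);	// flushes the pending signal
 *		sigpending(&pending);
 *		printf("%d\n", sigismember(&pending, SIGUSR1));	// prints 0
 *		return 0;
 *	}
 */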
4373 
4374 #ifdef CONFIG_DYNAMIC_SIGFRAME
4375 static inline void sigaltstack_lock(void)
4376 	__acquires(&current->sighand->siglock)
4377 {
4378 	spin_lock_irq(&current->sighand->siglock);
4379 }
4380 
4381 static inline void sigaltstack_unlock(void)
4382 	__releases(&current->sighand->siglock)
4383 {
4384 	spin_unlock_irq(&current->sighand->siglock);
4385 }
4386 #else
4387 static inline void sigaltstack_lock(void) { }
4388 static inline void sigaltstack_unlock(void) { }
4389 #endif
4390 
4391 static int
4392 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4393 		size_t min_ss_size)
4394 {
4395 	struct task_struct *t = current;
4396 	int ret = 0;
4397 
4398 	if (oss) {
4399 		memset(oss, 0, sizeof(stack_t));
4400 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4401 		oss->ss_size = t->sas_ss_size;
4402 		oss->ss_flags = sas_ss_flags(sp) |
4403 			(current->sas_ss_flags & SS_FLAG_BITS);
4404 	}
4405 
4406 	if (ss) {
4407 		void __user *ss_sp = ss->ss_sp;
4408 		size_t ss_size = ss->ss_size;
4409 		unsigned ss_flags = ss->ss_flags;
4410 		int ss_mode;
4411 
4412 		if (unlikely(on_sig_stack(sp)))
4413 			return -EPERM;
4414 
4415 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4416 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4417 				ss_mode != 0))
4418 			return -EINVAL;
4419 
4420 		/*
4421 		 * Return before taking any locks if no actual
4422 		 * sigaltstack changes were requested.
4423 		 */
4424 		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4425 		    t->sas_ss_size == ss_size &&
4426 		    t->sas_ss_flags == ss_flags)
4427 			return 0;
4428 
4429 		sigaltstack_lock();
4430 		if (ss_mode == SS_DISABLE) {
4431 			ss_size = 0;
4432 			ss_sp = NULL;
4433 		} else {
4434 			if (unlikely(ss_size < min_ss_size))
4435 				ret = -ENOMEM;
4436 			if (!sigaltstack_size_valid(ss_size))
4437 				ret = -ENOMEM;
4438 		}
4439 		if (!ret) {
4440 			t->sas_ss_sp = (unsigned long) ss_sp;
4441 			t->sas_ss_size = ss_size;
4442 			t->sas_ss_flags = ss_flags;
4443 		}
4444 		sigaltstack_unlock();
4445 	}
4446 	return ret;
4447 }
4448 
4449 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4450 {
4451 	stack_t new, old;
4452 	int err;
4453 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4454 		return -EFAULT;
4455 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4456 			      current_user_stack_pointer(),
4457 			      MINSIGSTKSZ);
4458 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4459 		err = -EFAULT;
4460 	return err;
4461 }
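
/*
 * Illustrative userspace sketch (not kernel code): give SIGSEGV a
 * private stack via sigaltstack() so its handler survives a main-stack
 * overflow; SA_ONSTACK is what routes delivery onto it.  Error checking
 * is omitted for brevity.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void use_altstack(void (*handler)(int, siginfo_t *, void *))
 *	{
 *		stack_t ss = {
 *			.ss_sp    = malloc(SIGSTKSZ),
 *			.ss_size  = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_sigaction = handler,
 *			.sa_flags     = SA_SIGINFO | SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */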
4462 
4463 int restore_altstack(const stack_t __user *uss)
4464 {
4465 	stack_t new;
4466 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4467 		return -EFAULT;
4468 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4469 			     MINSIGSTKSZ);
4470 	/* squash all but EFAULT for now */
4471 	return 0;
4472 }
4473 
4474 int __save_altstack(stack_t __user *uss, unsigned long sp)
4475 {
4476 	struct task_struct *t = current;
4477 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4478 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4479 		__put_user(t->sas_ss_size, &uss->ss_size);
4480 	return err;
4481 }
4482 
4483 #ifdef CONFIG_COMPAT
4484 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4485 				 compat_stack_t __user *uoss_ptr)
4486 {
4487 	stack_t uss, uoss;
4488 	int ret;
4489 
4490 	if (uss_ptr) {
4491 		compat_stack_t uss32;
4492 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4493 			return -EFAULT;
4494 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4495 		uss.ss_flags = uss32.ss_flags;
4496 		uss.ss_size = uss32.ss_size;
4497 	}
4498 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4499 			     compat_user_stack_pointer(),
4500 			     COMPAT_MINSIGSTKSZ);
4501 	if (ret >= 0 && uoss_ptr)  {
4502 		compat_stack_t old;
4503 		memset(&old, 0, sizeof(old));
4504 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4505 		old.ss_flags = uoss.ss_flags;
4506 		old.ss_size = uoss.ss_size;
4507 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4508 			ret = -EFAULT;
4509 	}
4510 	return ret;
4511 }
4512 
4513 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4514 			const compat_stack_t __user *, uss_ptr,
4515 			compat_stack_t __user *, uoss_ptr)
4516 {
4517 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4518 }
4519 
4520 int compat_restore_altstack(const compat_stack_t __user *uss)
4521 {
4522 	int err = do_compat_sigaltstack(uss, NULL);
4523 	/* squash all but -EFAULT for now */
4524 	return err == -EFAULT ? err : 0;
4525 }
4526 
4527 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4528 {
4529 	int err;
4530 	struct task_struct *t = current;
4531 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4532 			 &uss->ss_sp) |
4533 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4534 		__put_user(t->sas_ss_size, &uss->ss_size);
4535 	return err;
4536 }
4537 #endif
4538 
4539 #ifdef __ARCH_WANT_SYS_SIGPENDING
4540 
4541 /**
4542  *  sys_sigpending - examine pending signals
4543  *  @uset: where the mask of pending signals is returned
4544  */
4545 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4546 {
4547 	sigset_t set;
4548 
4549 	if (sizeof(old_sigset_t) > sizeof(*uset))
4550 		return -EINVAL;
4551 
4552 	do_sigpending(&set);
4553 
4554 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4555 		return -EFAULT;
4556 
4557 	return 0;
4558 }
4559 
4560 #ifdef CONFIG_COMPAT
4561 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4562 {
4563 	sigset_t set;
4564 
4565 	do_sigpending(&set);
4566 
4567 	return put_user(set.sig[0], set32);
4568 }
4569 #endif
4570 
4571 #endif
4572 
4573 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4574 /**
4575  *  sys_sigprocmask - examine and change blocked signals
4576  *  @how: whether to add, remove, or set signals
4577  *  @nset: signals to add or remove (if non-null)
4578  *  @oset: previous value of signal mask if non-null
4579  *
4580  * Some platforms have their own version with special arguments;
4581  * others support only sys_rt_sigprocmask.
4582  */
4583 
4584 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4585 		old_sigset_t __user *, oset)
4586 {
4587 	old_sigset_t old_set, new_set;
4588 	sigset_t new_blocked;
4589 
4590 	old_set = current->blocked.sig[0];
4591 
4592 	if (nset) {
4593 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4594 			return -EFAULT;
4595 
4596 		new_blocked = current->blocked;
4597 
4598 		switch (how) {
4599 		case SIG_BLOCK:
4600 			sigaddsetmask(&new_blocked, new_set);
4601 			break;
4602 		case SIG_UNBLOCK:
4603 			sigdelsetmask(&new_blocked, new_set);
4604 			break;
4605 		case SIG_SETMASK:
4606 			new_blocked.sig[0] = new_set;
4607 			break;
4608 		default:
4609 			return -EINVAL;
4610 		}
4611 
4612 		set_current_blocked(&new_blocked);
4613 	}
4614 
4615 	if (oset) {
4616 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4617 			return -EFAULT;
4618 	}
4619 
4620 	return 0;
4621 }
4622 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
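
/*
 * Illustrative userspace sketch (not kernel code; the sigprocmask(3)
 * wrapper issues rt_sigprocmask on modern kernels): the three "how"
 * modes mirror the switch above.
 *
 *	#include <signal.h>
 *
 *	static void run_with_sigint_blocked(void (*critical)(void))
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);	// add to blocked set
 *		critical();				// SIGINT stays pending
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore old mask
 *	}
 */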
4623 
4624 #ifndef CONFIG_ODD_RT_SIGACTION
4625 /**
4626  *  sys_rt_sigaction - alter an action taken by a process
4627  *  @sig: signal to be sent
4628  *  @act: new sigaction
4629  *  @oact: used to save the previous sigaction
4630  *  @sigsetsize: size of sigset_t type
4631  */
4632 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4633 		const struct sigaction __user *, act,
4634 		struct sigaction __user *, oact,
4635 		size_t, sigsetsize)
4636 {
4637 	struct k_sigaction new_sa, old_sa;
4638 	int ret;
4639 
4640 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4641 	if (sigsetsize != sizeof(sigset_t))
4642 		return -EINVAL;
4643 
4644 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4645 		return -EFAULT;
4646 
4647 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4648 	if (ret)
4649 		return ret;
4650 
4651 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4652 		return -EFAULT;
4653 
4654 	return 0;
4655 }
4656 #ifdef CONFIG_COMPAT
4657 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4658 		const struct compat_sigaction __user *, act,
4659 		struct compat_sigaction __user *, oact,
4660 		compat_size_t, sigsetsize)
4661 {
4662 	struct k_sigaction new_ka, old_ka;
4663 #ifdef __ARCH_HAS_SA_RESTORER
4664 	compat_uptr_t restorer;
4665 #endif
4666 	int ret;
4667 
4668 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4669 	if (sigsetsize != sizeof(compat_sigset_t))
4670 		return -EINVAL;
4671 
4672 	if (act) {
4673 		compat_uptr_t handler;
4674 		ret = get_user(handler, &act->sa_handler);
4675 		new_ka.sa.sa_handler = compat_ptr(handler);
4676 #ifdef __ARCH_HAS_SA_RESTORER
4677 		ret |= get_user(restorer, &act->sa_restorer);
4678 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4679 #endif
4680 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4681 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4682 		if (ret)
4683 			return -EFAULT;
4684 	}
4685 
4686 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4687 	if (!ret && oact) {
4688 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4689 			       &oact->sa_handler);
4690 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4691 					 sizeof(oact->sa_mask));
4692 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4693 #ifdef __ARCH_HAS_SA_RESTORER
4694 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4695 				&oact->sa_restorer);
4696 #endif
4697 	}
4698 	return ret;
4699 }
4700 #endif
4701 #endif /* !CONFIG_ODD_RT_SIGACTION */
4702 
4703 #ifdef CONFIG_OLD_SIGACTION
4704 SYSCALL_DEFINE3(sigaction, int, sig,
4705 		const struct old_sigaction __user *, act,
4706 	        struct old_sigaction __user *, oact)
4707 {
4708 	struct k_sigaction new_ka, old_ka;
4709 	int ret;
4710 
4711 	if (act) {
4712 		old_sigset_t mask;
4713 		if (!access_ok(act, sizeof(*act)) ||
4714 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4715 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4716 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4717 		    __get_user(mask, &act->sa_mask))
4718 			return -EFAULT;
4719 #ifdef __ARCH_HAS_KA_RESTORER
4720 		new_ka.ka_restorer = NULL;
4721 #endif
4722 		siginitset(&new_ka.sa.sa_mask, mask);
4723 	}
4724 
4725 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4726 
4727 	if (!ret && oact) {
4728 		if (!access_ok(oact, sizeof(*oact)) ||
4729 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4730 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4731 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4732 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4733 			return -EFAULT;
4734 	}
4735 
4736 	return ret;
4737 }
4738 #endif
4739 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4740 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4741 		const struct compat_old_sigaction __user *, act,
4742 	        struct compat_old_sigaction __user *, oact)
4743 {
4744 	struct k_sigaction new_ka, old_ka;
4745 	int ret;
4746 	compat_old_sigset_t mask;
4747 	compat_uptr_t handler, restorer;
4748 
4749 	if (act) {
4750 		if (!access_ok(act, sizeof(*act)) ||
4751 		    __get_user(handler, &act->sa_handler) ||
4752 		    __get_user(restorer, &act->sa_restorer) ||
4753 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4754 		    __get_user(mask, &act->sa_mask))
4755 			return -EFAULT;
4756 
4757 #ifdef __ARCH_HAS_KA_RESTORER
4758 		new_ka.ka_restorer = NULL;
4759 #endif
4760 		new_ka.sa.sa_handler = compat_ptr(handler);
4761 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4762 		siginitset(&new_ka.sa.sa_mask, mask);
4763 	}
4764 
4765 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4766 
4767 	if (!ret && oact) {
4768 		if (!access_ok(oact, sizeof(*oact)) ||
4769 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4770 			       &oact->sa_handler) ||
4771 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4772 			       &oact->sa_restorer) ||
4773 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4774 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4775 			return -EFAULT;
4776 	}
4777 	return ret;
4778 }
4779 #endif
4780 
4781 #ifdef CONFIG_SGETMASK_SYSCALL
4782 
4783 /*
4784  * For backwards compatibility.  Functionality superseded by sigprocmask.
4785  */
4786 SYSCALL_DEFINE0(sgetmask)
4787 {
4788 	/* SMP safe */
4789 	return current->blocked.sig[0];
4790 }
4791 
4792 SYSCALL_DEFINE1(ssetmask, int, newmask)
4793 {
4794 	int old = current->blocked.sig[0];
4795 	sigset_t newset;
4796 
4797 	siginitset(&newset, newmask);
4798 	set_current_blocked(&newset);
4799 
4800 	return old;
4801 }
4802 #endif /* CONFIG_SGETMASK_SYSCALL */
4803 
4804 #ifdef __ARCH_WANT_SYS_SIGNAL
4805 /*
4806  * For backwards compatibility.  Functionality superseded by sigaction.
4807  */
4808 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4809 {
4810 	struct k_sigaction new_sa, old_sa;
4811 	int ret;
4812 
4813 	new_sa.sa.sa_handler = handler;
4814 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4815 	sigemptyset(&new_sa.sa.sa_mask);
4816 
4817 	ret = do_sigaction(sig, &new_sa, &old_sa);
4818 
4819 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4820 }
4821 #endif /* __ARCH_WANT_SYS_SIGNAL */
4822 
4823 #ifdef __ARCH_WANT_SYS_PAUSE
4824 
4825 SYSCALL_DEFINE0(pause)
4826 {
4827 	while (!signal_pending(current)) {
4828 		__set_current_state(TASK_INTERRUPTIBLE);
4829 		schedule();
4830 	}
4831 	return -ERESTARTNOHAND;
4832 }
4833 
4834 #endif
4835 
4836 static int sigsuspend(sigset_t *set)
4837 {
4838 	current->saved_sigmask = current->blocked;
4839 	set_current_blocked(set);
4840 
4841 	while (!signal_pending(current)) {
4842 		__set_current_state(TASK_INTERRUPTIBLE);
4843 		schedule();
4844 	}
4845 	set_restore_sigmask();
4846 	return -ERESTARTNOHAND;
4847 }
4848 
4849 /**
4850  *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
4851  *	until a signal is received
4852  *  @unewset: new signal mask value
4853  *  @sigsetsize: size of sigset_t type
4854  */
4855 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4856 {
4857 	sigset_t newset;
4858 
4859 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4860 	if (sigsetsize != sizeof(sigset_t))
4861 		return -EINVAL;
4862 
4863 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4864 		return -EFAULT;
4865 	return sigsuspend(&newset);
4866 }
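
/*
 * Illustrative userspace sketch (not kernel code) of the classic
 * race-free wait this syscall enables; it assumes a SIGUSR1 handler
 * installed elsewhere sets got_usr1.  sigsuspend() atomically swaps in
 * the old mask and sleeps, and the saved_sigmask logic above restores
 * the caller's mask after the handler returns.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// unblock + sleep, atomically
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */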
4867 
4868 #ifdef CONFIG_COMPAT
4869 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4870 {
4871 	sigset_t newset;
4872 
4873 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4874 	if (sigsetsize != sizeof(sigset_t))
4875 		return -EINVAL;
4876 
4877 	if (get_compat_sigset(&newset, unewset))
4878 		return -EFAULT;
4879 	return sigsuspend(&newset);
4880 }
4881 #endif
4882 
4883 #ifdef CONFIG_OLD_SIGSUSPEND
4884 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4885 {
4886 	sigset_t blocked;
4887 	siginitset(&blocked, mask);
4888 	return sigsuspend(&blocked);
4889 }
4890 #endif
4891 #ifdef CONFIG_OLD_SIGSUSPEND3
4892 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4893 {
4894 	sigset_t blocked;
4895 	siginitset(&blocked, mask);
4896 	return sigsuspend(&blocked);
4897 }
4898 #endif
4899 
4900 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4901 {
4902 	return NULL;
4903 }
4904 
4905 static inline void siginfo_buildtime_checks(void)
4906 {
4907 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4908 
4909 	/* Verify the offsets in the two siginfos match */
4910 #define CHECK_OFFSET(field) \
4911 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4912 
4913 	/* kill */
4914 	CHECK_OFFSET(si_pid);
4915 	CHECK_OFFSET(si_uid);
4916 
4917 	/* timer */
4918 	CHECK_OFFSET(si_tid);
4919 	CHECK_OFFSET(si_overrun);
4920 	CHECK_OFFSET(si_value);
4921 
4922 	/* rt */
4923 	CHECK_OFFSET(si_pid);
4924 	CHECK_OFFSET(si_uid);
4925 	CHECK_OFFSET(si_value);
4926 
4927 	/* sigchld */
4928 	CHECK_OFFSET(si_pid);
4929 	CHECK_OFFSET(si_uid);
4930 	CHECK_OFFSET(si_status);
4931 	CHECK_OFFSET(si_utime);
4932 	CHECK_OFFSET(si_stime);
4933 
4934 	/* sigfault */
4935 	CHECK_OFFSET(si_addr);
4936 	CHECK_OFFSET(si_trapno);
4937 	CHECK_OFFSET(si_addr_lsb);
4938 	CHECK_OFFSET(si_lower);
4939 	CHECK_OFFSET(si_upper);
4940 	CHECK_OFFSET(si_pkey);
4941 	CHECK_OFFSET(si_perf_data);
4942 	CHECK_OFFSET(si_perf_type);
4943 	CHECK_OFFSET(si_perf_flags);
4944 
4945 	/* sigpoll */
4946 	CHECK_OFFSET(si_band);
4947 	CHECK_OFFSET(si_fd);
4948 
4949 	/* sigsys */
4950 	CHECK_OFFSET(si_call_addr);
4951 	CHECK_OFFSET(si_syscall);
4952 	CHECK_OFFSET(si_arch);
4953 #undef CHECK_OFFSET
4954 
4955 	/* usb asyncio */
4956 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4957 		     offsetof(struct siginfo, si_addr));
4958 	if (sizeof(int) == sizeof(void __user *)) {
4959 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4960 			     sizeof(void __user *));
4961 	} else {
4962 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4963 			      sizeof_field(struct siginfo, si_uid)) !=
4964 			     sizeof(void __user *));
4965 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4966 			     offsetof(struct siginfo, si_uid));
4967 	}
4968 #ifdef CONFIG_COMPAT
4969 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4970 		     offsetof(struct compat_siginfo, si_addr));
4971 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4972 		     sizeof(compat_uptr_t));
4973 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4974 		     sizeof_field(struct siginfo, si_pid));
4975 #endif
4976 }
4977 
4978 #if defined(CONFIG_SYSCTL)
4979 static const struct ctl_table signal_debug_table[] = {
4980 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4981 	{
4982 		.procname	= "exception-trace",
4983 		.data		= &show_unhandled_signals,
4984 		.maxlen		= sizeof(int),
4985 		.mode		= 0644,
4986 		.proc_handler	= proc_dointvec
4987 	},
4988 #endif
4989 };
4990 
4991 static const struct ctl_table signal_table[] = {
4992 	{
4993 		.procname	= "print-fatal-signals",
4994 		.data		= &print_fatal_signals,
4995 		.maxlen		= sizeof(int),
4996 		.mode		= 0644,
4997 		.proc_handler	= proc_dointvec,
4998 	},
4999 };
5000 
5001 static int __init init_signal_sysctls(void)
5002 {
5003 	register_sysctl_init("debug", signal_debug_table);
5004 	register_sysctl_init("kernel", signal_table);
5005 	return 0;
5006 }
5007 early_initcall(init_signal_sysctls);
5008 #endif /* CONFIG_SYSCTL */
5009 
5010 void __init signals_init(void)
5011 {
5012 	siginfo_buildtime_checks();
5013 
5014 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
5015 }
5016 
5017 #ifdef CONFIG_KGDB_KDB
5018 #include <linux/kdb.h>
5019 /*
5020  * kdb_send_sig - Allows kdb to send signals without exposing
5021  * signal internals.  This function checks if the required locks are
5022  * available before calling the main signal code, to avoid kdb
5023  * deadlocks.
5024  */
5025 void kdb_send_sig(struct task_struct *t, int sig)
5026 {
5027 	static struct task_struct *kdb_prev_t;
5028 	int new_t, ret;
5029 	if (!spin_trylock(&t->sighand->siglock)) {
5030 		kdb_printf("Can't do kill command now.\n"
5031 			   "The sigmask lock is held somewhere else in "
5032 			   "kernel, try again later\n");
5033 		return;
5034 	}
5035 	new_t = kdb_prev_t != t;
5036 	kdb_prev_t = t;
5037 	if (!task_is_running(t) && new_t) {
5038 		spin_unlock(&t->sighand->siglock);
5039 		kdb_printf("Process is not RUNNING, sending a signal from "
5040 			   "kdb risks deadlock\n"
5041 			   "on the run queue locks. "
5042 			   "The signal has _not_ been sent.\n"
5043 			   "Reissue the kill command if you want to risk "
5044 			   "the deadlock.\n");
5045 		return;
5046 	}
5047 	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
5048 	spin_unlock(&t->sighand->siglock);
5049 	if (ret)
5050 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
5051 			   sig, t->pid);
5052 	else
5053 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
5054 }
5055 #endif	/* CONFIG_KGDB_KDB */
5056