xref: /linux/kernel/softirq.c (revision c1fe867b5bf9c57ab7856486d342720e2b205eed)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *	linux/kernel/softirq.c
4  *
5  *	Copyright (C) 1992 Linus Torvalds
6  *
7  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/export.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/interrupt.h>
15 #include <linux/init.h>
16 #include <linux/local_lock.h>
17 #include <linux/mm.h>
18 #include <linux/notifier.h>
19 #include <linux/percpu.h>
20 #include <linux/cpu.h>
21 #include <linux/freezer.h>
22 #include <linux/kthread.h>
23 #include <linux/rcupdate.h>
24 #include <linux/ftrace.h>
25 #include <linux/smp.h>
26 #include <linux/smpboot.h>
27 #include <linux/tick.h>
28 #include <linux/irq.h>
29 #include <linux/wait_bit.h>
30 #include <linux/workqueue.h>
31 
32 #include <asm/softirq_stack.h>
33 
34 #define CREATE_TRACE_POINTS
35 #include <trace/events/irq.h>
36 
37 /*
38    - No shared variables, all the data are CPU local.
39    - If a softirq needs serialization, let it serialize itself
40      by its own spinlocks.
41    - Even if softirq is serialized, only local cpu is marked for
42      execution. Hence, we get something sort of weak cpu binding.
43      Though it is still not clear, will it result in better locality
44      or will not.
45 
46    Examples:
47    - NET RX softirq. It is multithreaded and does not require
48      any global serialization.
49    - NET TX softirq. It kicks software netdevice queues, hence
50      it is logically serialized per device, but this serialization
51      is invisible to common code.
52    - Tasklets: serialized wrt itself.
53  */
54 
55 #ifndef __ARCH_IRQ_STAT
56 DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
57 EXPORT_PER_CPU_SYMBOL(irq_stat);
58 #endif
59 
60 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
61 
62 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
63 
64 const char * const softirq_to_name[NR_SOFTIRQS] = {
65 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
66 	"TASKLET", "SCHED", "HRTIMER", "RCU"
67 };
68 
69 /*
70  * we cannot loop indefinitely here to avoid userspace starvation,
71  * but we also don't want to introduce a worst case 1/HZ latency
72  * to the pending events, so lets the scheduler to balance
73  * the softirq load for us.
74  */
75 static void wakeup_softirqd(void)
76 {
77 	/* Interrupts are disabled: no need to stop preemption */
78 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
79 
80 	if (tsk)
81 		wake_up_process(tsk);
82 }
83 
84 #ifdef CONFIG_TRACE_IRQFLAGS
85 DEFINE_PER_CPU(int, hardirqs_enabled);
86 DEFINE_PER_CPU(int, hardirq_context);
87 EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
88 EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
89 #endif
90 
91 /*
92  * SOFTIRQ_OFFSET usage:
93  *
94  * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
95  * to a per CPU counter and to task::softirqs_disabled_cnt.
96  *
97  * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
98  *   processing.
99  *
100  * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
101  *   on local_bh_disable or local_bh_enable.
102  *
103  * This lets us distinguish between whether we are currently processing
104  * softirq and whether we just have bh disabled.
105  */
106 #ifdef CONFIG_PREEMPT_RT
107 
108 /*
109  * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
110  * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
111  * softirq disabled section to be preempted.
112  *
113  * The per task counter is used for softirq_count(), in_softirq() and
114  * in_serving_softirqs() because these counts are only valid when the task
115  * holding softirq_ctrl::lock is running.
116  *
117  * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
118  * the task which is in a softirq disabled section is preempted or blocks.
119  */
120 struct softirq_ctrl {
121 	local_lock_t	lock;
122 	int		cnt;
123 };
124 
125 static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
126 	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
127 };
128 
129 #ifdef CONFIG_DEBUG_LOCK_ALLOC
130 static struct lock_class_key bh_lock_key;
131 struct lockdep_map bh_lock_map = {
132 	.name			= "local_bh",
133 	.key			= &bh_lock_key,
134 	.wait_type_outer	= LD_WAIT_FREE,
135 	.wait_type_inner	= LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
136 	.lock_type		= LD_LOCK_PERCPU,
137 };
138 EXPORT_SYMBOL_GPL(bh_lock_map);
139 #endif
140 
141 /**
142  * local_bh_blocked() - Check for idle whether BH processing is blocked
143  *
144  * Returns false if the per CPU softirq::cnt is 0 otherwise true.
145  *
146  * This is invoked from the idle task to guard against false positive
147  * softirq pending warnings, which would happen when the task which holds
148  * softirq_ctrl::lock was the only running task on the CPU and blocks on
149  * some other lock.
150  */
151 bool local_bh_blocked(void)
152 {
153 	return __this_cpu_read(softirq_ctrl.cnt) != 0;
154 }
155 
/*
 * RT variant of local_bh_disable(): accounts the BH disabled section in
 * both the per task counter (current->softirq_disable_cnt) and the per
 * CPU softirq_ctrl::cnt, and takes the per CPU BH local lock (or just
 * disables migration) on first entry so the section stays preemptible.
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	/* BH disable from hard interrupt context is not supported on RT. */
	WARN_ON_ONCE(in_hardirq());

	lock_map_acquire_read(&bh_lock_map);

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK))
				local_lock(&softirq_ctrl.lock);
			else
				migrate_disable();

			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			/*
			 * Non-preemptible context: the lock cannot be taken.
			 * The per CPU count must be zero here, otherwise some
			 * other task holds the lock on this CPU.
			 */
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
		newcnt = this_cpu_add_return(softirq_ctrl.cnt, cnt);
		/*
		 * Reflect the result in the task state to prevent recursion on the
		 * local lock and to make softirq_count() & al work.
		 */
		current->softirq_disable_cnt = newcnt;

		/* newcnt == cnt means softirqs went from enabled to disabled. */
		if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
			raw_local_irq_save(flags);
			lockdep_softirqs_off(ip);
			raw_local_irq_restore(flags);
		}
	} else {
		bool sirq_dis = false;

		/* Remember whether this is the outermost BH disable. */
		if (!current->softirq_disable_cnt)
			sirq_dis = true;

		this_cpu_add(softirq_ctrl.cnt, cnt);
		current->softirq_disable_cnt += cnt;
		WARN_ON_ONCE(current->softirq_disable_cnt < 0);

		if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && sirq_dis) {
			raw_local_irq_save(flags);
			lockdep_softirqs_off(ip);
			raw_local_irq_restore(flags);
		}
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
215 
/*
 * RT variant of the BH enable slow path: drops @cnt from the per CPU and
 * per task counters and, when the count reaches zero and @unlock is true,
 * releases the RCU read section and the lock/migration protection taken
 * in __local_bh_disable_ip().
 */
static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	bool sirq_en = false;
	int newcnt;

	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
		/* Task and per CPU counters must agree while the lock is held. */
		DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
				    this_cpu_read(softirq_ctrl.cnt));
		/* Dropping the last disable level reenables softirqs. */
		if (softirq_count() == cnt)
			sirq_en = true;
	} else {
		if (current->softirq_disable_cnt == cnt)
			sirq_en = true;
	}

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && sirq_en) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
		newcnt = this_cpu_sub_return(softirq_ctrl.cnt, cnt);
		current->softirq_disable_cnt = newcnt;

		if (!newcnt && unlock) {
			rcu_read_unlock();
			local_unlock(&softirq_ctrl.lock);
		}
	} else {
		current->softirq_disable_cnt -= cnt;
		this_cpu_sub(softirq_ctrl.cnt, cnt);
		if (unlock && !current->softirq_disable_cnt) {
			migrate_enable();
			rcu_read_unlock();
		} else {
			/* Counter underflow would indicate unbalanced enable/disable. */
			WARN_ON_ONCE(current->softirq_disable_cnt < 0);
		}
	}
}
257 
/*
 * RT variant of local_bh_enable(): reenables BHs and, when this drops the
 * last disable level while softirqs are pending, either processes them
 * inline (preemptible context) or wakes ksoftirqd.
 */
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	lock_map_release(&bh_lock_map);

	local_irq_save(flags);
	if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK))
		curcnt = this_cpu_read(softirq_ctrl.cnt);
	else
		curcnt = current->softirq_disable_cnt;

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	/* Drop the remaining count; unlock only if we were preemptible. */
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
309 
/*
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	/* Take the BH lock first, then disable interrupts for processing. */
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}
319 
/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	/* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */
	lock_map_release(&bh_lock_map);
	/* Drop the SOFTIRQ_OFFSET taken in ksoftirqd_run_begin() and unlock. */
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}
329 
/* On RT the preempt count is not manipulated around softirq processing. */
static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }
332 
333 static inline bool should_wake_ksoftirqd(void)
334 {
335 	return !this_cpu_read(softirq_ctrl.cnt);
336 }
337 
/* On RT softirqs are never run on the return from interrupt path directly;
 * hand them to ksoftirqd unless BHs are currently disabled on this CPU. */
static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}
343 
#define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionalities
 * are in the block layer which is disabled on RT, and in the scheduler for
 * idle load balancing. If soft interrupts get raised which haven't been
 * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	unsigned int is_pending = local_softirq_pending();

	if (unlikely(was_pending != is_pending)) {
		/* Only SCHED_SOFTIRQ is expected to be newly raised by the flush. */
		WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
		invoke_softirq();
	}
}
363 
364 #else /* CONFIG_PREEMPT_RT */
365 
366 /*
367  * This one is for softirq.c-internal use, where hardirqs are disabled
368  * legitimately:
369  */
370 #ifdef CONFIG_TRACE_IRQFLAGS
/*
 * !RT variant: bump the preempt counter by @cnt and tell lockdep when
 * softirqs actually transition from enabled to disabled.
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	/* First preemption-disabling level: notify the preempt tracer. */
	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
401 #endif /* CONFIG_TRACE_IRQFLAGS */
402 
/*
 * !RT counterpart of __local_bh_disable_ip(): drop @cnt from the preempt
 * counter and update the preempt tracer and lockdep state accordingly.
 * Must be called with interrupts disabled.
 */
static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	/* Leaving the last preemption-disabling level: notify the tracer. */
	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	/* Are softirqs being reenabled by this decrement? */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}
415 
/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
426 
/*
 * !RT variant of local_bh_enable(): reenables BHs and runs any pending
 * softirqs inline when this was the outermost BH disabled section.
 */
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	/* Drop the final preemption level held across the softirq run. */
	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
460 
/* Mark softirq processing: raises the SOFTIRQ_OFFSET part of preempt_count. */
static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}
465 
/* Counterpart to softirq_handle_begin(); must leave interrupt context. */
static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}
471 
/* On !RT ksoftirqd only needs interrupts disabled, no BH lock. */
static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}
476 
/* Counterpart to ksoftirqd_run_begin() on !RT. */
static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}
481 
/* On !RT there is no per CPU BH lock, waking ksoftirqd is always fine. */
static inline bool should_wake_ksoftirqd(void)
{
	return true;
}
486 
/*
 * Run pending softirqs on interrupt exit, either inline or — when forced
 * interrupt threading is enabled and ksoftirqd exists — via the daemon.
 */
static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
509 
/*
 * Process pending softirqs on a dedicated stack. No-op when already in
 * interrupt context (including BH disabled sections) — the pending work
 * will be handled when that context unwinds.
 */
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}
527 
528 #endif /* !CONFIG_PREEMPT_RT */
529 
530 /*
531  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
532  * but break the loop if need_resched() is set or after 2 ms.
533  * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
534  * certain cases, such as stop_machine(), jiffies may cease to
535  * increment and so we need the MAX_SOFTIRQ_RESTART limit as
536  * well to make sure we eventually return from this method.
537  *
538  * These limits have been established via experimentation.
539  * The two things to balance is latency against fairness -
540  * we want to handle softirqs as soon as possible, but they
541  * should not be able to lock up the box.
542  */
543 #define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
544 #define MAX_SOFTIRQ_RESTART 10
545 
546 #ifdef CONFIG_TRACE_IRQFLAGS
547 /*
548  * When we run softirqs from irq_exit() and thus on the hardirq stack we need
549  * to keep the lockdep irq context tracking as tight as possible in order to
550  * not miss-qualify lock contexts and miss possible deadlocks.
551  */
552 
/*
 * Temporarily leave the lockdep hardirq context (if any) and enter the
 * softirq context. Returns whether a hardirq context was left so that
 * lockdep_softirq_end() can restore it.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}
566 
567 static inline void lockdep_softirq_end(bool in_hardirq)
568 {
569 	lockdep_softirq_exit();
570 
571 	if (in_hardirq)
572 		lockdep_hardirq_enter();
573 }
574 #else
575 static inline bool lockdep_softirq_start(void) { return false; }
576 static inline void lockdep_softirq_end(bool in_hardirq) { }
577 #endif
578 
/*
 * Core softirq processing loop: snapshots and clears the pending mask,
 * runs each raised vector's handler with interrupts enabled, and restarts
 * while new softirqs were raised — bounded by MAX_SOFTIRQ_TIME,
 * MAX_SOFTIRQ_RESTART and need_resched(), after which ksoftirqd takes over.
 *
 * @ksirqd: true when invoked from the ksoftirqd thread.
 */
static void handle_softirqs(bool ksirqd)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	/* Walk the snapshot lowest bit first; HI_SOFTIRQ is bit 0. */
	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action();
		trace_softirq_exit(vec_nr);
		/* Repair and report a preempt count leaked by the handler. */
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	/* Report a quiescent state to RCU when running in ksoftirqd on !RT. */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		/* Budget exhausted: defer the remainder to ksoftirqd. */
		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}
653 
/* Entry point for softirq processing outside of the ksoftirqd thread. */
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	handle_softirqs(false);
}
658 
/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	/*
	 * If this is a nested interrupt that hits the exit_to_user_mode_loop
	 * where it has enabled interrupts but before it has hit schedule() we
	 * could have hrtimers in an undefined state. Fix it up here.
	 */
	hrtimer_rearm_deferred();

	/* Let the tick machinery catch up on nohz-full or idle CPUs. */
	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}
679 
/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	/* Tell context tracking / RCU first, then do the common entry work. */
	ct_irq_enter();
	irq_enter_rcu();
}
688 
/* Give the nohz code a chance to stop/program the tick on interrupt exit. */
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		/* Only from the outermost interrupt level. */
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}
701 
702 #ifdef CONFIG_IRQ_FORCED_THREADING
703 DEFINE_PER_CPU(struct task_struct *, ktimerd);
704 DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
705 
706 static void wake_timersd(void)
707 {
708 	struct task_struct *tsk = __this_cpu_read(ktimerd);
709 
710 	if (tsk)
711 		wake_up_process(tsk);
712 }
713 
714 #else
715 
716 static inline void wake_timersd(void) { }
717 
718 #endif
719 
/*
 * Common interrupt exit work: drop HARDIRQ_OFFSET, run or delegate any
 * pending softirqs, wake ktimerd for forced-threaded timer softirqs and
 * let the nohz tick code catch up. Interrupts must end up disabled here.
 */
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending()) {
		/*
		 * If we left hrtimers unarmed, make sure to arm them now,
		 * before enabling interrupts to run SoftIRQ.
		 */
		hrtimer_rearm_deferred();
		invoke_softirq();
	}

	if (IS_ENABLED(CONFIG_IRQ_FORCED_THREADING) && force_irqthreads() &&
	    local_timers_pending_force_th() && !(in_nmi() | in_hardirq()))
		wake_timersd();

	tick_irq_exit();
}
744 
/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	 /* must be last! */
	lockdep_hardirq_exit();
}
756 
/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	 /* must be last! */
	lockdep_hardirq_exit();
}
769 
/*
 * Mark softirq @nr pending on this CPU and wake ksoftirqd if nothing else
 * will process it. This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}
789 
/* Interrupt-safe wrapper around raise_softirq_irqoff(). */
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
798 
/* Set the pending bit for softirq @nr; caller must have irqs disabled and
 * is responsible for ensuring the softirq gets processed. */
void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
805 
/* Register the handler for softirq vector @nr. Only called at boot time. */
void open_softirq(int nr, void (*action)(void))
{
	softirq_vec[nr].action = action;
}
810 
811 /*
812  * Tasklets
813  */
814 struct tasklet_head {
815 	struct tasklet_struct *head;
816 	struct tasklet_struct **tail;
817 };
818 
819 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
820 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
821 
/*
 * Append @t to this CPU's tasklet list @headp and raise @softirq_nr so
 * the list gets processed. Interrupts are disabled to protect the list.
 */
static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	/* Link @t at the tail of the singly linked list. */
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}
837 
/* Queue @t on the normal-priority tasklet list of this CPU. */
void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);
844 
/* Queue @t on the high-priority (HI_SOFTIRQ) tasklet list of this CPU. */
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
851 
/*
 * Clear TASKLET_STATE_SCHED (waking anyone waiting on it) and return
 * whether it was set. A clear SCHED state here indicates a bug in the
 * tasklet user, hence the WARN.
 */
static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_wake_up_bit(TASKLET_STATE_SCHED, &t->state))
		return true;

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}
863 
864 #ifdef CONFIG_PREEMPT_RT
865 struct tasklet_sync_callback {
866 	spinlock_t	cb_lock;
867 	atomic_t	cb_waiters;
868 };
869 
870 static DEFINE_PER_CPU(struct tasklet_sync_callback, tasklet_sync_callback) = {
871 	.cb_lock	= __SPIN_LOCK_UNLOCKED(tasklet_sync_callback.cb_lock),
872 	.cb_waiters	= ATOMIC_INIT(0),
873 };
874 
/* Serialize tasklet callback invocation on this CPU against cancel waiters. */
static void tasklet_lock_callback(void)
{
	spin_lock(this_cpu_ptr(&tasklet_sync_callback.cb_lock));
}
879 
/* Counterpart to tasklet_lock_callback(). */
static void tasklet_unlock_callback(void)
{
	spin_unlock(this_cpu_ptr(&tasklet_sync_callback.cb_lock));
}
884 
/*
 * Wait until the currently running tasklet callback on this CPU finished.
 * The waiter count tells tasklet_callback_sync_wait_running() to drop the
 * lock between list entries so we can get it here.
 */
static void tasklet_callback_cancel_wait_running(void)
{
	struct tasklet_sync_callback *sync_cb = this_cpu_ptr(&tasklet_sync_callback);

	atomic_inc(&sync_cb->cb_waiters);
	spin_lock(&sync_cb->cb_lock);
	atomic_dec(&sync_cb->cb_waiters);
	spin_unlock(&sync_cb->cb_lock);
}
894 
/*
 * Invoked between tasklet callbacks: if a canceller is spinning on the
 * callback lock, briefly drop and retake it so the waiter can get through
 * and observe the callback completion.
 */
static void tasklet_callback_sync_wait_running(void)
{
	struct tasklet_sync_callback *sync_cb = this_cpu_ptr(&tasklet_sync_callback);

	if (atomic_read(&sync_cb->cb_waiters)) {
		spin_unlock(&sync_cb->cb_lock);
		spin_lock(&sync_cb->cb_lock);
	}
}
904 
905 #else /* !CONFIG_PREEMPT_RT: */
906 
907 static void tasklet_lock_callback(void) { }
908 static void tasklet_unlock_callback(void) { }
909 static void tasklet_callback_sync_wait_running(void) { }
910 
911 #ifdef CONFIG_SMP
912 static void tasklet_callback_cancel_wait_running(void) { }
913 #endif
914 #endif /* !CONFIG_PREEMPT_RT */
915 
/*
 * Softirq handler body for both tasklet vectors: detach the whole per-CPU
 * list, then run each tasklet that is not disabled and not running on
 * another CPU. Tasklets that cannot run now are requeued and the softirq
 * is re-raised for them.
 */
static void tasklet_action_common(struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	/* Atomically steal the whole list and reset it to empty. */
	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	tasklet_lock_callback();
	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		/* Trylock fails if the tasklet runs on another CPU: requeue. */
		if (tasklet_trylock(t)) {
			/* count != 0 means the tasklet is disabled: requeue. */
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				tasklet_callback_sync_wait_running();
				continue;
			}
			tasklet_unlock(t);
		}

		/* Could not run it now: put it back and re-raise the softirq. */
		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
	tasklet_unlock_callback();
}
962 
/* TASKLET_SOFTIRQ handler: flush workqueue BH work, then run tasklets. */
static __latent_entropy void tasklet_action(void)
{
	workqueue_softirq_action(false);
	tasklet_action_common(this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}
968 
/* HI_SOFTIRQ handler: high-priority variant of tasklet_action(). */
static __latent_entropy void tasklet_hi_action(void)
{
	workqueue_softirq_action(true);
	tasklet_action_common(this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
974 
/* Initialize @t with the modern callback interface (callback receives @t). */
void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);	/* 0 == enabled */
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
986 
/* Initialize @t with the legacy interface (func receives @data). */
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);	/* 0 == enabled */
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
998 
999 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
1000 /*
1001  * Do not use in new code. Waiting for tasklets from atomic contexts is
1002  * error prone and should be avoided.
1003  */
/* Busy-wait until @t's RUN bit clears; callable from atomic context. */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when current preempted soft
			 * interrupt processing or prevents ksoftirqd from
			 * running.
			 */
			tasklet_callback_cancel_wait_running();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
1020 #endif
1021 
/*
 * Wait for @t to stop being scheduled and to finish running, leaving it
 * in a state where it will not run again. Sleeps, so must not be called
 * from interrupt context.
 */
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	/* Wait until we own the SCHED bit, i.e. nobody can reschedule @t. */
	wait_on_bit_lock(&t->state, TASKLET_STATE_SCHED, TASK_UNINTERRUPTIBLE);

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);
1033 
1034 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/* Clear the RUN bit and wake anyone waiting in tasklet_unlock_wait(). */
void tasklet_unlock(struct tasklet_struct *t)
{
	clear_and_wake_up_bit(TASKLET_STATE_RUN, &t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);
1040 
/* Sleep until @t's RUN bit clears, i.e. the tasklet finished executing. */
void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_on_bit(&t->state, TASKLET_STATE_RUN, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
1046 #endif
1047 
/* Boot-time setup: initialize per-CPU tasklet lists and register the two
 * tasklet softirq handlers. */
void __init softirq_init(void)
{
	int cpu;

	/* Empty list invariant: tail points at head. */
	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
1062 
/* smpboot callback: run the ksoftirqd thread whenever softirqs are pending. */
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}
1067 
1068 static void run_ksoftirqd(unsigned int cpu)
1069 {
1070 	ksoftirqd_run_begin();
1071 	if (local_softirq_pending()) {
1072 		/*
1073 		 * We can safely run softirq on inline stack, as we are not deep
1074 		 * in the task stack here.
1075 		 */
1076 		handle_softirqs(true);
1077 		ksoftirqd_run_end();
1078 		cond_resched();
1079 		return;
1080 	}
1081 	ksoftirqd_run_end();
1082 }
1083 
1084 #ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug callback (CPUHP_SOFTIRQ_DEAD): splice the dead CPU's tasklet
 * lists onto this CPU's lists and raise the softirqs to process them.
 */
static int takeover_tasklets(unsigned int cpu)
{
	workqueue_softirq_dead(cpu);

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	/* Same for the high-priority list. */
	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
1112 #else
1113 #define takeover_tasklets	NULL
1114 #endif /* CONFIG_HOTPLUG_CPU */
1115 
1116 static struct smp_hotplug_thread softirq_threads = {
1117 	.store			= &ksoftirqd,
1118 	.thread_should_run	= ksoftirqd_should_run,
1119 	.thread_fn		= run_ksoftirqd,
1120 	.thread_comm		= "ksoftirqd/%u",
1121 };
1122 
1123 #ifdef CONFIG_IRQ_FORCED_THREADING
/* smpboot setup callback for the ktimers thread. */
static void ktimerd_setup(unsigned int cpu)
{
	/* Above SCHED_NORMAL to handle timers before regular tasks. */
	sched_set_fifo_low(current);
}
1129 
/* smpboot callback: run ktimers when forced-threaded timer softirqs pend. */
static int ktimerd_should_run(unsigned int cpu)
{
	return local_timers_pending_force_th();
}
1134 
/* Record softirq @nr in the separate pending mask handled by ktimerd
 * instead of the regular softirq pending mask. */
void raise_ktimers_thread(unsigned int nr)
{
	trace_softirq_raise(nr);
	__this_cpu_or(pending_timer_softirq, BIT(nr));
}
1140 
/*
 * smpboot thread function for ktimers: move the deferred timer softirq
 * bits into the regular pending mask and process them.
 */
static void run_ktimerd(unsigned int cpu)
{
	unsigned int timer_si;

	ksoftirqd_run_begin();

	timer_si = local_timers_pending_force_th();
	__this_cpu_write(pending_timer_softirq, 0);
	or_softirq_pending(timer_si);

	__do_softirq();

	ksoftirqd_run_end();
}
1155 
1156 static struct smp_hotplug_thread timer_thread = {
1157 	.store			= &ktimerd,
1158 	.setup			= ktimerd_setup,
1159 	.thread_should_run	= ktimerd_should_run,
1160 	.thread_fn		= run_ktimerd,
1161 	.thread_comm		= "ktimers/%u",
1162 };
1163 #endif
1164 
/* Early boot: register the hotplug takeover callback and spawn the per-CPU
 * ksoftirqd (and, with forced irq threading, ktimers) threads. */
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
#ifdef CONFIG_IRQ_FORCED_THREADING
	if (force_irqthreads())
		BUG_ON(smpboot_register_percpu_thread(&timer_thread));
#endif
	return 0;
}
early_initcall(spawn_ksoftirqd);
1177 
1178 /*
1179  * [ These __weak aliases are kept in a separate compilation unit, so that
1180  *   GCC does not inline them incorrectly. ]
1181  */
1182 
/* Weak default: architectures/irq core may override; nothing to do here. */
int __init __weak early_irq_init(void)
{
	return 0;
}
1187 
/* Weak default: report the legacy number of interrupts unless overridden. */
int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}
1192 
/* Weak default: no architecture specific early irq setup. */
int __init __weak arch_early_irq_init(void)
{
	return 0;
}
1197 
/* Weak default: no architecture restriction on dynamic irq allocation. */
unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}
1202