// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local CPU is marked for
     execution. Hence, we get something sort of weak CPU binding.
     It is still not clear whether this results in better locality
     or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};
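
/*
 * Note: these strings are what is shown, one per vector, in /proc/softirqs;
 * the irq:softirq_* tracepoints print the same names.
 */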

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk)
                wake_up_process(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
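
/*
 * Worked example for the common !RT preempt_count() layout (see
 * <linux/preempt.h>): SOFTIRQ_OFFSET is 1 << SOFTIRQ_SHIFT == 0x100 and
 * SOFTIRQ_DISABLE_OFFSET is 0x200. A task that has called
 * local_bh_disable() once therefore has softirq_count() == 0x200, while a
 * task actually executing softirq handlers has the 0x100 bit set, which is
 * what in_serving_softirq() tests for.
 */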
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in the per CPU softirq_ctrl::cnt. This is necessary to allow tasks in
 * a softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
        local_lock_t    lock;
        int             cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
        .lock   = INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key bh_lock_key;
struct lockdep_map bh_lock_map = {
        .name                   = "local_bh",
        .key                    = &bh_lock_key,
        .wait_type_outer        = LD_WAIT_FREE,
        .wait_type_inner        = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
        .lock_type              = LD_LOCK_PERCPU,
};
EXPORT_SYMBOL_GPL(bh_lock_map);
#endif

/**
 * local_bh_blocked() - Check from the idle task whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
        return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;
        int newcnt;

        WARN_ON_ONCE(in_hardirq());

        lock_map_acquire_read(&bh_lock_map);

        /* First entry of a task into a BH disabled section? */
        if (!current->softirq_disable_cnt) {
                if (preemptible()) {
                        local_lock(&softirq_ctrl.lock);
                        /* Required to meet the RCU bottomhalf requirements. */
                        rcu_read_lock();
                } else {
                        DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
                }
        }

        /*
         * Track the per CPU softirq disabled state. On RT this is per CPU
         * state to allow preemption of bottom half disabled sections.
         */
        newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
        /*
         * Reflect the result in the task state to prevent recursion on the
         * local lock and to make softirq_count() & al work.
         */
        current->softirq_disable_cnt = newcnt;

        if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
                raw_local_irq_save(flags);
                lockdep_softirqs_off(ip);
                raw_local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
        unsigned long flags;
        int newcnt;

        DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
                            this_cpu_read(softirq_ctrl.cnt));

        if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
                raw_local_irq_save(flags);
                lockdep_softirqs_on(_RET_IP_);
                raw_local_irq_restore(flags);
        }

        newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
        current->softirq_disable_cnt = newcnt;

        if (!newcnt && unlock) {
                rcu_read_unlock();
                local_unlock(&softirq_ctrl.lock);
        }
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        bool preempt_on = preemptible();
        unsigned long flags;
        u32 pending;
        int curcnt;

        WARN_ON_ONCE(in_hardirq());
        lockdep_assert_irqs_enabled();

        lock_map_release(&bh_lock_map);

        local_irq_save(flags);
        curcnt = __this_cpu_read(softirq_ctrl.cnt);

        /*
         * If this is not reenabling soft interrupts, no point in trying to
         * run pending ones.
         */
        if (curcnt != cnt)
                goto out;

        pending = local_softirq_pending();
        if (!pending)
                goto out;

        /*
         * If this was called from non preemptible context, wake up the
         * softirq daemon.
         */
        if (!preempt_on) {
                wakeup_softirqd();
                goto out;
        }

        /*
         * Adjust softirq count to SOFTIRQ_OFFSET which makes
         * in_serving_softirq() become true.
         */
        cnt = SOFTIRQ_OFFSET;
        __local_bh_enable(cnt, false);
        __do_softirq();

out:
        __local_bh_enable(cnt, preempt_on);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * Invoked from run_ksoftirqd() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
        /* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */
        lock_map_release(&bh_lock_map);
        __local_bh_enable(SOFTIRQ_OFFSET, true);
        WARN_ON_ONCE(in_interrupt());
        local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
        return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
        if (should_wake_ksoftirqd())
                wakeup_softirqd();
}

#define SCHED_SOFTIRQ_MASK      BIT(SCHED_SOFTIRQ)

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionalities
 * are in the block layer which is disabled on RT, and in the scheduler for
 * idle load balancing. If soft interrupts get raised which haven't been
 * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
        unsigned int is_pending = local_softirq_pending();

        if (unlikely(was_pending != is_pending)) {
                WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
                invoke_softirq();
        }
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_hardirq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirqs_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above?
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (preempt_count() == cnt)
                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_on(_RET_IP_);

        __preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_hardirq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_hardirq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                lockdep_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        __preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
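
/*
 * Illustrative usage sketch (not part of this file): callers bracket
 * sections that must not race with softirq handlers on this CPU, e.g.
 *
 *	local_bh_disable();
 *	... touch data shared with a softirq handler ...
 *	local_bh_enable();
 *
 * and local_bh_enable() may end up running any pending softirqs right
 * there via the do_softirq() path above.
 */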

static inline void softirq_handle_begin(void)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
        local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
        local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
        return true;
}

static inline void invoke_softirq(void)
{
        if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack that can
                 * be potentially deep already. So call softirq in its own stack
                 * to prevent any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                do_softirq_own_stack();

        local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
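
/*
 * Taken together: a single handle_softirqs() invocation stops after roughly
 * 2ms, after MAX_SOFTIRQ_RESTART rounds, or as soon as need_resched() is
 * set, whichever comes first; whatever is still pending at that point is
 * handed off to ksoftirqd via wakeup_softirqd().
 */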

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (lockdep_hardirq_context()) {
                in_hardirq = true;
                lockdep_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

static void handle_softirqs(bool ksirqd)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC as the current task context is borrowed for the
         * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
         * again if the socket is related to swapping.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();

        softirq_handle_begin();
        in_hardirq = lockdep_softirq_start();
        account_softirq_enter(current);

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action();
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
                rcu_softirq_qs();

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        account_softirq_exit(current);
        lockdep_softirq_end(in_hardirq);
        softirq_handle_end();
        current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        handle_softirqs(false);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
        __irq_enter_raw();

        if (tick_nohz_full_cpu(smp_processor_id()) ||
            (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
                tick_irq_enter();

        account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
        ct_irq_enter();
        irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_hardirq())
                        tick_nohz_irq_exit();
        }
#endif
}

#ifdef CONFIG_IRQ_FORCED_THREADING
DEFINE_PER_CPU(struct task_struct *, ktimerd);
DEFINE_PER_CPU(unsigned long, pending_timer_softirq);

static void wake_timersd(void)
{
        struct task_struct *tsk = __this_cpu_read(ktimerd);

        if (tsk)
                wake_up_process(tsk);
}

#else

static inline void wake_timersd(void) { }

#endif

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_hardirq_exit(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        if (IS_ENABLED(CONFIG_IRQ_FORCED_THREADING) && force_irqthreads() &&
            local_timers_pending_force_th() && !(in_nmi() | in_hardirq()))
                wake_timersd();

        tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
        __irq_exit_rcu();
        /* must be last! */
        lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
        __irq_exit_rcu();
        ct_irq_exit();
        /* must be last! */
        lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt() && should_wake_ksoftirqd())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        lockdep_assert_irqs_disabled();
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(void))
{
        softirq_vec[nr].action = action;
}
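
/*
 * For illustration: subsystems register their vector once at boot and raise
 * it later, e.g. the networking core does
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 *
 * from net_dev_init() and then raises NET_RX_SOFTIRQ with
 * __raise_softirq_irqoff() when scheduling NAPI.
 */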

/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
                                      struct tasklet_head __percpu *headp,
                                      unsigned int softirq_nr)
{
        struct tasklet_head *head;
        unsigned long flags;

        local_irq_save(flags);
        head = this_cpu_ptr(headp);
        t->next = NULL;
        *head->tail = t;
        head->tail = &(t->next);
        raise_softirq_irqoff(softirq_nr);
        local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_vec,
                                  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_hi_vec,
                                  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
        if (test_and_clear_wake_up_bit(TASKLET_STATE_SCHED, &t->state))
                return true;

        WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
                  t->use_callback ? "callback" : "func",
                  t->use_callback ? (void *)t->callback : (void *)t->func);

        return false;
}

static void tasklet_action_common(struct tasklet_head *tl_head,
                                  unsigned int softirq_nr)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = tl_head->head;
        tl_head->head = NULL;
        tl_head->tail = &tl_head->head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (tasklet_clear_sched(t)) {
                                        if (t->use_callback) {
                                                trace_tasklet_entry(t, t->callback);
                                                t->callback(t);
                                                trace_tasklet_exit(t, t->callback);
                                        } else {
                                                trace_tasklet_entry(t, t->func);
                                                t->func(t->data);
                                                trace_tasklet_exit(t, t->func);
                                        }
                                }
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *tl_head->tail = t;
                tl_head->tail = &t->next;
                __raise_softirq_irqoff(softirq_nr);
                local_irq_enable();
        }
}

static __latent_entropy void tasklet_action(void)
{
        workqueue_softirq_action(false);
        tasklet_action_common(this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(void)
{
        workqueue_softirq_action(true);
        tasklet_action_common(this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
                   void (*callback)(struct tasklet_struct *))
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->callback = callback;
        t->use_callback = true;
        t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->use_callback = false;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
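
/*
 * Illustrative driver-side pattern (hypothetical names, not part of this
 * file):
 *
 *	static void my_bh(struct tasklet_struct *t)
 *	{
 *		... bottom half work ...
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_bh);
 *
 * The hard interrupt handler then calls tasklet_schedule(&my_tasklet), and
 * teardown uses tasklet_kill(&my_tasklet) once it can no longer be
 * rescheduled.
 */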

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
                if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
                        /*
                         * Prevent a live lock when the current task has
                         * preempted soft interrupt processing or is
                         * preventing ksoftirqd from running. If the tasklet
                         * runs on a different CPU, then this has no effect
                         * other than doing the BH disable/enable dance for
                         * nothing.
                         */
                        local_bh_disable();
                        local_bh_enable();
                } else {
                        cpu_relax();
                }
        }
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        wait_on_bit_lock(&t->state, TASKLET_STATE_SCHED, TASK_UNINTERRUPTIBLE);

        tasklet_unlock_wait(t);
        tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
        clear_and_wake_up_bit(TASKLET_STATE_RUN, &t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
        wait_on_bit(&t->state, TASKLET_STATE_RUN, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        ksoftirqd_run_begin();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on inline stack, as we are not
                 * deep in the task stack here.
                 */
                handle_softirqs(true);
                ksoftirqd_run_end();
                cond_resched();
                return;
        }
        ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
        workqueue_softirq_dead(cpu);

        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets       NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

#ifdef CONFIG_IRQ_FORCED_THREADING
static void ktimerd_setup(unsigned int cpu)
{
        /* Above SCHED_NORMAL to handle timers before regular tasks. */
        sched_set_fifo_low(current);
}

static int ktimerd_should_run(unsigned int cpu)
{
        return local_timers_pending_force_th();
}

void raise_ktimers_thread(unsigned int nr)
{
        trace_softirq_raise(nr);
        __this_cpu_or(pending_timer_softirq, BIT(nr));
}

static void run_ktimerd(unsigned int cpu)
{
        unsigned int timer_si;

        ksoftirqd_run_begin();

        timer_si = local_timers_pending_force_th();
        __this_cpu_write(pending_timer_softirq, 0);
        or_softirq_pending(timer_si);

        __do_softirq();

        ksoftirqd_run_end();
}

static struct smp_hotplug_thread timer_thread = {
        .store                  = &ktimerd,
        .setup                  = ktimerd_setup,
        .thread_should_run      = ktimerd_should_run,
        .thread_fn              = run_ktimerd,
        .thread_comm            = "ktimers/%u",
};
#endif

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
#ifdef CONFIG_IRQ_FORCED_THREADING
        if (force_irqthreads())
                BUG_ON(smpboot_register_percpu_thread(&timer_thread));
#endif
        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}