/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/cleanup.h>
#include <linux/types.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00f00000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
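
/*
 * Illustrative example (not part of the kernel API): decoding a raw
 * preempt_count value with the masks above. A count of 0x00010001 means
 * one hardirq is being serviced on top of one preempt_disable() level:
 *
 *	(0x00010001 & HARDIRQ_MASK) >> HARDIRQ_SHIFT	== 1
 *	(0x00010001 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT	== 0
 *	(0x00010001 & PREEMPT_MASK) >> PREEMPT_SHIFT	== 1
 */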

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

/**
 * interrupt_context_level - return interrupt context level
 *
 * Returns the current interrupt context level.
 *  0 - normal context
 *  1 - softirq context
 *  2 - hardirq context
 *  3 - NMI context
 */
static __always_inline unsigned char interrupt_context_level(void)
{
	unsigned long pc = preempt_count();
	unsigned char level = 0;

	level += !!(pc & (NMI_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}
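
/*
 * Illustrative sketch (not an existing kernel API): the returned level can
 * be used to index per-context state, e.g. recursion protection that must
 * not mix task, softirq, hardirq and NMI nesting; "struct foo_ctx" and
 * foo_enter() are hypothetical:
 *
 *	struct foo_ctx {
 *		unsigned int	nesting[4];
 *	};
 *
 *	static void foo_enter(struct foo_ctx *ctx)
 *	{
 *		ctx->nesting[interrupt_context_level()]++;
 *	}
 */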

/*
 * These macro definitions avoid redundant invocations of preempt_count()
 * because such invocations would result in redundant loads given that
 * preempt_count() is commonly implemented with READ_ONCE().
 */

#define nmi_count()	(preempt_count() & NMI_MASK)
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
# define irq_count()		((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
#else
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
# define irq_count()		(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
#endif

/*
 * Macros to retrieve the current execution context:
 *
 * in_nmi()		- We're in NMI context
 * in_hardirq()		- We're in hard IRQ context
 * in_serving_softirq()	- We're in softirq context
 * in_task()		- We're in task context
 */
#define in_nmi()		(nmi_count())
#define in_hardirq()		(hardirq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#ifdef CONFIG_PREEMPT_RT
# define in_task()		(!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
#else
# define in_task()		(!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
#endif
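
/*
 * Illustrative sketch: picking a non-sleeping path based on the current
 * execution context; struct foo, foo_submit_sync() (which may sleep) and
 * foo_submit_deferred() (which must not) are hypothetical:
 *
 *	static void foo_submit(struct foo *f)
 *	{
 *		if (in_task())
 *			foo_submit_sync(f);
 *		else
 *			foo_submit_deferred(f);
 *	}
 */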

/*
 * The following macros are deprecated and should not be used in new code:
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 */
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET		PREEMPT_DISABLE_OFFSET
#else
/* Locks on RT do not disable preemption */
#define PREEMPT_LOCK_OFFSET		0
#endif

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
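
/*
 * Illustrative example: on a !PREEMPT_RT kernel with CONFIG_PREEMPT_COUNT,
 * SOFTIRQ_LOCK_OFFSET evaluates to SOFTIRQ_DISABLE_OFFSET + PREEMPT_OFFSET
 * (0x200 + 0x1 == 0x201), i.e. the amount by which spin_lock_bh() is
 * expected to raise preempt_count(); foo_lock is a hypothetical spinlock:
 *
 *	spin_lock_bh(&foo_lock);
 *	WARN_ON(preempt_count() < SOFTIRQ_LOCK_OFFSET);
 *	spin_unlock_bh(&foo_lock);
 */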

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)
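
/*
 * Illustrative example of the limitation described above: on a
 * CONFIG_PREEMPT_COUNT kernel,
 *
 *	preempt_disable();
 *	WARN_ON(!in_atomic());
 *	preempt_enable();
 *
 * holds, but the same check inside a spin_lock() section on a
 * !CONFIG_PREEMPT_COUNT kernel sees in_atomic() == 0, which is why the
 * macro must not be used to decide whether sleeping is allowed.
 */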

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPTION */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that operations like get_user/put_user, which can
 * cause faults and scheduling, do not migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
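
/*
 * Illustrative sketch: the classic use of preempt_disable()/preempt_enable()
 * is to keep a task on its current CPU so that a short read-modify-write of
 * per-CPU data cannot be split across CPUs; foo_stat is a hypothetical
 * per-CPU variable:
 *
 *	unsigned long val;
 *
 *	preempt_disable();
 *	val = __this_cpu_read(foo_stat);
 *	__this_cpu_write(foo_stat, val + 1);
 *	preempt_enable();
 */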

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;
struct task_struct;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	/* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
	notifier->link.next = NULL;
	notifier->link.pprev = NULL;
	notifier->ops = ops;
}
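
/*
 * Illustrative sketch: a preempt notifier is normally embedded in a larger,
 * subsystem private structure and recovered with container_of(); "struct
 * foo_vcpu" and its callbacks are hypothetical:
 *
 *	struct foo_vcpu {
 *		struct preempt_notifier	pn;
 *	};
 *
 *	static void foo_vcpu_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct foo_vcpu *v = container_of(pn, struct foo_vcpu, pn);
 *
 *		foo_vcpu_load(v, cpu);
 *	}
 *
 *	static struct preempt_ops foo_vcpu_preempt_ops = {
 *		.sched_in	= foo_vcpu_sched_in,
 *		.sched_out	= foo_vcpu_sched_out,
 *	};
 *
 *	preempt_notifier_init(&v->pn, &foo_vcpu_preempt_ops);
 *	preempt_notifier_register(&v->pn);
 */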

#endif

/*
 * Migrate-Disable and why it is undesired.
 *
 * When a preempted task becomes eligible to run under the ideal model (IOW it
 * becomes one of the M highest priority tasks), it might still have to wait
 * for the preemptee's migrate_disable() section to complete. Thereby suffering
 * a reduction in bandwidth in the exact duration of the migrate_disable()
 * section.
 *
 * Per this argument, the change from preempt_disable() to migrate_disable()
 * gets us:
 * - a higher priority task gains reduced wake-up latency; with preempt_disable()
 *   it would have had to wait for the lower priority task.
 *
 * - a lower priority task, which under preempt_disable() could've instantly
 *   migrated away when another CPU becomes available, is now constrained
 *   by the ability to push the higher priority task away, which might itself be
 *   in a migrate_disable() section, reducing its available bandwidth.
 *
 * IOW it trades latency / moves the interference term, but it stays in the
 * system, and as long as it remains unbounded, the system is not fully
 * deterministic.
 *
 *
 * The reason we have it anyway.
 *
 * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
 * number of primitives into becoming preemptible, they would also allow
 * migration. This turns out to break a bunch of per-cpu usage. To this end,
 * all these primitives employ migrate_disable() to restore this implicit
 * assumption.
 *
 * This is a 'temporary' work-around at best. The correct solution is getting
 * rid of the above assumptions and reworking the code to employ explicit
 * per-cpu locking or short preempt-disable regions.
 *
 * The end goal must be to get rid of migrate_disable(), alternatively we need
 * a schedulability theory that does not depend on arbitrary migration.
 *
 *
 * Notes on the implementation.
 *
 * The implementation is particularly tricky since existing code patterns
 * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
 * This means that it cannot use cpus_read_lock() to serialize against hotplug,
 * nor can it easily migrate itself into a pending affinity mask change on
 * migrate_enable().
 *
 *
 * Note: even non-work-conserving schedulers like semi-partitioned depend on
 *       migration, so migrate_disable() is not only a problem for
 *       work-conserving schedulers.
 *
 */

/**
 * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
 *
 * Use for code which requires preemption protection inside a critical
 * section which has preemption disabled implicitly on non-PREEMPT_RT
 * enabled kernels, by e.g.:
 *  - holding a spinlock/rwlock
 *  - soft interrupt context
 *  - regular interrupt handlers
 *
 * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
 * interrupt context and regular interrupt handlers are preemptible and
 * only prevent migration. preempt_disable_nested() ensures that preemption
 * is disabled for cases which require CPU local serialization even on
 * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
 *
 * The use cases are code sequences which are not serialized by a
 * particular lock instance, e.g.:
 *  - seqcount write side critical sections where the seqcount is not
 *    associated to a particular lock and therefore the automatic
 *    protection mechanism does not work. This prevents a live lock
 *    against a preempting high priority reader.
 *  - RMW per CPU variable updates like vmstat.
 */
/* Macro to avoid header recursion hell vs. lockdep */
#define preempt_disable_nested()				\
do {								\
	if (IS_ENABLED(CONFIG_PREEMPT_RT))			\
		preempt_disable();				\
	else							\
		lockdep_assert_preemption_disabled();		\
} while (0)

/**
 * preempt_enable_nested - Undo the effect of preempt_disable_nested()
 */
static __always_inline void preempt_enable_nested(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}
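
/*
 * Illustrative sketch: an RMW update of a per-CPU counter from a path that
 * already runs with BH disabled (and thus non-preemptible) on !PREEMPT_RT,
 * but is preemptible on PREEMPT_RT; foo_events is a hypothetical per-CPU
 * variable:
 *
 *	preempt_disable_nested();
 *	raw_cpu_add(foo_events, 1);
 *	preempt_enable_nested();
 */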

DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
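
/*
 * Illustrative sketch: with the guards defined above, a preempt disabled
 * scope can be expressed via cleanup.h and is dropped automatically when
 * the scope is left, including on early returns; foo_count is a
 * hypothetical per-CPU variable:
 *
 *	static void foo_account(void)
 *	{
 *		guard(preempt)();
 *		__this_cpu_inc(foo_count);
 *	}
 */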

#ifdef CONFIG_PREEMPT_DYNAMIC

extern bool preempt_model_none(void);
extern bool preempt_model_voluntary(void);
extern bool preempt_model_full(void);
extern bool preempt_model_lazy(void);

#else

static inline bool preempt_model_none(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_NONE);
}
static inline bool preempt_model_voluntary(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
}
static inline bool preempt_model_full(void)
{
	return IS_ENABLED(CONFIG_PREEMPT);
}

static inline bool preempt_model_lazy(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}

#endif

static inline bool preempt_model_rt(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_RT);
}

extern const char *preempt_model_str(void);

/*
 * Does the preemption model allow non-cooperative preemption?
 *
 * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
 * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
 * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
 * PREEMPT_NONE model.
 */
static inline bool preempt_model_preemptible(void)
{
	return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
}
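
/*
 * Illustrative sketch: because the preemption model may be selected at boot
 * time with CONFIG_PREEMPT_DYNAMIC, this has to be a runtime check rather
 * than an #ifdef; a long loop only needs an explicit rescheduling point
 * when non-cooperative preemption is unavailable:
 *
 *	if (!preempt_model_preemptible())
 *		cond_resched();
 */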

#endif /* __LINUX_PREEMPT_H */