// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel,
 * userspace, guest or idle.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in idle, userspace or guest mode.
 *
 * User/guest tracking started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 * RCU extended quiescent state bits imported from kernel/rcu/tree.c
 * where the relevant authorship may be found.
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <trace/events/rcu.h>

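/*
 * Boot-time state of each CPU: the RCU watching counter starts with
 * CT_RCU_WATCHING set and, when idle tracking is built in, ->nesting
 * starts at 1 with ->nmi_nesting primed to CT_NESTING_IRQ_NONIDLE.
 * In other words, every CPU is initially considered to be running in
 * the kernel with RCU watching, not in an extended quiescent state.
 */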
DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
	.nesting = 1,
	.nmi_nesting = CT_NESTING_IRQ_NONIDLE,
#endif
	.state = ATOMIC_INIT(CT_RCU_WATCHING),
};
EXPORT_SYMBOL_GPL(context_tracking);

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
#define TPS(x)	tracepoint_string(x)

/* Record the current task on exiting RCU-tasks (dyntick-idle entry). */
static __always_inline void rcu_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on entering RCU-tasks (dyntick-idle exit). */
static __always_inline void rcu_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/*
 * Record entry into an extended quiescent state. This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void ct_kernel_exit_state(int offset)
{
	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	// RCU is still watching. Better not be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !rcu_is_watching_curr_cpu());
	(void)ct_state_inc(offset);
	// RCU is no longer watching.
}

/*
 * Record exit from an extended quiescent state. This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void ct_kernel_enter_state(int offset)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = ct_state_inc(offset);
	// RCU is now watching. Better not be in an extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING));
}
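
/*
 * Note: the @offset passed to the two helpers above is CT_RCU_WATCHING,
 * optionally combined with a CT_STATE_* value (see ct_idle_enter(),
 * ct_idle_exit() and __ct_user_enter()/__ct_user_exit() below), so a single
 * atomic increment updates the RCU watching counter and the tracked
 * context state together.
 */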

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static void noinstr ct_kernel_exit(bool user, int offset)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	WARN_ON_ONCE(ct_nmi_nesting() != CT_NESTING_IRQ_NONIDLE);
	WRITE_ONCE(ct->nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     ct_nesting() == 0);
	if (ct_nesting() != 1) {
		// RCU will still be watching, so just do accounting and leave.
		ct->nesting--;
		return;
	}

	instrumentation_begin();
	lockdep_assert_irqs_disabled();
	trace_rcu_watching(TPS("End"), ct_nesting(), 0, ct_rcu_watching());
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr ct_kernel_exit_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));

	instrumentation_end();
	WRITE_ONCE(ct->nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	ct_kernel_exit_state(offset);
	// ... but is no longer watching here.
	rcu_task_exit();
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->nmi_nesting field to CT_NESTING_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr ct_kernel_enter(bool user, int offset)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
	long oldval;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
	oldval = ct_nesting();
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		ct->nesting++;
		return;
	}
	rcu_task_enter();
	// RCU is not watching here ...
	ct_kernel_enter_state(offset);
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr ct_kernel_enter_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));

	trace_rcu_watching(TPS("Start"), ct_nesting(), 1, ct_rcu_watching());
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(ct->nesting, 1);
	WARN_ON_ONCE(ct_nmi_nesting());
	WRITE_ONCE(ct->nmi_nesting, CT_NESTING_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * ct_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update ct->state and ct->nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to ct_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_nmi_exit(void)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	instrumentation_begin();
	/*
	 * Check for ->nmi_nesting underflow and bad CT state.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(ct_nmi_nesting() <= 0);
	WARN_ON_ONCE(!rcu_is_watching_curr_cpu());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (ct_nmi_nesting() != 1) {
		trace_rcu_watching(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2,
				   ct_rcu_watching());
		WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */
			   ct_nmi_nesting() - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_watching(TPS("Endirq"), ct_nmi_nesting(), 0, ct_rcu_watching());
	WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */

	// instrumentation for the noinstr ct_kernel_exit_state()
	instrument_atomic_write(&ct->state, sizeof(ct->state));
	instrumentation_end();

	// RCU is watching here ...
	ct_kernel_exit_state(CT_RCU_WATCHING);
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_task_exit();
}

/**
 * ct_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update ct->state and
 * ct->nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active. This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int. (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to ct_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_nmi_enter(void)
{
	long incby = 2;
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	/* Complain about underflow. */
	WARN_ON_ONCE(ct_nmi_nesting() < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment CT state
	 * to mark non-idle and increment ->nmi_nesting by one.
	 * Otherwise, increment ->nmi_nesting by two. This means
	 * if ->nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (!rcu_is_watching_curr_cpu()) {

		if (!in_nmi())
			rcu_task_enter();

		// RCU is not watching here ...
		ct_kernel_enter_state(CT_RCU_WATCHING);
		// ... but is watching here.

		instrumentation_begin();
		// instrumentation for the noinstr rcu_is_watching_curr_cpu()
		instrument_atomic_read(&ct->state, sizeof(ct->state));
		// instrumentation for the noinstr ct_kernel_enter_state()
		instrument_atomic_write(&ct->state, sizeof(ct->state));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_watching(incby == 1 ? TPS("Startirq") : TPS("++="),
			   ct_nmi_nesting(),
			   ct_nmi_nesting() + incby, ct_rcu_watching());
	instrumentation_end();
	WRITE_ONCE(ct->nmi_nesting, /* Prevent store tearing. */
		   ct_nmi_nesting() + incby);
	barrier();
}
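
/*
 * Informal example of the ->nmi_nesting bookkeeping above: an interrupt taken
 * while the CPU is RCU-idle leaves ->nmi_nesting at 1 (incby == 1); an NMI
 * nested on top of that handler bumps it to 3 (incby == 2); the NMI exit
 * drops it back to 1, and the interrupt exit then sees the value 1, resets it
 * to 0 and re-enters the extended quiescent state.
 */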

/**
 * ct_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur. (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to ct_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_idle_enter(void)
{
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
	ct_kernel_exit(false, CT_RCU_WATCHING + CT_STATE_IDLE);
}
EXPORT_SYMBOL_GPL(ct_idle_enter);

/**
 * ct_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to ct_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr ct_idle_exit(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	ct_kernel_enter(false, CT_RCU_WATCHING - CT_STATE_IDLE);
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ct_idle_exit);
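
/*
 * Sketch of the expected caller pattern (the generic idle loop and cpuidle
 * glue are assumed to do the equivalent of this, with interrupts disabled):
 *
 *	ct_idle_enter();
 *	arch_cpu_idle();	// or a cpuidle enter method
 *	ct_idle_exit();
 */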

/**
 * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur. The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to ct_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void ct_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	ct_nmi_enter();
}

/**
 * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur. The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit(). If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard. But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to ct_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void ct_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	ct_nmi_exit();
}

/*
 * Wrapper for ct_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to ct_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void ct_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	ct_irq_enter();
	local_irq_restore(flags);
}

/*
 * Wrapper for ct_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to ct_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void ct_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	ct_irq_exit();
	local_irq_restore(flags);
}
#else
static __always_inline void ct_kernel_exit(bool user, int offset) { }
static __always_inline void ct_kernel_enter(bool user, int offset) { }
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_CONTEXT_TRACKING_USER

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE_RO(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

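/*
 * Per-CPU recursion guard: __ct_user_enter()/__ct_user_exit() bail out if
 * they are re-entered on the same CPU (for instance via an exception taken
 * while the state is being updated), so the counter below should only ever
 * reach 1 on a well-behaved path.
 */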
static noinstr bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}

/**
 * __ct_user_enter - Inform the context tracking that the CPU is going
 *		     to enter user or guest space mode.
 *
 * @state: userspace context-tracking state to enter.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read-side critical sections,
 * because this function puts RCU into an extended quiescent state.
 */
void noinstr __ct_user_enter(enum ctx_state state)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
	lockdep_assert_irqs_disabled();

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__ct_state() != state) {
		if (ct->active) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or ct_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CT_STATE_USER) {
				instrumentation_begin();
				trace_user_enter(0);
				vtime_user_enter(current);
				instrumentation_end();
			}
			/*
			 * Unlike the generic entry implementation, we may be past the
			 * last rescheduling opportunity in the entry code. Trigger a
			 * self IPI that will fire and reschedule once we resume in
			 * user/guest mode.
			 */
			rcu_irq_work_resched();

			/*
			 * Enter RCU idle mode right before resuming userspace. No use of RCU
			 * is permitted between this call and the next ct_kernel_enter(). This
			 * way the CPU doesn't need to maintain the tick for RCU maintenance
			 * purposes when the CPU runs in userspace.
			 */
			ct_kernel_exit(true, CT_RCU_WATCHING + state);

			/*
			 * Special case if we only track user <-> kernel transitions for tickless
			 * cputime accounting but we don't support RCU extended quiescent state.
			 * In this case we don't care about any concurrency/ordering.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
				raw_atomic_set(&ct->state, state);
		} else {
			/*
			 * Even if context tracking is disabled on this CPU, because it's outside
			 * the full dynticks mask for example, we still have to keep track of the
			 * context transitions and states to prevent inconsistency on those of
			 * other CPUs.
			 * If a task triggers an exception in userspace, sleeps in the exception
			 * handler and then migrates to another CPU, that new CPU must know where
			 * the exception returns by the time we call exception_exit().
			 * This information can only be provided by the previous CPU when it called
			 * exception_enter().
			 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
			 * is false because we know that CPU is not tickless.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
				/* Tracking for vtime only, no concurrent RCU EQS accounting */
				raw_atomic_set(&ct->state, state);
			} else {
				/*
				 * Tracking for vtime and RCU EQS. Make sure we don't race
				 * with NMIs. OTOH we don't care about ordering here since
				 * RCU only requires CT_RCU_WATCHING increments to be fully
				 * ordered.
				 */
				raw_atomic_add(state, &ct->state);
			}
		}
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_enter);
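
/*
 * Sketch of how this is expected to be reached (the exact arch entry path
 * varies): on return to userspace the entry code calls user_enter_irqoff(),
 * which, when the static key is enabled, ends up in
 * __ct_user_enter(CT_STATE_USER) with interrupts disabled; KVM similarly
 * wraps guest entry via context_tracking_guest_enter().
 */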

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_restore() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_enter() through user_enter_irqoff()
 * or context_tracking_guest_enter(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * ct_irq_enter() rcu_eqs_exit(true) rcu_eqs_enter(true) ct_irq_exit()
	 * This would mess up the ->nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_enter);
EXPORT_SYMBOL_GPL(ct_user_enter);

/**
 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls
 * local_irq_restore(), involving illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call user_enter_irqoff(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void user_enter_callable(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(user_enter_callable);

/**
 * __ct_user_exit - Inform the context tracking that the CPU is
 *		    exiting user or guest mode and entering the kernel.
 *
 * @state: userspace context-tracking state being exited from.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of RCU read-side critical sections. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __ct_user_exit(enum ctx_state state)
{
	struct context_tracking *ct = this_cpu_ptr(&context_tracking);

	if (!context_tracking_recursion_enter())
		return;

	if (__ct_state() == state) {
		if (ct->active) {
			/*
			 * Exit RCU idle mode while entering the kernel because it can
			 * run an RCU read-side critical section at any time.
			 */
			ct_kernel_enter(true, CT_RCU_WATCHING - state);
			if (state == CT_STATE_USER) {
				instrumentation_begin();
				vtime_user_exit(current);
				trace_user_exit(0);
				instrumentation_end();
			}

			/*
			 * Special case if we only track user <-> kernel transitions for tickless
			 * cputime accounting but we don't support RCU extended quiescent state.
			 * In this case we don't care about any concurrency/ordering.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
				raw_atomic_set(&ct->state, CT_STATE_KERNEL);

		} else {
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
				/* Tracking for vtime only, no concurrent RCU EQS accounting */
				raw_atomic_set(&ct->state, CT_STATE_KERNEL);
			} else {
				/*
				 * Tracking for vtime and RCU EQS. Make sure we don't race
				 * with NMIs. OTOH we don't care about ordering here since
				 * RCU only requires CT_RCU_WATCHING increments to be fully
				 * ordered.
				 */
				raw_atomic_sub(state, &ct->state);
			}
		}
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_exit);

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_save() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_exit() through user_exit_irqoff()
 * or context_tracking_guest_exit(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_exit);
EXPORT_SYMBOL_GPL(ct_user_exit);

/**
 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
 *			  archs that didn't manage to check the context tracking
 *			  static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls local_irq_save(),
 * involving illegal RCU uses through tracing and lockdep. This is unlikely
 * to be fixed as this function is obsolete. The preferred way is to call
 * user_exit_irqoff(). It should be the arch entry code's responsibility to
 * call into context tracking with IRQs disabled.
 */
void user_exit_callable(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(user_exit_callable);

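/*
 * Enable user/kernel context tracking on @cpu: mark the CPU active and bump
 * the static key so the entry hooks become live. Callers are presumed to be
 * early boot code only (context_tracking_init() below and, when nohz_full is
 * configured, the tick setup code), hence the __init annotation and the
 * one-time TIF_NOHZ/initialization block.
 */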
void __init ct_cpu_track_user(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_key);
	}

	if (initialized)
		return;

#ifdef CONFIG_HAVE_TIF_NOHZ
	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		ct_cpu_track_user(cpu);
}
#endif

#endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */