Lines Matching +full:idle +full:-state in kernel/context_tracking.c

1 // SPDX-License-Identifier: GPL-2.0-only
4 * userspace, guest or idle.
7 * runs in idle, userspace or guest mode.
13 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
16 * RCU extended quiescent state bits imported from kernel/rcu/tree.c
34 .state = ATOMIC_INIT(CT_RCU_WATCHING),
41 /* Record the current task on exiting RCU-tasks (dyntick-idle entry). */
45 WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); in rcu_task_exit()
49 /* Record no current task on entering RCU-tasks (dyntick-idle exit). */
53 WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); in rcu_task_enter()
62 current->trc_reader_special.b.need_mb = true; in rcu_task_trace_heavyweight_enter()
71 current->trc_reader_special.b.need_mb = false; in rcu_task_trace_heavyweight_exit()
76 * Record entry into an extended quiescent state. This is only to be
77 * called when not already in an extended quiescent state, that is,
84 * CPUs seeing atomic_add_return() must see prior RCU read-side in ct_kernel_exit_state()
86 * next idle sojourn. in ct_kernel_exit_state()
88 rcu_task_trace_heavyweight_enter(); // Before CT state update! in ct_kernel_exit_state()
89 // RCU is still watching. Better not be in extended quiescent state! in ct_kernel_exit_state()
96 * Record exit from an extended quiescent state. This is only to be
97 * called from an extended quiescent state, that is, RCU is not watching
105 * CPUs seeing atomic_add_return() must see prior idle sojourns, in ct_kernel_enter_state()
106 * and we also must force ordering with the next RCU read-side in ct_kernel_enter_state()
110 // RCU is now watching. Better not be in an extended quiescent state! in ct_kernel_enter_state()
111 rcu_task_trace_heavyweight_exit(); // After CT state update! in ct_kernel_enter_state()
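Note: the two helpers above operate on a single per-CPU atomic word that carries both the context state and the RCU watching counter (the CT_RCU_WATCHING unit), so one fully ordered atomic_add_return() both publishes the new context and tells remote CPUs whether RCU is watching, with the ordering guarantees the comments describe. The sketch below is a plain C11 model of that arithmetic, not the kernel code; the macro values and the ct_state_inc() helper are illustrative only.

#include <stdatomic.h>
#include <assert.h>

#define CT_STATE_IDLE     1     /* illustrative context value */
#define CT_RCU_WATCHING   4     /* one unit of the RCU watching counter */

static atomic_int ct_state = CT_RCU_WATCHING;   /* kernel context, RCU watching */

static int ct_state_inc(int incby)
{
        /* seq_cst fetch-add models the kernel's fully ordered atomic_add_return() */
        return atomic_fetch_add(&ct_state, incby) + incby;
}

int main(void)
{
        /* enter the idle extended quiescent state: the watching bit goes off */
        int s = ct_state_inc(CT_RCU_WATCHING + CT_STATE_IDLE);
        assert(!(s & CT_RCU_WATCHING));

        /* leave it again: the watching bit comes back on */
        s = ct_state_inc(CT_RCU_WATCHING - CT_STATE_IDLE);
        assert(s & CT_RCU_WATCHING);
        return 0;
}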
116 * Enter an RCU extended quiescent state, which can be either the
117 * idle loop or adaptive-tickless usermode execution.
119 * We crowbar the ->nmi_nesting field to zero to allow for
128 WRITE_ONCE(ct->nmi_nesting, 0); in ct_kernel_exit()
133 ct->nesting--; in ct_kernel_exit()
144 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_exit()
147 WRITE_ONCE(ct->nesting, 0); /* Avoid irq-access tearing. */ in ct_kernel_exit()
155 * Exit an RCU extended quiescent state, which can be either the
156 * idle loop or adaptive-tickless usermode execution.
158 * We crowbar the ->nmi_nesting field to CT_NESTING_IRQ_NONIDLE to
172 ct->nesting++; in ct_kernel_enter()
182 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_enter()
186 WRITE_ONCE(ct->nesting, 1); in ct_kernel_enter()
188 WRITE_ONCE(ct->nmi_nesting, CT_NESTING_IRQ_NONIDLE); in ct_kernel_enter()
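Note: ct_kernel_exit()/ct_kernel_enter() add process-level nesting on top of that counter flip: only the outermost transition actually changes the extended quiescent state, inner calls just adjust ->nesting, and ->nmi_nesting is crowbarred to a known value so a later irq/NMI entry can tell whether it interrupted an EQS. The stand-alone model below illustrates that bookkeeping with an assumed value for CT_NESTING_IRQ_NONIDLE; it is not the kernel implementation.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define CT_NESTING_IRQ_NONIDLE  (LONG_MAX / 2 + 1)      /* assumed sentinel value */

struct ct {
        long nesting;           /* process-level kernel entry/exit nesting */
        long nmi_nesting;       /* irq/NMI nesting, crowbarred across EQS  */
        bool watching;          /* stands in for the CT_RCU_WATCHING bit   */
};

static void model_kernel_exit(struct ct *ct)
{
        ct->nmi_nesting = 0;            /* crowbar: irq/NMI nesting restarts from idle */
        if (ct->nesting != 1) {
                ct->nesting--;          /* nested call: RCU keeps watching */
                return;
        }
        ct->nesting = 0;
        ct->watching = false;           /* ct_kernel_exit_state() in the real code */
}

static void model_kernel_enter(struct ct *ct)
{
        if (ct->nesting) {
                ct->nesting++;          /* already out of the EQS */
                return;
        }
        ct->watching = true;            /* ct_kernel_enter_state() in the real code */
        ct->nesting = 1;
        ct->nmi_nesting = CT_NESTING_IRQ_NONIDLE;
}

int main(void)
{
        struct ct ct = { .nesting = 1, .nmi_nesting = CT_NESTING_IRQ_NONIDLE, .watching = true };

        model_kernel_exit(&ct);         /* outermost exit: enter idle/user EQS */
        model_kernel_enter(&ct);        /* and come back */
        printf("watching=%d nesting=%ld\n", (int)ct.watching, ct.nesting);
        return 0;
}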
193 * ct_nmi_exit - inform RCU of exit from NMI context
196 * RCU-idle period, update ct->state and ct->nmi_nesting
197 * to let the RCU grace-period handling know that the CPU is back to
198 * being RCU-idle.
209 * Check for ->nmi_nesting underflow and bad CT state. in ct_nmi_exit()
217 * If the nesting level is not 1, the CPU wasn't RCU-idle, so in ct_nmi_exit()
218 * leave it in non-RCU-idle state. in ct_nmi_exit()
221 trace_rcu_watching(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2, in ct_nmi_exit()
223 WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */ in ct_nmi_exit()
224 ct_nmi_nesting() - 2); in ct_nmi_exit()
229 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ in ct_nmi_exit()
231 WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */ in ct_nmi_exit()
234 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_exit()
246 * ct_nmi_enter - inform RCU of entry to NMI context
248 * If the CPU was idle from RCU's viewpoint, update ct->state and
249 * ct->nmi_nesting to let the RCU grace-period handling know
266 * If idle from RCU viewpoint, atomically increment CT state in ct_nmi_enter()
267 * to mark non-idle and increment ->nmi_nesting by one. in ct_nmi_enter()
268 * Otherwise, increment ->nmi_nesting by two. This means in ct_nmi_enter()
269 * if ->nmi_nesting is equal to one, we are guaranteed in ct_nmi_enter()
270 * to be in the outermost NMI handler that interrupted an RCU-idle in ct_nmi_enter()
284 instrument_atomic_read(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
286 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
300 WRITE_ONCE(ct->nmi_nesting, /* Prevent store tearing. */ in ct_nmi_enter()
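Note: the asymmetric increments are the heart of the NMI bookkeeping described above: entering from an RCU-idle CPU adds 1, entering from a non-idle CPU adds 2, so the value 1 uniquely marks the outermost handler that interrupted an RCU-idle period, which is exactly what ct_nmi_exit() tests before crowbarring ->nmi_nesting back to 0. The irq hooks further down rely on the same counters. A self-contained toy model of that scheme (illustrative only, not the kernel code):

#include <assert.h>
#include <stdbool.h>

struct ct { long nmi_nesting; bool watching; };

static void model_nmi_enter(struct ct *ct)
{
        long incby = 2;

        if (!ct->watching) {            /* this NMI/irq interrupted an RCU-idle CPU */
                ct->watching = true;    /* ct_kernel_enter_state() in the real code */
                incby = 1;
        }
        ct->nmi_nesting += incby;
}

static void model_nmi_exit(struct ct *ct)
{
        assert(ct->nmi_nesting > 0 && ct->watching);
        if (ct->nmi_nesting != 1) {
                ct->nmi_nesting -= 2;   /* not the outermost handler: stay non-idle */
                return;
        }
        /* outermost handler that interrupted an RCU-idle CPU: restore idleness */
        ct->nmi_nesting = 0;
        ct->watching = false;
}

int main(void)
{
        struct ct ct = { .nmi_nesting = 0, .watching = false };        /* idle CPU */

        model_nmi_enter(&ct);   /* outermost NMI: ->nmi_nesting becomes 1 */
        model_nmi_enter(&ct);   /* nested NMI:    ->nmi_nesting becomes 3 */
        model_nmi_exit(&ct);    /* back to 1 */
        model_nmi_exit(&ct);    /* outermost exit: 0, the CPU is RCU-idle again */
        assert(ct.nmi_nesting == 0 && !ct.watching);
        return 0;
}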
306 * ct_idle_enter - inform RCU that current CPU is entering idle
308 * Enter idle mode, in other words, -leave- the mode in which RCU
309 * read-side critical sections can occur. (Though RCU read-side
310 * critical sections can occur in irq handlers in idle, a possibility
324 * ct_idle_exit - inform RCU that current CPU is leaving idle
326 * Exit idle mode, in other words, -enter- the mode in which RCU
327 * read-side critical sections can occur.
337 ct_kernel_enter(false, CT_RCU_WATCHING - CT_STATE_IDLE); in ct_idle_exit()
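Note: taken together, the two hooks are meant to bracket the idle loop's low-power wait so that RCU stops depending on this CPU's tick while it sleeps. The toy below only shows that calling pattern; ct_idle_enter()/ct_idle_exit() are stubbed to flip a flag so the example compiles (the real ones do the ct_kernel_exit()/ct_kernel_enter() work shown earlier), and arch_cpu_do_idle() is a made-up placeholder, not a kernel function.

#include <stdbool.h>
#include <stdio.h>

static bool rcu_watching_this_cpu = true;

static void ct_idle_enter(void) { rcu_watching_this_cpu = false; }      /* stub */
static void ct_idle_exit(void)  { rcu_watching_this_cpu = true;  }      /* stub */
static void arch_cpu_do_idle(void) { /* pretend to wait for an interrupt */ }

int main(void)
{
        /* one iteration of a tickless idle loop: no RCU read-side critical
         * sections may be relied on between enter and exit */
        ct_idle_enter();
        arch_cpu_do_idle();
        ct_idle_exit();
        printf("watching=%d\n", (int)rcu_watching_this_cpu);
        return 0;
}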
343 * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
346 * idle mode, in other words, entering the mode in which read-side critical
351 * This code assumes that the idle loop never does upcalls to user mode.
352 * If your architecture's idle loop does do upcalls to user mode (or does
371 * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
374 * idle mode, in other words, leaving the mode in which read-side critical
377 * This code assumes that the idle loop never does anything that might
379 * architecture's idle loop violates this assumption, RCU will give you what
457 * __ct_user_enter - Inform the context tracking that the CPU is going
460 * @state: userspace context-tracking state to enter.
465 * because this function sets RCU in extended quiescent state.
467 void noinstr __ct_user_enter(enum ctx_state state) in __ct_user_enter() argument
473 WARN_ON_ONCE(!current->mm); in __ct_user_enter()
478 if (__ct_state() != state) { in __ct_user_enter()
479 if (ct->active) { in __ct_user_enter()
483 * any RCU read-side critical section until the next call to in __ct_user_enter()
487 if (state == CT_STATE_USER) { in __ct_user_enter()
501 * Enter RCU idle mode right before resuming userspace. No use of RCU in __ct_user_enter()
506 ct_kernel_exit(true, CT_RCU_WATCHING + state); in __ct_user_enter()
509 * Special case if we only track user <-> kernel transitions for tickless in __ct_user_enter()
510 * cputime accounting but we don't support RCU extended quiescent state. in __ct_user_enter()
514 raw_atomic_set(&ct->state, state); in __ct_user_enter()
531 raw_atomic_set(&ct->state, state); in __ct_user_enter()
539 raw_atomic_add(state, &ct->state); in __ct_user_enter()
556 void ct_user_enter(enum ctx_state state) in ct_user_enter() argument
572 __ct_user_enter(state); in ct_user_enter()
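Note: the special-case branches above update the shared state word without going through the RCU machinery: when only vtime accounting is active the new context can simply be stored (raw_atomic_set()), but when the RCU watching counter shares the word the context must be added as a delta so a concurrent NMI update of the counter bits is not wiped out. Below is a small C11 illustration of that distinction; the bit layout and values are illustrative, not copied from the kernel headers, and the final subtraction mirrors the raw_atomic_sub() in __ct_user_exit() further down.

#include <assert.h>
#include <stdatomic.h>

#define CT_STATE_KERNEL  0
#define CT_STATE_USER    2
#define CT_STATE_MASK    3      /* low bits: context           */
#define CT_RCU_WATCHING  4      /* upper bits: watching counter */

static atomic_int ct_state = CT_RCU_WATCHING;   /* kernel context, watching */

int main(void)
{
        /* user entry: add the context delta instead of storing CT_STATE_USER */
        atomic_fetch_add(&ct_state, CT_STATE_USER - CT_STATE_KERNEL);

        /* an NMI-style update of the watching counter can interleave safely */
        atomic_fetch_add(&ct_state, CT_RCU_WATCHING);

        /* user exit: subtract the same delta; the counter bits are untouched */
        atomic_fetch_add(&ct_state, -(CT_STATE_USER - CT_STATE_KERNEL));

        assert((atomic_load(&ct_state) & CT_STATE_MASK) == CT_STATE_KERNEL);
        assert((atomic_load(&ct_state) & ~CT_STATE_MASK) == 2 * CT_RCU_WATCHING);
        return 0;
}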
579 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
596 * __ct_user_exit - Inform the context tracking that the CPU is
599 * @state: userspace context-tracking state being exited from.
606 * This call supports re-entrancy. This way it can be called from any exception
609 void noinstr __ct_user_exit(enum ctx_state state) in __ct_user_exit() argument
616 if (__ct_state() == state) { in __ct_user_exit()
617 if (ct->active) { in __ct_user_exit()
619 * Exit RCU idle mode while entering the kernel because it can in __ct_user_exit()
622 ct_kernel_enter(true, CT_RCU_WATCHING - state); in __ct_user_exit()
623 if (state == CT_STATE_USER) { in __ct_user_exit()
631 * Special case if we only track user <-> kernel transitions for tickless in __ct_user_exit()
632 * cputime accounting but we don't support RCU extended quiescent state. in __ct_user_exit()
636 raw_atomic_set(&ct->state, CT_STATE_KERNEL); in __ct_user_exit()
641 raw_atomic_set(&ct->state, CT_STATE_KERNEL); in __ct_user_exit()
649 raw_atomic_sub(state, &ct->state); in __ct_user_exit()
666 void ct_user_exit(enum ctx_state state) in ct_user_exit() argument
674 __ct_user_exit(state); in ct_user_exit()
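Note: the re-entrancy mentioned in the __ct_user_exit() comment appears to fall out of the __ct_state() == state check: once the first call has moved the CPU back to CT_STATE_KERNEL, a nested call from an exception that interrupts the exit path no longer matches and becomes a no-op. A minimal model of that property (not kernel code, the state handling is simplified to a plain variable):

#include <assert.h>

enum ctx_state { CT_STATE_KERNEL = 0, CT_STATE_USER = 2 };

static enum ctx_state cur_state = CT_STATE_USER;
static int transitions;

static void model_user_exit(enum ctx_state state)
{
        if (cur_state != state)
                return;                 /* already exited: nested call is a no-op */
        cur_state = CT_STATE_KERNEL;
        transitions++;
}

int main(void)
{
        model_user_exit(CT_STATE_USER); /* exception entry         */
        model_user_exit(CT_STATE_USER); /* nested exception: no-op */
        assert(cur_state == CT_STATE_KERNEL && transitions == 1);
        return 0;
}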
681 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for