Lines Matching +full:support +full:- +full:nesting
// SPDX-License-Identifier: GPL-2.0-only
* Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
.nesting = 1,
/* Record the current task on exiting RCU-tasks (dyntick-idle entry). */
WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); in rcu_task_exit()
/* Record no current task on entering RCU-tasks (dyntick-idle exit). */
WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); in rcu_task_enter()
current->trc_reader_special.b.need_mb = true; in rcu_task_trace_heavyweight_enter()
current->trc_reader_special.b.need_mb = false; in rcu_task_trace_heavyweight_exit()
* CPUs seeing atomic_add_return() must see prior RCU read-side
* critical sections, and we also must force ordering with the next idle sojourn. in ct_kernel_exit_state()
* CPUs seeing atomic_add_return() must see prior idle sojourns,
* and we also must force ordering with the next RCU read-side
* critical section. in ct_kernel_enter_state()
* Enter an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*
* We crowbar the ->nmi_nesting field to zero to allow for
* the possibility of usermode upcalls having messed up our count
* of interrupt nesting level during the prior busy period.
WRITE_ONCE(ct->nmi_nesting, 0); in ct_kernel_exit()
ct->nesting--; in ct_kernel_exit()
instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_exit()
WRITE_ONCE(ct->nesting, 0); /* Avoid irq-access tearing. */ in ct_kernel_exit()
* Exit an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
*
* We crowbar the ->nmi_nesting field to CT_NESTING_IRQ_NONIDLE to
* allow for the possibility of usermode upcalls messing up our count of
* interrupt nesting level during the busy period that is just now starting.
ct->nesting++; in ct_kernel_enter()
instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_enter()
WRITE_ONCE(ct->nesting, 1); in ct_kernel_enter()
WRITE_ONCE(ct->nmi_nesting, CT_NESTING_IRQ_NONIDLE); in ct_kernel_enter()
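The two counters above work together: ->nesting tracks process-level entries into the RCU-watching mode, while ->nmi_nesting is crowbarred to a large sentinel (CT_NESTING_IRQ_NONIDLE) when the CPU becomes non-idle and back to zero on the outermost exit. The userspace sketch below models only that counting idea; the struct, function and macro names (toy_ct, toy_kernel_enter/exit, TOY_NESTING_IRQ_NONIDLE and its value) are invented for illustration and are not the kernel's.

#include <limits.h>
#include <stdio.h>

/* Invented stand-in for the two context_tracking counters. */
struct toy_ct {
	long nesting;		/* process-level enter/exit depth         */
	long nmi_nesting;	/* irq/NMI depth, crowbarred at EQS edges */
};

/* Large sentinel in the spirit of CT_NESTING_IRQ_NONIDLE (assumed value). */
#define TOY_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1)

/* Enter an extended quiescent state: forget any stale irq nesting, then
 * either just decrement ->nesting or, on the outermost exit, zero it. */
static void toy_kernel_exit(struct toy_ct *ct)
{
	ct->nmi_nesting = 0;
	if (ct->nesting != 1) {
		ct->nesting--;
		return;
	}
	ct->nesting = 0;
}

/* Leave the extended quiescent state: the outermost entry re-arms both
 * counters, nested entries only bump ->nesting. */
static void toy_kernel_enter(struct toy_ct *ct)
{
	if (ct->nesting) {
		ct->nesting++;
		return;
	}
	ct->nesting = 1;
	ct->nmi_nesting = TOY_NESTING_IRQ_NONIDLE;
}

int main(void)
{
	struct toy_ct ct = { .nesting = 1, .nmi_nesting = TOY_NESTING_IRQ_NONIDLE };

	toy_kernel_exit(&ct);	/* e.g. idle entry */
	printf("idle: nesting=%ld nmi_nesting=%ld\n", ct.nesting, ct.nmi_nesting);
	toy_kernel_enter(&ct);	/* e.g. idle exit  */
	printf("busy: nesting=%ld nmi_nesting=%ld\n", ct.nesting, ct.nmi_nesting);
	return 0;
}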
* ct_nmi_exit - inform RCU of exit from NMI context
*
* If we are returning from the outermost NMI handler that interrupted an
* RCU-idle period, update ct->state and ct->nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
* Check for ->nmi_nesting underflow and bad CT state. in ct_nmi_exit()
* If the nesting level is not 1, the CPU wasn't RCU-idle, so in ct_nmi_exit()
* leave it in non-RCU-idle state. in ct_nmi_exit()
trace_rcu_watching(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2, in ct_nmi_exit()
WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */ in ct_nmi_exit()
ct_nmi_nesting() - 2); in ct_nmi_exit()
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ in ct_nmi_exit()
WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */ in ct_nmi_exit()
instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_exit()
* ct_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle from RCU's viewpoint, update ct->state and
* ct->nmi_nesting to let the RCU grace-period handling know
* that the CPU is active. This implementation permits nested NMIs, as
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
* If the CPU was idle from RCU's viewpoint, atomically update CT state
* to mark non-idle and increment ->nmi_nesting by one. in ct_nmi_enter()
* Otherwise, increment ->nmi_nesting by two. This means in ct_nmi_enter()
* if ->nmi_nesting is equal to one, we are guaranteed in ct_nmi_enter()
* to be in the outermost NMI handler that interrupted an RCU-idle in ct_nmi_enter()
* period (observation due to Andy Lutomirski).
instrument_atomic_read(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
WRITE_ONCE(ct->nmi_nesting, /* Prevent store tearing. */ in ct_nmi_enter()
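The asymmetric counting described above (add one when the NMI interrupted an RCU-idle CPU, add two otherwise, subtract two on exit unless the count is exactly one) is easy to model outside the kernel. The sketch below only illustrates that scheme; the names (toy_nmi_enter/exit, watching) and the flat global state are invented stand-ins for the per-CPU atomics used by ct_nmi_enter()/ct_nmi_exit().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static long nmi_nesting;	/* models ct->nmi_nesting                */
static bool watching;		/* models "RCU is watching this CPU"     */

static void toy_nmi_enter(void)
{
	long incby = 2;

	if (!watching) {	/* this handler interrupted an idle CPU */
		watching = true;
		incby = 1;
	}
	nmi_nesting += incby;
}

static void toy_nmi_exit(void)
{
	assert(nmi_nesting > 0 && watching);

	if (nmi_nesting != 1) {	/* not the outermost idle-interrupting NMI */
		nmi_nesting -= 2;
		return;
	}
	nmi_nesting = 0;	/* crowbar back to zero ...                */
	watching = false;	/* ... and return the CPU to RCU-idle      */
}

int main(void)
{
	toy_nmi_enter();	/* outermost handler: count becomes 1 */
	toy_nmi_enter();	/* nested handler:    count becomes 3 */
	printf("nested: nmi_nesting=%ld\n", nmi_nesting);
	toy_nmi_exit();		/* back to 1 */
	toy_nmi_exit();		/* outermost exit: crowbar to 0, idle again */
	printf("after exits: nmi_nesting=%ld watching=%d\n", nmi_nesting, watching);
	return 0;
}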
* ct_idle_enter - inform RCU that current CPU is entering idle
*
* Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side critical
* sections can occur in irq handlers in idle, a possibility handled by
* irq_enter() and irq_exit().)
* ct_idle_exit - inform RCU that current CPU is leaving idle
*
* Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur.
ct_kernel_enter(false, CT_RCU_WATCHING - CT_STATE_IDLE); in ct_idle_exit()
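The offset passed here, CT_RCU_WATCHING - CT_STATE_IDLE, hints at how ct->state is packed: the low bits hold a CT_STATE_* value and the rest acts as the RCU-watching counter, so a single atomic add can switch IDLE to KERNEL and bump the counter at once. The snippet below only demonstrates that arithmetic with invented constants (TOY_STATE_*, TOY_RCU_WATCHING); the actual bit layout and values in the kernel headers may differ.

#include <stdio.h>

/* Invented packing: low bits = context state, upper bits = watching counter. */
enum { TOY_STATE_KERNEL = 0, TOY_STATE_IDLE = 1, TOY_STATE_USER = 2, TOY_STATE_MAX = 4 };
#define TOY_RCU_WATCHING TOY_STATE_MAX	/* one "RCU is watching" counter tick */

int main(void)
{
	int state = 3 * TOY_RCU_WATCHING + TOY_STATE_IDLE;	/* idle, counter = 3 */

	/* Idle exit: one add flips IDLE -> KERNEL and increments the counter. */
	state += TOY_RCU_WATCHING - TOY_STATE_IDLE;

	printf("ct_state=%d (KERNEL=%d), watching counter=%d\n",
	       state % TOY_RCU_WATCHING, TOY_STATE_KERNEL, state / TOY_RCU_WATCHING);
	return 0;
}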
* ct_irq_enter - inform RCU that current CPU is entering irq away from idle
*
* Enter an interrupt handler, which might possibly result in exiting
* idle mode, in other words, entering the mode in which read-side critical
* sections can occur. The caller must have disabled interrupts.
* ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
*
* Exit from an interrupt handler, which might possibly result in entering
* idle mode, in other words, leaving the mode in which read-side critical
* sections can occur. The caller must have disabled interrupts.
* __ct_user_enter - Inform the context tracking that the CPU is going
*                   to enter user or guest space mode.
*
* @state: userspace context-tracking state to enter.
WARN_ON_ONCE(!current->mm); in __ct_user_enter()
if (ct->active) { in __ct_user_enter()
* any RCU read-side critical section until the next call to in __ct_user_enter()
* user_exit() or ct_irq_enter(). in __ct_user_enter()
* Special case if we only track user <-> kernel transitions for tickless in __ct_user_enter()
* cputime accounting but we don't support RCU extended quiescent state. in __ct_user_enter()
raw_atomic_set(&ct->state, state); in __ct_user_enter()
raw_atomic_set(&ct->state, state); in __ct_user_enter()
raw_atomic_add(state, &ct->state); in __ct_user_enter()
* leading to that nesting: in ct_user_enter()
* user_enter_callable() - Unfortunate ASM callable version of user_enter() for
* __ct_user_exit - Inform the context tracking that the CPU is
*                  exiting user or guest space mode and entering the kernel.
*
* @state: userspace context-tracking state being exited from.
*
* This call supports re-entrancy. This way it can be called from any exception
if (ct->active) { in __ct_user_exit()
ct_kernel_enter(true, CT_RCU_WATCHING - state); in __ct_user_exit()
* Special case if we only track user <-> kernel transitions for tickless in __ct_user_exit()
* cputime accounting but we don't support RCU extended quiescent state. in __ct_user_exit()
raw_atomic_set(&ct->state, CT_STATE_KERNEL); in __ct_user_exit()
raw_atomic_set(&ct->state, CT_STATE_KERNEL); in __ct_user_exit()
raw_atomic_sub(state, &ct->state); in __ct_user_exit()
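For the cputime-accounting-only special case, enter adds the target state to ct->state and exit subtracts it again (or plainly sets CT_STATE_KERNEL when no concurrent RCU accounting can race with it). Reusing the invented packed layout from the earlier sketch, the toy below shows why the add/sub pairing is the safer choice when NMIs may concurrently bump the watching counter held in the same word: a plain overwrite would lose their update. Constants are illustrative, not the kernel's.

#include <stdio.h>

/* Same invented packing as before: low bits = state, upper bits = counter. */
enum { TOY_STATE_KERNEL = 0, TOY_STATE_USER = 2, TOY_STATE_MAX = 4 };
#define TOY_RCU_WATCHING TOY_STATE_MAX

int main(void)
{
	int state = 5 * TOY_RCU_WATCHING + TOY_STATE_KERNEL;	/* kernel, counter = 5 */

	state += TOY_STATE_USER;	/* __ct_user_enter(): kernel -> user */
	state += TOY_RCU_WATCHING;	/* concurrent NMI ticks the counter  */
	state -= TOY_STATE_USER;	/* __ct_user_exit(): user -> kernel  */

	/* The NMI's tick survives; overwriting the word with CT_STATE_KERNEL
	 * would have discarded it. */
	printf("ct_state=%d, watching counter=%d\n",
	       state % TOY_RCU_WATCHING, state / TOY_RCU_WATCHING);
	return 0;
}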
* user_exit_callable() - Unfortunate ASM callable version of user_exit() for