Lines Matching +full:wait +full:- +full:state

1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
23 * Definition for a Tasks-RCU-like mechanism.
26 * @cbs_wq: Wait queue allowing new callbacks to get the kthread's attention.
28 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
29 * @gp_func: This flavor's grace-period-wait function.
30 * @gp_state: Grace period's most recent state transition (debugging).
31 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
34 * @gp_start: Most recent grace-period start in jiffies.
37 * @n_ipis_fails: Number of IPI-send failures.
38 * @pregp_func: This flavor's pre-grace-period function (optional).
39 * @pertask_func: This flavor's per-task scan function (optional).
40 * @postscan_func: This flavor's post-task scan function (optional).
41 * @holdout_func: This flavor's holdout-list scan function (optional).
42 * @postgp_func: This flavor's post-grace-period function (optional).
43 * @call_func: This flavor's call_rcu()-equivalent function.
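Taken together, these fields describe one flavor's callback queue, grace-period kthread, statistics, and optional per-phase hooks. A minimal sketch of the structure follows, assuming it is named struct rcu_tasks (matching the rtp pointers used throughout) and that the hook typedef names mirror the field names; fields not documented above (init_fract, gp_jiffies, name, kname) are inferred from their later uses in this listing.

/* Sketch of the per-flavor state, based on the kernel-doc above. */
struct rcu_tasks {
	struct rcu_head *cbs_head;		/* Singly linked list of callbacks. */
	struct rcu_head **cbs_tail;		/* Tail of the callback list. */
	struct wait_queue_head cbs_wq;		/* Wakes the kthread when callbacks arrive. */
	raw_spinlock_t cbs_lock;		/* Protects the callback list. */
	int gp_state;				/* Most recent grace-period state transition (debugging). */
	int gp_sleep;				/* Per-grace-period sleep to prevent CPU-bound looping. */
	int init_fract;				/* Initial backoff sleep interval. */
	unsigned long gp_jiffies;		/* Time of last gp_state transition. */
	unsigned long gp_start;			/* Most recent grace-period start in jiffies. */
	unsigned long n_gps;			/* Number of grace periods completed since boot. */
	unsigned long n_ipis;			/* Number of IPIs sent to encourage grace periods to end. */
	unsigned long n_ipis_fails;		/* Number of IPI-send failures. */
	struct task_struct *kthread_ptr;	/* Grace-period/callback-invocation kthread. */
	rcu_tasks_gp_func_t gp_func;		/* Grace-period-wait function. */
	pregp_func_t pregp_func;		/* Optional pre-grace-period function. */
	pertask_func_t pertask_func;		/* Optional per-task scan function. */
	postscan_func_t postscan_func;		/* Optional post-task-scan function. */
	holdouts_func_t holdouts_func;		/* Optional holdout-list scan function. */
	postgp_func_t postgp_func;		/* Optional post-grace-period function. */
	call_rcu_func_t call_func;		/* This flavor's call_rcu()-equivalent. */
	char *name;				/* Flavor name for messages. */
	char *kname;				/* Kthread name. */
};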
97 /* RCU tasks grace-period state for debugging. */
131 /* Record grace-period phase and time. */
134 rtp->gp_state = newstate; in set_tasks_gp_state()
135 rtp->gp_jiffies = jiffies; in set_tasks_gp_state()
139 /* Return state name. */
142 int i = data_race(rtp->gp_state); // Let KCSAN detect update races in tasks_gp_state_getname()
158 rhp->next = NULL; in call_rcu_tasks_generic()
159 rhp->func = func; in call_rcu_tasks_generic()
160 raw_spin_lock_irqsave(&rtp->cbs_lock, flags); in call_rcu_tasks_generic()
161 needwake = !rtp->cbs_head; in call_rcu_tasks_generic()
162 WRITE_ONCE(*rtp->cbs_tail, rhp); in call_rcu_tasks_generic()
163 rtp->cbs_tail = &rhp->next; in call_rcu_tasks_generic()
164 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); in call_rcu_tasks_generic()
166 if (needwake && READ_ONCE(rtp->kthread_ptr)) in call_rcu_tasks_generic()
167 wake_up(&rtp->cbs_wq); in call_rcu_tasks_generic()
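Flavor-specific call_rcu()-style entry points are then thin wrappers around this generic enqueue path. A sketch for the classic flavor, assuming its per-flavor instance is simply named rcu_tasks:

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	/* Queue on this flavor's list; the generic code wakes the kthread if needed. */
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);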
170 // Wait for a grace period for the specified flavor of Tasks RCU.
177 /* Wait for the grace period. */ in synchronize_rcu_tasks_generic()
178 wait_rcu_gp(rtp->call_func); in synchronize_rcu_tasks_generic()
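The flavor's synchronous API is the corresponding wrapper; a sketch under the same naming assumption:

void synchronize_rcu_tasks(void)
{
	/* Queue a callback via rtp->call_func and sleep until it is invoked. */
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);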
181 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
191 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! in rcu_tasks_kthread()
196 * one RCU-tasks grace period and then invokes the callbacks. in rcu_tasks_kthread()
197 * This loop is terminated by the system going down. ;-) in rcu_tasks_kthread()
202 raw_spin_lock_irqsave(&rtp->cbs_lock, flags); in rcu_tasks_kthread()
204 list = rtp->cbs_head; in rcu_tasks_kthread()
205 rtp->cbs_head = NULL; in rcu_tasks_kthread()
206 rtp->cbs_tail = &rtp->cbs_head; in rcu_tasks_kthread()
207 raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); in rcu_tasks_kthread()
209 /* If there were none, wait a bit and start over. */ in rcu_tasks_kthread()
211 wait_event_interruptible(rtp->cbs_wq, in rcu_tasks_kthread()
212 READ_ONCE(rtp->cbs_head)); in rcu_tasks_kthread()
213 if (!rtp->cbs_head) { in rcu_tasks_kthread()
221 // Wait for one grace period. in rcu_tasks_kthread()
223 rtp->gp_start = jiffies; in rcu_tasks_kthread()
224 rtp->gp_func(rtp); in rcu_tasks_kthread()
225 rtp->n_gps++; in rcu_tasks_kthread()
230 next = list->next; in rcu_tasks_kthread()
232 list->func(list); in rcu_tasks_kthread()
238 schedule_timeout_idle(rtp->gp_sleep); in rcu_tasks_kthread()
244 /* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
249 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
250 …NCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __fu… in rcu_spawn_tasks_kthread_generic()
258 * Print any non-default Tasks RCU settings.
264 …pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_s… in rcu_tasks_bootup_oddness()
280 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
284 rtp->kname, in show_rcu_tasks_generic_gp_kthread()
285 tasks_gp_state_getname(rtp), data_race(rtp->gp_state), in show_rcu_tasks_generic_gp_kthread()
286 jiffies - data_race(rtp->gp_jiffies), in show_rcu_tasks_generic_gp_kthread()
287 data_race(rtp->n_gps), in show_rcu_tasks_generic_gp_kthread()
288 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), in show_rcu_tasks_generic_gp_kthread()
289 ".k"[!!data_race(rtp->kthread_ptr)], in show_rcu_tasks_generic_gp_kthread()
290 ".C"[!!data_race(rtp->cbs_head)], in show_rcu_tasks_generic_gp_kthread()
301 // Shared code between task-list-scanning variants of Tasks RCU.
303 /* Wait for one RCU-tasks grace period. */
312 rtp->pregp_func(); in rcu_tasks_wait_gp()
315 * There were callbacks, so we need to wait for an RCU-tasks in rcu_tasks_wait_gp()
323 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
327 rtp->postscan_func(&holdouts); in rcu_tasks_wait_gp()
336 // Start off with initial wait and slowly back off to 1 HZ wait. in rcu_tasks_wait_gp()
337 fract = rtp->init_fract; in rcu_tasks_wait_gp()
354 fract--; in rcu_tasks_wait_gp()
363 rtp->holdouts_func(&holdouts, needreport, &firstreport); in rcu_tasks_wait_gp()
367 rtp->postgp_func(rtp); in rcu_tasks_wait_gp()
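Pulling these fragments together, the shared grace-period wait proceeds in five phases: the pre-grace-period hook, a per-task scan that builds a holdout list, a post-scan hook, a backoff loop that repeatedly rescans the holdouts, and a post-grace-period hook. A condensed sketch, with the stall-warning bookkeeping and gp_state tracing elided and local variable names assumed:

/* Condensed sketch of the shared task-list-scanning grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	int fract;

	rtp->pregp_func();			/* Flavor-specific setup. */

	rcu_read_lock();			/* Scan every task, building the holdout list. */
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	rtp->postscan_func(&holdouts);		/* For example, wait for exiting tasks. */

	fract = rtp->init_fract;		/* Short sleeps at first, backing off toward 1 HZ. */
	while (!list_empty(&holdouts)) {
		bool firstreport = true;
		bool needreport = false;	/* Stall-warning timing elided in this sketch. */

		schedule_timeout_idle(HZ / fract);
		if (fract > 1)
			fract--;
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	rtp->postgp_func(rtp);			/* Flavor-specific ordering and cleanup. */
}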
377 // switch, cond_resched_rcu_qs(), user-space execution, and idle.
379 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
381 // state for some of the manipulations involved in tracing and the like.
383 // rates from multiple CPUs. If this is required, per-CPU callback lists
386 /* Pre-grace-period preparation. */
390 * Wait for all pre-existing t->on_rq and t->nvcsw transitions in rcu_tasks_pregp_step()
393 * synchronize_rcu(), a read-side critical section that started in rcu_tasks_pregp_step()
398 * memory barrier on the first store to t->rcu_tasks_holdout, in rcu_tasks_pregp_step()
405 /* Per-task initial processing. */
408 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { in rcu_tasks_pertask()
410 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); in rcu_tasks_pertask()
411 WRITE_ONCE(t->rcu_tasks_holdout, true); in rcu_tasks_pertask()
412 list_add(&t->rcu_tasks_holdout_list, hop); in rcu_tasks_pertask()
420 * Wait for tasks that are in the process of exiting. This in rcu_tasks_postscan()
435 if (!READ_ONCE(t->rcu_tasks_holdout) || in check_holdout_task()
436 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || in check_holdout_task()
437 !READ_ONCE(t->on_rq) || in check_holdout_task()
439 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { in check_holdout_task()
440 WRITE_ONCE(t->rcu_tasks_holdout, false); in check_holdout_task()
441 list_del_init(&t->rcu_tasks_holdout_list); in check_holdout_task()
456 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, in check_holdout_task()
457 t->rcu_tasks_idle_cpu, cpu); in check_holdout_task()
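For the classic flavor, the holdouts_func hook is just a walk over the holdout list that applies the per-task check above to each entry. A sketch, with the wrapper name assumed by analogy with the trace flavor's check_all_holdout_tasks_trace() shown later:

/* Scan the holdout list, applying check_holdout_task() to each entry. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}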
473 /* Finish off the Tasks-RCU grace period. */
477 * Because ->on_rq and ->nvcsw are not guaranteed to have a full in rcu_tasks_postgp()
479 * reordering on other CPUs could cause their RCU-tasks read-side in rcu_tasks_postgp()
481 * However, because these ->nvcsw updates are carried out with in rcu_tasks_postgp()
485 * This synchronize_rcu() also confines all ->rcu_tasks_holdout in rcu_tasks_postgp()
487 * memory barriers for ->rcu_tasks_holdout accesses. in rcu_tasks_postgp()
500 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
506 * read-side critical sections have completed. call_rcu_tasks() assumes
507 * that the read-side critical sections end at a voluntary context
509 * or transition to usermode execution. As such, there are no read-side
512 * through a safe state, not so much for data-structure synchronization.
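As a usage illustration (the structure and function names here are hypothetical), a caller that must let every task finish any non-preemptible code path before freeing memory might do:

/* Hypothetical example: deferred free after a Tasks-RCU grace period. */
struct my_tramp {
	struct rcu_head rh;
	/* ... trampoline state that tasks may be executing ... */
};

static void my_tramp_free_cb(struct rcu_head *rhp)
{
	struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);

	kfree(tp);	/* No task can still be running in the old trampoline here. */
}

static void my_tramp_retire(struct my_tramp *old_tp)
{
	/* The old trampoline is already unreachable by new callers at this point. */
	call_rcu_tasks(&old_tp->rh, my_tramp_free_cb);
}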
524 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
526 * Control will return to the caller some time after a full rcu-tasks
528 * executing rcu-tasks read-side critical sections have elapsed. These
529 * read-side critical sections are delimited by calls to schedule(),
548 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
550 * Although the current implementation is guaranteed to wait, it is not
555 /* There is only one callback queue, so this is easy. ;-) */ in rcu_barrier_tasks()
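Because every callback for this flavor goes through that single queue, waiting for in-flight callbacks reduces to waiting for one more grace period; a sketch consistent with the comment above:

void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so one grace period suffices. */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);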
585 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); in exit_tasks_rcu_start()
595 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); in exit_tasks_rcu_finish()
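These two hooks bracket a task's exit path with an SRCU read-side critical section on tasks_rcu_exit_srcu, which is what lets the post-scan step wait for late-exiting tasks. A sketch built around the two lines shown above; the trailing call into the trace flavor is an assumption based on exit_tasks_rcu_finish_trace() appearing later in this file:

void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	/* Mark this task as exiting so rcu_tasks_postscan() can wait for it. */
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
}

void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	/* Past the tasks-RCU-sensitive portion of exit; stop waiting on this task. */
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	exit_tasks_rcu_finish_trace(t);
}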
622 // Wait for one rude RCU-tasks grace period.
625 rtp->n_ipis += cpumask_weight(cpu_online_mask); in rcu_tasks_rude_wait_gp()
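The rude grace period simply forces a context switch on every online CPU by scheduling a no-op work item on each of them, which is where the IPI count above comes from. A sketch:

/* No-op work item whose mere scheduling forces a context switch on that CPU. */
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

/* Wait for one rude RCU-tasks grace period by disturbing every online CPU. */
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}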
634 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
640 * read-side critical sections have completed. call_rcu_tasks_rude()
641 * assumes that the read-side critical sections end at context switch,
643 * there are no read-side primitives analogous to rcu_read_lock() and
645 * that all tasks have passed through a safe state, not so much for
646 * data-structure synchronization.
658 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
660 * Control will return to the caller some time after a rude rcu-tasks
662 * executing rcu-tasks read-side critical sections have elapsed. These
663 * read-side critical sections are delimited by calls to schedule(),
682 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
684 * Although the current implementation is guaranteed to wait, it is not
689 /* There is only one callback queue, so this is easy. ;-) */ in rcu_barrier_tasks_rude()
719 // 1. Has explicit read-side markers to allow finite grace periods
720 // in the face of in-kernel loops for PREEMPT=n builds.
723 // CPU-hotplug code paths, similar to the capabilities of SRCU.
725 // 3. Avoids expensive read-side instructions, having overhead similar
728 // There are of course downsides. The grace-period code can send IPIs to
739 // The lockdep state must be outside of #ifdef to be useful.
749 static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
755 // The number of detections of task quiescent state relying on
775 /* If we are the last reader, wake up the grace-period kthread. */
778 int nq = t->trc_reader_special.b.need_qs; in rcu_read_unlock_trace_special()
781 t->trc_reader_special.b.need_mb) in rcu_read_unlock_trace_special()
782 smp_mb(); // Pairs with update-side barriers. in rcu_read_unlock_trace_special()
783 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. in rcu_read_unlock_trace_special()
785 WRITE_ONCE(t->trc_reader_special.b.need_qs, false); in rcu_read_unlock_trace_special()
786 WRITE_ONCE(t->trc_reader_nesting, nesting); in rcu_read_unlock_trace_special()
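For context, the matching read-side markers live in inline functions (in include/linux/rcupdate_trace.h): they adjust the per-task ->trc_reader_nesting counter, and only a reader whose outermost exit coincides with a pending grace-period request (->trc_reader_special set) drops into the slow path above. A simplified sketch; the production inlines also carry lockdep annotations and temporarily poison the nesting counter to fend off concurrent IPIs:

static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb();	/* Pairs with update-side barriers. */
}

static inline void rcu_read_unlock_trace(void)
{
	struct task_struct *t = current;
	int nesting = READ_ONCE(t->trc_reader_nesting) - 1;

	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;	/* Fast path: nobody is waiting on this reader. */
	}
	rcu_read_unlock_trace_special(t, nesting);	/* Slow path shown above. */
}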
795 if (list_empty(&t->trc_holdout_list)) { in trc_add_holdout()
797 list_add(&t->trc_holdout_list, bhp); in trc_add_holdout()
804 if (!list_empty(&t->trc_holdout_list)) { in trc_del_holdout()
805 list_del_init(&t->trc_holdout_list); in trc_del_holdout()
810 /* IPI handler to check task state. */
823 // If the task is not in a read-side critical section, and in trc_read_check_handler()
824 // if this is the last reader, awaken the grace-period kthread. in trc_read_check_handler()
825 if (likely(!t->trc_reader_nesting)) { in trc_read_check_handler()
830 WRITE_ONCE(t->trc_reader_checked, true); in trc_read_check_handler()
834 if (unlikely(t->trc_reader_nesting < 0)) { in trc_read_check_handler()
839 WRITE_ONCE(t->trc_reader_checked, true); in trc_read_check_handler()
841 // Get here if the task is in a read-side critical section. Set in trc_read_check_handler()
842 // its state so that it will awaken the grace-period kthread upon in trc_read_check_handler()
844 WARN_ON_ONCE(t->trc_reader_special.b.need_qs); in trc_read_check_handler()
845 WRITE_ONCE(t->trc_reader_special.b.need_qs, true); in trc_read_check_handler()
852 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ in trc_read_check_handler()
855 /* Callback function for scheduler to check locked-down task. */
870 // we can inspect its state even though it is currently running. in trc_inspect_reader()
871 // However, we cannot safely change its state. in trc_inspect_reader()
874 !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) in trc_inspect_reader()
875 return false; // No quiescent state, do it the hard way. in trc_inspect_reader()
881 in_qs = likely(!t->trc_reader_nesting); in trc_inspect_reader()
884 // Mark as checked. Because this is called from the grace-period in trc_inspect_reader()
886 t->trc_reader_checked = true; in trc_inspect_reader()
890 return true; // Already in quiescent state, done!!! in trc_inspect_reader()
892 // The task is in a read-side critical section, so set up its in trc_inspect_reader()
893 // state so that it will awaken the grace-period kthread upon exit in trc_inspect_reader()
895 atomic_inc(&trc_n_readers_need_end); // One more to wait on. in trc_inspect_reader()
896 WARN_ON_ONCE(t->trc_reader_special.b.need_qs); in trc_inspect_reader()
897 WRITE_ONCE(t->trc_reader_special.b.need_qs, true); in trc_inspect_reader()
901 /* Attempt to extract the state for the specified task. */
908 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI in trc_wait_for_one_reader()
911 // The current task had better be in a quiescent state. in trc_wait_for_one_reader()
913 t->trc_reader_checked = true; in trc_wait_for_one_reader()
915 WARN_ON_ONCE(t->trc_reader_nesting); in trc_wait_for_one_reader()
935 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) in trc_wait_for_one_reader()
940 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
948 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
957 /* Initialize for a new RCU-tasks-trace grace period. */
962 // Allow for fast-acting IPIs. in rcu_tasks_trace_pregp_step()
970 // This also waits for all readers in CPU-hotplug code paths. in rcu_tasks_trace_pregp_step()
974 /* Do first-round processing for the specified task. */
978 WRITE_ONCE(t->trc_reader_special.b.need_qs, false); in rcu_tasks_trace_pertask()
979 WRITE_ONCE(t->trc_reader_checked, false); in rcu_tasks_trace_pertask()
980 t->trc_ipi_to_cpu = -1; in rcu_tasks_trace_pertask()
995 // Re-enable CPU hotplug now that the tasklist scan has completed. in rcu_tasks_trace_postscan()
998 // Wait for late-stage exiting tasks to finish exiting. in rcu_tasks_trace_postscan()
1001 // Any tasks that exit after this point will set ->trc_reader_checked. in rcu_tasks_trace_postscan()
1004 /* Show the state of a task stalling the current RCU tasks trace GP. */
1016 t->pid, in show_stalled_task_trace()
1017 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0], in show_stalled_task_trace()
1020 t->trc_reader_nesting, in show_stalled_task_trace()
1021 " N"[!!t->trc_reader_special.b.need_qs], in show_stalled_task_trace()
1047 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1048 !READ_ONCE(t->trc_reader_checked)) in check_all_holdout_tasks_trace()
1052 if (READ_ONCE(t->trc_reader_checked)) in check_all_holdout_tasks_trace()
1058 // Re-enable CPU hotplug now that the holdout list scan has completed. in check_all_holdout_tasks_trace()
1068 /* Wait for grace period to complete and provide ordering. */
1081 // Wait for readers. in rcu_tasks_trace_postgp()
1093 if (READ_ONCE(t->trc_reader_special.b.need_qs)) in rcu_tasks_trace_postgp()
1098 if (READ_ONCE(t->trc_reader_special.b.need_qs)) in rcu_tasks_trace_postgp()
1111 /* Report any needed quiescent state for this exiting task. */
1114 WRITE_ONCE(t->trc_reader_checked, true); in exit_tasks_rcu_finish_trace()
1115 WARN_ON_ONCE(t->trc_reader_nesting); in exit_tasks_rcu_finish_trace()
1116 WRITE_ONCE(t->trc_reader_nesting, 0); in exit_tasks_rcu_finish_trace()
1117 if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs))) in exit_tasks_rcu_finish_trace()
1122 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
1128 * read-side critical sections have completed. call_rcu_tasks_trace()
1129 * assumes that the read-side critical sections are delimited by calls to
1131 * rcu_read_lock_trace() and rcu_read_unlock_trace(); this primitive is intended
1133 * to determine that all tasks have passed through a safe state, not so much for
1134 * data-structure synchronization.
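As a usage illustration (all names here are hypothetical), an updater can publish a replacement structure and defer the free with call_rcu_tasks_trace() while readers bracket their accesses with the trace markers:

struct my_prog {
	struct rcu_head rh;
	void (*run)(void);
};

static struct my_prog *active_prog;

static void my_prog_invoke(void)
{
	struct my_prog *p;

	rcu_read_lock_trace();
	p = smp_load_acquire(&active_prog);	/* Pairs with the release below. */
	if (p)
		p->run();	/* Reader may be preempted; that is the point of this flavor. */
	rcu_read_unlock_trace();
}

static void my_prog_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_prog, rh));
}

/* Caller serializes updates. */
static void my_prog_replace(struct my_prog *newp)
{
	struct my_prog *oldp = active_prog;

	smp_store_release(&active_prog, newp);
	if (oldp)
		call_rcu_tasks_trace(&oldp->rh, my_prog_free_cb);
}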
1146 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1148 * Control will return to the caller some time after a trace rcu-tasks
1150 * rcu-tasks read-side critical sections have elapsed. These read-side
1164 …ce_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section… in synchronize_rcu_tasks_trace()
1170 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1172 * Although the current implementation is guaranteed to wait, it is not
1177 /* There is only one callback queue, so this is easy. ;-) */ in rcu_barrier_tasks_trace()