Lines Matching +full:cpu +full:- +full:read
1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
26 * @lock: Lock protecting per-CPU callback list.
29 * @urgent_gp: Number of additional non-lazy grace periods.
30 * @rtp_n_lock_retries: Rough lock-contention statistic.
35 * @cpu: CPU number corresponding to this entry.
49 int cpu; member
54 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
57 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
58 * @gp_func: This flavor's grace-period-wait function.
60 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
63 * @gp_start: Most recent grace-period start in jiffies.
66 * @n_ipis_fails: Number of IPI-send failures.
67 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
69 * @pregp_func: This flavor's pre-grace-period function (optional).
70 * @pertask_func: This flavor's per-task scan function (optional).
71 * @postscan_func: This flavor's post-task scan function (optional).
72 * @holdouts_func: This flavor's holdout-list scan function (optional).
73 * @postgp_func: This flavor's post-grace-period function (optional).
74 * @call_func: This flavor's call_rcu()-equivalent function.
76 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
77 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
78 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
79 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
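For orientation, here is an abbreviated sketch of the two structures documented above, reconstructed from the field descriptions in this listing; the types are best guesses, several members are omitted, and the real definitions live in kernel/rcu/tasks.h.

struct rcu_tasks_percpu {                               /* sketch only */
        struct rcu_segcblist cblist;                    /* per-CPU callback list */
        raw_spinlock_t lock;                            /* protects the callback list */
        unsigned long rtp_jiffies;
        unsigned long rtp_n_lock_retries;               /* rough lock-contention statistic */
        struct timer_list lazy_timer;
        unsigned int urgent_gp;                         /* additional non-lazy grace periods */
        struct work_struct rtp_work;
        struct irq_work rtp_irq_work;
        struct rcu_head barrier_q_head;
        struct list_head rtp_blkd_tasks;
        int cpu;                                        /* CPU number for this entry */
        struct rcu_tasks *rtpp;                         /* back-pointer to the flavor */
};

struct rcu_tasks {                                      /* sketch only */
        struct mutex tasks_gp_mutex;                    /* protects the grace period */
        void (*gp_func)(struct rcu_tasks *rtp);         /* grace-period-wait function */
        int gp_sleep;                                   /* per-grace-period sleep */
        unsigned long gp_start;                         /* most recent GP start, in jiffies */
        unsigned long n_ipis_fails;                     /* IPI-send failures */
        struct task_struct *kthread_ptr;                /* GP/callback-invocation kthread */
        void (*pregp_func)(struct list_head *hop);      /* optional per-flavor hooks; */
        void (*pertask_func)(struct task_struct *t, struct list_head *hop);
        void (*postscan_func)(struct list_head *hop);   /* signatures inferred from the */
        void (*holdouts_func)(struct list_head *hop, bool needreport, bool *firstreport);
        void (*postgp_func)(struct rcu_tasks *rtp);     /* call sites in rcu_tasks_wait_gp() */
        call_rcu_func_t call_func;                      /* call_rcu()-equivalent */
        int percpu_enqueue_shift;                       /* shift CPU ID down when enqueuing */
        int percpu_enqueue_lim;                         /* queues in use for enqueuing */
        int percpu_dequeue_lim;                         /* queues in use for dequeuing */
        unsigned long percpu_dequeue_gpseq;             /* GP number gating dequeue-limit drop */
        struct rcu_tasks_percpu __percpu *rtpcpu;       /* the per-CPU state above */
        /* ... plus names, sequence numbers, waitqueues, and barrier state ... */
};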
142 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
171 static int rcu_task_enqueue_lim __read_mostly = -1;
182 /* RCU tasks grace-period state for debugging. */
218 /* Record grace-period phase and time. */
221 rtp->gp_state = newstate; in set_tasks_gp_state()
222 rtp->gp_jiffies = jiffies; in set_tasks_gp_state()
229 int i = data_race(rtp->gp_state); // Let KCSAN detect update races in tasks_gp_state_getname()
238 // Initialize per-CPU callback lists for the specified flavor of
242 int cpu; in cblist_init_generic() local
258 if (((nr_cpu_ids - 1) >> shift) >= lim) in cblist_init_generic()
260 WRITE_ONCE(rtp->percpu_enqueue_shift, shift); in cblist_init_generic()
261 WRITE_ONCE(rtp->percpu_dequeue_lim, lim); in cblist_init_generic()
262 smp_store_release(&rtp->percpu_enqueue_lim, lim); in cblist_init_generic()
263 for_each_possible_cpu(cpu) { in cblist_init_generic()
264 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in cblist_init_generic()
267 if (cpu) in cblist_init_generic()
270 if (rcu_segcblist_empty(&rtpcp->cblist)) in cblist_init_generic()
271 rcu_segcblist_init(&rtpcp->cblist); in cblist_init_generic()
273 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); in cblist_init_generic()
274 rtpcp->cpu = cpu; in cblist_init_generic()
275 rtpcp->rtpp = rtp; in cblist_init_generic()
276 if (!rtpcp->rtp_blkd_tasks.next) in cblist_init_generic()
277 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); in cblist_init_generic()
280 pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name, in cblist_init_generic()
281 data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust); in cblist_init_generic()
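A rough worked example of the mapping set up just above (hypothetical CPU counts, illustration only): with nr_cpu_ids = 16 and lim = 4, ilog2(16 / 4) = 2 and (16 - 1) >> 2 = 3 < 4, so the shift stays 2 and CPUs 0-3, 4-7, 8-11, and 12-15 share queues 0 through 3. With nr_cpu_ids = 6 and lim = 4, ilog2(6 / 4) = 0 but (6 - 1) >> 0 = 5 >= 4, so the shift is bumped to 1 and the six CPUs fold onto queues 0-2. The helpers below merely restate the arithmetic used here and in call_rcu_tasks_generic().

/* Illustration only, not kernel code. */
static int example_enqueue_shift(int ncpus, int lim)
{
        int shift = ilog2(ncpus / lim);

        if (((ncpus - 1) >> shift) >= lim)
                shift++;        /* keep every CPU within the first "lim" queues */
        return shift;
}

static int example_queue_for_cpu(int cpu, int shift)
{
        return cpu >> shift;    /* mirrors smp_processor_id() >> percpu_enqueue_shift */
}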
287 return jiffies + rtp->lazy_jiffies; in rcu_tasks_lazy_time()
298 rtp = rtpcp->rtpp; in call_rcu_tasks_generic_timer()
300 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) { in call_rcu_tasks_generic_timer()
301 if (!rtpcp->urgent_gp) in call_rcu_tasks_generic_timer()
302 rtpcp->urgent_gp = 1; in call_rcu_tasks_generic_timer()
304 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); in call_rcu_tasks_generic_timer()
308 rcuwait_wake_up(&rtp->cbs_wait); in call_rcu_tasks_generic_timer()
311 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
317 rtp = rtpcp->rtpp; in call_rcu_tasks_iw_wakeup()
318 rcuwait_wake_up(&rtp->cbs_wait); in call_rcu_tasks_iw_wakeup()
327 bool havekthread = smp_load_acquire(&rtp->kthread_ptr); in call_rcu_tasks_generic()
334 rhp->next = NULL; in call_rcu_tasks_generic()
335 rhp->func = func; in call_rcu_tasks_generic()
338 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); in call_rcu_tasks_generic()
339 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask); in call_rcu_tasks_generic()
340 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); in call_rcu_tasks_generic()
344 if (rtpcp->rtp_jiffies != j) { in call_rcu_tasks_generic()
345 rtpcp->rtp_jiffies = j; in call_rcu_tasks_generic()
346 rtpcp->rtp_n_lock_retries = 0; in call_rcu_tasks_generic()
348 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim && in call_rcu_tasks_generic()
349 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) in call_rcu_tasks_generic()
353 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist))) in call_rcu_tasks_generic()
354 rcu_segcblist_init(&rtpcp->cblist); in call_rcu_tasks_generic()
356 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim); in call_rcu_tasks_generic()
357 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) { in call_rcu_tasks_generic()
358 if (rtp->lazy_jiffies) in call_rcu_tasks_generic()
359 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); in call_rcu_tasks_generic()
361 needwake = rcu_segcblist_empty(&rtpcp->cblist); in call_rcu_tasks_generic()
364 rtpcp->urgent_gp = 3; in call_rcu_tasks_generic()
365 rcu_segcblist_enqueue(&rtpcp->cblist, rhp); in call_rcu_tasks_generic()
368 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in call_rcu_tasks_generic()
369 if (rtp->percpu_enqueue_lim != nr_cpu_ids) { in call_rcu_tasks_generic()
370 WRITE_ONCE(rtp->percpu_enqueue_shift, 0); in call_rcu_tasks_generic()
371 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids); in call_rcu_tasks_generic()
372 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); in call_rcu_tasks_generic()
373 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); in call_rcu_tasks_generic()
375 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in call_rcu_tasks_generic()
379 if (needwake && READ_ONCE(rtp->kthread_ptr)) in call_rcu_tasks_generic()
380 irq_work_queue(&rtpcp->rtp_irq_work); in call_rcu_tasks_generic()
390 rtp = rtpcp->rtpp; in rcu_barrier_tasks_generic_cb()
391 if (atomic_dec_and_test(&rtp->barrier_q_count)) in rcu_barrier_tasks_generic_cb()
392 complete(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic_cb()
395 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
399 int cpu; in rcu_barrier_tasks_generic() local
402 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
404 mutex_lock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
405 if (rcu_seq_done(&rtp->barrier_q_seq, s)) { in rcu_barrier_tasks_generic()
407 mutex_unlock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
410 rcu_seq_start(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
411 init_completion(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
412 atomic_set(&rtp->barrier_q_count, 2); in rcu_barrier_tasks_generic()
413 for_each_possible_cpu(cpu) { in rcu_barrier_tasks_generic()
414 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) in rcu_barrier_tasks_generic()
416 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_barrier_tasks_generic()
417 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; in rcu_barrier_tasks_generic()
419 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head)) in rcu_barrier_tasks_generic()
420 atomic_inc(&rtp->barrier_q_count); in rcu_barrier_tasks_generic()
423 if (atomic_sub_and_test(2, &rtp->barrier_q_count)) in rcu_barrier_tasks_generic()
424 complete(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
425 wait_for_completion(&rtp->barrier_q_completion); in rcu_barrier_tasks_generic()
426 rcu_seq_end(&rtp->barrier_q_seq); in rcu_barrier_tasks_generic()
427 mutex_unlock(&rtp->barrier_q_mutex); in rcu_barrier_tasks_generic()
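/*
 * Note on the counting above (inferred from the code, and analogous to the
 * trick used by rcu_barrier() in tree.c): barrier_q_count starts at 2 so
 * that callbacks entrained on early queues cannot drive the count to zero
 * while later queues are still being scanned.  The atomic_sub_and_test(2,
 * ...) at the end removes that bias, so the completion fires either right
 * there (nothing entrained, or everything already invoked) or from the
 * last rcu_barrier_tasks_generic_cb() to run.
 */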
434 int cpu; in rcu_tasks_need_gpcb() local
437 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq); in rcu_tasks_need_gpcb()
443 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim); in rcu_tasks_need_gpcb()
444 for (cpu = 0; cpu < dequeue_limit; cpu++) { in rcu_tasks_need_gpcb()
445 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
448 if (!rcu_segcblist_n_cbs(&rtpcp->cblist)) in rcu_tasks_need_gpcb()
452 n = rcu_segcblist_n_cbs(&rtpcp->cblist); in rcu_tasks_need_gpcb()
455 if (cpu > 0) in rcu_tasks_need_gpcb()
458 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); in rcu_tasks_need_gpcb()
459 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); in rcu_tasks_need_gpcb()
460 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) { in rcu_tasks_need_gpcb()
461 if (rtp->lazy_jiffies) in rcu_tasks_need_gpcb()
462 rtpcp->urgent_gp--; in rcu_tasks_need_gpcb()
464 } else if (rcu_segcblist_empty(&rtpcp->cblist)) { in rcu_tasks_need_gpcb()
465 rtpcp->urgent_gp = 0; in rcu_tasks_need_gpcb()
467 if (rcu_segcblist_ready_cbs(&rtpcp->cblist)) in rcu_tasks_need_gpcb()
474 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other in rcu_tasks_need_gpcb()
475 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period, in rcu_tasks_need_gpcb()
477 // to CPU 0. Note the matching RCU read-side critical section in in rcu_tasks_need_gpcb()
480 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
481 if (rtp->percpu_enqueue_lim > 1) { in rcu_tasks_need_gpcb()
482 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids)); in rcu_tasks_need_gpcb()
483 smp_store_release(&rtp->percpu_enqueue_lim, 1); in rcu_tasks_need_gpcb()
484 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); in rcu_tasks_need_gpcb()
486 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); in rcu_tasks_need_gpcb()
488 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
491 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
492 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { in rcu_tasks_need_gpcb()
493 WRITE_ONCE(rtp->percpu_dequeue_lim, 1); in rcu_tasks_need_gpcb()
494 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); in rcu_tasks_need_gpcb()
496 if (rtp->percpu_dequeue_lim == 1) { in rcu_tasks_need_gpcb()
497 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) { in rcu_tasks_need_gpcb()
498 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_need_gpcb()
500 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist)); in rcu_tasks_need_gpcb()
503 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); in rcu_tasks_need_gpcb()
512 int cpu; in rcu_tasks_invoke_cbs() local
521 cpu = rtpcp->cpu; in rcu_tasks_invoke_cbs()
522 cpunext = cpu * 2 + 1; in rcu_tasks_invoke_cbs()
523 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { in rcu_tasks_invoke_cbs()
524 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); in rcu_tasks_invoke_cbs()
526 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); in rcu_tasks_invoke_cbs()
528 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { in rcu_tasks_invoke_cbs()
529 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); in rcu_tasks_invoke_cbs()
531 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); in rcu_tasks_invoke_cbs()
535 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu)) in rcu_tasks_invoke_cbs()
538 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); in rcu_tasks_invoke_cbs()
539 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); in rcu_tasks_invoke_cbs()
545 rhp->func(rhp); in rcu_tasks_invoke_cbs()
550 rcu_segcblist_add_len(&rtpcp->cblist, -len); in rcu_tasks_invoke_cbs()
551 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); in rcu_tasks_invoke_cbs()
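/*
 * Note on the indexing above (inferred from the code): callback invocation
 * fans out along a binary tree of workqueue handlers.  The grace-period
 * kthread starts with rtpcpu entry 0, which kicks entries 1 and 2; entry 1
 * kicks 3 and 4, entry 2 kicks 5 and 6, and so on, so a large dequeue limit
 * is covered in a logarithmic number of queue_work_on() steps rather than
 * by one CPU walking every list.
 */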
561 rtp = rtpcp->rtpp; in rcu_tasks_invoke_cbs_wq()
570 mutex_lock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
576 mutex_unlock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
578 rcuwait_wait_event(&rtp->cbs_wait, in rcu_tasks_one_gp()
581 mutex_lock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
587 rtp->gp_start = jiffies; in rcu_tasks_one_gp()
588 rcu_seq_start(&rtp->tasks_gp_seq); in rcu_tasks_one_gp()
589 rtp->gp_func(rtp); in rcu_tasks_one_gp()
590 rcu_seq_end(&rtp->tasks_gp_seq); in rcu_tasks_one_gp()
595 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); in rcu_tasks_one_gp()
596 mutex_unlock(&rtp->tasks_gp_mutex); in rcu_tasks_one_gp()
599 // RCU-tasks kthread that detects grace periods and invokes callbacks.
602 int cpu; in rcu_tasks_kthread() local
605 for_each_possible_cpu(cpu) { in rcu_tasks_kthread()
606 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in rcu_tasks_kthread()
608 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0); in rcu_tasks_kthread()
609 rtpcp->urgent_gp = 1; in rcu_tasks_kthread()
614 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start! in rcu_tasks_kthread()
619 * one RCU-tasks grace period and then invokes the callbacks. in rcu_tasks_kthread()
620 * This loop is terminated by the system going down. ;-) in rcu_tasks_kthread()
628 schedule_timeout_idle(rtp->gp_sleep); in rcu_tasks_kthread()
637 "synchronize_%s() called too soon", rtp->name)) in synchronize_rcu_tasks_generic()
640 // If the grace-period kthread is running, use it. in synchronize_rcu_tasks_generic()
641 if (READ_ONCE(rtp->kthread_ptr)) { in synchronize_rcu_tasks_generic()
642 wait_rcu_gp(rtp->call_func); in synchronize_rcu_tasks_generic()
648 /* Spawn RCU-tasks grace-period kthread. */
653 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
654 …NCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __fu… in rcu_spawn_tasks_kthread_generic()
662 * Print any non-default Tasks RCU settings.
670 …pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_s… in rcu_tasks_bootup_oddness()
673 …pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsim… in rcu_tasks_bootup_oddness()
691 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
694 int cpu; in show_rcu_tasks_generic_gp_kthread() local
699 for_each_possible_cpu(cpu) { in show_rcu_tasks_generic_gp_kthread()
700 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); in show_rcu_tasks_generic_gp_kthread()
702 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) in show_rcu_tasks_generic_gp_kthread()
704 if (data_race(rtpcp->urgent_gp)) in show_rcu_tasks_generic_gp_kthread()
706 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp)) in show_rcu_tasks_generic_gp_kthread()
712 rtp->kname, in show_rcu_tasks_generic_gp_kthread()
713 tasks_gp_state_getname(rtp), data_race(rtp->gp_state), in show_rcu_tasks_generic_gp_kthread()
714 jiffies - data_race(rtp->gp_jiffies), in show_rcu_tasks_generic_gp_kthread()
715 data_race(rcu_seq_current(&rtp->tasks_gp_seq)), in show_rcu_tasks_generic_gp_kthread()
716 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), in show_rcu_tasks_generic_gp_kthread()
717 ".k"[!!data_race(rtp->kthread_ptr)], in show_rcu_tasks_generic_gp_kthread()
721 rtp->lazy_jiffies, in show_rcu_tasks_generic_gp_kthread()
732 // Shared code between task-list-scanning variants of Tasks RCU.
734 /* Wait for one RCU-tasks grace period. */
748 rtp->pregp_func(&holdouts); in rcu_tasks_wait_gp()
751 * There were callbacks, so we need to wait for an RCU-tasks in rcu_tasks_wait_gp()
757 if (rtp->pertask_func) { in rcu_tasks_wait_gp()
760 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
765 rtp->postscan_func(&holdouts); in rcu_tasks_wait_gp()
777 fract = rtp->init_fract; in rcu_tasks_wait_gp()
807 rtp->holdouts_func(&holdouts, needreport, &firstreport); in rcu_tasks_wait_gp()
809 // Print pre-stall informational messages if needed. in rcu_tasks_wait_gp()
815 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); in rcu_tasks_wait_gp()
820 rtp->postgp_func(rtp); in rcu_tasks_wait_gp()
830 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
832 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
836 // rates from multiple CPUs. If this is required, per-CPU callback lists
845 // Invokes synchronize_rcu() in order to wait for all in-flight
846 // t->on_rq and t->nvcsw transitions to complete. This works because
848 // rcu_tasks_pertask(), invoked on every non-idle task:
849 // For every runnable non-idle task other than the current one, use
864 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
871 // read-side critical sections waited for by rcu_tasks_postscan().
873 // Pre-grace-period update-side code is ordered before the grace
874 // period via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
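A compact restatement of the holdout rule sketched in the comments above, for illustration only; the real tests are rcu_tasks_is_holdout() and check_holdout_task() below, which also handle the idle-task, CPU-offline, and NO_HZ-full special cases.

/* Illustration only: a task snapshotted at grace-period start remains a
 * holdout until it voluntarily switches context or leaves the runqueue. */
static bool example_still_holdout(struct task_struct *t, unsigned long nvcsw_snap)
{
        return READ_ONCE(t->rcu_tasks_holdout) &&       /* marked at grace-period start */
               READ_ONCE(t->nvcsw) == nvcsw_snap &&     /* no voluntary context switch since */
               READ_ONCE(t->on_rq);                     /* still queued to run */
}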
879 /* Pre-grace-period preparation. */
883 * Wait for all pre-existing t->on_rq and t->nvcsw transitions in rcu_tasks_pregp_step()
886 * synchronize_rcu(), a read-side critical section that started in rcu_tasks_pregp_step()
891 * memory barrier on the first store to t->rcu_tasks_holdout, in rcu_tasks_pregp_step()
901 int cpu; in rcu_tasks_is_holdout() local
904 if (!READ_ONCE(t->on_rq)) in rcu_tasks_is_holdout()
908 * Idle tasks (or idle injection) within the idle loop are RCU-tasks in rcu_tasks_is_holdout()
909 * quiescent states. But CPU boot code performed by the idle task in rcu_tasks_is_holdout()
915 cpu = task_cpu(t); in rcu_tasks_is_holdout()
917 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */ in rcu_tasks_is_holdout()
918 if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) in rcu_tasks_is_holdout()
924 /* Per-task initial processing. */
929 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); in rcu_tasks_pertask()
930 WRITE_ONCE(t->rcu_tasks_holdout, true); in rcu_tasks_pertask()
931 list_add(&t->rcu_tasks_holdout_list, hop); in rcu_tasks_pertask()
949 * read side critical sections: in rcu_tasks_postscan()
951 * 1) An _SRCU_ read side starting before calling exit_notify(), in rcu_tasks_postscan()
955 * 2) An _RCU_ read side starting with the final preempt_disable() in rcu_tasks_postscan()
972 int cpu; in check_holdout_task() local
974 if (!READ_ONCE(t->rcu_tasks_holdout) || in check_holdout_task()
975 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || in check_holdout_task()
978 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) { in check_holdout_task()
979 WRITE_ONCE(t->rcu_tasks_holdout, false); in check_holdout_task()
980 list_del_init(&t->rcu_tasks_holdout_list); in check_holdout_task()
991 cpu = task_cpu(t); in check_holdout_task()
994 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)], in check_holdout_task()
995 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, in check_holdout_task()
996 data_race(t->rcu_tasks_idle_cpu), cpu); in check_holdout_task()
1012 /* Finish off the Tasks-RCU grace period. */
1016 * Because ->on_rq and ->nvcsw are not guaranteed to have a full in rcu_tasks_postgp()
1018 * reordering on other CPUs could cause their RCU-tasks read-side in rcu_tasks_postgp()
1020 * However, because these ->nvcsw updates are carried out with in rcu_tasks_postgp()
1024 * This synchronize_rcu() also confines all ->rcu_tasks_holdout in rcu_tasks_postgp()
1026 * memory barriers for ->rcu_tasks_holdout accesses. in rcu_tasks_postgp()
1033 * read side critical section. in rcu_tasks_postgp()
1049 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies); in tasks_rcu_exit_srcu_stall()
1057 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1063 * read-side critical sections have completed. call_rcu_tasks() assumes
1064 * that the read-side critical sections end at a voluntary context
1066 * or transition to usermode execution. As such, there are no read-side
1069 * through a safe state, not so much for data-structure synchronization.
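A minimal usage sketch for this API (the structure and function names here are hypothetical; only call_rcu_tasks(), container_of(), and kfree() are real kernel symbols): embed an rcu_head in the object and let the callback free it once no task can still be executing in, or preempted within, code that references it.

struct my_trampoline {                          /* hypothetical example structure */
        void *insn_page;
        struct rcu_head rh;
};

static void my_trampoline_free_cb(struct rcu_head *rhp)
{
        struct my_trampoline *tp = container_of(rhp, struct my_trampoline, rh);

        kfree(tp);                              /* a tasks-RCU grace period has elapsed */
}

static void my_trampoline_retire(struct my_trampoline *tp)
{
        /* Caller has already unlinked tp so no new users can find it. */
        call_rcu_tasks(&tp->rh, my_trampoline_free_cb);
}

A module using this pattern would normally also invoke rcu_barrier_tasks() on its exit path so that any still-queued callbacks finish before the module text disappears.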
1081 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1083 * Control will return to the caller some time after a full rcu-tasks
1085 * executing rcu-tasks read-side critical sections have elapsed. These
1086 * read-side critical sections are delimited by calls to schedule(),
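The synchronous form supports the classic trampoline-retirement pattern; a sketch under the same hypothetical names as above, relying only on synchronize_rcu_tasks() itself:

static void my_trampoline_retire_sync(struct my_trampoline *tp)
{
        /* tp is already unlinked, so no new calls can enter it. */
        synchronize_rcu_tasks();        /* every task has voluntarily switched, idled, or run in userspace */
        kfree(tp);                      /* nobody can still be preempted inside the trampoline */
}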
1105 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1116 static int rcu_tasks_lazy_ms = -1;
1156 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); in exit_tasks_rcu_start()
1168 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); in exit_tasks_rcu_stop()
1202 // Ordering is provided by the scheduler's context-switch code.
1209 // Wait for one rude RCU-tasks grace period.
1212 rtp->n_ipis += cpumask_weight(cpu_online_mask); in rcu_tasks_rude_wait_gp()
1221 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1227 * read-side critical sections have completed. call_rcu_tasks_rude()
1228 * assumes that the read-side critical sections end at context switch,
1230 * usermode execution is schedulable). As such, there are no read-side
1233 * through a safe state, not so much for data-structure synchronization.
1245 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1247 * Control will return to the caller some time after a rude rcu-tasks
1249 * executing rcu-tasks read-side critical sections have elapsed. These
1250 * read-side critical sections are delimited by calls to schedule(),
1269 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1280 int rcu_tasks_rude_lazy_ms = -1;
1315 // 1. Has explicit read-side markers to allow finite grace periods
1316 // in the face of in-kernel loops for PREEMPT=n builds.
1319 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1321 // 3. Avoids expensive read-side instructions, having overhead similar
1324 // There are of course downsides. For example, the grace-period code
1340 // Disables CPU hotplug, adds all currently executing tasks to the
1342 // or were preempted within their current RCU Tasks Trace read-side
1344 // Finally, this function re-enables CPU hotplug.
1345 // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1347 // Invokes synchronize_rcu() to wait for late-stage exiting tasks
1359 // Pre-grace-period update-side code is ordered before the grace period
1360 // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1361 // read-side code is ordered before the grace period by atomic operations
1363 // context-switch ordering (for locked-down non-running readers).
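As a reader-side illustration of the explicit markers mentioned above: rcu_read_lock_trace(), rcu_read_unlock_trace(), and rcu_read_lock_trace_held() are the real primitives from include/linux/rcupdate_trace.h, while struct my_prog and the slot layout are hypothetical.

#include <linux/rcupdate_trace.h>

struct my_prog {                                /* hypothetical example structure */
        int (*run)(struct my_prog *p);
        struct rcu_head rh;
};

static int my_prog_run(struct my_prog __rcu **slot)
{
        struct my_prog *p;
        int ret = 0;

        rcu_read_lock_trace();
        p = rcu_dereference_check(*slot, rcu_read_lock_trace_held());
        if (p)
                ret = p->run(p);                /* readers of this flavor may block */
        rcu_read_unlock_trace();
        return ret;
}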
1375 // Record outstanding IPIs to each CPU. No point in sending two...
1389 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1392 smp_mb(); // Enforce full grace-period ordering. in rcu_ld_need_qs()
1393 return smp_load_acquire(&t->trc_reader_special.b.need_qs); in rcu_ld_need_qs()
1396 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1399 smp_store_release(&t->trc_reader_special.b.need_qs, v); in rcu_st_need_qs()
1400 smp_mb(); // Enforce full grace-period ordering. in rcu_st_need_qs()
1404 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1405 * the four-byte operand-size restriction of some platforms.
1411 union rcu_special trs_old = READ_ONCE(t->trc_reader_special); in rcu_trc_cmpxchg_need_qs()
1417 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s); in rcu_trc_cmpxchg_need_qs()
1423 * If we are the last reader, signal the grace-period kthread.
1424 * Also remove from the per-CPU list of blocked tasks.
1432 // Open-coded full-word version of rcu_ld_need_qs(). in rcu_read_unlock_trace_special()
1433 smp_mb(); // Enforce full grace-period ordering. in rcu_read_unlock_trace_special()
1434 trs = smp_load_acquire(&t->trc_reader_special); in rcu_read_unlock_trace_special()
1436 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) in rcu_read_unlock_trace_special()
1437 smp_mb(); // Pairs with update-side barriers. in rcu_read_unlock_trace_special()
1438 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. in rcu_read_unlock_trace_special()
1446 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); in rcu_read_unlock_trace_special()
1448 list_del_init(&t->trc_blkd_node); in rcu_read_unlock_trace_special()
1449 WRITE_ONCE(t->trc_reader_special.b.blocked, false); in rcu_read_unlock_trace_special()
1452 WRITE_ONCE(t->trc_reader_nesting, 0); in rcu_read_unlock_trace_special()
1456 /* Add a newly blocked reader task to its CPU's list. */
1465 t->trc_blkd_cpu = smp_processor_id(); in rcu_tasks_trace_qs_blkd()
1466 if (!rtpcp->rtp_blkd_tasks.next) in rcu_tasks_trace_qs_blkd()
1467 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_qs_blkd()
1468 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_qs_blkd()
1469 WRITE_ONCE(t->trc_reader_special.b.blocked, true); in rcu_tasks_trace_qs_blkd()
1477 if (list_empty(&t->trc_holdout_list)) { in trc_add_holdout()
1479 list_add(&t->trc_holdout_list, bhp); in trc_add_holdout()
1487 if (!list_empty(&t->trc_holdout_list)) { in trc_del_holdout()
1488 list_del_init(&t->trc_holdout_list); in trc_del_holdout()
1490 n_trc_holdouts--; in trc_del_holdout()
1501 // If the task is no longer running on this CPU, leave. in trc_read_check_handler()
1505 // If the task is not in a read-side critical section, and in trc_read_check_handler()
1506 // if this is the last reader, awaken the grace-period kthread. in trc_read_check_handler()
1507 nesting = READ_ONCE(t->trc_reader_nesting); in trc_read_check_handler()
1516 // Get here if the task is in a read-side critical section. in trc_read_check_handler()
1517 // Set its state so that it will update state for the grace-period in trc_read_check_handler()
1522 // Allow future IPIs to be sent on CPU and for task. in trc_read_check_handler()
1526 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ in trc_read_check_handler()
1529 /* Callback function for scheduler to check locked-down task. */
1533 int cpu = task_cpu(t); in trc_inspect_reader() local
1535 bool ofl = cpu_is_offline(cpu); in trc_inspect_reader()
1540 return -EINVAL; in trc_inspect_reader()
1547 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) in trc_inspect_reader()
1548 return -EINVAL; // No quiescent state, do it the hard way. in trc_inspect_reader()
1552 // The task is not running, so C-language access is safe. in trc_inspect_reader()
1553 nesting = t->trc_reader_nesting; in trc_inspect_reader()
1559 // If not exiting a read-side critical section, mark as checked in trc_inspect_reader()
1560 // so that the grace-period kthread will remove it from the in trc_inspect_reader()
1567 return -EINVAL; // Reader transitioning, try again later. in trc_inspect_reader()
1569 // The task is in a read-side critical section, so set up its in trc_inspect_reader()
1581 int cpu; in trc_wait_for_one_reader() local
1584 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI in trc_wait_for_one_reader()
1590 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in trc_wait_for_one_reader()
1603 // an RCU read-side critical section. Otherwise, the invocation of in trc_wait_for_one_reader()
1613 cpu = task_cpu(t); in trc_wait_for_one_reader()
1616 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) in trc_wait_for_one_reader()
1619 per_cpu(trc_ipi_to_cpu, cpu) = true; in trc_wait_for_one_reader()
1620 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
1622 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { in trc_wait_for_one_reader()
1624 // failure than the target CPU being offline. in trc_wait_for_one_reader()
1625 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", in trc_wait_for_one_reader()
1626 __func__, cpu); in trc_wait_for_one_reader()
1628 per_cpu(trc_ipi_to_cpu, cpu) = false; in trc_wait_for_one_reader()
1629 t->trc_ipi_to_cpu = -1; in trc_wait_for_one_reader()
1635 * Initialize for first-round processing for the specified task.
1640 // During early boot when there is only the one boot CPU, there in rcu_tasks_trace_pertask_prep()
1641 // is no idle task for the other CPUs. Also, the grace-period in rcu_tasks_trace_pertask_prep()
1644 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) in rcu_tasks_trace_pertask_prep()
1648 t->trc_ipi_to_cpu = -1; in rcu_tasks_trace_pertask_prep()
1652 /* Do first-round processing for the specified task. */
1659 /* Initialize for a new RCU-tasks-trace grace period. */
1663 int cpu; in rcu_tasks_trace_pregp_step() local
1669 for_each_possible_cpu(cpu) in rcu_tasks_trace_pregp_step()
1670 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); in rcu_tasks_trace_pregp_step()
1672 // Disable CPU hotplug across the CPU scan for the benefit of in rcu_tasks_trace_pregp_step()
1674 // in CPU-hotplug code paths. in rcu_tasks_trace_pregp_step()
1679 for_each_online_cpu(cpu) { in rcu_tasks_trace_pregp_step()
1681 t = cpu_curr_snapshot(cpu); in rcu_tasks_trace_pregp_step()
1690 // current RCU tasks trace read-side critical section. in rcu_tasks_trace_pregp_step()
1691 for_each_possible_cpu(cpu) { in rcu_tasks_trace_pregp_step()
1692 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); in rcu_tasks_trace_pregp_step()
1694 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks); in rcu_tasks_trace_pregp_step()
1698 list_del_init(&t->trc_blkd_node); in rcu_tasks_trace_pregp_step()
1699 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_pregp_step()
1709 // Re-enable CPU hotplug now that the holdout list is populated. in rcu_tasks_trace_pregp_step()
1718 // Wait for late-stage exiting tasks to finish exiting. in rcu_tasks_trace_postscan()
1724 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. in rcu_tasks_trace_postscan()
1740 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); in trc_check_slow_task()
1741 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); in trc_check_slow_task()
1742 trc_rdrp->needqs = rcu_ld_need_qs(t); in trc_check_slow_task()
1749 int cpu; in show_stalled_task_trace() local
1757 cpu = task_cpu(t); in show_stalled_task_trace()
1760 t->pid, in show_stalled_task_trace()
1761 ".I"[t->trc_ipi_to_cpu >= 0], in show_stalled_task_trace()
1764 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n", in show_stalled_task_trace()
1765 t->pid, in show_stalled_task_trace()
1768 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], in show_stalled_task_trace()
1769 ".B"[!!data_race(t->trc_reader_special.b.blocked)], in show_stalled_task_trace()
1773 cpu, cpu_online(cpu) ? "" : "(offline)"); in show_stalled_task_trace()
1780 int cpu; in show_stalled_ipi_trace() local
1782 for_each_possible_cpu(cpu) in show_stalled_ipi_trace()
1783 if (per_cpu(trc_ipi_to_cpu, cpu)) in show_stalled_ipi_trace()
1784 pr_alert("\tIPI outstanding to CPU %d\n", cpu); in show_stalled_ipi_trace()
1793 // Disable CPU hotplug across the holdout list scan for IPIs. in check_all_holdout_tasks_trace()
1798 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1803 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1811 // Re-enable CPU hotplug now that the holdout list scan has completed. in check_all_holdout_tasks_trace()
1828 int cpu; in rcu_tasks_trace_postgp() local
1831 // if a CPU has gone offline or transitioned to userspace in the in rcu_tasks_trace_postgp()
1835 for_each_online_cpu(cpu) in rcu_tasks_trace_postgp()
1836 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) in rcu_tasks_trace_postgp()
1837 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); in rcu_tasks_trace_postgp()
1846 union rcu_special trs = READ_ONCE(t->trc_reader_special); in exit_tasks_rcu_finish_trace()
1849 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in exit_tasks_rcu_finish_trace()
1853 WRITE_ONCE(t->trc_reader_nesting, 0); in exit_tasks_rcu_finish_trace()
1857 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
1861 * The callback function will be invoked some time after a trace rcu-tasks
1863 * trace rcu-tasks read-side critical sections have completed. These
1864 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
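Updater-side counterpart to the reader sketch earlier in this listing (same hypothetical struct my_prog): publish a replacement, then defer the free until every pre-existing trace reader has finished.

static void my_prog_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_prog, rh));
}

static void my_prog_replace(struct my_prog __rcu **slot, struct my_prog *newp)
{
        struct my_prog *oldp;

        /* Caller serializes updates to *slot. */
        oldp = rcu_dereference_protected(*slot, true);
        rcu_assign_pointer(*slot, newp);
        if (oldp)
                call_rcu_tasks_trace(&oldp->rh, my_prog_free_cb);
}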
1877 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1879 * Control will return to the caller some time after a trace rcu-tasks
1881 * trace rcu-tasks read-side critical sections have elapsed. These read-side
1895 …race_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical secti… in synchronize_rcu_tasks_trace()
1901 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1912 int rcu_tasks_trace_lazy_ms = -1;
2004 pr_info("Callback from %s invoked.\n", rttd->name); in test_rcu_tasks_callback()
2006 rttd->notrun = false; in test_rcu_tasks_callback()
2034 * Return: 0 - test passed
2035 * 1 - test failed, but has not timed out yet
2036 * -1 - test failed and timed out
2049 pr_err("%s has failed boot-time tests.\n", tests[i].name); in rcu_tasks_verify_self_tests()
2050 ret = -1; in rcu_tasks_verify_self_tests()
2103 // Run the self-tests. in rcu_init_tasks_generic()