Lines matching "timer", "cannot", "wake", "cpu" in kernel/rcu/tree_nocb.h

1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 * Internal non-public definitions that provide either classic
21 return lockdep_is_held(&rdp->nocb_lock); in rcu_lockdep_is_held_nocb()
27 if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread) in rcu_current_is_nocb_kthread()
30 if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread) in rcu_current_is_nocb_kthread()
37 * Offload callback processing from the boot-time-specified set of CPUs
39 * created that pull the callbacks from the corresponding CPU, wait for
43 * invoke callbacks. Each GP kthread invokes its own CBs. The no-CBs CPUs
46 * in which case each kthread actively polls its CPU. (Which isn't so great
47 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
50 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
51 * running CPU-bound user-mode computations.
53 * Offloading of callbacks can also be used as an energy-efficiency
55 * about entering dyntick-idle mode.
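As the overview above says, the set of offloaded CPUs is chosen at boot. A typical command line enabling both offloading and the optional polling mode looks like this (the CPU range is only an example; both parameter names are documented in Documentation/admin-guide/kernel-parameters.txt):

	rcu_nocbs=1-7 rcu_nocb_poll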
60 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
68 pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); in rcu_nocb_setup()
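Most of rcu_nocb_setup() is elided by this matching-lines view. A minimal sketch of how such a boot-time CPU-list parameter is typically wired up, assuming the standard cpulist_parse()/__setup() pattern (the example_ function name is hypothetical):

	static int __init example_nocbs_setup(char *str)
	{
		alloc_bootmem_cpumask_var(&rcu_nocb_mask);
		if (cpulist_parse(str, rcu_nocb_mask)) {
			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
			cpumask_setall(rcu_nocb_mask);	/* fall back to offloading everything */
		}
		return 1;	/* __setup() handlers return 1 once the option is consumed */
	}
	__setup("rcu_nocbs=", example_nocbs_setup);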
85 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
87 * on ->nocb_lock, which only can happen at high call_rcu() rates.
93 * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
94 * lock isn't immediately available, increment ->nocb_lock_contended to
98 __acquires(&rdp->nocb_bypass_lock) in rcu_nocb_bypass_lock()
101 if (raw_spin_trylock(&rdp->nocb_bypass_lock)) in rcu_nocb_bypass_lock()
103 atomic_inc(&rdp->nocb_lock_contended); in rcu_nocb_bypass_lock()
104 WARN_ON_ONCE(smp_processor_id() != rdp->cpu); in rcu_nocb_bypass_lock()
106 raw_spin_lock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_lock()
108 atomic_dec(&rdp->nocb_lock_contended); in rcu_nocb_bypass_lock()
112 * Spinwait until the specified rcu_data structure's ->nocb_lock is
113 * not contended. Please note that this is extremely special-purpose,
114 * relying on the fact that at most two kthreads and one CPU contend for
116 * grace-period-duration time intervals between successive acquisitions
118 * mechanism, and further to apply it only to the CPU doing floods of
123 WARN_ON_ONCE(smp_processor_id() != rdp->cpu); in rcu_nocb_wait_contended()
124 while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended))) in rcu_nocb_wait_contended()
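The body of that spinwait loop is not among the matching lines; a sketch of the conventional form, assuming the usual cpu_relax() busy-wait:

	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
		cpu_relax();	/* let the contending kthread finish its critical section */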
130 * ->nocb_bypass_lock.
135 return raw_spin_trylock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_trylock()
139 * Release the specified rcu_data structure's ->nocb_bypass_lock.
142 __releases(&rdp->nocb_bypass_lock) in rcu_nocb_bypass_unlock()
145 raw_spin_unlock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_unlock()
149 * Acquire the specified rcu_data structure's ->nocb_lock, but only
150 * if it corresponds to a no-CBs CPU.
157 raw_spin_lock(&rdp->nocb_lock); in rcu_nocb_lock()
161 * Release the specified rcu_data structure's ->nocb_lock, but only
162 * if it corresponds to a no-CBs CPU.
168 raw_spin_unlock(&rdp->nocb_lock); in rcu_nocb_unlock()
173 * Release the specified rcu_data structure's ->nocb_lock and restore
174 * interrupts, but only if it corresponds to a no-CBs CPU.
181 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); in rcu_nocb_unlock_irqrestore()
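In all three helpers the elided first step is a check that the CPU is actually offloaded; a sketch of the pattern, assuming rcu_rdp_is_offloaded() is the predicate used (the example_ name is hypothetical):

	static void example_nocb_lock(struct rcu_data *rdp)
	{
		if (!rcu_rdp_is_offloaded(rdp))
			return;		/* not a no-CBs CPU: ->cblist needs no ->nocb_lock */
		raw_spin_lock(&rdp->nocb_lock);
	}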
187 /* Lockdep check that ->cblist may be safely accessed. */
192 lockdep_assert_held(&rdp->nocb_lock); in rcu_lockdep_assert_cblist_protected()
196 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
206 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; in rcu_nocb_gp_get()
211 init_swait_queue_head(&rnp->nocb_gp_wq[0]); in rcu_init_one_nocb()
212 init_swait_queue_head(&rnp->nocb_gp_wq[1]); in rcu_init_one_nocb()
218 __releases(rdp_gp->nocb_gp_lock) in __wake_nocb_gp()
222 if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) { in __wake_nocb_gp()
223 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in __wake_nocb_gp()
224 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __wake_nocb_gp()
229 if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { in __wake_nocb_gp()
230 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); in __wake_nocb_gp()
231 del_timer(&rdp_gp->nocb_timer); in __wake_nocb_gp()
234 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { in __wake_nocb_gp()
235 WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); in __wake_nocb_gp()
238 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in __wake_nocb_gp()
240 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); in __wake_nocb_gp()
241 wake_up_process(rdp_gp->nocb_gp_kthread); in __wake_nocb_gp()
253 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in wake_nocb_gp()
255 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp()
285 * Arrange to wake the GP kthread for this NOCB group at some future
292 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in wake_nocb_gp_defer()
294 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp_defer()
298 * callback storms, no need to wake up too early. in wake_nocb_gp_defer()
301 rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) { in wake_nocb_gp_defer()
302 mod_timer(&rdp_gp->nocb_timer, jiffies + jiffies_till_flush); in wake_nocb_gp_defer()
303 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); in wake_nocb_gp_defer()
305 mod_timer(&rdp_gp->nocb_timer, jiffies + 2); in wake_nocb_gp_defer()
306 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); in wake_nocb_gp_defer()
308 if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE) in wake_nocb_gp_defer()
309 mod_timer(&rdp_gp->nocb_timer, jiffies + 1); in wake_nocb_gp_defer()
310 if (rdp_gp->nocb_defer_wakeup < waketype) in wake_nocb_gp_defer()
311 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); in wake_nocb_gp_defer()
314 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp_defer()
316 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); in wake_nocb_gp_defer()
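Caller-side usage sketch: hot paths such as call_rcu() enqueues prefer this deferred form over waking the rcuog kthread directly, for example (the wake level and trace string here are illustrative, not copied from the elided callers):

	/* Arrange for ->nocb_timer to do the wakeup a jiffy or two from now. */
	wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, TPS("WakeEmptyIsDeferred"));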
320 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
321 * However, if there is a callback to be enqueued and if ->nocb_bypass
322 * proves to be initially empty, just return false because the no-CB GP
338 lockdep_assert_held(&rdp->nocb_bypass_lock); in rcu_nocb_do_flush_bypass()
339 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { in rcu_nocb_do_flush_bypass()
340 raw_spin_unlock(&rdp->nocb_bypass_lock); in rcu_nocb_do_flush_bypass()
343 /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */ in rcu_nocb_do_flush_bypass()
345 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ in rcu_nocb_do_flush_bypass()
349 * ->cblist so that we can take advantage of the grace-period that will in rcu_nocb_do_flush_bypass()
354 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
357 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
358 WRITE_ONCE(rdp->lazy_len, 0); in rcu_nocb_do_flush_bypass()
360 rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl); in rcu_nocb_do_flush_bypass()
361 WRITE_ONCE(rdp->nocb_bypass_first, j); in rcu_nocb_do_flush_bypass()
367 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
368 * However, if there is a callback to be enqueued and if ->nocb_bypass
369 * proves to be initially empty, just return false because the no-CB GP
385 * If the ->nocb_bypass_lock is immediately available, flush the
386 * ->nocb_bypass queue into ->cblist.
398 * See whether it is appropriate to use the ->nocb_bypass list in order
399 * to control contention on ->nocb_lock. A limited number of direct
400 * enqueues are permitted into ->cblist per jiffy. If ->nocb_bypass
401 * is non-empty, further callbacks must be placed into ->nocb_bypass,
403 * back to direct use of ->cblist. However, ->nocb_bypass should not be
404 * used if ->cblist is empty, because otherwise callbacks can be stranded
405 * on ->nocb_bypass because we cannot count on the current CPU ever again
406 * invoking call_rcu(). The general rule is that if ->nocb_bypass is
407 * non-empty, the corresponding no-CBs grace-period kthread must not be
411 * as doing so would confuse the auto-initialization code. Besides
413 * there is only one CPU in operation.
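A condensed restatement of that policy as a predicate may help. This is a sketch only: it assumes rcu_rdp_is_offloaded() and the rcu_scheduler_active check are the relevant tests, and it omits the flushing, laziness, tracing, and locking that the real rcu_nocb_try_bypass() below performs:

	static bool example_should_bypass(struct rcu_data *rdp)
	{
		if (!rcu_rdp_is_offloaded(rdp))
			return false;	/* no bypass list without offloading */
		if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
			return false;	/* early boot: enqueue directly onto ->cblist */
		if (rcu_cblist_n_cbs(&rdp->nocb_bypass))
			return true;	/* bypass non-empty: keep appending to it */
		/* Otherwise bypass only once this jiffy's direct enqueues hit the limit. */
		return rdp->nocb_nobypass_count >= nocb_nobypass_lim_per_jiffy;
	}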
422 long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in rcu_nocb_try_bypass()
423 bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len)); in rcu_nocb_try_bypass()
430 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
434 // In the process of (de-)offloading: no bypassing, but in rcu_nocb_try_bypass()
436 if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) { in rcu_nocb_try_bypass()
438 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
442 // Don't use ->nocb_bypass during early boot. in rcu_nocb_try_bypass()
445 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
446 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
451 // moving back from ->nocb_bypass to ->cblist. in rcu_nocb_try_bypass()
452 if (j == rdp->nocb_nobypass_last) { in rcu_nocb_try_bypass()
453 c = rdp->nocb_nobypass_count + 1; in rcu_nocb_try_bypass()
455 WRITE_ONCE(rdp->nocb_nobypass_last, j); in rcu_nocb_try_bypass()
456 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; in rcu_nocb_try_bypass()
457 if (ULONG_CMP_LT(rdp->nocb_nobypass_count, in rcu_nocb_try_bypass()
463 WRITE_ONCE(rdp->nocb_nobypass_count, c); in rcu_nocb_try_bypass()
465 // If there hasn't yet been all that many ->cblist enqueues in rcu_nocb_try_bypass()
466 // this jiffy, tell the caller to enqueue onto ->cblist. But flush in rcu_nocb_try_bypass()
467 // ->nocb_bypass first. in rcu_nocb_try_bypass()
469 if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) { in rcu_nocb_try_bypass()
471 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
473 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
477 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
481 // If ->nocb_bypass has been used too long or is too full, in rcu_nocb_try_bypass()
482 // flush ->nocb_bypass to ->cblist. in rcu_nocb_try_bypass()
483 if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) || in rcu_nocb_try_bypass()
485 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush))) || in rcu_nocb_try_bypass()
488 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
492 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
494 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
497 if (j != rdp->nocb_gp_adv_time && in rcu_nocb_try_bypass()
498 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in rcu_nocb_try_bypass()
499 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { in rcu_nocb_try_bypass()
500 rcu_advance_cbs_nowake(rdp->mynode, rdp); in rcu_nocb_try_bypass()
501 rdp->nocb_gp_adv_time = j; in rcu_nocb_try_bypass()
505 // Don't wait for the wake up timer as it may be too far ahead. in rcu_nocb_try_bypass()
506 // Wake up the GP thread now instead, if the cblist was empty. in rcu_nocb_try_bypass()
515 ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in rcu_nocb_try_bypass()
516 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ in rcu_nocb_try_bypass()
517 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_try_bypass()
520 WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1); in rcu_nocb_try_bypass()
523 WRITE_ONCE(rdp->nocb_bypass_first, j); in rcu_nocb_try_bypass()
524 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ")); in rcu_nocb_try_bypass()
527 smp_mb(); /* Order enqueue before wake. */ in rcu_nocb_try_bypass()
528 // A wake up of the grace period kthread or timer adjustment in rcu_nocb_try_bypass()
534 // b. The new CB is non-lazy. in rcu_nocb_try_bypass()
538 // No-CBs GP kthread might be indefinitely asleep, if so, wake. in rcu_nocb_try_bypass()
540 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) { in rcu_nocb_try_bypass()
541 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
545 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
554 * Awaken the no-CBs grace-period kthread if needed, either due to it
557 * If warranted, also wake up the kthread servicing this CPUs queues.
561 __releases(rdp->nocb_lock) in __call_rcu_nocb_wake()
571 t = READ_ONCE(rdp->nocb_gp_kthread); in __call_rcu_nocb_wake()
574 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __call_rcu_nocb_wake()
579 len = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_nocb_wake()
580 bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass); in __call_rcu_nocb_wake()
581 lazy_len = READ_ONCE(rdp->lazy_len); in __call_rcu_nocb_wake()
583 rdp->qlen_last_fqs_check = len; in __call_rcu_nocb_wake()
593 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __call_rcu_nocb_wake()
600 } else if (len > rdp->qlen_last_fqs_check + qhimark) { in __call_rcu_nocb_wake()
602 rdp->qlen_last_fqs_check = len; in __call_rcu_nocb_wake()
604 if (j != rdp->nocb_gp_adv_time && in __call_rcu_nocb_wake()
605 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in __call_rcu_nocb_wake()
606 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { in __call_rcu_nocb_wake()
607 rcu_advance_cbs_nowake(rdp->mynode, rdp); in __call_rcu_nocb_wake()
608 rdp->nocb_gp_adv_time = j; in __call_rcu_nocb_wake()
611 if ((rdp->nocb_cb_sleep || in __call_rcu_nocb_wake()
612 !rcu_segcblist_ready_cbs(&rdp->cblist)) && in __call_rcu_nocb_wake()
613 !timer_pending(&rdp->nocb_timer)) { in __call_rcu_nocb_wake()
619 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); in __call_rcu_nocb_wake()
623 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); in __call_rcu_nocb_wake()
630 struct rcu_segcblist *cblist = &rdp->cblist; in nocb_gp_toggle_rdp()
639 * We will handle this rdp until it ever gets de-offloaded. in nocb_gp_toggle_rdp()
648 * De-offloading. Clear our flag and notify the de-offload worker. in nocb_gp_toggle_rdp()
649 * We will ignore this rdp until it ever gets re-offloaded. in nocb_gp_toggle_rdp()
657 ret = -1; in nocb_gp_toggle_rdp()
665 static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu) in nocb_gp_sleep() argument
667 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep")); in nocb_gp_sleep()
668 swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq, in nocb_gp_sleep()
669 !READ_ONCE(my_rdp->nocb_gp_sleep)); in nocb_gp_sleep()
670 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep")); in nocb_gp_sleep()
674 * No-CBs GP kthreads come here to wait for additional callbacks to show up
680 int __maybe_unused cpu = my_rdp->cpu; in nocb_gp_wait() local
697 * and the global grace-period kthread are awakened if needed. in nocb_gp_wait()
699 WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); in nocb_gp_wait()
702 * CPU is de-offloaded and added to the list before that CPU is in nocb_gp_wait()
703 * (re-)offloaded. If the following loop happens to be referencing in nocb_gp_wait()
705 * CPU is de-offloaded and then immediately re-offloaded, this in nocb_gp_wait()
710 * entire loop is forced after a given CPU's rcu_data structure in nocb_gp_wait()
711 * is added to the list, so the skipped-over rcu_data structures in nocb_gp_wait()
714 list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) { in nocb_gp_wait()
719 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); in nocb_gp_wait()
721 lockdep_assert_held(&rdp->nocb_lock); in nocb_gp_wait()
722 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in nocb_gp_wait()
723 lazy_ncbs = READ_ONCE(rdp->lazy_len); in nocb_gp_wait()
726 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush) || in nocb_gp_wait()
730 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || in nocb_gp_wait()
733 } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) { in nocb_gp_wait()
741 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in nocb_gp_wait()
742 lazy_ncbs = READ_ONCE(rdp->lazy_len); in nocb_gp_wait()
746 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in nocb_gp_wait()
753 rnp = rdp->mynode; in nocb_gp_wait()
757 if (!rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
759 (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in nocb_gp_wait()
760 rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) { in nocb_gp_wait()
763 wasempty = rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
769 !rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
771 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) { in nocb_gp_wait()
776 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in nocb_gp_wait()
779 if (rcu_segcblist_ready_cbs(&rdp->cblist)) { in nocb_gp_wait()
780 needwake = rdp->nocb_cb_sleep; in nocb_gp_wait()
781 WRITE_ONCE(rdp->nocb_cb_sleep, false); in nocb_gp_wait()
782 smp_mb(); /* CB invocation -after- GP end. */ in nocb_gp_wait()
788 swake_up_one(&rdp->nocb_cb_wq); in nocb_gp_wait()
795 my_rdp->nocb_gp_bypass = bypass; in nocb_gp_wait()
796 my_rdp->nocb_gp_gp = needwait_gp; in nocb_gp_wait()
797 my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0; in nocb_gp_wait()
799 // At least one child with non-empty ->nocb_bypass, so set in nocb_gp_wait()
800 // timer in order to avoid stranding its callbacks. in nocb_gp_wait()
 802 // If the bypass list has only lazy CBs, add a deferred lazy wakeup. in nocb_gp_wait()
806 // Otherwise add a deferred bypass wake up. in nocb_gp_wait()
816 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll")); in nocb_gp_wait()
817 if (list_empty(&my_rdp->nocb_head_rdp)) { in nocb_gp_wait()
818 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
819 if (!my_rdp->nocb_toggling_rdp) in nocb_gp_wait()
820 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); in nocb_gp_wait()
821 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
823 nocb_gp_sleep(my_rdp, cpu); in nocb_gp_wait()
829 nocb_gp_sleep(my_rdp, cpu); in nocb_gp_wait()
831 rnp = my_rdp->mynode; in nocb_gp_wait()
834 rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1], in nocb_gp_wait()
835 rcu_seq_done(&rnp->gp_seq, wait_gp_seq) || in nocb_gp_wait()
836 !READ_ONCE(my_rdp->nocb_gp_sleep)); in nocb_gp_wait()
841 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
842 // (De-)queue an rdp to/from the group if its nocb state is changing in nocb_gp_wait()
843 rdp_toggling = my_rdp->nocb_toggling_rdp; in nocb_gp_wait()
845 my_rdp->nocb_toggling_rdp = NULL; in nocb_gp_wait()
847 if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { in nocb_gp_wait()
848 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); in nocb_gp_wait()
849 del_timer(&my_rdp->nocb_timer); in nocb_gp_wait()
851 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); in nocb_gp_wait()
852 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
854 rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp); in nocb_gp_wait()
863 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
864 my_rdp->nocb_toggling_rdp = NULL; in nocb_gp_wait()
865 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
875 list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp); in nocb_gp_wait()
877 list_del(&rdp_toggling->nocb_entry_rdp); in nocb_gp_wait()
879 swake_up_one(&rdp_toggling->nocb_state_wq); in nocb_gp_wait()
882 my_rdp->nocb_gp_seq = -1; in nocb_gp_wait()
887 * No-CBs grace-period-wait kthread. There is one of these per group
888 * of CPUs, but only once at least one CPU in that group has come online
892 * that then have callback-invocation work to do.
899 WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1); in rcu_nocb_gp_kthread()
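The rest of the kthread body is elided; per the comment above it is essentially an endless loop around nocb_gp_wait(), roughly as follows (the cond_resched_tasks_rcu_qs() call and the example_ name are assumptions, not lifted from the elided lines):

	static int example_nocb_gp_kthread(void *arg)
	{
		struct rcu_data *rdp = arg;

		for (;;) {
			WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
			nocb_gp_wait(rdp);
			cond_resched_tasks_rcu_qs();
		}
		return 0;
	}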
910 return rcu_segcblist_test_flags(&rdp->cblist, flags); in nocb_cb_can_run()
915 return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep); in nocb_cb_wait_cond()
919 * Invoke any ready callbacks from the corresponding no-CBs CPU,
924 struct rcu_segcblist *cblist = &rdp->cblist; in nocb_cb_wait()
930 struct rcu_node *rnp = rdp->mynode; in nocb_cb_wait()
933 swait_event_interruptible_exclusive(rdp->nocb_cb_wq, in nocb_cb_wait()
937 if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^ in nocb_cb_wait()
939 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); in nocb_cb_wait()
949 * transitioning to/from NOCB mode, a self-requeuing callback might in nocb_cb_wait()
959 rcu_seq_done(&rnp->gp_seq, cur_gp_seq) && in nocb_cb_wait()
961 needwake_gp = rcu_advance_cbs(rdp->mynode, rdp); in nocb_cb_wait()
975 * De-offloading. Clear our flag and notify the de-offload worker. in nocb_cb_wait()
977 * get re-offloaded. in nocb_cb_wait()
985 WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep); in nocb_cb_wait()
987 if (rdp->nocb_cb_sleep) in nocb_cb_wait()
988 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); in nocb_cb_wait()
995 swake_up_one(&rdp->nocb_state_wq); in nocb_cb_wait()
999 * Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke
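Its callback-side counterpart has the same shape: the rcuo CB kthread referred to by the truncated comment above simply loops over nocb_cb_wait(). A sketch (the example_ name is hypothetical):

	static int example_nocb_cb_kthread(void *arg)
	{
		struct rcu_data *rdp = arg;

		for (;;)
			nocb_cb_wait(rdp);	/* invoke ready CBs, then sleep for more */
		return 0;
	}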
1018 return READ_ONCE(rdp->nocb_defer_wakeup) >= level; in rcu_nocb_need_deferred_wakeup()
1025 __releases(rdp_gp->nocb_gp_lock) in do_nocb_deferred_wakeup_common()
1031 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in do_nocb_deferred_wakeup_common()
1035 ndw = rdp_gp->nocb_defer_wakeup; in do_nocb_deferred_wakeup_common()
1037 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); in do_nocb_deferred_wakeup_common()
1042 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
1048 WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp); in do_nocb_deferred_wakeup_timer()
1049 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); in do_nocb_deferred_wakeup_timer()
1051 raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags); in do_nocb_deferred_wakeup_timer()
1052 smp_mb__after_spinlock(); /* Timer expire before wakeup. */ in do_nocb_deferred_wakeup_timer()
1058 * This means we do an inexact common-case check. Note that if
1059 * we miss, ->nocb_timer will eventually clean things up.
1064 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in do_nocb_deferred_wakeup()
1069 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in do_nocb_deferred_wakeup()
1081 __releases(rdp->nocb_lock) in rdp_offload_toggle()
1083 struct rcu_segcblist *cblist = &rdp->cblist; in rdp_offload_toggle()
1084 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in rdp_offload_toggle()
1089 if (rdp->nocb_cb_sleep) in rdp_offload_toggle()
1090 rdp->nocb_cb_sleep = false; in rdp_offload_toggle()
1094 * Ignore former value of nocb_cb_sleep and force wake up as it could in rdp_offload_toggle()
1097 swake_up_one(&rdp->nocb_cb_wq); in rdp_offload_toggle()
1099 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in rdp_offload_toggle()
1101 WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp); in rdp_offload_toggle()
1102 if (rdp_gp->nocb_gp_sleep) { in rdp_offload_toggle()
1103 rdp_gp->nocb_gp_sleep = false; in rdp_offload_toggle()
1106 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in rdp_offload_toggle()
1114 struct rcu_segcblist *cblist = &rdp->cblist; in rcu_nocb_rdp_deoffload()
1117 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in rcu_nocb_rdp_deoffload()
1121 * rcuog/o[p] spawn failed, because at this time the rdp->cpu in rcu_nocb_rdp_deoffload()
1124 WARN_ON_ONCE((rdp->cpu != raw_smp_processor_id()) && cpu_online(rdp->cpu)); in rcu_nocb_rdp_deoffload()
1126 pr_info("De-offloading %d\n", rdp->cpu); in rcu_nocb_rdp_deoffload()
1131 * running on the target CPU holding ->nocb_lock (thus having in rcu_nocb_rdp_deoffload()
1150 mutex_lock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_nocb_rdp_deoffload()
1151 if (rdp_gp->nocb_gp_kthread) { in rcu_nocb_rdp_deoffload()
1153 wake_up_process(rdp_gp->nocb_gp_kthread); in rcu_nocb_rdp_deoffload()
1159 if (!rdp->nocb_cb_kthread) { in rcu_nocb_rdp_deoffload()
1161 rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB); in rcu_nocb_rdp_deoffload()
1165 swait_event_exclusive(rdp->nocb_state_wq, in rcu_nocb_rdp_deoffload()
1175 rcu_segcblist_clear_flags(&rdp->cblist, in rcu_nocb_rdp_deoffload()
1179 list_del(&rdp->nocb_entry_rdp); in rcu_nocb_rdp_deoffload()
1181 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_nocb_rdp_deoffload()
1197 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); in rcu_nocb_rdp_deoffload()
1200 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_rdp_deoffload()
1206 int rcu_nocb_cpu_deoffload(int cpu) in rcu_nocb_cpu_deoffload() argument
1208 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_nocb_cpu_deoffload()
1214 if (cpu_online(cpu)) { in rcu_nocb_cpu_deoffload()
1215 ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp); in rcu_nocb_cpu_deoffload()
1217 cpumask_clear_cpu(cpu, rcu_nocb_mask); in rcu_nocb_cpu_deoffload()
1219 pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu); in rcu_nocb_cpu_deoffload()
1220 ret = -EINVAL; in rcu_nocb_cpu_deoffload()
1233 struct rcu_segcblist *cblist = &rdp->cblist; in rcu_nocb_rdp_offload()
1236 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in rcu_nocb_rdp_offload()
1238 WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id()); in rcu_nocb_rdp_offload()
1240 * For now we only support re-offload, ie: the rdp must have been in rcu_nocb_rdp_offload()
1243 if (!rdp->nocb_gp_rdp) in rcu_nocb_rdp_offload()
1244 return -EINVAL; in rcu_nocb_rdp_offload()
1246 if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread)) in rcu_nocb_rdp_offload()
1247 return -EINVAL; in rcu_nocb_rdp_offload()
1249 pr_info("Offloading %d\n", rdp->cpu); in rcu_nocb_rdp_offload()
1255 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); in rcu_nocb_rdp_offload()
1259 * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode). in rcu_nocb_rdp_offload()
1261 * rdp->cblist must be visible remotely by the nocb kthreads in rcu_nocb_rdp_offload()
1262 * upon wake up after reading the cblist flags. in rcu_nocb_rdp_offload()
1267 * ------------------------- ---------------------------- in rcu_nocb_rdp_offload()
1275 wake_up_process(rdp_gp->nocb_gp_kthread); in rcu_nocb_rdp_offload()
1276 swait_event_exclusive(rdp->nocb_state_wq, in rcu_nocb_rdp_offload()
1291 int rcu_nocb_cpu_offload(int cpu) in rcu_nocb_cpu_offload() argument
1293 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_nocb_cpu_offload()
1299 if (cpu_online(cpu)) { in rcu_nocb_cpu_offload()
1300 ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp); in rcu_nocb_cpu_offload()
1302 cpumask_set_cpu(cpu, rcu_nocb_mask); in rcu_nocb_cpu_offload()
1304 pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu); in rcu_nocb_cpu_offload()
1305 ret = -EINVAL; in rcu_nocb_cpu_offload()
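Both entry points are exported for use from sleepable task context; a usage sketch (the CPU number is arbitrary, chosen for illustration only):

	int ret;

	ret = rcu_nocb_cpu_deoffload(3);	/* back to softirq/rcuc callback processing */
	if (!ret)
		ret = rcu_nocb_cpu_offload(3);	/* hand callbacks back to the rcuo kthreads */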
1319 int cpu; in lazy_rcu_shrink_count() local
1325 /* Protect rcu_nocb_mask against concurrent (de-)offloading. */ in lazy_rcu_shrink_count()
1330 for_each_cpu(cpu, rcu_nocb_mask) { in lazy_rcu_shrink_count()
1331 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in lazy_rcu_shrink_count()
1333 count += READ_ONCE(rdp->lazy_len); in lazy_rcu_shrink_count()
1344 int cpu; in lazy_rcu_shrink_scan() local
1351 * Protect against concurrent (de-)offloading. Otherwise nocb locking in lazy_rcu_shrink_scan()
1365 for_each_cpu(cpu, rcu_nocb_mask) { in lazy_rcu_shrink_scan()
1366 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in lazy_rcu_shrink_scan()
1372 if (!READ_ONCE(rdp->lazy_len)) in lazy_rcu_shrink_scan()
1381 _count = READ_ONCE(rdp->lazy_len); in lazy_rcu_shrink_scan()
1389 sc->nr_to_scan -= _count; in lazy_rcu_shrink_scan()
1391 if (sc->nr_to_scan <= 0) in lazy_rcu_shrink_scan()
1403 int cpu; in rcu_init_nohz() local
1433 lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy"); in rcu_init_nohz()
1437 lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count; in rcu_init_nohz()
1438 lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan; in rcu_init_nohz()
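The final registration step is not among the matching lines; with the shrinker_alloc() call visible above, it is normally completed with the paired shrinker_register() (shown here as a sketch under that assumption):

	if (lazy_rcu_shrinker)
		shrinker_register(lazy_rcu_shrinker);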
1455 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); in rcu_init_nohz()
1457 for_each_cpu(cpu, rcu_nocb_mask) { in rcu_init_nohz()
1458 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_init_nohz()
1459 if (rcu_segcblist_empty(&rdp->cblist)) in rcu_init_nohz()
1460 rcu_segcblist_init(&rdp->cblist); in rcu_init_nohz()
1461 rcu_segcblist_offload(&rdp->cblist, true); in rcu_init_nohz()
1462 rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP); in rcu_init_nohz()
1463 rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE); in rcu_init_nohz()
1468 /* Initialize per-rcu_data variables for no-CBs CPUs. */
1471 init_swait_queue_head(&rdp->nocb_cb_wq); in rcu_boot_init_nocb_percpu_data()
1472 init_swait_queue_head(&rdp->nocb_gp_wq); in rcu_boot_init_nocb_percpu_data()
1473 init_swait_queue_head(&rdp->nocb_state_wq); in rcu_boot_init_nocb_percpu_data()
1474 raw_spin_lock_init(&rdp->nocb_lock); in rcu_boot_init_nocb_percpu_data()
1475 raw_spin_lock_init(&rdp->nocb_bypass_lock); in rcu_boot_init_nocb_percpu_data()
1476 raw_spin_lock_init(&rdp->nocb_gp_lock); in rcu_boot_init_nocb_percpu_data()
1477 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); in rcu_boot_init_nocb_percpu_data()
1478 rcu_cblist_init(&rdp->nocb_bypass); in rcu_boot_init_nocb_percpu_data()
1479 WRITE_ONCE(rdp->lazy_len, 0); in rcu_boot_init_nocb_percpu_data()
1480 mutex_init(&rdp->nocb_gp_kthread_mutex); in rcu_boot_init_nocb_percpu_data()
1484 * If the specified CPU is a no-CBs CPU that does not already have its
1486 * for this CPU's group has not yet been created, spawn it as well.
1488 static void rcu_spawn_cpu_nocb_kthread(int cpu) in rcu_spawn_cpu_nocb_kthread() argument
1490 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_spawn_cpu_nocb_kthread()
1499 if (rdp->nocb_cb_kthread) in rcu_spawn_cpu_nocb_kthread()
1504 rdp_gp = rdp->nocb_gp_rdp; in rcu_spawn_cpu_nocb_kthread()
1505 mutex_lock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_spawn_cpu_nocb_kthread()
1506 if (!rdp_gp->nocb_gp_kthread) { in rcu_spawn_cpu_nocb_kthread()
1508 "rcuog/%d", rdp_gp->cpu); in rcu_spawn_cpu_nocb_kthread()
1510 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_spawn_cpu_nocb_kthread()
1513 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t); in rcu_spawn_cpu_nocb_kthread()
1517 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_spawn_cpu_nocb_kthread()
1519 /* Spawn the kthread for this CPU. */ in rcu_spawn_cpu_nocb_kthread()
1521 "rcuo%c/%d", rcu_state.abbr, cpu); in rcu_spawn_cpu_nocb_kthread()
1528 WRITE_ONCE(rdp->nocb_cb_kthread, t); in rcu_spawn_cpu_nocb_kthread()
1529 WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread); in rcu_spawn_cpu_nocb_kthread()
1535 cpumask_clear_cpu(cpu, rcu_nocb_mask); in rcu_spawn_cpu_nocb_kthread()
1540 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
1541 static int rcu_nocb_gp_stride = -1;
1545 * Initialize GP-CB relationships for all no-CBs CPU.
1549 int cpu; in rcu_organize_nocb_kthreads() local
1560 if (ls == -1) { in rcu_organize_nocb_kthreads()
1567 * Should the corresponding CPU come online in the future, then in rcu_organize_nocb_kthreads()
1570 for_each_possible_cpu(cpu) { in rcu_organize_nocb_kthreads()
1571 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_organize_nocb_kthreads()
1572 if (rdp->cpu >= nl) { in rcu_organize_nocb_kthreads()
1575 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; in rcu_organize_nocb_kthreads()
1577 INIT_LIST_HEAD(&rdp->nocb_head_rdp); in rcu_organize_nocb_kthreads()
1584 pr_alert("%s: No-CB GP kthread CPU %d:", in rcu_organize_nocb_kthreads()
1585 __func__, cpu); in rcu_organize_nocb_kthreads()
1591 pr_cont(" %d", cpu); in rcu_organize_nocb_kthreads()
1593 rdp->nocb_gp_rdp = rdp_gp; in rcu_organize_nocb_kthreads()
1594 if (cpumask_test_cpu(cpu, rcu_nocb_mask)) in rcu_organize_nocb_kthreads()
1595 list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp); in rcu_organize_nocb_kthreads()
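Worked example of the grouping arithmetic above: with nr_cpu_ids == 16 and rcu_nocb_gp_stride left at its default of -1, ls becomes sqrt(16) == 4. CPU 0 satisfies rdp->cpu >= nl (both zero) and starts the first group, setting nl = DIV_ROUND_UP(1, 4) * 4 = 4; CPUs 1-3 fall below nl and join that group; CPU 4 then starts the next group with nl = DIV_ROUND_UP(5, 4) * 4 = 8, and so on, yielding one rcuog GP kthread per block of four CPUs.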
1608 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); in rcu_bind_current_to_nocb()
1612 // The ->on_cpu field is available only in CONFIG_SMP=y, so...
1616 return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : ""; in show_rcu_should_be_on_cpu()
1626 * Dump out nocb grace-period kthread state for the specified rcu_data
1631 struct rcu_node *rnp = rdp->mynode; in show_rcu_nocb_gp_state()
1633 pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n", in show_rcu_nocb_gp_state()
1634 rdp->cpu, in show_rcu_nocb_gp_state()
1635 "kK"[!!rdp->nocb_gp_kthread], in show_rcu_nocb_gp_state()
1636 "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)], in show_rcu_nocb_gp_state()
1637 "dD"[!!rdp->nocb_defer_wakeup], in show_rcu_nocb_gp_state()
1638 "tT"[timer_pending(&rdp->nocb_timer)], in show_rcu_nocb_gp_state()
1639 "sS"[!!rdp->nocb_gp_sleep], in show_rcu_nocb_gp_state()
1640 ".W"[swait_active(&rdp->nocb_gp_wq)], in show_rcu_nocb_gp_state()
1641 ".W"[swait_active(&rnp->nocb_gp_wq[0])], in show_rcu_nocb_gp_state()
1642 ".W"[swait_active(&rnp->nocb_gp_wq[1])], in show_rcu_nocb_gp_state()
1643 ".B"[!!rdp->nocb_gp_bypass], in show_rcu_nocb_gp_state()
1644 ".G"[!!rdp->nocb_gp_gp], in show_rcu_nocb_gp_state()
1645 (long)rdp->nocb_gp_seq, in show_rcu_nocb_gp_state()
1646 rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops), in show_rcu_nocb_gp_state()
1647 rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.', in show_rcu_nocb_gp_state()
1648 rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, in show_rcu_nocb_gp_state()
1649 show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread)); in show_rcu_nocb_gp_state()
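A note on the format arguments above: the "kK"[!!expr] construct indexes a two-character string literal, selecting element 0 (lowercase) when the condition is false and element 1 (uppercase) when it is true, so each flag prints as a single status character.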
1658 struct rcu_segcblist *rsclp = &rdp->cblist; in show_rcu_nocb_state()
1662 if (rdp->nocb_gp_rdp == rdp) in show_rcu_nocb_state()
1665 nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp, in show_rcu_nocb_state()
1666 &rdp->nocb_entry_rdp, in show_rcu_nocb_state()
1670 sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]); in show_rcu_nocb_state()
1671 sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]); in show_rcu_nocb_state()
1672 pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", in show_rcu_nocb_state()
1673 rdp->cpu, rdp->nocb_gp_rdp->cpu, in show_rcu_nocb_state()
1674 nocb_next_rdp ? nocb_next_rdp->cpu : -1, in show_rcu_nocb_state()
1675 "kK"[!!rdp->nocb_cb_kthread], in show_rcu_nocb_state()
1676 "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], in show_rcu_nocb_state()
1677 "cC"[!!atomic_read(&rdp->nocb_lock_contended)], in show_rcu_nocb_state()
1678 "lL"[raw_spin_is_locked(&rdp->nocb_lock)], in show_rcu_nocb_state()
1679 "sS"[!!rdp->nocb_cb_sleep], in show_rcu_nocb_state()
1680 ".W"[swait_active(&rdp->nocb_cb_wq)], in show_rcu_nocb_state()
1681 jiffies - rdp->nocb_bypass_first, in show_rcu_nocb_state()
1682 jiffies - rdp->nocb_nobypass_last, in show_rcu_nocb_state()
1683 rdp->nocb_nobypass_count, in show_rcu_nocb_state()
1690 ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)], in show_rcu_nocb_state()
1691 rcu_segcblist_n_cbs(&rdp->cblist), in show_rcu_nocb_state()
1692 rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.', in show_rcu_nocb_state()
1693 rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1, in show_rcu_nocb_state()
1694 show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread)); in show_rcu_nocb_state()
1697 if (rdp->nocb_gp_rdp == rdp) in show_rcu_nocb_state()
1700 waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock); in show_rcu_nocb_state()
1701 wassleep = swait_active(&rdp->nocb_gp_wq); in show_rcu_nocb_state()
1702 if (!rdp->nocb_gp_sleep && !waslocked && !wassleep) in show_rcu_nocb_state()
1705 pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n", in show_rcu_nocb_state()
1707 "dD"[!!rdp->nocb_defer_wakeup], in show_rcu_nocb_state()
1708 "sS"[!!rdp->nocb_gp_sleep], in show_rcu_nocb_state()
1724 /* No ->nocb_lock to acquire. */
1729 /* No ->nocb_lock to release. */
1734 /* No ->nocb_lock to release. */
1741 /* Lockdep check that ->cblist may be safely accessed. */
1797 static void rcu_spawn_cpu_nocb_kthread(int cpu) in rcu_spawn_cpu_nocb_kthread() argument