Lines Matching +full:i +full:- +full:leak +full:- +full:current
1 // SPDX-License-Identifier: GPL-2.0+
3 * Sleepable Read-Copy Update mechanism for mutual exclusion.
11 * For detailed explanation of Read-Copy Update mechanism see -
33 /* Holdoff in nanoseconds for auto-expediting. */
38 /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
63 /* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
71 /* Early-boot callback-management, so early that no lock is required! */
117 * Initialize SRCU per-CPU data. Note that statically allocated
120 * parameter is set, don't initialize ->srcu_ctrs[].srcu_locks and
121 * ->srcu_ctrs[].srcu_unlocks.
129 * Initialize the per-CPU srcu_data array, which feeds into the in init_srcu_struct_data()
133 sdp = per_cpu_ptr(ssp->sda, cpu); in init_srcu_struct_data()
135 rcu_segcblist_init(&sdp->srcu_cblist); in init_srcu_struct_data()
136 sdp->srcu_cblist_invoking = false; in init_srcu_struct_data()
137 sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq; in init_srcu_struct_data()
138 sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq; in init_srcu_struct_data()
139 sdp->srcu_barrier_head.next = &sdp->srcu_barrier_head; in init_srcu_struct_data()
140 sdp->mynode = NULL; in init_srcu_struct_data()
141 sdp->cpu = cpu; in init_srcu_struct_data()
142 INIT_WORK(&sdp->work, srcu_invoke_callbacks); in init_srcu_struct_data()
143 timer_setup(&sdp->delay_work, srcu_delay_timer, 0); in init_srcu_struct_data()
144 sdp->ssp = ssp; in init_srcu_struct_data()
167 int i; in init_srcu_struct_nodes() local
176 ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags); in init_srcu_struct_nodes()
177 if (!ssp->srcu_sup->node) in init_srcu_struct_nodes()
181 ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0]; in init_srcu_struct_nodes()
182 for (i = 1; i < rcu_num_lvls; i++) in init_srcu_struct_nodes()
183 ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1]; in init_srcu_struct_nodes()
189 BUILD_BUG_ON(ARRAY_SIZE(snp->srcu_have_cbs) != in init_srcu_struct_nodes()
190 ARRAY_SIZE(snp->srcu_data_have_cbs)); in init_srcu_struct_nodes()
191 for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) { in init_srcu_struct_nodes()
192 snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ; in init_srcu_struct_nodes()
193 snp->srcu_data_have_cbs[i] = 0; in init_srcu_struct_nodes()
195 snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ; in init_srcu_struct_nodes()
196 snp->grplo = -1; in init_srcu_struct_nodes()
197 snp->grphi = -1; in init_srcu_struct_nodes()
198 if (snp == &ssp->srcu_sup->node[0]) { in init_srcu_struct_nodes()
200 snp->srcu_parent = NULL; in init_srcu_struct_nodes()
204 /* Non-root node. */ in init_srcu_struct_nodes()
205 if (snp == ssp->srcu_sup->level[level + 1]) in init_srcu_struct_nodes()
207 snp->srcu_parent = ssp->srcu_sup->level[level - 1] + in init_srcu_struct_nodes()
208 (snp - ssp->srcu_sup->level[level]) / in init_srcu_struct_nodes()
209 levelspread[level - 1]; in init_srcu_struct_nodes()
213 * Initialize the per-CPU srcu_data array, which feeds into the in init_srcu_struct_nodes()
216 level = rcu_num_lvls - 1; in init_srcu_struct_nodes()
217 snp_first = ssp->srcu_sup->level[level]; in init_srcu_struct_nodes()
219 sdp = per_cpu_ptr(ssp->sda, cpu); in init_srcu_struct_nodes()
220 sdp->mynode = &snp_first[cpu / levelspread[level]]; in init_srcu_struct_nodes()
221 for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { in init_srcu_struct_nodes()
222 if (snp->grplo < 0) in init_srcu_struct_nodes()
223 snp->grplo = cpu; in init_srcu_struct_nodes()
224 snp->grphi = cpu; in init_srcu_struct_nodes()
226 sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo); in init_srcu_struct_nodes()
228 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER); in init_srcu_struct_nodes()
233 * Initialize non-compile-time initialized fields, including the
235 * tells us that ->sda has already been wired up to srcu_data.
240 ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL); in init_srcu_struct_fields()
241 if (!ssp->srcu_sup) in init_srcu_struct_fields()
242 return -ENOMEM; in init_srcu_struct_fields()
244 spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock)); in init_srcu_struct_fields()
245 ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL; in init_srcu_struct_fields()
246 ssp->srcu_sup->node = NULL; in init_srcu_struct_fields()
247 mutex_init(&ssp->srcu_sup->srcu_cb_mutex); in init_srcu_struct_fields()
248 mutex_init(&ssp->srcu_sup->srcu_gp_mutex); in init_srcu_struct_fields()
249 ssp->srcu_sup->srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL; in init_srcu_struct_fields()
250 ssp->srcu_sup->srcu_barrier_seq = 0; in init_srcu_struct_fields()
251 mutex_init(&ssp->srcu_sup->srcu_barrier_mutex); in init_srcu_struct_fields()
252 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0); in init_srcu_struct_fields()
253 INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu); in init_srcu_struct_fields()
254 ssp->srcu_sup->sda_is_static = is_static; in init_srcu_struct_fields()
256 ssp->sda = alloc_percpu(struct srcu_data); in init_srcu_struct_fields()
257 ssp->srcu_ctrp = &ssp->sda->srcu_ctrs[0]; in init_srcu_struct_fields()
259 if (!ssp->sda) in init_srcu_struct_fields()
262 ssp->srcu_sup->srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL; in init_srcu_struct_fields()
263 ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns(); in init_srcu_struct_fields()
264 if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) { in init_srcu_struct_fields()
267 WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG); in init_srcu_struct_fields()
269 ssp->srcu_sup->srcu_ssp = ssp; in init_srcu_struct_fields()
270 smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, in init_srcu_struct_fields()
276 free_percpu(ssp->sda); in init_srcu_struct_fields()
277 ssp->sda = NULL; in init_srcu_struct_fields()
281 kfree(ssp->srcu_sup); in init_srcu_struct_fields()
282 ssp->srcu_sup = NULL; in init_srcu_struct_fields()
284 return -ENOMEM; in init_srcu_struct_fields()
292 /* Don't re-initialize a lock while it is held. */ in __init_srcu_struct()
294 lockdep_init_map(&ssp->dep_map, name, key, 0); in __init_srcu_struct()
302 * init_srcu_struct - initialize a sleep-RCU structure
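/*
 * Editor's note: a minimal, hypothetical sketch of the dynamic-allocation
 * path documented above; struct my_ctx and my_ctx_create() are illustrative
 * names, not part of this file.
 */
#include <linux/srcu.h>
#include <linux/slab.h>

struct my_ctx {
        struct srcu_struct srcu;        /* dynamically initialized SRCU domain */
        int payload;
};

static struct my_ctx *my_ctx_create(void)
{
        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return NULL;
        if (init_srcu_struct(&ctx->srcu)) {     /* returns -ENOMEM on failure */
                kfree(ctx);
                return NULL;
        }
        return ctx;
}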
322 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock)); in __srcu_transition_to_big()
323 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC); in __srcu_transition_to_big()
 333  	/* Double-checked locking on ->srcu_size_state. */ in srcu_transition_to_big()
334 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) in srcu_transition_to_big()
336 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags); in srcu_transition_to_big()
337 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) { in srcu_transition_to_big()
338 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in srcu_transition_to_big()
342 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in srcu_transition_to_big()
346 * Check to see if the just-encountered contention event justifies
353 if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state) in spin_lock_irqsave_check_contention()
356 if (ssp->srcu_sup->srcu_size_jiffies != j) { in spin_lock_irqsave_check_contention()
357 ssp->srcu_sup->srcu_size_jiffies = j; in spin_lock_irqsave_check_contention()
358 ssp->srcu_sup->srcu_n_lock_retries = 0; in spin_lock_irqsave_check_contention()
360 if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim) in spin_lock_irqsave_check_contention()
366 * Acquire the specified srcu_data structure's ->lock, but check for
373 struct srcu_struct *ssp = sdp->ssp; in spin_lock_irqsave_sdp_contention()
377 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags); in spin_lock_irqsave_sdp_contention()
379 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags); in spin_lock_irqsave_sdp_contention()
384 * Acquire the specified srcu_struct structure's ->lock, but check for
391 if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags)) in spin_lock_irqsave_ssp_contention()
393 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags); in spin_lock_irqsave_ssp_contention()
398 * First-use initialization of statically allocated srcu_struct
400 * done with compile-time initialization, so this check is added
 401  * to each update-side SRCU primitive. Use ->srcu_sup->lock, which -is-
402 * compile-time initialized, to resolve races involving multiple
403 * CPUs trying to garner first-use privileges.
410 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/ in check_init_srcu_struct()
412 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags); in check_init_srcu_struct()
413 if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) { in check_init_srcu_struct()
414 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in check_init_srcu_struct()
418 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in check_init_srcu_struct()
422 * Is the current or any upcoming grace period to be expedited?
426 struct srcu_usage *sup = ssp->srcu_sup; in srcu_gp_is_expedited()
428 return ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)); in srcu_gp_is_expedited()
432 * Computes approximate total of the readers' ->srcu_ctrs[].srcu_locks
433 * values for the rank of per-CPU counters specified by idx, and returns
444 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_lock_idx()
446 sum += atomic_long_read(&sdp->srcu_ctrs[idx].srcu_locks); in srcu_readers_lock_idx()
448 mask = mask | READ_ONCE(sdp->srcu_reader_flavor); in srcu_readers_lock_idx()
450 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask - 1)), in srcu_readers_lock_idx()
458 * Returns approximate total of the readers' ->srcu_ctrs[].srcu_unlocks
459 * values for the rank of per-CPU counters specified by idx.
468 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_unlock_idx()
470 sum += atomic_long_read(&sdp->srcu_ctrs[idx].srcu_unlocks); in srcu_readers_unlock_idx()
471 mask = mask | READ_ONCE(sdp->srcu_reader_flavor); in srcu_readers_unlock_idx()
473 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask - 1)), in srcu_readers_unlock_idx()
480 * Return true if the number of pre-existing readers is determined to
512 * the current ->srcu_ctrp but not yet have incremented its CPU's in srcu_readers_active_idx_check()
513 * ->srcu_ctrs[idx].srcu_locks counter. In fact, it is possible in srcu_readers_active_idx_check()
515 * ->srcu_ctrp and incrementing ->srcu_ctrs[idx].srcu_locks. And in srcu_readers_active_idx_check()
521 * point in the code for a long time. That now-preempted in srcu_readers_active_idx_check()
522 * updater has already flipped ->srcu_ctrp (possibly during in srcu_readers_active_idx_check()
525 * the ->srcu_ctrs[idx].srcu_unlocks counters. How many times in srcu_readers_active_idx_check()
527 * old ->srcu_ctrp value's ->srcu_ctrs[idx].srcu_locks counter, in srcu_readers_active_idx_check()
531 * the old value of ->srcu_ctrp and is just about to use that in srcu_readers_active_idx_check()
532 * value to index its increment of ->srcu_ctrs[idx].srcu_locks. in srcu_readers_active_idx_check()
533 * But as soon as it leaves that SRCU read-side critical section, in srcu_readers_active_idx_check()
534 * it will increment ->srcu_ctrs[idx].srcu_unlocks, which must in srcu_readers_active_idx_check()
537 * ->srcu_ctrp, that task will be guaranteed to get the new index. in srcu_readers_active_idx_check()
538 * Except that the increment of ->srcu_ctrs[idx].srcu_unlocks in srcu_readers_active_idx_check()
540 * from ->srcu_ctrp in __srcu_read_lock() is before the smp_mb(). in srcu_readers_active_idx_check()
541 * Thus, that task might not see the new value of ->srcu_ctrp until in srcu_readers_active_idx_check()
542 * the -second- __srcu_read_lock(), which in turn means that this in srcu_readers_active_idx_check()
543 * task might well increment ->srcu_ctrs[idx].srcu_locks for the in srcu_readers_active_idx_check()
544 * old value of ->srcu_ctrp twice, not just once. in srcu_readers_active_idx_check()
551 * of ->srcu_ctrs[idx].srcu_locks for the old index, where Nc in srcu_readers_active_idx_check()
553 * the task_struct structure limits the value of Nt and current in srcu_readers_active_idx_check()
561 * comfortably beyond excessive. Especially on 64-bit systems, in srcu_readers_active_idx_check()
 569  * srcu_readers_active - returns true if there are readers, and false
583 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_readers_active()
585 sum += atomic_long_read(&sdp->srcu_ctrs[0].srcu_locks); in srcu_readers_active()
586 sum += atomic_long_read(&sdp->srcu_ctrs[1].srcu_locks); in srcu_readers_active()
587 sum -= atomic_long_read(&sdp->srcu_ctrs[0].srcu_unlocks); in srcu_readers_active()
588 sum -= atomic_long_read(&sdp->srcu_ctrs[1].srcu_unlocks); in srcu_readers_active()
597 * their read-side critical sections. If there are still some readers
599 * The blocking time is increased as the grace-period age increases,
610 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO 3UL // Lowmark on default per-GP-phase
611 // no-delay instances.
612 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI 1000UL // Highmark on default per-GP-phase
613 // no-delay instances.
 618  // per-GP-phase no-delay instances adjusted to allow non-sleeping poll up to
624 // Maximum per-GP-phase consecutive no-delay instances.
633 // Maximum consecutive no-delay instances.
641 * Return grace-period delay, zero if there are expedited grace
649 struct srcu_usage *sup = ssp->srcu_sup; in srcu_get_delay()
651 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock)); in srcu_get_delay()
654 if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) { in srcu_get_delay()
655 j = jiffies - 1; in srcu_get_delay()
656 gpstart = READ_ONCE(sup->srcu_gp_start); in srcu_get_delay()
658 jbase += j - gpstart; in srcu_get_delay()
660 ASSERT_EXCLUSIVE_WRITER(sup->srcu_n_exp_nodelay); in srcu_get_delay()
661 WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1); in srcu_get_delay()
662 if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase) in srcu_get_delay()
670 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
674 * was initialized via init_srcu_struct(), else you leak memory.
680 struct srcu_usage *sup = ssp->srcu_sup; in cleanup_srcu_struct()
682 spin_lock_irq_rcu_node(ssp->srcu_sup); in cleanup_srcu_struct()
684 spin_unlock_irq_rcu_node(ssp->srcu_sup); in cleanup_srcu_struct()
686 return; /* Just leak it! */ in cleanup_srcu_struct()
688 return; /* Just leak it! */ in cleanup_srcu_struct()
689 flush_delayed_work(&sup->work); in cleanup_srcu_struct()
691 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); in cleanup_srcu_struct()
693 timer_delete_sync(&sdp->delay_work); in cleanup_srcu_struct()
694 flush_work(&sdp->work); in cleanup_srcu_struct()
695 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist))) in cleanup_srcu_struct()
696 return; /* Forgot srcu_barrier(), so just leak it! */ in cleanup_srcu_struct()
698 if (WARN_ON(rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)) != SRCU_STATE_IDLE) || in cleanup_srcu_struct()
699 WARN_ON(rcu_seq_current(&sup->srcu_gp_seq) != sup->srcu_gp_seq_needed) || in cleanup_srcu_struct()
702 __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)), in cleanup_srcu_struct()
703 rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed); in cleanup_srcu_struct()
709 kfree(sup->node); in cleanup_srcu_struct()
710 sup->node = NULL; in cleanup_srcu_struct()
711 sup->srcu_size_state = SRCU_SIZE_SMALL; in cleanup_srcu_struct()
712 if (!sup->sda_is_static) { in cleanup_srcu_struct()
713 free_percpu(ssp->sda); in cleanup_srcu_struct()
714 ssp->sda = NULL; in cleanup_srcu_struct()
716 ssp->srcu_sup = NULL; in cleanup_srcu_struct()
 729  	/* NMI-unsafe use in NMI is a bad sign, as are multi-bit read_flavor values. */ in __srcu_check_read_flavor()
731 WARN_ON_ONCE(read_flavor & (read_flavor - 1)); in __srcu_check_read_flavor()
733 sdp = raw_cpu_ptr(ssp->sda); in __srcu_check_read_flavor()
734 old_read_flavor = READ_ONCE(sdp->srcu_reader_flavor); in __srcu_check_read_flavor()
736 old_read_flavor = cmpxchg(&sdp->srcu_reader_flavor, 0, read_flavor); in __srcu_check_read_flavor()
740 …WARN_ONCE(old_read_flavor != read_flavor, "CPU %d old state %d new state %d\n", sdp->cpu, old_read… in __srcu_check_read_flavor()
745 * Counts the new reader in the appropriate per-CPU element of the
747 * Returns a guaranteed non-negative index that must be passed to the
752 struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp); in __srcu_read_lock()
754 this_cpu_inc(scp->srcu_locks.counter); in __srcu_read_lock()
761 * Removes the count for the old reader from the appropriate per-CPU
768 this_cpu_inc(__srcu_ctr_to_ptr(ssp, idx)->srcu_unlocks.counter); in __srcu_read_unlock()
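/*
 * Editor's note: a hedged sketch of the public reader-side API whose fast
 * paths (__srcu_read_lock()/__srcu_read_unlock()) appear above; my_srcu,
 * struct my_item, and gp are hypothetical names, not from this file.
 */
#include <linux/srcu.h>
#include <linux/rcupdate.h>

DEFINE_STATIC_SRCU(my_srcu);

struct my_item {
        struct rcu_head rh;
        int payload;
};
static struct my_item __rcu *gp;

static int my_item_read(void)
{
        int idx, val = -1;
        struct my_item *p;

        idx = srcu_read_lock(&my_srcu);         /* index must be passed back to unlock */
        p = srcu_dereference(gp, &my_srcu);     /* fetch SRCU-protected pointer */
        if (p)
                val = p->payload;               /* readers are allowed to sleep here */
        srcu_read_unlock(&my_srcu, idx);
        return val;
}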
775 * Counts the new reader in the appropriate per-CPU element of the
776 * srcu_struct, but in an NMI-safe manner using RMW atomics.
781 struct srcu_ctr __percpu *scpp = READ_ONCE(ssp->srcu_ctrp); in __srcu_read_lock_nmisafe()
784 atomic_long_inc(&scp->srcu_locks); in __srcu_read_lock_nmisafe()
791 * Removes the count for the old reader from the appropriate per-CPU
798 atomic_long_inc(&raw_cpu_ptr(__srcu_ctr_to_ptr(ssp, idx))->srcu_unlocks); in __srcu_read_unlock_nmisafe()
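/*
 * Editor's note: an illustrative use of the NMI-safe reader flavor built on
 * the atomics above; my_nmisafe_srcu and the handler are hypothetical.
 * Mixing reader flavors on one srcu_struct triggers the WARN_ONCE() in
 * __srcu_check_read_flavor() earlier in this listing.
 */
DEFINE_STATIC_SRCU(my_nmisafe_srcu);

static void my_nmi_handler(void)
{
        int idx;

        idx = srcu_read_lock_nmisafe(&my_nmisafe_srcu);
        /* ... access SRCU-protected state, even from NMI context ... */
        srcu_read_unlock_nmisafe(&my_nmisafe_srcu, idx);
}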
811 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock)); in srcu_gp_start()
812 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)); in srcu_gp_start()
813 WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies); in srcu_gp_start()
814 WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0); in srcu_gp_start()
815 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ in srcu_gp_start()
816 rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq); in srcu_gp_start()
817 state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq); in srcu_gp_start()
826 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); in srcu_delay_timer()
833 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); in srcu_queue_delayed_work_on()
837 timer_reduce(&sdp->delay_work, jiffies + delay); in srcu_queue_delayed_work_on()
852 * just-completed grace period, the one corresponding to idx. If possible,
860 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { in srcu_schedule_cbs_snp()
861 if (!(mask & (1UL << (cpu - snp->grplo)))) in srcu_schedule_cbs_snp()
863 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay); in srcu_schedule_cbs_snp()
871 * The ->srcu_cb_mutex acquisition does not protect any data, but
873 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
889 struct srcu_usage *sup = ssp->srcu_sup; in srcu_gp_end()
892 mutex_lock(&sup->srcu_cb_mutex); in srcu_gp_end()
894 /* End the current grace period. */ in srcu_gp_end()
896 idx = rcu_seq_state(sup->srcu_gp_seq); in srcu_gp_end()
901 WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns()); in srcu_gp_end()
902 rcu_seq_end(&sup->srcu_gp_seq); in srcu_gp_end()
903 gpseq = rcu_seq_current(&sup->srcu_gp_seq); in srcu_gp_end()
904 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, gpseq)) in srcu_gp_end()
905 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, gpseq); in srcu_gp_end()
907 mutex_unlock(&sup->srcu_gp_mutex); in srcu_gp_end()
911 ss_state = smp_load_acquire(&sup->srcu_size_state); in srcu_gp_end()
913 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()), in srcu_gp_end()
916 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); in srcu_gp_end()
920 last_lvl = snp >= sup->level[rcu_num_lvls - 1]; in srcu_gp_end()
922 cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq; in srcu_gp_end()
923 snp->srcu_have_cbs[idx] = gpseq; in srcu_gp_end()
924 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); in srcu_gp_end()
925 sgsne = snp->srcu_gp_seq_needed_exp; in srcu_gp_end()
927 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq); in srcu_gp_end()
931 mask = snp->srcu_data_have_cbs[idx]; in srcu_gp_end()
932 snp->srcu_data_have_cbs[idx] = 0; in srcu_gp_end()
942 sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_gp_end()
944 if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100)) in srcu_gp_end()
945 sdp->srcu_gp_seq_needed = gpseq; in srcu_gp_end()
946 if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100)) in srcu_gp_end()
947 sdp->srcu_gp_seq_needed_exp = gpseq; in srcu_gp_end()
952 mutex_unlock(&sup->srcu_cb_mutex); in srcu_gp_end()
956 gpseq = rcu_seq_current(&sup->srcu_gp_seq); in srcu_gp_end()
958 ULONG_CMP_LT(gpseq, sup->srcu_gp_seq_needed)) { in srcu_gp_end()
971 smp_store_release(&sup->srcu_size_state, ss_state + 1); in srcu_gp_end()
976 * Funnel-locking scheme to scalably mediate many concurrent expedited
977 * grace-period requests. This function is invoked for the first known
989 for (; snp != NULL; snp = snp->srcu_parent) { in srcu_funnel_exp_start()
990 sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp); in srcu_funnel_exp_start()
991 if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) || in srcu_funnel_exp_start()
995 sgsne = snp->srcu_gp_seq_needed_exp; in srcu_funnel_exp_start()
1000 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); in srcu_funnel_exp_start()
1004 if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s)) in srcu_funnel_exp_start()
1005 WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s); in srcu_funnel_exp_start()
1006 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags); in srcu_funnel_exp_start()
1010 * Funnel-locking scheme to scalably mediate many concurrent grace-period
1012 * period s. Losers must either ensure that their desired grace-period
1026 int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs); in srcu_funnel_gp_start()
1031 struct srcu_usage *sup = ssp->srcu_sup; in srcu_funnel_gp_start()
1034 if (smp_load_acquire(&sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER) in srcu_funnel_gp_start()
1037 snp_leaf = sdp->mynode; in srcu_funnel_gp_start()
1041 for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) { in srcu_funnel_gp_start()
1042 if (WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && snp != snp_leaf) in srcu_funnel_gp_start()
1045 snp_seq = snp->srcu_have_cbs[idx]; in srcu_funnel_gp_start()
1048 snp->srcu_data_have_cbs[idx] |= sdp->grpmask; in srcu_funnel_gp_start()
1058 snp->srcu_have_cbs[idx] = s; in srcu_funnel_gp_start()
1060 snp->srcu_data_have_cbs[idx] |= sdp->grpmask; in srcu_funnel_gp_start()
1061 sgsne = snp->srcu_gp_seq_needed_exp; in srcu_funnel_gp_start()
1063 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); in srcu_funnel_gp_start()
1069 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed, s)) { in srcu_funnel_gp_start()
1074 smp_store_release(&sup->srcu_gp_seq_needed, s); /*^^^*/ in srcu_funnel_gp_start()
1076 if (!do_norm && ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, s)) in srcu_funnel_gp_start()
1077 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, s); in srcu_funnel_gp_start()
1080 if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && in srcu_funnel_gp_start()
1081 rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) { in srcu_funnel_gp_start()
1090 queue_delayed_work(rcu_gp_wq, &sup->work, in srcu_funnel_gp_start()
1092 else if (list_empty(&sup->work.work.entry)) in srcu_funnel_gp_start()
1093 list_add(&sup->work.work.entry, &srcu_boot_list); in srcu_funnel_gp_start()
1101 * The caller must ensure that ->srcu_ctrp is not changed while checking.
1107 spin_lock_irq_rcu_node(ssp->srcu_sup); in try_check_zero()
1109 spin_unlock_irq_rcu_node(ssp->srcu_sup); in try_check_zero()
1114 if ((--trycount + curdelay) <= 0) in try_check_zero()
1121  * Flip the ->srcu_ctrp pointer so that future SRCU readers will
1122  * use the other rank of the ->srcu_ctrs[] counters. This allows
1123 * us to wait for pre-existing readers in a starvation-free manner.
1128 * Because the flip of ->srcu_ctrp is executed only if the in srcu_flip()
1130 * the ->srcu_ctrs[].srcu_unlocks and ->srcu_ctrs[].srcu_locks sums in srcu_flip()
1136 * of ->srcu_ctrp from before the previous call to srcu_flip(), in srcu_flip()
1140 * to be new) value of ->srcu_ctrp. in srcu_flip()
1142 * This sum-equality check and ordering also ensures that if in srcu_flip()
1144 * ->srcu_ctrp, this updater's earlier scans cannot have seen in srcu_flip()
1159 WRITE_ONCE(ssp->srcu_ctrp, in srcu_flip()
1160 &ssp->sda->srcu_ctrs[!(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0])]); in srcu_flip()
1178 * Note that it is OK for several current from-idle requests for a new
1182 * Note also that if any CPU (including the current one) is still invoking
1189 * This function is also subject to counter-wrap errors, but let's face
1194 * of a needlessly non-expedited grace period is similarly negligible.
1206 if (this_cpu_read(ssp->sda->srcu_reader_flavor) & SRCU_READ_FLAVOR_SLOWGP) in srcu_should_expedite()
1209 sdp = raw_cpu_ptr(ssp->sda); in srcu_should_expedite()
1211 if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { in srcu_should_expedite()
1225 tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end); in srcu_should_expedite()
1231 curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq); in srcu_should_expedite()
1232 smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ in srcu_should_expedite()
1233 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed))) in srcu_should_expedite()
1235 smp_mb(); /* Order ->srcu_gp_seq with prior access. */ in srcu_should_expedite()
1236 if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)) in srcu_should_expedite()
1242 * SRCU callback function to leak a callback.
1249 * Start an SRCU grace period, and also queue the callback if non-NULL.
1266 * SRCU read-side critical section so that the grace-period in srcu_gp_start_if_needed()
1270 ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state); in srcu_gp_start_if_needed()
1272 sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id()); in srcu_gp_start_if_needed()
1274 sdp = raw_cpu_ptr(ssp->sda); in srcu_gp_start_if_needed()
1277 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); in srcu_gp_start_if_needed()
1280 * reading the current gp_seq that is used for advancing. This is in srcu_gp_start_if_needed()
1286 * In such a scenario, an 'acceleration leak' can occur, where new in srcu_gp_start_if_needed()
1314 s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq); in srcu_gp_start_if_needed()
1316 rcu_segcblist_advance(&sdp->srcu_cblist, in srcu_gp_start_if_needed()
1317 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)); in srcu_gp_start_if_needed()
1319 * Acceleration can never fail because the base current gp_seq in srcu_gp_start_if_needed()
1325 WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s)); in srcu_gp_start_if_needed()
1327 if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { in srcu_gp_start_if_needed()
1328 sdp->srcu_gp_seq_needed = s; in srcu_gp_start_if_needed()
1331 if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { in srcu_gp_start_if_needed()
1332 sdp->srcu_gp_seq_needed_exp = s; in srcu_gp_start_if_needed()
1341 sdp_mynode = sdp->mynode; in srcu_gp_start_if_needed()
1353 * the current CPU and the specified srcu_struct structure, initiating
1354 * grace-period processing if it is not already running.
1357  * all pre-existing SRCU read-side critical sections. On systems with
1360 * its last corresponding SRCU read-side critical section whose beginning
1362 * an SRCU read-side critical section that continues beyond the start of
1364 * but before the beginning of that SRCU read-side critical section.
1383 /* Probable double call_srcu(), so leak the callback. */ in __call_srcu()
1384 WRITE_ONCE(rhp->func, srcu_leak_callback); in __call_srcu()
1388 rhp->func = func; in __call_srcu()
1393 * call_srcu() - Queue a callback for invocation after an SRCU grace period
1399 * grace period elapses, in other words after all pre-existing SRCU
1400 * read-side critical sections have completed. However, the callback
1401 * function might well execute concurrently with other SRCU read-side
1403 * read-side critical sections are delimited by srcu_read_lock() and
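/*
 * Editor's note: a minimal call_srcu() pattern matching the kerneldoc above,
 * reusing the hypothetical my_srcu domain and struct my_item from the
 * reader-side sketch earlier in this listing.
 */
static void my_item_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_item, rh));
}

static void my_item_retire(struct my_item *p)
{
        /* Freed only after all pre-existing SRCU readers have finished. */
        call_srcu(&my_srcu, &p->rh, my_item_free_cb);
}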
1427 srcu_lock_sync(&ssp->dep_map); in __synchronize_srcu()
1433 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); in __synchronize_srcu()
1449 * because the current CPU might have been totally uninvolved with in __synchronize_srcu()
1456 * synchronize_srcu_expedited - Brute-force SRCU grace period
1463 * memory-ordering properties as does synchronize_srcu().
1472 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
1477 * the index=!(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0]) to drain to zero
1478 * at first, and then flip the ->srcu_ctrp and wait for the count of the
1484 * SRCU read-side critical section; doing so will result in deadlock.
1486 * srcu_struct from some other srcu_struct's read-side critical section,
1489 * There are memory-ordering constraints implied by synchronize_srcu().
1492 * the end of its last corresponding SRCU read-side critical section
1494 * each CPU having an SRCU read-side critical section that extends beyond
1497 * the beginning of that SRCU read-side critical section. Note that these
1507 * Of course, these memory-ordering guarantees apply only when
1511 * Implementation of these memory-ordering guarantees is similar to
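/*
 * Editor's note: the classic remove-then-wait update pattern for the API
 * documented above, again using the hypothetical my_srcu/gp names; the
 * spinlock serializing updaters is likewise illustrative.
 */
static DEFINE_SPINLOCK(my_update_lock);

static void my_item_replace(struct my_item *newp)
{
        struct my_item *oldp;

        spin_lock(&my_update_lock);
        oldp = rcu_dereference_protected(gp, lockdep_is_held(&my_update_lock));
        rcu_assign_pointer(gp, newp);           /* publish the new version */
        spin_unlock(&my_update_lock);

        synchronize_srcu(&my_srcu);             /* wait out pre-existing readers */
        kfree(oldp);                            /* now safe to reclaim */
}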
1530 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
1541 // Any prior manipulation of SRCU-protected data must happen in get_state_synchronize_srcu()
1542 // before the load from ->srcu_gp_seq. in get_state_synchronize_srcu()
1544 return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq); in get_state_synchronize_srcu()
1549 * start_poll_synchronize_srcu - Provide cookie and start grace period
1565 * poll_state_synchronize_srcu - Has cookie's grace period ended?
1575 * This is more pronounced on 32-bit systems where cookies are 32 bits,
1577 * 25-microsecond expedited SRCU grace periods. However, a more likely
1579 * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit
1585 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
1592 !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie)) in poll_state_synchronize_srcu()
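/*
 * Editor's note: a hedged sketch of the polling interface documented above
 * (get_state/start_poll/poll_state), for contexts that cannot block in
 * synchronize_srcu(); my_srcu and the cookie bookkeeping are illustrative.
 */
static unsigned long my_gp_cookie;

static void my_defer_reclaim(void)
{
        /* Take a cookie and make sure the needed grace period is underway. */
        my_gp_cookie = start_poll_synchronize_srcu(&my_srcu);
}

static bool my_try_reclaim(void)
{
        /* True once the grace period covering the cookie has completed. */
        if (!poll_state_synchronize_srcu(&my_srcu, my_gp_cookie))
                return false;
        /* ... reclaim objects retired before my_defer_reclaim() ... */
        return true;
}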
1609 rhp->next = rhp; // Mark the callback as having been invoked. in srcu_barrier_cb()
1611 ssp = sdp->ssp; in srcu_barrier_cb()
1612 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt)) in srcu_barrier_cb()
1613 complete(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier_cb()
1618  * structure's ->cblist, but only if that ->cblist already has at least one
1627 atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt); in srcu_barrier_one_cpu()
1628 sdp->srcu_barrier_head.func = srcu_barrier_cb; in srcu_barrier_one_cpu()
1629 debug_rcu_head_queue(&sdp->srcu_barrier_head); in srcu_barrier_one_cpu()
1630 if (!rcu_segcblist_entrain(&sdp->srcu_cblist, in srcu_barrier_one_cpu()
1631 &sdp->srcu_barrier_head)) { in srcu_barrier_one_cpu()
1632 debug_rcu_head_unqueue(&sdp->srcu_barrier_head); in srcu_barrier_one_cpu()
1633 atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt); in srcu_barrier_one_cpu()
1639 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1640 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1646 unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq); in srcu_barrier()
1649 mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex); in srcu_barrier()
1650 if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) { in srcu_barrier()
1652 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex); in srcu_barrier()
1655 rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq); in srcu_barrier()
1656 init_completion(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier()
1659 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1); in srcu_barrier()
1662 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER) in srcu_barrier()
1663 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id())); in srcu_barrier()
1666 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu)); in srcu_barrier()
1670 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt)) in srcu_barrier()
1671 complete(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier()
1672 wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion); in srcu_barrier()
1674 rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq); in srcu_barrier()
1675 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex); in srcu_barrier()
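/*
 * Editor's note: an illustrative teardown sequence for the dynamically
 * initialized my_ctx from the init_srcu_struct() sketch earlier in this
 * listing. Without srcu_barrier(), cleanup_srcu_struct() would warn and
 * leak, as its comments above note.
 */
static void my_ctx_destroy(struct my_ctx *ctx)
{
        srcu_barrier(&ctx->srcu);       /* wait for in-flight call_srcu() callbacks */
        cleanup_srcu_struct(&ctx->srcu);
        kfree(ctx);
}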
1680 * srcu_batches_completed - return batches completed.
1688 return READ_ONCE(ssp->srcu_sup->srcu_gp_seq); in srcu_batches_completed()
1693 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
1701 mutex_lock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1705 * fetching ->srcu_ctrp for their index, at any point in time there in srcu_advance_state()
1710 * The load-acquire ensures that we see the accesses performed in srcu_advance_state()
1713 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */ in srcu_advance_state()
1715 spin_lock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1716 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) { in srcu_advance_state()
1717 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)); in srcu_advance_state()
1718 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1719 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1722 idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)); in srcu_advance_state()
1725 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1727 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1732 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) { in srcu_advance_state()
1733 idx = !(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0]); in srcu_advance_state()
1735 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1739 spin_lock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1740 rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2); in srcu_advance_state()
1741 ssp->srcu_sup->srcu_n_exp_nodelay = 0; in srcu_advance_state()
1742 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_advance_state()
1745 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) { in srcu_advance_state()
1748 * SRCU read-side critical sections are normally short, in srcu_advance_state()
1751 idx = !(ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0]); in srcu_advance_state()
1753 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex); in srcu_advance_state()
1756 ssp->srcu_sup->srcu_n_exp_nodelay = 0; in srcu_advance_state()
1757 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ in srcu_advance_state()
1778 ssp = sdp->ssp; in srcu_invoke_callbacks()
1781 WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL)); in srcu_invoke_callbacks()
1782 rcu_segcblist_advance(&sdp->srcu_cblist, in srcu_invoke_callbacks()
1783 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)); in srcu_invoke_callbacks()
1785 * Although this function is theoretically re-entrant, concurrent in srcu_invoke_callbacks()
1789 if (sdp->srcu_cblist_invoking || in srcu_invoke_callbacks()
1790 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { in srcu_invoke_callbacks()
1796 sdp->srcu_cblist_invoking = true; in srcu_invoke_callbacks()
1797 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); in srcu_invoke_callbacks()
1805 rhp->func(rhp); in srcu_invoke_callbacks()
1815 rcu_segcblist_add_len(&sdp->srcu_cblist, -len); in srcu_invoke_callbacks()
1816 sdp->srcu_cblist_invoking = false; in srcu_invoke_callbacks()
1817 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); in srcu_invoke_callbacks()
1826 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1832 spin_lock_irq_rcu_node(ssp->srcu_sup); in srcu_reschedule()
1833 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) { in srcu_reschedule()
1834 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) { in srcu_reschedule()
1838 } else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) { in srcu_reschedule()
1842 spin_unlock_irq_rcu_node(ssp->srcu_sup); in srcu_reschedule()
1845 queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay); in srcu_reschedule()
1849 * This is the work-queue function that handles SRCU grace periods.
1859 ssp = sup->srcu_ssp; in process_srcu()
1862 spin_lock_irq_rcu_node(ssp->srcu_sup); in process_srcu()
1864 spin_unlock_irq_rcu_node(ssp->srcu_sup); in process_srcu()
1866 WRITE_ONCE(sup->reschedule_count, 0); in process_srcu()
1869 if (READ_ONCE(sup->reschedule_jiffies) == j) { in process_srcu()
1870 ASSERT_EXCLUSIVE_WRITER(sup->reschedule_count); in process_srcu()
1871 WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1); in process_srcu()
1872 if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay) in process_srcu()
1875 WRITE_ONCE(sup->reschedule_count, 1); in process_srcu()
1876 WRITE_ONCE(sup->reschedule_jiffies, j); in process_srcu()
1886 *gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq); in srcutorture_get_gp_data()
1908 int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state); in srcu_torture_stats_print()
1911 idx = ssp->srcu_ctrp - &ssp->sda->srcu_ctrs[0]; in srcu_torture_stats_print()
1913 ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1; in srcu_torture_stats_print()
1915 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state, in srcu_torture_stats_print()
1917 if (!ssp->sda) { in srcu_torture_stats_print()
1919 pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n"); in srcu_torture_stats_print()
1921 pr_cont(" per-CPU(idx=%d):", idx); in srcu_torture_stats_print()
1928 sdp = per_cpu_ptr(ssp->sda, cpu); in srcu_torture_stats_print()
1929 u0 = data_race(atomic_long_read(&sdp->srcu_ctrs[!idx].srcu_unlocks)); in srcu_torture_stats_print()
1930 u1 = data_race(atomic_long_read(&sdp->srcu_ctrs[idx].srcu_unlocks)); in srcu_torture_stats_print()
1938 l0 = data_race(atomic_long_read(&sdp->srcu_ctrs[!idx].srcu_locks)); in srcu_torture_stats_print()
1939 l1 = data_race(atomic_long_read(&sdp->srcu_ctrs[idx].srcu_locks)); in srcu_torture_stats_print()
1941 c0 = l0 - u0; in srcu_torture_stats_print()
1942 c1 = l1 - u1; in srcu_torture_stats_print()
1945 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); in srcu_torture_stats_print()
1960 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); in srcu_bootup_announce()
1962 pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay); in srcu_bootup_announce()
1964 pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay); in srcu_bootup_announce()
1965 pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase); in srcu_bootup_announce()
1974 /* Decide on srcu_struct-size strategy. */ in srcu_init()
1994 list_del_init(&sup->work.work.entry); in srcu_init()
1996 sup->srcu_size_state == SRCU_SIZE_SMALL) in srcu_init()
1997 sup->srcu_size_state = SRCU_SIZE_ALLOC; in srcu_init()
1998 queue_work(rcu_gp_wq, &sup->work.work); in srcu_init()
2004 /* Initialize any global-scope srcu_struct structures used by this module. */
2007 int i; in srcu_module_coming() local
2009 struct srcu_struct **sspp = mod->srcu_struct_ptrs; in srcu_module_coming()
2011 for (i = 0; i < mod->num_srcu_structs; i++) { in srcu_module_coming()
2013 ssp->sda = alloc_percpu(struct srcu_data); in srcu_module_coming()
2014 if (WARN_ON_ONCE(!ssp->sda)) in srcu_module_coming()
2015 return -ENOMEM; in srcu_module_coming()
2016 ssp->srcu_ctrp = &ssp->sda->srcu_ctrs[0]; in srcu_module_coming()
2021 /* Clean up any global-scope srcu_struct structures used by this module. */
2024 int i; in srcu_module_going() local
2026 struct srcu_struct **sspp = mod->srcu_struct_ptrs; in srcu_module_going()
2028 for (i = 0; i < mod->num_srcu_structs; i++) { in srcu_module_going()
2030 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) && in srcu_module_going()
2031 !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static)) in srcu_module_going()
2034 free_percpu(ssp->sda); in srcu_module_going()
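/*
 * Editor's note: a sketch of the module-scope case that the
 * srcu_module_coming()/srcu_module_going() hooks above manage, with
 * hypothetical names. When built as a module, DEFINE_SRCU() arranges for
 * the srcu_struct to appear in mod->srcu_struct_ptrs, so the hooks allocate
 * and free its per-CPU ->sda; no explicit init_srcu_struct() is needed.
 */
#include <linux/module.h>
#include <linux/srcu.h>

DEFINE_SRCU(my_module_srcu);    /* statically defined, module scope */

static int __init my_mod_init(void)
{
        return 0;
}
module_init(my_mod_init);
MODULE_LICENSE("GPL");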