Lines Matching +full:y +full:- +full:rp

1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update module-based torture test facility
57 /* Bits for ->extendables field, extendables param, and related definitions. */
82 torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
84 torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
88 torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
90 "Use conditional/async full-state expedited GP wait primitives");
92 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
95 torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
96 torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
102 torture_param(int, nreaders, -1, "Number of RCU reader threads");
103 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
108 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
109 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
117 torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
123 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
149 // Mailbox-like structure to check RCU global memory ordering.
158 // Update-side data structure used to check RCU readers.
286 * Stop aggressive CPU-hog tests a bit before the end of the test in order
291 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); in shutdown_time_arrived()
334 list_add_tail(&p->rtort_free, &rcu_torture_freelist); in rcu_torture_free()
425 started = cur_ops->get_gp_seq(); in rcu_read_delay()
430 rtrsp->rt_delay_ms = longdelay_ms; in rcu_read_delay()
431 completed = cur_ops->get_gp_seq(); in rcu_read_delay()
432 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, in rcu_read_delay()
437 rtrsp->rt_delay_us = shortdelay_us; in rcu_read_delay()
442 rtrsp->rt_preempted = true; in rcu_read_delay()
455 rcu_torture_pipe_update_one(struct rcu_torture *rp) in rcu_torture_pipe_update_one() argument
458 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); in rcu_torture_pipe_update_one()
461 WRITE_ONCE(rp->rtort_chkp, NULL); in rcu_torture_pipe_update_one()
462 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). in rcu_torture_pipe_update_one()
464 i = READ_ONCE(rp->rtort_pipe_count); in rcu_torture_pipe_update_one()
468 WRITE_ONCE(rp->rtort_pipe_count, i + 1); in rcu_torture_pipe_update_one()
469 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { in rcu_torture_pipe_update_one()
470 rp->rtort_mbtest = 0; in rcu_torture_pipe_update_one()
477 * Update all callbacks in the pipe. Suitable for synchronous grace-period
483 struct rcu_torture *rp; in rcu_torture_pipe_update() local
487 list_add(&old_rp->rtort_free, &rcu_torture_removed); in rcu_torture_pipe_update()
488 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { in rcu_torture_pipe_update()
489 if (rcu_torture_pipe_update_one(rp)) { in rcu_torture_pipe_update()
490 list_del(&rp->rtort_free); in rcu_torture_pipe_update()
491 rcu_torture_free(rp); in rcu_torture_pipe_update()
499 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); in rcu_torture_cb() local
506 if (rcu_torture_pipe_update_one(rp)) in rcu_torture_cb()
507 rcu_torture_free(rp); in rcu_torture_cb()
509 cur_ops->deferred_free(rp); in rcu_torture_cb()
519 call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb); in rcu_torture_deferred_free()
582 * buggy-RCU error messages.
587 rcu_torture_cb(&p->rtort_rcu); in rcu_busted_torture_deferred_free()
645 /* We want there to be long-running readers, but not all the time. */ in srcu_read_delay()
651 rtrsp->rt_delay_jiffies = longdelay; in srcu_read_delay()
675 static void srcu_torture_deferred_free(struct rcu_torture *rp) in srcu_torture_deferred_free() argument
677 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); in srcu_torture_deferred_free()
805 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
814 torture_sched_setaffinity(current->pid, cpumask_of(cpu)); in synchronize_rcu_trivial()
849 * Definitions for RCU-tasks torture testing.
863 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); in rcu_tasks_torture_deferred_free()
903 * Definitions for rude RCU-tasks torture testing.
908 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); in rcu_tasks_rude_torture_deferred_free()
928 .name = "tasks-rude"
943 * Definitions for tracing RCU-tasks torture testing.
959 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); in rcu_tasks_tracing_torture_deferred_free()
981 .name = "tasks-tracing"
995 if (!cur_ops->gp_diff) in rcutorture_seq_diff()
996 return new - old; in rcutorture_seq_diff()
997 return cur_ops->gp_diff(new, old); in rcutorture_seq_diff()
1001 * RCU torture priority-boost testing. Runs one real-time thread per
1007 static int old_rt_runtime = -1;
1013 * throttled. Only possible if rcutorture is built-in otherwise the in rcu_torture_disable_rt_throttle()
1017 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) in rcu_torture_disable_rt_throttle()
1021 sysctl_sched_rt_runtime = -1; in rcu_torture_disable_rt_throttle()
1026 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) in rcu_torture_enable_rt_throttle()
1030 old_rt_runtime = -1; in rcu_torture_enable_rt_throttle()
1042 unsigned long mininterval = test_boost_duration * HZ - HZ / 2; in rcu_torture_boost_failed()
1044 if (end - *start > mininterval) { in rcu_torture_boost_failed()
1046 smp_mb(); // Time check before grace-period check. in rcu_torture_boost_failed()
1047 if (cur_ops->poll_gp_state(gp_state)) in rcu_torture_boost_failed()
1049 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { in rcu_torture_boost_failed()
1059 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { in rcu_torture_boost_failed()
1060 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", in rcu_torture_boost_failed()
1061 current->rt_priority, gp_state, end - *start); in rcu_torture_boost_failed()
1062 cur_ops->gp_kthread_dbg(); in rcu_torture_boost_failed()
1064 gp_done = cur_ops->poll_gp_state(gp_state); in rcu_torture_boost_failed()
1071 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { in rcu_torture_boost_failed()
1087 /* Set real-time priority. */ in rcu_torture_boost()
1090 /* Each pass through the following loop does one boost-test cycle. */ in rcu_torture_boost()
1101 schedule_timeout_interruptible(oldstarttime - jiffies); in rcu_torture_boost()
1108 // Do one boost-test interval. in rcu_torture_boost()
1112 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) in rcu_torture_boost()
1115 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { in rcu_torture_boost()
1116 gp_state = cur_ops->start_gp_poll(); in rcu_torture_boost()
1125 if (cur_ops->poll_gp_state(gp_state)) in rcu_torture_boost()
1133 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) in rcu_torture_boost()
1171 * RCU torture force-quiescent-state kthread. Repeatedly induces
1192 cur_ops->fqs(); in rcu_torture_fqs()
1194 fqs_burst_remaining -= fqs_holdoff; in rcu_torture_fqs()
1203 // Used by writers to randomly choose from the available grace-period primitives.
1208 * Determine which grace-period primitives are available.
1241 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { in rcu_torture_write_types()
1244 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { in rcu_torture_write_types()
1247 if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) { in rcu_torture_write_types()
1250 } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) { in rcu_torture_write_types()
1253 if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) { in rcu_torture_write_types()
1255 pr_info("%s: Testing conditional full-state GPs.\n", __func__); in rcu_torture_write_types()
1256 } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) { in rcu_torture_write_types()
1259 if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) { in rcu_torture_write_types()
1261 pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__); in rcu_torture_write_types()
1263 (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) { in rcu_torture_write_types()
1266 if (gp_exp1 && cur_ops->exp_sync) { in rcu_torture_write_types()
1269 } else if (gp_exp && !cur_ops->exp_sync) { in rcu_torture_write_types()
1272 if (gp_normal1 && cur_ops->deferred_free) { in rcu_torture_write_types()
1275 } else if (gp_normal && !cur_ops->deferred_free) { in rcu_torture_write_types()
1278 if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state && in rcu_torture_write_types()
1279 cur_ops->start_gp_poll && cur_ops->poll_gp_state) { in rcu_torture_write_types()
1282 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { in rcu_torture_write_types()
1285 if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full in rcu_torture_write_types()
1286 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) { in rcu_torture_write_types()
1288 pr_info("%s: Testing polling full-state GPs.\n", __func__); in rcu_torture_write_types()
1289 } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) { in rcu_torture_write_types()
1292 if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) { in rcu_torture_write_types()
1295 } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) { in rcu_torture_write_types()
1298 if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) { in rcu_torture_write_types()
1300 pr_info("%s: Testing polling full-state expedited GPs.\n", __func__); in rcu_torture_write_types()
1302 (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) { in rcu_torture_write_types()
1305 if (gp_sync1 && cur_ops->sync) { in rcu_torture_write_types()
1308 } else if (gp_sync && !cur_ops->sync) { in rcu_torture_write_types()
1315 * while also testing out the polled APIs. Note well that the single-CPU
1316 * grace-period optimizations must be accounted for.
1326 dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300); in do_rtws_sync()
1327 dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00); in do_rtws_sync()
1331 cookie = cur_ops->get_gp_state(); in do_rtws_sync()
1333 cur_ops->get_gp_state_full(&cookie_full); in do_rtws_sync()
1334 if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full)) in do_rtws_sync()
1337 WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie), in do_rtws_sync()
1340 WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full), in do_rtws_sync()
1368 struct rcu_torture *rp; in rcu_torture_writer() local
1378 torture_type, cur_ops->name); in rcu_torture_writer()
1380 "%s: No update-side primitives.\n", __func__)) { in rcu_torture_writer()
1394 rp = rcu_torture_alloc(); in rcu_torture_writer()
1395 if (rp == NULL) in rcu_torture_writer()
1397 rp->rtort_pipe_count = 0; in rcu_torture_writer()
1403 rp->rtort_mbtest = 1; in rcu_torture_writer()
1404 rcu_assign_pointer(rcu_torture_current, rp); in rcu_torture_writer()
1407 i = old_rp->rtort_pipe_count; in rcu_torture_writer()
1411 WRITE_ONCE(old_rp->rtort_pipe_count, in rcu_torture_writer()
1412 old_rp->rtort_pipe_count + 1); in rcu_torture_writer()
1415 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { in rcu_torture_writer()
1416 idx = cur_ops->readlock(); in rcu_torture_writer()
1417 cookie = cur_ops->get_gp_state(); in rcu_torture_writer()
1418 WARN_ONCE(cur_ops->poll_gp_state(cookie), in rcu_torture_writer()
1419 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", in rcu_torture_writer()
1423 cookie, cur_ops->get_gp_state()); in rcu_torture_writer()
1424 if (cur_ops->get_gp_completed) { in rcu_torture_writer()
1425 cookie = cur_ops->get_gp_completed(); in rcu_torture_writer()
1426 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); in rcu_torture_writer()
1428 cur_ops->readunlock(idx); in rcu_torture_writer()
1430 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) { in rcu_torture_writer()
1431 idx = cur_ops->readlock(); in rcu_torture_writer()
1432 cur_ops->get_gp_state_full(&cookie_full); in rcu_torture_writer()
1433 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), in rcu_torture_writer()
1439 if (cur_ops->get_gp_completed_full) { in rcu_torture_writer()
1440 cur_ops->get_gp_completed_full(&cookie_full); in rcu_torture_writer()
1441 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full)); in rcu_torture_writer()
1443 cur_ops->readunlock(idx); in rcu_torture_writer()
1448 cur_ops->deferred_free(old_rp); in rcu_torture_writer()
1452 do_rtws_sync(&rand, cur_ops->exp_sync); in rcu_torture_writer()
1457 gp_snap = cur_ops->get_gp_state(); in rcu_torture_writer()
1460 cur_ops->cond_sync(gp_snap); in rcu_torture_writer()
1465 gp_snap = cur_ops->get_gp_state_exp(); in rcu_torture_writer()
1468 cur_ops->cond_sync_exp(gp_snap); in rcu_torture_writer()
1473 cur_ops->get_gp_state_full(&gp_snap_full); in rcu_torture_writer()
1476 cur_ops->cond_sync_full(&gp_snap_full); in rcu_torture_writer()
1481 cur_ops->get_gp_state_full(&gp_snap_full); in rcu_torture_writer()
1484 cur_ops->cond_sync_exp_full(&gp_snap_full); in rcu_torture_writer()
1490 ulo[i] = cur_ops->get_comp_state(); in rcu_torture_writer()
1491 gp_snap = cur_ops->start_gp_poll(); in rcu_torture_writer()
1493 while (!cur_ops->poll_gp_state(gp_snap)) { in rcu_torture_writer()
1494 gp_snap1 = cur_ops->get_gp_state(); in rcu_torture_writer()
1496 if (cur_ops->poll_gp_state(ulo[i]) || in rcu_torture_writer()
1497 cur_ops->same_gp_state(ulo[i], gp_snap1)) { in rcu_torture_writer()
1510 cur_ops->get_comp_state_full(&rgo[i]); in rcu_torture_writer()
1511 cur_ops->start_gp_poll_full(&gp_snap_full); in rcu_torture_writer()
1513 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { in rcu_torture_writer()
1514 cur_ops->get_gp_state_full(&gp_snap1_full); in rcu_torture_writer()
1516 if (cur_ops->poll_gp_state_full(&rgo[i]) || in rcu_torture_writer()
1517 cur_ops->same_gp_state_full(&rgo[i], in rcu_torture_writer()
1530 gp_snap = cur_ops->start_gp_poll_exp(); in rcu_torture_writer()
1532 while (!cur_ops->poll_gp_state_exp(gp_snap)) in rcu_torture_writer()
1539 cur_ops->start_gp_poll_exp_full(&gp_snap_full); in rcu_torture_writer()
1541 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) in rcu_torture_writer()
1548 do_rtws_sync(&rand, cur_ops->sync); in rcu_torture_writer()
1560 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { in rcu_torture_writer()
1567 expediting = -expediting; in rcu_torture_writer()
1577 !cur_ops->slow_gps && in rcu_torture_writer()
1595 expediting = -expediting; in rcu_torture_writer()
1601 " Dynamic grace-period expediting was disabled.\n", in rcu_torture_writer()
1623 "%s: No update-side primitives.\n", __func__)) { in rcu_torture_fakewriter()
1635 if (cur_ops->cb_barrier != NULL && in rcu_torture_fakewriter()
1637 cur_ops->cb_barrier(); in rcu_torture_fakewriter()
1643 cur_ops->exp_sync(); in rcu_torture_fakewriter()
1646 gp_snap = cur_ops->get_gp_state(); in rcu_torture_fakewriter()
1648 cur_ops->cond_sync(gp_snap); in rcu_torture_fakewriter()
1651 gp_snap = cur_ops->get_gp_state_exp(); in rcu_torture_fakewriter()
1653 cur_ops->cond_sync_exp(gp_snap); in rcu_torture_fakewriter()
1656 cur_ops->get_gp_state_full(&gp_snap_full); in rcu_torture_fakewriter()
1658 cur_ops->cond_sync_full(&gp_snap_full); in rcu_torture_fakewriter()
1661 cur_ops->get_gp_state_full(&gp_snap_full); in rcu_torture_fakewriter()
1663 cur_ops->cond_sync_exp_full(&gp_snap_full); in rcu_torture_fakewriter()
1666 gp_snap = cur_ops->start_gp_poll(); in rcu_torture_fakewriter()
1667 while (!cur_ops->poll_gp_state(gp_snap)) { in rcu_torture_fakewriter()
1673 cur_ops->start_gp_poll_full(&gp_snap_full); in rcu_torture_fakewriter()
1674 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { in rcu_torture_fakewriter()
1680 gp_snap = cur_ops->start_gp_poll_exp(); in rcu_torture_fakewriter()
1681 while (!cur_ops->poll_gp_state_exp(gp_snap)) { in rcu_torture_fakewriter()
1687 cur_ops->start_gp_poll_exp_full(&gp_snap_full); in rcu_torture_fakewriter()
1688 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) { in rcu_torture_fakewriter()
1694 cur_ops->sync(); in rcu_torture_fakewriter()
1731 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); in rcu_torture_reader_do_mbchk()
1739 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. in rcu_torture_reader_do_mbchk()
1740 !READ_ONCE(rtp->rtort_chkp) && in rcu_torture_reader_do_mbchk()
1741 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. in rcu_torture_reader_do_mbchk()
1742 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); in rcu_torture_reader_do_mbchk()
1743 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); in rcu_torture_reader_do_mbchk()
1744 rtrcp->rtc_chkrdr = rdrchked; in rcu_torture_reader_do_mbchk()
1745 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. in rcu_torture_reader_do_mbchk()
1746 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || in rcu_torture_reader_do_mbchk()
1747 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) in rcu_torture_reader_do_mbchk()
1748 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. in rcu_torture_reader_do_mbchk()
1752 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); in rcu_torture_reader_do_mbchk()
1753 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) in rcu_torture_reader_do_mbchk()
1755 rdrchked = rtrcp_assigner->rtc_chkrdr; in rcu_torture_reader_do_mbchk()
1759 loops = READ_ONCE(rtrcp_chked->rtc_myloops); in rcu_torture_reader_do_mbchk()
1761 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) in rcu_torture_reader_do_mbchk()
1763 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; in rcu_torture_reader_do_mbchk()
1764 rtrcp_assigner->rtc_ready = 0; in rcu_torture_reader_do_mbchk()
1765 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. in rcu_torture_reader_do_mbchk()
1766 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. in rcu_torture_reader_do_mbchk()
1770 * Do one extension of an RCU read-side critical section using the
1774 * and random-number-generator state in trsp. If this is neither the
1776 * change, do a ->read_delay().
1783 int idxnew1 = -1; in rcutorture_one_extend()
1784 int idxnew2 = -1; in rcutorture_one_extend()
1792 rtrsp->rt_readstate = newstate; in rcutorture_one_extend()
1794 /* First, put new protection in place to avoid critical-section gap. */ in rcutorture_one_extend()
1806 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; in rcutorture_one_extend()
1808 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; in rcutorture_one_extend()
1828 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); in rcutorture_one_extend()
1829 WARN_ON_ONCE(idxnew2 != -1); in rcutorture_one_extend()
1835 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); in rcutorture_one_extend()
1837 raw_spin_lock_irqsave(&current->pi_lock, flags); in rcutorture_one_extend()
1838 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); in rcutorture_one_extend()
1839 WARN_ON_ONCE(idxnew1 != -1); in rcutorture_one_extend()
1842 raw_spin_unlock_irqrestore(&current->pi_lock, flags); in rcutorture_one_extend()
1847 cur_ops->read_delay(trsp, rtrsp); in rcutorture_one_extend()
1850 if (idxnew1 == -1) in rcutorture_one_extend()
1855 if (idxnew2 == -1) in rcutorture_one_extend()
1871 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; in rcutorture_extend_mask_max()
1911 * them on non-RT. in rcutorture_extend_mask()
1925 * Do a randomly selected number of extensions of an existing RCU read-side
1936 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ in rcutorture_loop_extend()
1937 if (!((mask - 1) & mask)) in rcutorture_loop_extend()
1950 * Do one read-side critical section, returning false if there was
1975 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) in rcu_torture_one_read()
1976 cookie = cur_ops->get_gp_state(); in rcu_torture_one_read()
1977 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) in rcu_torture_one_read()
1978 cur_ops->get_gp_state_full(&cookie_full); in rcu_torture_one_read()
1980 started = cur_ops->get_gp_seq(); in rcu_torture_one_read()
1983 !cur_ops->readlock_held || cur_ops->readlock_held()); in rcu_torture_one_read()
1989 if (p->rtort_mbtest == 0) in rcu_torture_one_read()
1994 pipe_count = READ_ONCE(p->rtort_pipe_count); in rcu_torture_one_read()
1999 completed = cur_ops->get_gp_seq(); in rcu_torture_one_read()
2001 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, in rcu_torture_one_read()
2014 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) in rcu_torture_one_read()
2015 WARN_ONCE(cur_ops->poll_gp_state(cookie), in rcu_torture_one_read()
2016 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", in rcu_torture_one_read()
2020 cookie, cur_ops->get_gp_state()); in rcu_torture_one_read()
2021 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) in rcu_torture_one_read()
2022 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full), in rcu_torture_one_read()
2032 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. in rcu_torture_one_read()
2033 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); in rcu_torture_one_read()
2057 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); in rcu_torture_timer()
2060 if (cur_ops->call) { in rcu_torture_timer()
2064 cur_ops->call(rhp, rcu_torture_timer_cb); in rcu_torture_timer()
2085 if (irqreader && cur_ops->irq_capable) in rcu_torture_reader()
2089 if (irqreader && cur_ops->irq_capable) { in rcu_torture_reader()
2103 if (irqreader && cur_ops->irq_capable) { in rcu_torture_reader()
2113 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to
2119 int maxcpu = -1; in rcu_nocb_toggle()
2185 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) { in rcu_torture_stats_print()
2214 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. in rcu_torture_stats_print()
2215 pr_cont("nocb-toggles: %ld:%ld\n", in rcu_torture_stats_print()
2230 WARN_ON_ONCE(i > 1); // Too-short grace period in rcu_torture_stats_print()
2244 pr_cont("Free-Block Circulation: "); in rcu_torture_stats_print()
2250 if (cur_ops->stats) in rcu_torture_stats_print()
2251 cur_ops->stats(); in rcu_torture_stats_print()
2258 rcutorture_get_gp_data(cur_ops->ttype, in rcu_torture_stats_print()
2260 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, in rcu_torture_stats_print()
2263 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", in rcu_torture_stats_print()
2266 wtp == NULL ? ~0U : wtp->__state, in rcu_torture_stats_print()
2267 wtp == NULL ? -1 : (int)task_cpu(wtp)); in rcu_torture_stats_print()
2272 if (cur_ops->gp_kthread_dbg) in rcu_torture_stats_print()
2273 cur_ops->gp_kthread_dbg(); in rcu_torture_stats_print()
2320 pr_alert("mem_dump_obj(%px):", &rhp->func); in rcu_torture_mem_dump_obj()
2321 mem_dump_obj(&rhp->func); in rcu_torture_mem_dump_obj()
2332 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); in rcu_torture_mem_dump_obj()
2333 mem_dump_obj(&rhp->func); in rcu_torture_mem_dump_obj()
2341 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); in rcu_torture_mem_dump_obj()
2342 mem_dump_obj(&rhp->func); in rcu_torture_mem_dump_obj()
2350 "--- %s: nreaders=%d nfakewriters=%d " in rcu_torture_print_module_parms()
2366 test_boost, cur_ops->can_boost, in rcu_torture_print_module_parms()
2443 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
2477 idx = cur_ops->readlock(); in rcu_torture_stall()
2499 cur_ops->readunlock(idx); in rcu_torture_stall()
2513 /* Spawn CPU-stall kthread, if stall_cpu specified. */
2521 /* State structure for forward-progress self-propagating RCU callback. */
2528 * Forward-progress self-propagating RCU callback function. Because
2529 * callbacks run from softirq, this function is an implicit RCU read-side
2536 if (READ_ONCE(fcsp->stop)) { in rcu_torture_fwd_prog_cb()
2537 WRITE_ONCE(fcsp->stop, 2); in rcu_torture_fwd_prog_cb()
2540 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); in rcu_torture_fwd_prog_cb()
2543 /* State for continuous-flood RCU callbacks. */
2586 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) in rcu_torture_fwd_cb_hist()
2587 if (rfp->n_launders_hist[i].n_launders > 0) in rcu_torture_fwd_cb_hist()
2589 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", in rcu_torture_fwd_cb_hist()
2590 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); in rcu_torture_fwd_cb_hist()
2591 gps_old = rfp->rcu_launder_gp_seq_start; in rcu_torture_fwd_cb_hist()
2593 gps = rfp->n_launders_hist[j].launder_gp_seq; in rcu_torture_fwd_cb_hist()
2596 rfp->n_launders_hist[j].n_launders, in rcu_torture_fwd_cb_hist()
2603 /* Callback function for continuous-flood RCU callbacks. */
2610 struct rcu_fwd *rfp = rfcp->rfc_rfp; in rcu_torture_fwd_cb_cr()
2612 rfcp->rfc_next = NULL; in rcu_torture_fwd_cb_cr()
2613 rfcp->rfc_gps++; in rcu_torture_fwd_cb_cr()
2614 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); in rcu_torture_fwd_cb_cr()
2615 rfcpp = rfp->rcu_fwd_cb_tail; in rcu_torture_fwd_cb_cr()
2616 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; in rcu_torture_fwd_cb_cr()
2618 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); in rcu_torture_fwd_cb_cr()
2619 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); in rcu_torture_fwd_cb_cr()
2620 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) in rcu_torture_fwd_cb_cr()
2621 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; in rcu_torture_fwd_cb_cr()
2622 rfp->n_launders_hist[i].n_launders++; in rcu_torture_fwd_cb_cr()
2623 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); in rcu_torture_fwd_cb_cr()
2624 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); in rcu_torture_fwd_cb_cr()
2651 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); in rcu_torture_fwd_prog_cbfree()
2652 rfcp = rfp->rcu_fwd_cb_head; in rcu_torture_fwd_prog_cbfree()
2654 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); in rcu_torture_fwd_prog_cbfree()
2657 rfp->rcu_fwd_cb_head = rfcp->rfc_next; in rcu_torture_fwd_prog_cbfree()
2658 if (!rfp->rcu_fwd_cb_head) in rcu_torture_fwd_prog_cbfree()
2659 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; in rcu_torture_fwd_prog_cbfree()
2660 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); in rcu_torture_fwd_prog_cbfree()
2673 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2688 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); in rcu_torture_fwd_prog_nr()
2689 if (!cur_ops->sync) in rcu_torture_fwd_prog_nr()
2690 return; // Cannot do need_resched() forward progress testing without ->sync. in rcu_torture_fwd_prog_nr()
2691 if (cur_ops->call && cur_ops->cb_barrier) { in rcu_torture_fwd_prog_nr()
2698 cur_ops->sync(); /* Later readers see above write. */ in rcu_torture_fwd_prog_nr()
2701 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); in rcu_torture_fwd_prog_nr()
2704 gps = cur_ops->get_gp_seq(); in rcu_torture_fwd_prog_nr()
2705 sd = cur_ops->stall_dur() + 1; in rcu_torture_fwd_prog_nr()
2706 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; in rcu_torture_fwd_prog_nr()
2707 dur = sd4 + torture_random(&trs) % (sd - sd4); in rcu_torture_fwd_prog_nr()
2708 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); in rcu_torture_fwd_prog_nr()
2709 stopat = rfp->rcu_fwd_startat + dur; in rcu_torture_fwd_prog_nr()
2713 idx = cur_ops->readlock(); in rcu_torture_fwd_prog_nr()
2715 cur_ops->readunlock(idx); in rcu_torture_fwd_prog_nr()
2724 cver = READ_ONCE(rcu_torture_current_version) - cver; in rcu_torture_fwd_prog_nr()
2725 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); in rcu_torture_fwd_prog_nr()
2728 rfp->rcu_fwd_id, dur, cver, gps); in rcu_torture_fwd_prog_nr()
2732 cur_ops->sync(); /* Wait for running CB to complete. */ in rcu_torture_fwd_prog_nr()
2733 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); in rcu_torture_fwd_prog_nr()
2734 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ in rcu_torture_fwd_prog_nr()
2745 /* Carry out call_rcu() forward-progress testing. */
2762 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); in rcu_torture_fwd_prog_cr()
2765 if (!cur_ops->call) in rcu_torture_fwd_prog_cr()
2766 return; /* Can't do call_rcu() fwd prog without ->call. */ in rcu_torture_fwd_prog_cr()
2770 cur_ops->sync(); /* Later readers see above write. */ in rcu_torture_fwd_prog_cr()
2771 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); in rcu_torture_fwd_prog_cr()
2772 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; in rcu_torture_fwd_prog_cr()
2774 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread in rcu_torture_fwd_prog_cr()
2778 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) in rcu_torture_fwd_prog_cr()
2779 rfp->n_launders_hist[i].n_launders = 0; in rcu_torture_fwd_prog_cr()
2781 gps = cur_ops->get_gp_seq(); in rcu_torture_fwd_prog_cr()
2782 rfp->rcu_launder_gp_seq_start = gps; in rcu_torture_fwd_prog_cr()
2787 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); in rcu_torture_fwd_prog_cr()
2790 rfcpn = READ_ONCE(rfcp->rfc_next); in rcu_torture_fwd_prog_cr()
2792 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && in rcu_torture_fwd_prog_cr()
2795 rfp->rcu_fwd_cb_head = rfcpn; in rcu_torture_fwd_prog_cr()
2798 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { in rcu_torture_fwd_prog_cr()
2806 rfcp->rfc_gps = 0; in rcu_torture_fwd_prog_cr()
2807 rfcp->rfc_rfp = rfp; in rcu_torture_fwd_prog_cr()
2812 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); in rcu_torture_fwd_prog_cr()
2821 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); in rcu_torture_fwd_prog_cr()
2822 cver = READ_ONCE(rcu_torture_current_version) - cver; in rcu_torture_fwd_prog_cr()
2823 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); in rcu_torture_fwd_prog_cr()
2824 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); in rcu_torture_fwd_prog_cr()
2825 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ in rcu_torture_fwd_prog_cr()
2833 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, in rcu_torture_fwd_prog_cr()
2834 n_launders + n_max_cbs - n_launders_cb_snap, in rcu_torture_fwd_prog_cr()
2850 * current forward-progress test.
2865 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", in rcutorture_oom_notify()
2869 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); in rcutorture_oom_notify()
2877 cur_ops->cb_barrier(); in rcutorture_oom_notify()
2882 cur_ops->cb_barrier(); in rcutorture_oom_notify()
2898 /* Carry out grace-period forward-progress testing. */
2914 if (!rfp->rcu_fwd_id) { in rcu_torture_fwd_prog()
2928 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); in rcu_torture_fwd_prog()
2929 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) in rcu_torture_fwd_prog()
2931 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && in rcu_torture_fwd_prog()
2934 torture_num_online_cpus() > rfp->rcu_fwd_id))) in rcu_torture_fwd_prog()
2941 /* Short runs might not contain a valid forward-progress attempt. */ in rcu_torture_fwd_prog()
2942 if (!rfp->rcu_fwd_id) { in rcu_torture_fwd_prog()
2950 /* If forward-progress checking is requested and feasible, spawn the thread. */
2965 if ((!cur_ops->sync && !cur_ops->call) || in rcu_torture_fwd_prog_init()
2966 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || in rcu_torture_fwd_prog_init()
2973 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); in rcu_torture_fwd_prog_init()
2976 return -EINVAL; /* In module, can fail back to user. */ in rcu_torture_fwd_prog_init()
2991 return -ENOMEM; in rcu_torture_fwd_prog_init()
3042 cur_ops->call(rhp, rcu_torture_barrier_cbf); in rcu_torture_barrier1cb()
3066 * is ordered before the following ->call(). in rcu_torture_barrier_cbs()
3071 cur_ops->call(&rcu, rcu_torture_barrier_cbf); in rcu_torture_barrier_cbs()
3076 if (cur_ops->cb_barrier != NULL) in rcu_torture_barrier_cbs()
3077 cur_ops->cb_barrier(); in rcu_torture_barrier_cbs()
3102 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ in rcu_torture_barrier()
3115 cur_ops->cb_barrier(); in rcu_torture_barrier()
3140 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { in rcu_torture_barrier_init()
3143 torture_type, cur_ops->name); in rcu_torture_barrier_init()
3157 return -ENOMEM; in rcu_torture_barrier_init()
3193 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) in rcu_torture_can_boost()
3195 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) in rcu_torture_can_boost()
3227 (void)rcu_torture_one_read(trsp, -1); in rcu_torture_read_exit_child()
3231 // Parent kthread which creates and destroys read-exit child kthreads.
3243 // Each pass through this loop does one read-exit episode. in rcu_torture_read_exit()
3319 …WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", tes… in rcutorture_test_nmis()
3334 if (cur_ops->cb_barrier != NULL) { in rcu_torture_cleanup()
3335 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); in rcu_torture_cleanup()
3336 cur_ops->cb_barrier(); in rcu_torture_cleanup()
3349 if (cur_ops->gp_kthread_dbg) in rcu_torture_cleanup()
3350 cur_ops->gp_kthread_dbg(); in rcu_torture_cleanup()
3382 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); in rcu_torture_cleanup()
3383 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); in rcu_torture_cleanup()
3384 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", in rcu_torture_cleanup()
3385 cur_ops->name, (long)gp_seq, flags, in rcu_torture_cleanup()
3393 * Wait for all RCU callbacks to fire, then do torture-type-specific in rcu_torture_cleanup()
3396 if (cur_ops->cb_barrier != NULL) { in rcu_torture_cleanup()
3397 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); in rcu_torture_cleanup()
3398 cur_ops->cb_barrier(); in rcu_torture_cleanup()
3400 if (cur_ops->cleanup != NULL) in rcu_torture_cleanup()
3401 cur_ops->cleanup(); in rcu_torture_cleanup()
3405 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ in rcu_torture_cleanup()
3408 pr_alert("Failure/close-call rcutorture reader segments:\n"); in rcu_torture_cleanup()
3453 * This -might- happen due to race conditions, but is unlikely. in rcu_torture_err_cb()
3459 * does happen, the debug-objects subsystem won't have splatted. in rcu_torture_err_cb()
3466 * Verify that double-free causes debug-objects to complain, but only
3467 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
3511 if (cur_ops->sync && !(++n & 0xfff)) in rcutorture_sync()
3512 cur_ops->sync(); in rcutorture_sync()
3554 j = deadlock ? 0 : -1; in srcu_lockdep_next()
3562 // Test lockdep on SRCU-based deadlock scenarios.
3591 "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n", in rcu_torture_init_srcu_lockdep()
3598 pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n", in rcu_torture_init_srcu_lockdep()
3599 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); in rcu_torture_init_srcu_lockdep()
3614 pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n", in rcu_torture_init_srcu_lockdep()
3615 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); in rcu_torture_init_srcu_lockdep()
3635 pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n", in rcu_torture_init_srcu_lockdep()
3636 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); in rcu_torture_init_srcu_lockdep()
3657 pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n", in rcu_torture_init_srcu_lockdep()
3658 __func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-"); in rcu_torture_init_srcu_lockdep()
3663 char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace" in rcu_torture_init_srcu_lockdep()
3673 if (i == cyclelen - 1) in rcu_torture_init_srcu_lockdep()
3712 return -EBUSY; in rcu_torture_init()
3717 if (strcmp(torture_type, cur_ops->name) == 0) in rcu_torture_init()
3721 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", in rcu_torture_init()
3723 pr_alert("rcu-torture types:"); in rcu_torture_init()
3725 pr_cont(" %s", torture_ops[i]->name); in rcu_torture_init()
3727 firsterr = -EINVAL; in rcu_torture_init()
3731 if (cur_ops->fqs == NULL && fqs_duration != 0) { in rcu_torture_init()
3732 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); in rcu_torture_init()
3737 pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n", in rcu_torture_init()
3738 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU)); in rcu_torture_init()
3741 if (cur_ops->init) in rcu_torture_init()
3742 cur_ops->init(); in rcu_torture_init()
3749 nrealreaders = num_online_cpus() - 2 - nreaders; in rcu_torture_init()
3754 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); in rcu_torture_init()
3755 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); in rcu_torture_init()
3757 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", in rcu_torture_init()
3758 cur_ops->name, (long)gp_seq, flags); in rcu_torture_init()
3808 firsterr = -ENOMEM; in rcu_torture_init()
3824 firsterr = -ENOMEM; in rcu_torture_init()
3828 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; in rcu_torture_init()
3843 firsterr = -ENOMEM; in rcu_torture_init()
3870 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; in rcu_torture_init()