// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif
#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns %true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
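
/*
 * For example, an updater deciding between queueing memory for deferred
 * freeing and waiting for a grace period directly might proceed as in
 * the following minimal sketch.  The struct and the update function are
 * hypothetical, and update-side exclusion is assumed; rcu_replace_pointer(),
 * kfree_rcu(), and synchronize_rcu() are the real APIs.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void update_foo(struct foo __rcu **fpp, struct foo *newfp)
 *	{
 *		struct foo *oldfp;
 *
 *		oldfp = rcu_replace_pointer(*fpp, newfp, true);
 *		if (!oldfp)
 *			return;
 *		if (rcu_gp_might_be_stalled()) {
 *			// Don't queue more memory behind a stalled grace
 *			// period, instead wait for it directly.
 *			synchronize_rcu();
 *			kfree(oldfp);
 *		} else {
 *			kfree_rcu(oldfp, rh);
 *		}
 *	}
 */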

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
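
/*
 * These two sysctls are exposed as kernel.panic_on_rcu_stall and
 * kernel.max_rcu_stall_to_panic.  For example, to panic (and perhaps
 * collect a crash dump) on the second stall warning rather than the
 * first, a root shell might do:
 *
 *	# echo 1 > /proc/sys/kernel/panic_on_rcu_stall
 *	# echo 2 > /proc/sys/kernel/max_rcu_stall_to_panic
 */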

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
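
/*
 * Worked example of the arithmetic above: with HZ=1000, CONFIG_PROVE_RCU=n,
 * and the default rcu_cpu_stall_timeout of 21 seconds,
 * rcu_jiffies_till_stall_check() returns 21000, so a grace period starting
 * at jiffies=J arms the first stall warning for J+21000 and sets the
 * ->jiffies_resched forced-rescheduling deadline to J+10500.
 */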

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i];
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
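
/*
 * An illustrative (not captured from a real system) entry printed by the
 * loop above for PID 42 with ->rcu_read_lock_nesting of 1 would be:
 *
 *	 P42/1:b..l
 *
 * The four trailing characters decode rcu_read_unlock_special.b.blocked,
 * .b.need_qs, and .b.exp_hint, plus ->blkd_tasks list membership, with
 * '.' standing for false.  So this task blocked within its RCU read-side
 * critical section and remains queued on its leaf rcu_node structure.
 */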

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
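
// Roughly speaking, the grace-period kthread cycles through the states
// named below as follows (a simplified sketch of rcu_gp_kthread() and
// friends in tree.c): RCU_GP_WAIT_GPS -> RCU_GP_DONE_GPS -> RCU_GP_ONOFF ->
// RCU_GP_INIT -> (RCU_GP_WAIT_FQS <-> RCU_GP_DOING_FQS) ... ->
// RCU_GP_CLEANUP -> RCU_GP_CLEANED, and back to RCU_GP_WAIT_GPS.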
static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}
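
/*
 * An illustrative (not captured from a real system) line from the
 * pr_err() above, for online CPU 3 two scheduling-clock ticks into the
 * current grace period, might look like this:
 *
 *	3-OoN.: (2 ticks this GP) idle=302/1/0x4000000000000000 softirq=1200/1200 fqs=0
 *
 * "O", "o", and "N" say that the CPU is online now and was online at the
 * starts of the current and next grace periods, and the fourth character
 * reports on the irq_work used to detect excessive irq disabling ('.'
 * meaning nothing suspicious).
 */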

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->__state : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
						dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}

/* Complain about missing wakeups from expired fqs wait timer */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       gpk->__state);
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (READ_ONCE(rnp->qsmask)) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
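
/*
 * Minimal usage sketch, along the lines of what rcutorture does when an
 * RCU priority-boosting interval fails to end the grace period (the
 * surrounding diagnostics are hypothetical):
 *
 *	int cpu;
 *
 *	if (rcu_check_boost_fail(gp_state, &cpu))
 *		pr_info("Boosting is to blame.\n");
 *	else if (cpu >= 0)
 *		pr_info("CPU %d has yet to report a quiescent state.\n", cpu);
 *	else
 *		pr_info("No holdout CPU and nothing in need of boosting.\n");
 */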

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	js = j - data_race(rcu_state.gp_start);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_max),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
		    !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
			data_race(rnp->qsmask),
			".b"[!!data_race(rnp->boost_kthread_task)],
			".B"[!!data_race(rnp->boost_tasks)],
			".E"[!!data_race(rnp->exp_tasks)],
			".G"[!!data_race(rnp->gp_tasks)],
			data_race(rnp->n_boosts));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
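
/*
 * For example, booting with rcutree.sysrq_rcu=1 and then, from a root
 * shell:
 *
 *	# echo y > /proc/sysrq-trigger
 *
 * dumps the rcu_node tree and grace-period kthread state via
 * show_rcu_gp_kthreads().
 */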