xref: /linux/kernel/rcu/tree_stall.h (revision b51bcbbf16ef0ea352e8b924dd8638112e4037a5)
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	       0
#endif

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

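/*
 * Re-enable RCU CPU stall warnings once the sysrq printout completes,
 * but only if they were suppressed by rcu_sysrq_start() above.
 */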
void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

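/* Register rcu_panic() on the panic notifier chain at early boot. */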
static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rcu_state.gp_start = j;
	j1 = rcu_jiffies_till_stall_check();
	/* Record ->gp_start before ->jiffies_stall. */
	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

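/*
 * Format this CPU's CONFIG_RCU_FAST_NO_HZ diagnostics (time of last
 * callback acceleration plus laziness and nohz-tick state flags) into
 * the buffer supplied by the caller.
 */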
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		".l"[rdp->all_lazy],
		".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
		".D"[!rdp->tick_nohz_enabled_snap]);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
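	/*
	 * The four characters following the CPU number report, in order:
	 * "O" if the CPU is not online, "o" if its bit is clear in
	 * ->qsmaskinit, "N" if its bit is clear in ->qsmaskinitnext, and
	 * the state of the excessive-irq-disable irq_work check ("?"
	 * without CONFIG_IRQ_WORK, a digit while the irq_work is pending,
	 * "!" if it has not run during this grace period, else ".").
	 */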
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
				"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz);
}

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	j = jiffies - READ_ONCE(rcu_state.gp_activity);
	if (j > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       READ_ONCE(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

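/*
 * Print a stall warning naming the CPUs and tasks blocking the current
 * grace period, dump their state, and then force quiescent states in an
 * attempt to get the grace period moving again.
 */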
static void print_other_cpu_stall(unsigned long gp_seq)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       READ_ONCE(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

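/*
 * Print a stall warning for the current CPU, which has itself failed to
 * report a quiescent state for an extended time, then try to get things
 * moving again by requesting a context switch.
 */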
static void print_cpu_stall(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - rcu_state.gp_start,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

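/*
 * Check whether the current grace period has extended past the stall
 * timeout and, if so, complain about either this CPU (self-detected
 * stall) or some other CPU or task (other-CPU stall).
 */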
static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall();

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including of callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	j = jiffies;
	ja = j - READ_ONCE(rcu_state.gp_activity);
	jr = j - READ_ONCE(rcu_state.gp_req_activity);
	jw = j - READ_ONCE(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state,
		rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
		(long)READ_ONCE(rcu_state.gp_seq),
		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
		READ_ONCE(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
			(long)rnp->gp_seq_needed);
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (rdp->gpwrap ||
			    ULONG_CMP_GE(rcu_state.gp_seq,
					 rdp->gp_seq_needed))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)rdp->gp_seq_needed);
		}
	}
	/* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

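	/*
	 * Lockless pass: nothing to report unless CONFIG_PROVE_RCU, and
	 * then only if no grace period is in progress, one is actually
	 * needed, the request has gone unserviced for gpssdelay jiffies,
	 * and no other caller has already complained.
	 */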
	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

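/* Register the "y" sysrq key at early boot, but only if sysrq_rcu is set. */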
static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);