/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
	if (rcu_dynticks_nesting) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting = 0;
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting--;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
			  oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(oldval != 0);
	rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
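
/*
 * Illustrative trace of rcu_dynticks_nesting across the entry points
 * above (a sketch derived from this file, not an additional API):
 *
 *	running task:       DYNTICK_TASK_NESTING
 *	rcu_idle_enter():   0    (enter extended quiescent state)
 *	rcu_irq_enter():    1    (handler runs; CPU no longer idle)
 *	rcu_irq_exit():     0    (back in the extended quiescent state)
 *	rcu_idle_exit():    DYNTICK_TASK_NESTING
 */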

#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
	return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
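
/*
 * Worked example (hypothetical callbacks cbA and cbB queued, none yet
 * ready): ->donetail == &rcp->rcucblist while ->curtail == &cbB->next.
 * A quiescent state runs the helper above, which sets ->donetail =
 * ->curtail, marking both callbacks ready for __rcu_process_callbacks().
 */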

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}
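
/*
 * For context, a simplified sketch (not code from this file) of how
 * rcu_check_callbacks() is normally reached: the scheduling-clock path
 * in kernel/timer.c invokes it once per tick, with "user_tick"
 * indicating whether the tick interrupted user-mode execution.
 *
 *	void update_process_times(int user_tick)
 *	{
 *		...
 *		rcu_check_callbacks(smp_processor_id(), user_tick);
 *		...
 *	}
 */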

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      rcu_is_callbacks_kthread()));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
				      is_idle_task(current),
				      rcu_is_callbacks_kthread()));
}
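
/*
 * A sketch of the callback-list layout that the splice above relies on
 * (cbA, cbB, and cbC are hypothetical callbacks; cbA and cbB have
 * waited out a grace period, cbC has not):
 *
 *	rcp->rcucblist -> cbA -> cbB -> cbC -> NULL
 *	                          ^      ^
 *	                          |      |
 *	rcp->donetail ------------+      |
 *	rcp->curtail --------------------+
 *
 * ->donetail points to the ->next field of the last ready callback
 * (here &cbB->next) and ->curtail to the ->next field of the last
 * callback overall (here &cbC->next); both point to &rcp->rcucblist
 * when the list is empty.  The code above therefore detaches
 * cbA -> cbB as the local list and leaves cbC queued.
 */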

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
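
/*
 * Illustrative update-side usage (a sketch with a hypothetical
 * RCU-protected list element "p"; readers run with preemption
 * disabled, so synchronize_sched() waits for all of them):
 *
 *	list_del_rcu(&p->list);
 *	synchronize_sched();
 *	kfree(p);
 */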

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
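
/*
 * Illustrative usage (a sketch; "struct foo" and foo_reclaim() are
 * hypothetical).  The rcu_head is embedded in the protected structure
 * and recovered in the callback via container_of():
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rh)
 *	{
 *		kfree(container_of(rh, struct foo, rh));
 *	}
 *
 *	call_rcu_sched(&p->rh, foo_reclaim);
 */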

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);