1 /*
2  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3  * Internal non-public definitions that provide either classic
4  * or preemptible semantics.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  *
20  * Copyright Red Hat, 2009
21  * Copyright IBM Corporation, 2009
22  *
23  * Author: Ingo Molnar <mingo@elte.hu>
24  *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25  */
26 
27 #include <linux/delay.h>
28 #include <linux/stop_machine.h>
29 
30 #define RCU_KTHREAD_PRIO 1
31 
32 #ifdef CONFIG_RCU_BOOST
33 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
34 #else
35 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
36 #endif
37 
38 /*
39  * Check the RCU kernel configuration parameters and print informative
40  * messages about anything out of the ordinary.  If you like #ifdef, you
41  * will love this function.
42  */
43 static void __init rcu_bootup_announce_oddness(void)
44 {
45 #ifdef CONFIG_RCU_TRACE
46 	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
47 #endif
48 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
49 	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
50 	       CONFIG_RCU_FANOUT);
51 #endif
52 #ifdef CONFIG_RCU_FANOUT_EXACT
53 	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
54 #endif
55 #ifdef CONFIG_RCU_FAST_NO_HZ
56 	printk(KERN_INFO
57 	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
58 #endif
59 #ifdef CONFIG_PROVE_RCU
60 	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
61 #endif
62 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
63 	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
64 #endif
65 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
66 	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
67 #endif
68 #if NUM_RCU_LVL_4 != 0
69 	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
70 #endif
71 }
72 
73 #ifdef CONFIG_TREE_PREEMPT_RCU
74 
75 struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
76 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
77 static struct rcu_state *rcu_state = &rcu_preempt_state;
78 
79 static void rcu_read_unlock_special(struct task_struct *t);
80 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
81 
82 /*
83  * Tell them what RCU they are running.
84  */
85 static void __init rcu_bootup_announce(void)
86 {
87 	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
88 	rcu_bootup_announce_oddness();
89 }
90 
91 /*
92  * Return the number of RCU-preempt batches processed thus far
93  * for debug and statistics.
94  */
95 long rcu_batches_completed_preempt(void)
96 {
97 	return rcu_preempt_state.completed;
98 }
99 EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
100 
101 /*
102  * Return the number of RCU batches processed thus far for debug & stats.
103  */
104 long rcu_batches_completed(void)
105 {
106 	return rcu_batches_completed_preempt();
107 }
108 EXPORT_SYMBOL_GPL(rcu_batches_completed);
109 
110 /*
111  * Force a quiescent state for preemptible RCU.
112  */
113 void rcu_force_quiescent_state(void)
114 {
115 	force_quiescent_state(&rcu_preempt_state, 0);
116 }
117 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
118 
119 /*
120  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
121  * that this just means that the task currently running on the CPU is
122  * not in an RCU read-side critical section.  There might be any number
123  * of tasks blocked while in an RCU read-side critical section.
124  *
125  * Unlike the other rcu_*_qs() functions, callers to this function
126  * must disable irqs in order to protect the assignment to
127  * ->rcu_read_unlock_special.
128  */
129 static void rcu_preempt_qs(int cpu)
130 {
131 	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
132 
133 	rdp->passed_quiesce_gpnum = rdp->gpnum;
134 	barrier();
135 	if (rdp->passed_quiesce == 0)
136 		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
137 	rdp->passed_quiesce = 1;
138 	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
139 }
140 
141 /*
142  * We have entered the scheduler, and the current task might soon be
143  * context-switched away from.  If this task is in an RCU read-side
144  * critical section, we will no longer be able to rely on the CPU to
145  * record that fact, so we enqueue the task on the blkd_tasks list.
146  * The task will dequeue itself when it exits the outermost enclosing
147  * RCU read-side critical section.  Therefore, the current grace period
148  * cannot be permitted to complete until the blkd_tasks list entries
149  * predating the current grace period drain, in other words, until
150  * rnp->gp_tasks becomes NULL.
151  *
152  * Caller must disable preemption.
153  */
154 static void rcu_preempt_note_context_switch(int cpu)
155 {
156 	struct task_struct *t = current;
157 	unsigned long flags;
158 	struct rcu_data *rdp;
159 	struct rcu_node *rnp;
160 
161 	if (t->rcu_read_lock_nesting > 0 &&
162 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
163 
164 		/* Possibly blocking in an RCU read-side critical section. */
165 		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
166 		rnp = rdp->mynode;
167 		raw_spin_lock_irqsave(&rnp->lock, flags);
168 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
169 		t->rcu_blocked_node = rnp;
170 
171 		/*
172 		 * If this CPU has already checked in, then this task
173 		 * will hold up the next grace period rather than the
174 		 * current grace period.  Queue the task accordingly.
175 		 * If the task is queued for the current grace period
176 		 * (i.e., this CPU has not yet passed through a quiescent
177 		 * state for the current grace period), then as long
178 		 * as that task remains queued, the current grace period
179 		 * cannot end.  Note that there is some uncertainty as
180 		 * to exactly when the current grace period started.
181 		 * We take a conservative approach, which can result
182 		 * in unnecessarily waiting on tasks that started very
183 		 * slightly after the current grace period began.  C'est
184 		 * la vie!!!
185 		 *
186 		 * But first, note that the current CPU must still be
187 		 * on line!
188 		 */
189 		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
190 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
191 		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
192 			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
193 			rnp->gp_tasks = &t->rcu_node_entry;
194 #ifdef CONFIG_RCU_BOOST
195 			if (rnp->boost_tasks != NULL)
196 				rnp->boost_tasks = rnp->gp_tasks;
197 #endif /* #ifdef CONFIG_RCU_BOOST */
198 		} else {
199 			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
200 			if (rnp->qsmask & rdp->grpmask)
201 				rnp->gp_tasks = &t->rcu_node_entry;
202 		}
203 		trace_rcu_preempt_task(rdp->rsp->name,
204 				       t->pid,
205 				       (rnp->qsmask & rdp->grpmask)
206 				       ? rnp->gpnum
207 				       : rnp->gpnum + 1);
208 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
209 	} else if (t->rcu_read_lock_nesting < 0 &&
210 		   t->rcu_read_unlock_special) {
211 
212 		/*
213 		 * Complete exit from RCU read-side critical section on
214 		 * behalf of preempted instance of __rcu_read_unlock().
215 		 */
216 		rcu_read_unlock_special(t);
217 	}
218 
219 	/*
220 	 * Either we were not in an RCU read-side critical section to
221 	 * begin with, or we have now recorded that critical section
222 	 * globally.  Either way, we can now note a quiescent state
223 	 * for this CPU.  Again, if we were in an RCU read-side critical
224 	 * section, and if that critical section was blocking the current
225 	 * grace period, then the fact that the task has been enqueued
226 	 * means that we continue to block the current grace period.
227 	 */
228 	local_irq_save(flags);
229 	rcu_preempt_qs(cpu);
230 	local_irq_restore(flags);
231 }
232 
233 /*
234  * Tree-preemptible RCU implementation for rcu_read_lock().
235  * Just increment ->rcu_read_lock_nesting, shared state will be updated
236  * if we block.
237  */
238 void __rcu_read_lock(void)
239 {
240 	current->rcu_read_lock_nesting++;
241 	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
242 }
243 EXPORT_SYMBOL_GPL(__rcu_read_lock);
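
/*
 * Minimal reader-side usage sketch (illustrative only, not compiled;
 * "struct foo", "example_gp", and "example_reader" are hypothetical):
 * rcu_read_lock() enters via __rcu_read_lock() above, and the matching
 * rcu_read_unlock() exits via __rcu_read_unlock() further below.
 */
#if 0
struct foo {
	int a;
};
static struct foo __rcu *example_gp;

static int example_reader(void)
{
	struct foo *p;
	int a = -1;

	rcu_read_lock();			/* enters via __rcu_read_lock() */
	p = rcu_dereference(example_gp);	/* fetch RCU-protected pointer */
	if (p)
		a = p->a;
	rcu_read_unlock();			/* exits via __rcu_read_unlock() */
	return a;
}
#endif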
244 
245 /*
246  * Check for preempted RCU readers blocking the current grace period
247  * for the specified rcu_node structure.  If the caller needs a reliable
248  * answer, it must hold the rcu_node's ->lock.
249  */
250 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
251 {
252 	return rnp->gp_tasks != NULL;
253 }
254 
255 /*
256  * Record a quiescent state for all tasks that were previously queued
257  * on the specified rcu_node structure and that were blocking the current
258  * RCU grace period.  The caller must hold the specified rnp->lock with
259  * irqs disabled, and this lock is released upon return, but irqs remain
260  * disabled.
261  */
262 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
263 	__releases(rnp->lock)
264 {
265 	unsigned long mask;
266 	struct rcu_node *rnp_p;
267 
268 	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
269 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
270 		return;  /* Still need more quiescent states! */
271 	}
272 
273 	rnp_p = rnp->parent;
274 	if (rnp_p == NULL) {
275 		/*
276 		 * Either there is only one rcu_node in the tree,
277 		 * or tasks were kicked up to root rcu_node due to
278 		 * CPUs going offline.
279 		 */
280 		rcu_report_qs_rsp(&rcu_preempt_state, flags);
281 		return;
282 	}
283 
284 	/* Report up the rest of the hierarchy. */
285 	mask = rnp->grpmask;
286 	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
287 	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
288 	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
289 }
290 
291 /*
292  * Advance a ->blkd_tasks-list pointer to the next entry, returning
293  * NULL instead if at the end of the list.
294  */
295 static struct list_head *rcu_next_node_entry(struct task_struct *t,
296 					     struct rcu_node *rnp)
297 {
298 	struct list_head *np;
299 
300 	np = t->rcu_node_entry.next;
301 	if (np == &rnp->blkd_tasks)
302 		np = NULL;
303 	return np;
304 }
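
/*
 * Usage sketch (illustrative only, not compiled; "example_walk_blkd_tasks"
 * is hypothetical): walking the remainder of rnp->blkd_tasks starting from
 * a known blocked task via rcu_next_node_entry().  A real caller would hold
 * rnp->lock, as the users in this file do.
 */
#if 0
static void example_walk_blkd_tasks(struct task_struct *t,
				    struct rcu_node *rnp)
{
	struct list_head *np;

	for (np = &t->rcu_node_entry; np != NULL; ) {
		t = list_entry(np, struct task_struct, rcu_node_entry);
		/* ... inspect blocked task t here ... */
		np = rcu_next_node_entry(t, rnp);
	}
}
#endif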
305 
306 /*
307  * Handle special cases during rcu_read_unlock(), such as needing to
308  * report a quiescent state to the RCU core or to clean up after the
309  * task blocked during the RCU read-side critical section.
310  */
311 static noinline void rcu_read_unlock_special(struct task_struct *t)
312 {
313 	int empty;
314 	int empty_exp;
315 	int empty_exp_now;
316 	unsigned long flags;
317 	struct list_head *np;
318 #ifdef CONFIG_RCU_BOOST
319 	struct rt_mutex *rbmp = NULL;
320 #endif /* #ifdef CONFIG_RCU_BOOST */
321 	struct rcu_node *rnp;
322 	int special;
323 
324 	/* NMI handlers cannot block and cannot safely manipulate state. */
325 	if (in_nmi())
326 		return;
327 
328 	local_irq_save(flags);
329 
330 	/*
331 	 * If RCU core is waiting for this CPU to exit critical section,
332 	 * let it know that we have done so.
333 	 */
334 	special = t->rcu_read_unlock_special;
335 	if (special & RCU_READ_UNLOCK_NEED_QS) {
336 		rcu_preempt_qs(smp_processor_id());
337 	}
338 
339 	/* Hardware IRQ handlers cannot block. */
340 	if (in_irq() || in_serving_softirq()) {
341 		local_irq_restore(flags);
342 		return;
343 	}
344 
345 	/* Clean up if blocked during RCU read-side critical section. */
346 	if (special & RCU_READ_UNLOCK_BLOCKED) {
347 		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
348 
349 		/*
350 		 * Remove this task from the list it blocked on.  The
351 		 * task can migrate while we acquire the lock, but at
352 		 * most one time.  So at most two passes through loop.
353 		 */
354 		for (;;) {
355 			rnp = t->rcu_blocked_node;
356 			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
357 			if (rnp == t->rcu_blocked_node)
358 				break;
359 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
360 		}
361 		empty = !rcu_preempt_blocked_readers_cgp(rnp);
362 		empty_exp = !rcu_preempted_readers_exp(rnp);
363 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
364 		np = rcu_next_node_entry(t, rnp);
365 		list_del_init(&t->rcu_node_entry);
366 		t->rcu_blocked_node = NULL;
367 		trace_rcu_unlock_preempted_task("rcu_preempt",
368 						rnp->gpnum, t->pid);
369 		if (&t->rcu_node_entry == rnp->gp_tasks)
370 			rnp->gp_tasks = np;
371 		if (&t->rcu_node_entry == rnp->exp_tasks)
372 			rnp->exp_tasks = np;
373 #ifdef CONFIG_RCU_BOOST
374 		if (&t->rcu_node_entry == rnp->boost_tasks)
375 			rnp->boost_tasks = np;
376 		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
377 		if (t->rcu_boost_mutex) {
378 			rbmp = t->rcu_boost_mutex;
379 			t->rcu_boost_mutex = NULL;
380 		}
381 #endif /* #ifdef CONFIG_RCU_BOOST */
382 
383 		/*
384 		 * If this was the last task on the current list, and if
385 		 * we aren't waiting on any CPUs, report the quiescent state.
386 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
387 		 * so we must take a snapshot of the expedited state.
388 		 */
389 		empty_exp_now = !rcu_preempted_readers_exp(rnp);
390 		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
391 			trace_rcu_quiescent_state_report("preempt_rcu",
392 							 rnp->gpnum,
393 							 0, rnp->qsmask,
394 							 rnp->level,
395 							 rnp->grplo,
396 							 rnp->grphi,
397 							 !!rnp->gp_tasks);
398 			rcu_report_unblock_qs_rnp(rnp, flags);
399 		} else
400 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
401 
402 #ifdef CONFIG_RCU_BOOST
403 		/* Unboost if we were boosted. */
404 		if (rbmp)
405 			rt_mutex_unlock(rbmp);
406 #endif /* #ifdef CONFIG_RCU_BOOST */
407 
408 		/*
409 		 * If this was the last task on the expedited lists,
410 		 * then we need to report up the rcu_node hierarchy.
411 		 */
412 		if (!empty_exp && empty_exp_now)
413 			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
414 	} else {
415 		local_irq_restore(flags);
416 	}
417 }
418 
419 /*
420  * Tree-preemptible RCU implementation for rcu_read_unlock().
421  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
422  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
423  * invoke rcu_read_unlock_special() to clean up after a context switch
424  * in an RCU read-side critical section and other special cases.
425  */
426 void __rcu_read_unlock(void)
427 {
428 	struct task_struct *t = current;
429 
430 	if (t->rcu_read_lock_nesting != 1)
431 		--t->rcu_read_lock_nesting;
432 	else {
433 		barrier();  /* critical section before exit code. */
434 		t->rcu_read_lock_nesting = INT_MIN;
435 		barrier();  /* assign before ->rcu_read_unlock_special load */
436 		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
437 			rcu_read_unlock_special(t);
438 		barrier();  /* ->rcu_read_unlock_special load before assign */
439 		t->rcu_read_lock_nesting = 0;
440 	}
441 #ifdef CONFIG_PROVE_LOCKING
442 	{
443 		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
444 
445 		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
446 	}
447 #endif /* #ifdef CONFIG_PROVE_LOCKING */
448 }
449 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
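
/*
 * Nesting sketch (illustrative only, not compiled; "example_nested_reader"
 * is hypothetical): rcu_read_lock()/rcu_read_unlock() nest, and only the
 * outermost rcu_read_unlock() takes the slow path above.
 */
#if 0
static void example_nested_reader(void)
{
	rcu_read_lock();	/* ->rcu_read_lock_nesting = 1 */
	rcu_read_lock();	/* ->rcu_read_lock_nesting = 2 */
	/* ... read-side accesses ... */
	rcu_read_unlock();	/* just decrements the nesting count */
	rcu_read_unlock();	/* outermost: may call rcu_read_unlock_special() */
}
#endif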
450 
451 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
452 
453 /*
454  * Dump detailed information for all tasks blocking the current RCU
455  * grace period on the specified rcu_node structure.
456  */
457 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
458 {
459 	unsigned long flags;
460 	struct task_struct *t;
461 
462 	if (!rcu_preempt_blocked_readers_cgp(rnp))
463 		return;
464 	raw_spin_lock_irqsave(&rnp->lock, flags);
465 	t = list_entry(rnp->gp_tasks,
466 		       struct task_struct, rcu_node_entry);
467 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
468 		sched_show_task(t);
469 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
470 }
471 
472 /*
473  * Dump detailed information for all tasks blocking the current RCU
474  * grace period.
475  */
476 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
477 {
478 	struct rcu_node *rnp = rcu_get_root(rsp);
479 
480 	rcu_print_detail_task_stall_rnp(rnp);
481 	rcu_for_each_leaf_node(rsp, rnp)
482 		rcu_print_detail_task_stall_rnp(rnp);
483 }
484 
485 #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
486 
487 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
488 {
489 }
490 
491 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
492 
493 /*
494  * Scan the current list of tasks blocked within RCU read-side critical
495  * sections, printing out the tid of each.
496  */
497 static int rcu_print_task_stall(struct rcu_node *rnp)
498 {
499 	struct task_struct *t;
500 	int ndetected = 0;
501 
502 	if (!rcu_preempt_blocked_readers_cgp(rnp))
503 		return 0;
504 	t = list_entry(rnp->gp_tasks,
505 		       struct task_struct, rcu_node_entry);
506 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
507 		printk(" P%d", t->pid);
508 		ndetected++;
509 	}
510 	return ndetected;
511 }
512 
513 /*
514  * Suppress preemptible RCU's CPU stall warnings by pushing the
515  * time of the next stall-warning message comfortably far into the
516  * future.
517  */
518 static void rcu_preempt_stall_reset(void)
519 {
520 	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
521 }
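
/*
 * Sketch of the deadline test implied above (illustrative only, not
 * compiled; "example_deadline_passed" is hypothetical): pushing
 * jiffies_stall to jiffies + ULONG_MAX / 2 moves the deadline half the
 * counter space into the future, and deadline checks use wraparound-safe
 * comparisons such as ULONG_CMP_GE().
 */
#if 0
static bool example_deadline_passed(unsigned long deadline)
{
	return ULONG_CMP_GE(jiffies, deadline); /* true once deadline reached */
}
#endif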
522 
523 /*
524  * Check that the list of blocked tasks for the newly completed grace
525  * period is in fact empty.  It is a serious bug to complete a grace
526  * period that still has RCU readers blocked!  This function must be
527  * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
528  * must be held by the caller.
529  *
530  * Also, if there are blocked tasks on the list, they automatically
531  * block the newly created grace period, so set up ->gp_tasks accordingly.
532  */
533 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
534 {
535 	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
536 	if (!list_empty(&rnp->blkd_tasks))
537 		rnp->gp_tasks = rnp->blkd_tasks.next;
538 	WARN_ON_ONCE(rnp->qsmask);
539 }
540 
541 #ifdef CONFIG_HOTPLUG_CPU
542 
543 /*
544  * Handle tasklist migration for case in which all CPUs covered by the
545  * specified rcu_node have gone offline.  Move them up to the root
546  * rcu_node.  The reason for not just moving them to the immediate
547  * parent is to remove the need for rcu_read_unlock_special() to
548  * make more than two attempts to acquire the target rcu_node's lock.
549  * Returns non-zero if there were tasks blocking the current RCU grace
550  * period on the specified rcu_node structure, with the
551  * RCU_OFL_TASKS_NORM_GP and RCU_OFL_TASKS_EXP_GP bits indicating
552  * whether tasks were blocking the normal and/or expedited grace
553  * periods, respectively.
554  *
555  * The caller must hold rnp->lock with irqs disabled.
556  */
557 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
558 				     struct rcu_node *rnp,
559 				     struct rcu_data *rdp)
560 {
561 	struct list_head *lp;
562 	struct list_head *lp_root;
563 	int retval = 0;
564 	struct rcu_node *rnp_root = rcu_get_root(rsp);
565 	struct task_struct *t;
566 
567 	if (rnp == rnp_root) {
568 		WARN_ONCE(1, "Last CPU thought to be offlined?");
569 		return 0;  /* Shouldn't happen: at least one CPU online. */
570 	}
571 
572 	/* If we are on an internal node, complain bitterly. */
573 	WARN_ON_ONCE(rnp != rdp->mynode);
574 
575 	/*
576 	 * Move tasks up to root rcu_node.  Don't try to get fancy for
577 	 * this corner-case operation -- just put this node's tasks
578 	 * at the head of the root node's list, and update the root node's
579 	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
580 	 * if non-NULL.  This might result in waiting for more tasks than
581 	 * absolutely necessary, but this is a good performance/complexity
582 	 * tradeoff.
583 	 */
584 	if (rcu_preempt_blocked_readers_cgp(rnp))
585 		retval |= RCU_OFL_TASKS_NORM_GP;
586 	if (rcu_preempted_readers_exp(rnp))
587 		retval |= RCU_OFL_TASKS_EXP_GP;
588 	lp = &rnp->blkd_tasks;
589 	lp_root = &rnp_root->blkd_tasks;
590 	while (!list_empty(lp)) {
591 		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
592 		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
593 		list_del(&t->rcu_node_entry);
594 		t->rcu_blocked_node = rnp_root;
595 		list_add(&t->rcu_node_entry, lp_root);
596 		if (&t->rcu_node_entry == rnp->gp_tasks)
597 			rnp_root->gp_tasks = rnp->gp_tasks;
598 		if (&t->rcu_node_entry == rnp->exp_tasks)
599 			rnp_root->exp_tasks = rnp->exp_tasks;
600 #ifdef CONFIG_RCU_BOOST
601 		if (&t->rcu_node_entry == rnp->boost_tasks)
602 			rnp_root->boost_tasks = rnp->boost_tasks;
603 #endif /* #ifdef CONFIG_RCU_BOOST */
604 		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
605 	}
606 
607 #ifdef CONFIG_RCU_BOOST
608 	/* In case root is being boosted and leaf is not. */
609 	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
610 	if (rnp_root->boost_tasks != NULL &&
611 	    rnp_root->boost_tasks != rnp_root->gp_tasks)
612 		rnp_root->boost_tasks = rnp_root->gp_tasks;
613 	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
614 #endif /* #ifdef CONFIG_RCU_BOOST */
615 
616 	rnp->gp_tasks = NULL;
617 	rnp->exp_tasks = NULL;
618 	return retval;
619 }
620 
621 /*
622  * Do CPU-offline processing for preemptible RCU.
623  */
624 static void rcu_preempt_offline_cpu(int cpu)
625 {
626 	__rcu_offline_cpu(cpu, &rcu_preempt_state);
627 }
628 
629 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
630 
631 /*
632  * Check for a quiescent state from the current CPU.  When a task blocks,
633  * the task is recorded in the corresponding CPU's rcu_node structure,
634  * which is checked elsewhere.
635  *
636  * Caller must disable hard irqs.
637  */
638 static void rcu_preempt_check_callbacks(int cpu)
639 {
640 	struct task_struct *t = current;
641 
642 	if (t->rcu_read_lock_nesting == 0) {
643 		rcu_preempt_qs(cpu);
644 		return;
645 	}
646 	if (t->rcu_read_lock_nesting > 0 &&
647 	    per_cpu(rcu_preempt_data, cpu).qs_pending)
648 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
649 }
650 
651 /*
652  * Process callbacks for preemptible RCU.
653  */
654 static void rcu_preempt_process_callbacks(void)
655 {
656 	__rcu_process_callbacks(&rcu_preempt_state,
657 				&__get_cpu_var(rcu_preempt_data));
658 }
659 
660 #ifdef CONFIG_RCU_BOOST
661 
662 static void rcu_preempt_do_callbacks(void)
663 {
664 	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
665 }
666 
667 #endif /* #ifdef CONFIG_RCU_BOOST */
668 
669 /*
670  * Queue a preemptible-RCU callback for invocation after a grace period.
671  */
672 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
673 {
674 	__call_rcu(head, func, &rcu_preempt_state);
675 }
676 EXPORT_SYMBOL_GPL(call_rcu);
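
/*
 * Typical call_rcu() usage sketch (illustrative only, not compiled;
 * "struct foo", "example_free_rcu", and "example_retire" are hypothetical):
 * an rcu_head is embedded in the protected structure, and the callback
 * frees it once a grace period has elapsed.
 */
#if 0
struct foo {
	int a;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct foo, rcu));
}

static void example_retire(struct foo *p)
{
	call_rcu(&p->rcu, example_free_rcu);	/* freed after a grace period */
}
#endif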
677 
678 /**
679  * synchronize_rcu - wait until a grace period has elapsed.
680  *
681  * Control will return to the caller some time after a full grace
682  * period has elapsed, in other words after all currently executing RCU
683  * read-side critical sections have completed.  Note, however, that
684  * upon return from synchronize_rcu(), the caller might well be executing
685  * concurrently with new RCU read-side critical sections that began while
686  * synchronize_rcu() was waiting.  RCU read-side critical sections are
687  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
688  */
689 void synchronize_rcu(void)
690 {
691 	if (!rcu_scheduler_active)
692 		return;
693 	wait_rcu_gp(call_rcu);
694 }
695 EXPORT_SYMBOL_GPL(synchronize_rcu);
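
/*
 * Typical synchronous-updater sketch (illustrative only, not compiled;
 * "struct foo", "example_gp", "example_gp_lock", and "example_update" are
 * hypothetical): unlink the old version, wait for pre-existing readers
 * with synchronize_rcu(), then free it.
 */
#if 0
struct foo {
	int a;
};
static struct foo __rcu *example_gp;
static DEFINE_SPINLOCK(example_gp_lock);

static void example_update(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&example_gp_lock);
	oldp = rcu_dereference_protected(example_gp,
					 lockdep_is_held(&example_gp_lock));
	rcu_assign_pointer(example_gp, newp);	/* publish the new version */
	spin_unlock(&example_gp_lock);
	synchronize_rcu();	/* wait for pre-existing readers to finish */
	kfree(oldp);		/* no reader can still hold a reference */
}
#endif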
696 
697 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
698 static long sync_rcu_preempt_exp_count;
699 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
700 
701 /*
702  * Return non-zero if there are any tasks in RCU read-side critical
703  * sections blocking the current preemptible-RCU expedited grace period.
704  * If there is no preemptible-RCU expedited grace period currently in
705  * progress, returns zero unconditionally.
706  */
707 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
708 {
709 	return rnp->exp_tasks != NULL;
710 }
711 
712 /*
713  * Return non-zero if there is no RCU expedited grace period in progress
714  * for the specified rcu_node structure, in other words, if all CPUs and
715  * tasks covered by the specified rcu_node structure have done their bit
716  * for the current expedited grace period.  Works only for preemptible
717  * RCU -- other RCU implementations use other means.
718  *
719  * Caller must hold sync_rcu_preempt_exp_mutex.
720  */
721 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
722 {
723 	return !rcu_preempted_readers_exp(rnp) &&
724 	       ACCESS_ONCE(rnp->expmask) == 0;
725 }
726 
727 /*
728  * Report the exit from RCU read-side critical section for the last task
729  * that queued itself during or before the current expedited preemptible-RCU
730  * grace period.  This event is reported either to the rcu_node structure on
731  * which the task was queued or to one of that rcu_node structure's ancestors,
732  * recursively up the tree.  (Calm down, calm down, we do the recursion
733  * iteratively!)
734  *
735  * Most callers will set the "wake" flag, but the task initiating the
736  * expedited grace period need not wake itself.
737  *
738  * Caller must hold sync_rcu_preempt_exp_mutex.
739  */
740 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
741 			       bool wake)
742 {
743 	unsigned long flags;
744 	unsigned long mask;
745 
746 	raw_spin_lock_irqsave(&rnp->lock, flags);
747 	for (;;) {
748 		if (!sync_rcu_preempt_exp_done(rnp)) {
749 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
750 			break;
751 		}
752 		if (rnp->parent == NULL) {
753 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
754 			if (wake)
755 				wake_up(&sync_rcu_preempt_exp_wq);
756 			break;
757 		}
758 		mask = rnp->grpmask;
759 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
760 		rnp = rnp->parent;
761 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
762 		rnp->expmask &= ~mask;
763 	}
764 }
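
/*
 * Worked example of the propagation above: in a two-level tree, the leaf
 * rcu_node notices it is done, clears its ->grpmask bit in the root's
 * ->expmask, and rechecks the root; once the root's ->expmask is zero and
 * it has no ->exp_tasks, the task sleeping on sync_rcu_preempt_exp_wq is
 * woken (unless "wake" is false because that task is reporting for itself).
 */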
765 
766 /*
767  * Snapshot the tasks blocking the newly started preemptible-RCU expedited
768  * grace period for the specified rcu_node structure.  If there are no such
769  * tasks, report it up the rcu_node hierarchy.
770  *
771  * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
772  */
773 static void
774 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
775 {
776 	unsigned long flags;
777 	int must_wait = 0;
778 
779 	raw_spin_lock_irqsave(&rnp->lock, flags);
780 	if (list_empty(&rnp->blkd_tasks))
781 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
782 	else {
783 		rnp->exp_tasks = rnp->blkd_tasks.next;
784 		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
785 		must_wait = 1;
786 	}
787 	if (!must_wait)
788 		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
789 }
790 
791 /*
792  * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
793  * is to invoke synchronize_sched_expedited() to push all the tasks to
794  * the ->blkd_tasks lists and wait for those lists to drain.
795  */
796 void synchronize_rcu_expedited(void)
797 {
798 	unsigned long flags;
799 	struct rcu_node *rnp;
800 	struct rcu_state *rsp = &rcu_preempt_state;
801 	long snap;
802 	int trycount = 0;
803 
804 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
805 	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
806 	smp_mb(); /* Above access cannot bleed into critical section. */
807 
808 	/*
809 	 * Acquire lock, falling back to synchronize_rcu() if too many
810 	 * lock-acquisition failures.  Of course, if someone does the
811 	 * expedited grace period for us, just leave.
812 	 */
813 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
814 		if (trycount++ < 10)
815 			udelay(trycount * num_online_cpus());
816 		else {
817 			synchronize_rcu();
818 			return;
819 		}
820 		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
821 			goto mb_ret; /* Others did our work for us. */
822 	}
823 	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
824 		goto unlock_mb_ret; /* Others did our work for us. */
825 
826 	/* force all RCU readers onto ->blkd_tasks lists. */
827 	synchronize_sched_expedited();
828 
829 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
830 
831 	/* Initialize ->expmask for all non-leaf rcu_node structures. */
832 	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
833 		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
834 		rnp->expmask = rnp->qsmaskinit;
835 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
836 	}
837 
838 	/* Snapshot current state of ->blkd_tasks lists. */
839 	rcu_for_each_leaf_node(rsp, rnp)
840 		sync_rcu_preempt_exp_init(rsp, rnp);
841 	if (NUM_RCU_NODES > 1)
842 		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
843 
844 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
845 
846 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
847 	rnp = rcu_get_root(rsp);
848 	wait_event(sync_rcu_preempt_exp_wq,
849 		   sync_rcu_preempt_exp_done(rnp));
850 
851 	/* Clean up and exit. */
852 	smp_mb(); /* ensure expedited GP seen before counter increment. */
853 	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
854 unlock_mb_ret:
855 	mutex_unlock(&sync_rcu_preempt_exp_mutex);
856 mb_ret:
857 	smp_mb(); /* ensure subsequent action seen after grace period. */
858 }
859 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
860 
861 /*
862  * Check to see if there is any immediate preemptible-RCU-related work
863  * to be done.
864  */
865 static int rcu_preempt_pending(int cpu)
866 {
867 	return __rcu_pending(&rcu_preempt_state,
868 			     &per_cpu(rcu_preempt_data, cpu));
869 }
870 
871 /*
872  * Does preemptible RCU need the CPU to stay out of dynticks mode?
873  */
874 static int rcu_preempt_needs_cpu(int cpu)
875 {
876 	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
877 }
878 
879 /**
880  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
881  */
882 void rcu_barrier(void)
883 {
884 	_rcu_barrier(&rcu_preempt_state, call_rcu);
885 }
886 EXPORT_SYMBOL_GPL(rcu_barrier);
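
/*
 * Classic rcu_barrier() usage sketch (illustrative only, not compiled;
 * "example_module_exit" is hypothetical): a module that has posted
 * call_rcu() callbacks must wait for all of them to be invoked before its
 * text and data disappear.
 */
#if 0
static void __exit example_module_exit(void)
{
	/* ... stop posting new call_rcu() callbacks ... */
	rcu_barrier();	/* wait for all in-flight callbacks to complete */
	/* ... now safe to free module data and unload ... */
}
#endif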
887 
888 /*
889  * Initialize preemptible RCU's per-CPU data.
890  */
891 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
892 {
893 	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
894 }
895 
896 /*
897  * Move preemptible RCU's callbacks from dying CPU to other online CPU.
898  */
899 static void rcu_preempt_send_cbs_to_online(void)
900 {
901 	rcu_send_cbs_to_online(&rcu_preempt_state);
902 }
903 
904 /*
905  * Initialize preemptible RCU's state structures.
906  */
907 static void __init __rcu_init_preempt(void)
908 {
909 	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
910 }
911 
912 /*
913  * Check for a task exiting while in a preemptible-RCU read-side
914  * critical section, and clean up if so.  No need to issue warnings,
915  * as debug_check_no_locks_held() already does this if lockdep
916  * is enabled.
917  */
918 void exit_rcu(void)
919 {
920 	struct task_struct *t = current;
921 
922 	if (t->rcu_read_lock_nesting == 0)
923 		return;
924 	t->rcu_read_lock_nesting = 1;
925 	__rcu_read_unlock();
926 }
927 
928 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
929 
930 static struct rcu_state *rcu_state = &rcu_sched_state;
931 
932 /*
933  * Tell them what RCU they are running.
934  */
935 static void __init rcu_bootup_announce(void)
936 {
937 	printk(KERN_INFO "Hierarchical RCU implementation.\n");
938 	rcu_bootup_announce_oddness();
939 }
940 
941 /*
942  * Return the number of RCU batches processed thus far for debug & stats.
943  */
944 long rcu_batches_completed(void)
945 {
946 	return rcu_batches_completed_sched();
947 }
948 EXPORT_SYMBOL_GPL(rcu_batches_completed);
949 
950 /*
951  * Force a quiescent state for RCU, which, because there is no preemptible
952  * RCU, becomes the same as rcu-sched.
953  */
954 void rcu_force_quiescent_state(void)
955 {
956 	rcu_sched_force_quiescent_state();
957 }
958 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
959 
960 /*
961  * Because preemptible RCU does not exist, we never have to check for
962  * CPUs being in quiescent states.
963  */
964 static void rcu_preempt_note_context_switch(int cpu)
965 {
966 }
967 
968 /*
969  * Because preemptible RCU does not exist, there are never any preempted
970  * RCU readers.
971  */
972 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
973 {
974 	return 0;
975 }
976 
977 #ifdef CONFIG_HOTPLUG_CPU
978 
979 /* Because preemptible RCU does not exist, no quieting of tasks. */
980 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
981 {
982 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
983 }
984 
985 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
986 
987 /*
988  * Because preemptible RCU does not exist, we never have to check for
989  * tasks blocked within RCU read-side critical sections.
990  */
991 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
992 {
993 }
994 
995 /*
996  * Because preemptible RCU does not exist, we never have to check for
997  * tasks blocked within RCU read-side critical sections.
998  */
999 static int rcu_print_task_stall(struct rcu_node *rnp)
1000 {
1001 	return 0;
1002 }
1003 
1004 /*
1005  * Because preemptible RCU does not exist, there is no need to suppress
1006  * its CPU stall warnings.
1007  */
1008 static void rcu_preempt_stall_reset(void)
1009 {
1010 }
1011 
1012 /*
1013  * Because there is no preemptible RCU, there can be no readers blocked,
1014  * so there is no need to check for blocked tasks.  So check only for
1015  * bogus qsmask values.
1016  */
1017 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1018 {
1019 	WARN_ON_ONCE(rnp->qsmask);
1020 }
1021 
1022 #ifdef CONFIG_HOTPLUG_CPU
1023 
1024 /*
1025  * Because preemptible RCU does not exist, it never needs to migrate
1026  * tasks that were blocked within RCU read-side critical sections, and
1027  * such non-existent tasks cannot possibly have been blocking the current
1028  * grace period.
1029  */
1030 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1031 				     struct rcu_node *rnp,
1032 				     struct rcu_data *rdp)
1033 {
1034 	return 0;
1035 }
1036 
1037 /*
1038  * Because preemptible RCU does not exist, it never needs CPU-offline
1039  * processing.
1040  */
1041 static void rcu_preempt_offline_cpu(int cpu)
1042 {
1043 }
1044 
1045 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1046 
1047 /*
1048  * Because preemptible RCU does not exist, it never has any callbacks
1049  * to check.
1050  */
1051 static void rcu_preempt_check_callbacks(int cpu)
1052 {
1053 }
1054 
1055 /*
1056  * Because preemptible RCU does not exist, it never has any callbacks
1057  * to process.
1058  */
1059 static void rcu_preempt_process_callbacks(void)
1060 {
1061 }
1062 
1063 /*
1064  * Wait for an rcu-preempt grace period, but make it happen quickly.
1065  * But because preemptible RCU does not exist, map to rcu-sched.
1066  */
1067 void synchronize_rcu_expedited(void)
1068 {
1069 	synchronize_sched_expedited();
1070 }
1071 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1072 
1073 #ifdef CONFIG_HOTPLUG_CPU
1074 
1075 /*
1076  * Because preemptible RCU does not exist, there is never any need to
1077  * report on tasks preempted in RCU read-side critical sections during
1078  * expedited RCU grace periods.
1079  */
1080 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1081 			       bool wake)
1082 {
1083 }
1084 
1085 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1086 
1087 /*
1088  * Because preemptible RCU does not exist, it never has any work to do.
1089  */
1090 static int rcu_preempt_pending(int cpu)
1091 {
1092 	return 0;
1093 }
1094 
1095 /*
1096  * Because preemptible RCU does not exist, it never needs any CPU.
1097  */
1098 static int rcu_preempt_needs_cpu(int cpu)
1099 {
1100 	return 0;
1101 }
1102 
1103 /*
1104  * Because preemptible RCU does not exist, rcu_barrier() is just
1105  * another name for rcu_barrier_sched().
1106  */
1107 void rcu_barrier(void)
1108 {
1109 	rcu_barrier_sched();
1110 }
1111 EXPORT_SYMBOL_GPL(rcu_barrier);
1112 
1113 /*
1114  * Because preemptible RCU does not exist, there is no per-CPU
1115  * data to initialize.
1116  */
1117 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
1118 {
1119 }
1120 
1121 /*
1122  * Because there is no preemptible RCU, there are no callbacks to move.
1123  */
1124 static void rcu_preempt_send_cbs_to_online(void)
1125 {
1126 }
1127 
1128 /*
1129  * Because preemptible RCU does not exist, it need not be initialized.
1130  */
1131 static void __init __rcu_init_preempt(void)
1132 {
1133 }
1134 
1135 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1136 
1137 #ifdef CONFIG_RCU_BOOST
1138 
1139 #include "rtmutex_common.h"
1140 
1141 #ifdef CONFIG_RCU_TRACE
1142 
1143 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1144 {
1145 	if (list_empty(&rnp->blkd_tasks))
1146 		rnp->n_balk_blkd_tasks++;
1147 	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1148 		rnp->n_balk_exp_gp_tasks++;
1149 	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1150 		rnp->n_balk_boost_tasks++;
1151 	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1152 		rnp->n_balk_notblocked++;
1153 	else if (rnp->gp_tasks != NULL &&
1154 		 ULONG_CMP_LT(jiffies, rnp->boost_time))
1155 		rnp->n_balk_notyet++;
1156 	else
1157 		rnp->n_balk_nos++;
1158 }
1159 
1160 #else /* #ifdef CONFIG_RCU_TRACE */
1161 
1162 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1163 {
1164 }
1165 
1166 #endif /* #else #ifdef CONFIG_RCU_TRACE */
1167 
1168 /*
1169  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1170  * or ->boost_tasks, advancing the pointer to the next task in the
1171  * ->blkd_tasks list.
1172  *
1173  * Note that irqs must be enabled: boosting the task can block.
1174  * Returns 1 if there are more tasks needing to be boosted.
1175  */
1176 static int rcu_boost(struct rcu_node *rnp)
1177 {
1178 	unsigned long flags;
1179 	struct rt_mutex mtx;
1180 	struct task_struct *t;
1181 	struct list_head *tb;
1182 
1183 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1184 		return 0;  /* Nothing left to boost. */
1185 
1186 	raw_spin_lock_irqsave(&rnp->lock, flags);
1187 
1188 	/*
1189 	 * Recheck under the lock: all tasks in need of boosting
1190 	 * might exit their RCU read-side critical sections on their own.
1191 	 */
1192 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1193 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1194 		return 0;
1195 	}
1196 
1197 	/*
1198 	 * Preferentially boost tasks blocking expedited grace periods.
1199 	 * This cannot starve the normal grace periods because a second
1200 	 * expedited grace period must boost all blocked tasks, including
1201 	 * those blocking the pre-existing normal grace period.
1202 	 */
1203 	if (rnp->exp_tasks != NULL) {
1204 		tb = rnp->exp_tasks;
1205 		rnp->n_exp_boosts++;
1206 	} else {
1207 		tb = rnp->boost_tasks;
1208 		rnp->n_normal_boosts++;
1209 	}
1210 	rnp->n_tasks_boosted++;
1211 
1212 	/*
1213 	 * We boost task t by manufacturing an rt_mutex that appears to
1214 	 * be held by task t.  We leave a pointer to that rt_mutex where
1215 	 * task t can find it, and task t will release the mutex when it
1216 	 * exits its outermost RCU read-side critical section.  Then
1217 	 * simply acquiring this artificial rt_mutex will boost task
1218 	 * t's priority.  (Thanks to tglx for suggesting this approach!)
1219 	 *
1220 	 * Note that task t must acquire rnp->lock to remove itself from
1221 	 * the ->blkd_tasks list, which it will do from exit() if from
1222 	 * nowhere else.  We therefore are guaranteed that task t will
1223 	 * stay around at least until we drop rnp->lock.  Note that
1224 	 * rnp->lock also resolves races between our priority boosting
1225 	 * and task t's exiting its outermost RCU read-side critical
1226 	 * section.
1227 	 */
1228 	t = container_of(tb, struct task_struct, rcu_node_entry);
1229 	rt_mutex_init_proxy_locked(&mtx, t);
1230 	t->rcu_boost_mutex = &mtx;
1231 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1232 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
1233 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
1234 
1235 	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1236 	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
1237 }
1238 
1239 /*
1240  * Timer handler to initiate waking up of boost kthreads that
1241  * have yielded the CPU due to excessive numbers of tasks to
1242  * boost.  We wake up the per-rcu_node kthread, which in turn
1243  * will wake up the booster kthread.
1244  */
1245 static void rcu_boost_kthread_timer(unsigned long arg)
1246 {
1247 	invoke_rcu_node_kthread((struct rcu_node *)arg);
1248 }
1249 
1250 /*
1251  * Priority-boosting kthread.  One per leaf rcu_node and one for the
1252  * root rcu_node.
1253  */
1254 static int rcu_boost_kthread(void *arg)
1255 {
1256 	struct rcu_node *rnp = (struct rcu_node *)arg;
1257 	int spincnt = 0;
1258 	int more2boost;
1259 
1260 	trace_rcu_utilization("Start boost kthread@init");
1261 	for (;;) {
1262 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1263 		trace_rcu_utilization("End boost kthread@rcu_wait");
1264 		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1265 		trace_rcu_utilization("Start boost kthread@rcu_wait");
1266 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1267 		more2boost = rcu_boost(rnp);
1268 		if (more2boost)
1269 			spincnt++;
1270 		else
1271 			spincnt = 0;
1272 		if (spincnt > 10) {
1273 			trace_rcu_utilization("End boost kthread@rcu_yield");
1274 			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
1275 			trace_rcu_utilization("Start boost kthread@rcu_yield");
1276 			spincnt = 0;
1277 		}
1278 	}
1279 	/* NOTREACHED */
1280 	trace_rcu_utilization("End boost kthread@notreached");
1281 	return 0;
1282 }
1283 
1284 /*
1285  * Check to see if it is time to start boosting RCU readers that are
1286  * blocking the current grace period, and, if so, tell the per-rcu_node
1287  * kthread to start boosting them.  If there is an expedited grace
1288  * period in progress, it is always time to boost.
1289  *
1290  * The caller must hold rnp->lock, which this function releases,
1291  * but irqs remain disabled.  The ->boost_kthread_task is immortal,
1292  * so we don't need to worry about it going away.
1293  */
1294 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1295 {
1296 	struct task_struct *t;
1297 
1298 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1299 		rnp->n_balk_exp_gp_tasks++;
1300 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1301 		return;
1302 	}
1303 	if (rnp->exp_tasks != NULL ||
1304 	    (rnp->gp_tasks != NULL &&
1305 	     rnp->boost_tasks == NULL &&
1306 	     rnp->qsmask == 0 &&
1307 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1308 		if (rnp->exp_tasks == NULL)
1309 			rnp->boost_tasks = rnp->gp_tasks;
1310 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1311 		t = rnp->boost_kthread_task;
1312 		if (t != NULL)
1313 			wake_up_process(t);
1314 	} else {
1315 		rcu_initiate_boost_trace(rnp);
1316 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1317 	}
1318 }
1319 
1320 /*
1321  * Wake up the per-CPU kthread to invoke RCU callbacks.
1322  */
1323 static void invoke_rcu_callbacks_kthread(void)
1324 {
1325 	unsigned long flags;
1326 
1327 	local_irq_save(flags);
1328 	__this_cpu_write(rcu_cpu_has_work, 1);
1329 	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1330 	    current != __this_cpu_read(rcu_cpu_kthread_task))
1331 		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1332 	local_irq_restore(flags);
1333 }
1334 
1335 /*
1336  * Is the current CPU running the RCU-callbacks kthread?
1337  * Caller must have preemption disabled.
1338  */
1339 static bool rcu_is_callbacks_kthread(void)
1340 {
1341 	return __get_cpu_var(rcu_cpu_kthread_task) == current;
1342 }
1343 
1344 /*
1345  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
1346  * held, so no one should be messing with the existence of the boost
1347  * kthread.
1348  */
1349 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
1350 					  cpumask_var_t cm)
1351 {
1352 	struct task_struct *t;
1353 
1354 	t = rnp->boost_kthread_task;
1355 	if (t != NULL)
1356 		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
1357 }
1358 
1359 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
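
/*
 * Worked example of the conversion above: with CONFIG_RCU_BOOST_DELAY=500
 * and HZ=250, RCU_BOOST_DELAY_JIFFIES is DIV_ROUND_UP(500 * 250, 1000)
 * = 125 jiffies, i.e. 500 milliseconds.
 */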
1360 
1361 /*
1362  * Do priority-boost accounting for the start of a new grace period.
1363  */
1364 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1365 {
1366 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1367 }
1368 
1369 /*
1370  * Create an RCU-boost kthread for the specified node if one does not
1371  * already exist.  We only create this kthread for preemptible RCU.
1372  * Returns zero if all is well, a negated errno otherwise.
1373  */
1374 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1375 						 struct rcu_node *rnp,
1376 						 int rnp_index)
1377 {
1378 	unsigned long flags;
1379 	struct sched_param sp;
1380 	struct task_struct *t;
1381 
1382 	if (&rcu_preempt_state != rsp)
1383 		return 0;
1384 	rsp->boost = 1;
1385 	if (rnp->boost_kthread_task != NULL)
1386 		return 0;
1387 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1388 			   "rcub/%d", rnp_index);
1389 	if (IS_ERR(t))
1390 		return PTR_ERR(t);
1391 	raw_spin_lock_irqsave(&rnp->lock, flags);
1392 	rnp->boost_kthread_task = t;
1393 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1394 	sp.sched_priority = RCU_BOOST_PRIO;
1395 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1396 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1397 	return 0;
1398 }
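
/*
 * Sketch of the create/set-priority/wake sequence used above, in isolation
 * (illustrative only, not compiled; "example_spawn_fifo_kthread" is
 * hypothetical):
 */
#if 0
static struct task_struct *example_spawn_fifo_kthread(int (*fn)(void *),
						      void *arg, int prio,
						      const char *name)
{
	struct sched_param sp = { .sched_priority = prio };
	struct task_struct *t = kthread_create(fn, arg, "%s", name);

	if (IS_ERR(t))
		return t;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t);	/* reach the TASK_INTERRUPTIBLE wait loop quickly */
	return t;
}
#endif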
1399 
1400 #ifdef CONFIG_HOTPLUG_CPU
1401 
1402 /*
1403  * Stop RCU's per-CPU kthread when its CPU goes offline.
1404  */
1405 static void rcu_stop_cpu_kthread(int cpu)
1406 {
1407 	struct task_struct *t;
1408 
1409 	/* Stop the CPU's kthread. */
1410 	t = per_cpu(rcu_cpu_kthread_task, cpu);
1411 	if (t != NULL) {
1412 		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1413 		kthread_stop(t);
1414 	}
1415 }
1416 
1417 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1418 
1419 static void rcu_kthread_do_work(void)
1420 {
1421 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1422 	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1423 	rcu_preempt_do_callbacks();
1424 }
1425 
1426 /*
1427  * Wake up the specified per-rcu_node-structure kthread.
1428  * Because the per-rcu_node kthreads are immortal, we don't need
1429  * to do anything to keep them alive.
1430  */
1431 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1432 {
1433 	struct task_struct *t;
1434 
1435 	t = rnp->node_kthread_task;
1436 	if (t != NULL)
1437 		wake_up_process(t);
1438 }
1439 
1440 /*
1441  * Set the specified CPU's kthread to run RT or not, as specified by
1442  * the to_rt argument.  The CPU-hotplug locks are held, so the task
1443  * is not going away.
1444  */
1445 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1446 {
1447 	int policy;
1448 	struct sched_param sp;
1449 	struct task_struct *t;
1450 
1451 	t = per_cpu(rcu_cpu_kthread_task, cpu);
1452 	if (t == NULL)
1453 		return;
1454 	if (to_rt) {
1455 		policy = SCHED_FIFO;
1456 		sp.sched_priority = RCU_KTHREAD_PRIO;
1457 	} else {
1458 		policy = SCHED_NORMAL;
1459 		sp.sched_priority = 0;
1460 	}
1461 	sched_setscheduler_nocheck(t, policy, &sp);
1462 }
1463 
1464 /*
1465  * Timer handler to initiate the waking up of per-CPU kthreads that
1466  * have yielded the CPU due to excess numbers of RCU callbacks.
1467  * We wake up the per-rcu_node kthread, which in turn will wake up
1468  * the booster kthread.
1469  */
1470 static void rcu_cpu_kthread_timer(unsigned long arg)
1471 {
1472 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1473 	struct rcu_node *rnp = rdp->mynode;
1474 
1475 	atomic_or(rdp->grpmask, &rnp->wakemask);
1476 	invoke_rcu_node_kthread(rnp);
1477 }
1478 
1479 /*
1480  * Drop to non-real-time priority and yield, but only after posting a
1481  * timer that will cause us to regain our real-time priority if we
1482  * remain preempted.  Either way, we restore our real-time priority
1483  * before returning.
1484  */
1485 static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1486 {
1487 	struct sched_param sp;
1488 	struct timer_list yield_timer;
1489 	int prio = current->rt_priority;
1490 
1491 	setup_timer_on_stack(&yield_timer, f, arg);
1492 	mod_timer(&yield_timer, jiffies + 2);
1493 	sp.sched_priority = 0;
1494 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1495 	set_user_nice(current, 19);
1496 	schedule();
1497 	set_user_nice(current, 0);
1498 	sp.sched_priority = prio;
1499 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1500 	del_timer(&yield_timer);
1501 }
1502 
1503 /*
1504  * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1505  * This can happen while the corresponding CPU is either coming online
1506  * or going offline.  We cannot wait until the CPU is fully online
1507  * before starting the kthread, because the various notifier functions
1508  * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
1509  * the corresponding CPU is online.
1510  *
1511  * Return 1 if the kthread needs to stop, 0 otherwise.
1512  *
1513  * Caller must disable bh.  This function can momentarily enable it.
1514  */
1515 static int rcu_cpu_kthread_should_stop(int cpu)
1516 {
1517 	while (cpu_is_offline(cpu) ||
1518 	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1519 	       smp_processor_id() != cpu) {
1520 		if (kthread_should_stop())
1521 			return 1;
1522 		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1523 		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1524 		local_bh_enable();
1525 		schedule_timeout_uninterruptible(1);
1526 		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1527 			set_cpus_allowed_ptr(current, cpumask_of(cpu));
1528 		local_bh_disable();
1529 	}
1530 	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1531 	return 0;
1532 }
1533 
1534 /*
1535  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1536  * RCU softirq used in flavors and configurations of RCU that do not
1537  * support RCU priority boosting.
1538  */
1539 static int rcu_cpu_kthread(void *arg)
1540 {
1541 	int cpu = (int)(long)arg;
1542 	unsigned long flags;
1543 	int spincnt = 0;
1544 	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1545 	char work;
1546 	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1547 
1548 	trace_rcu_utilization("Start CPU kthread@init");
1549 	for (;;) {
1550 		*statusp = RCU_KTHREAD_WAITING;
1551 		trace_rcu_utilization("End CPU kthread@rcu_wait");
1552 		rcu_wait(*workp != 0 || kthread_should_stop());
1553 		trace_rcu_utilization("Start CPU kthread@rcu_wait");
1554 		local_bh_disable();
1555 		if (rcu_cpu_kthread_should_stop(cpu)) {
1556 			local_bh_enable();
1557 			break;
1558 		}
1559 		*statusp = RCU_KTHREAD_RUNNING;
1560 		per_cpu(rcu_cpu_kthread_loops, cpu)++;
1561 		local_irq_save(flags);
1562 		work = *workp;
1563 		*workp = 0;
1564 		local_irq_restore(flags);
1565 		if (work)
1566 			rcu_kthread_do_work();
1567 		local_bh_enable();
1568 		if (*workp != 0)
1569 			spincnt++;
1570 		else
1571 			spincnt = 0;
1572 		if (spincnt > 10) {
1573 			*statusp = RCU_KTHREAD_YIELDING;
1574 			trace_rcu_utilization("End CPU kthread@rcu_yield");
1575 			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1576 			trace_rcu_utilization("Start CPU kthread@rcu_yield");
1577 			spincnt = 0;
1578 		}
1579 	}
1580 	*statusp = RCU_KTHREAD_STOPPED;
1581 	trace_rcu_utilization("End CPU kthread@term");
1582 	return 0;
1583 }

/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}
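
/*
 * A CPU asks its rcu_node kthread for service by setting its bit in
 * rnp->wakemask and waking the kthread, much as rcu_cpu_kthread_timer()
 * does.  A minimal sketch, assuming the usual rdp->grpmask bit for the
 * CPU (the function name below is hypothetical and details may differ):
 */
#if 0	/* Illustrative sketch only, not part of the build. */
static void example_wake_rcu_node_kthread(struct rcu_node *rnp,
					  struct rcu_data *rdp)
{
	atomic_or(rdp->grpmask, &rnp->wakemask);
	if (rnp->node_kthread_task != NULL)
		wake_up_process(rnp->node_kthread_task);
}
#endif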

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; callers pass -1
 * when there is no outgoing CPU.  If there are no CPUs left in the
 * affinity set, this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
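
/*
 * Worked example (values assumed purely for illustration): if rnp
 * covers CPUs 0-5 with qsmaskinit == 0x2d (binary 101101), the loop
 * above sets CPUs 0, 2, 3, and 5 in cm; with outgoingcpu == 3, CPU 3
 * is skipped, leaving the node's kthreads affined to CPUs 0, 2, and 5.
 */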

/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifndef CONFIG_SMP

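/*
 * On a !SMP build there is only one CPU, so any context switch on it
 * is a full RCU-sched grace period; expediting therefore reduces to
 * scheduling once.
 */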
void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}

/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier.  Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the resulting
 * value, then attempts to stop all the CPUs.  If this succeeds, then
 * each CPU will have executed a context switch, resulting in an
 * RCU-sched grace period.  We are then done, so we use atomic_cmpxchg()
 * to update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done.  If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot.  In this case, our work is
 * done for us, and we can simply return.  Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
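 *
 * Worked example of the ticket arithmetic (values assumed purely for
 * illustration): task A increments sync_sched_expedited_started from 0
 * to 1 (snap = 1), then task B increments it to 2 (snap = 2).  If B's
 * try_stop_cpus() succeeds first, B advances sync_sched_expedited_done
 * to 2.  When A retries and sees that sync_sched_expedited_done has
 * reached 2 >= its firstsnap of 1, A knows that B's grace period also
 * covers A's pre-existing readers, so A returns without stopping the
 * CPUs again.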
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We retry
		 * after they started, so our grace period works for them,
		 * and they started after our first try, so their grace
		 * period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started);
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
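
/*
 * Typical use, as a minimal sketch: an updater unlinks an element,
 * forces an expedited RCU-sched grace period, then frees it.  The
 * names gp, gp_lock, new, and struct foo are assumptions made purely
 * for illustration.
 */
#if 0	/* Illustrative sketch only, not part of the build. */
	struct foo *old;

	spin_lock(&gp_lock);
	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, new);
	spin_unlock(&gp_lock);
	synchronize_sched_expedited();	/* Wait for pre-existing readers. */
	kfree(old);
#endif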

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following three preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	RCU_IDLE_FLUSHES - RCU_IDLE_OPT_FLUSHES passes through the state
 *	machine are mandatory: we need to give the state machine a chance
 *	to communicate a quiescent state to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
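
/*
 * With the values above, rcu_dyntick_drain starts at 5; the first
 * 5 - 3 = 2 passes are mandatory, and once the counter is at 3 or
 * below, a pass may instead drop into dyntick-idle mode provided
 * rcu_pending() reports that the RCU core needs nothing immediately.
 */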

static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}
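
/*
 * For example (illustrative): a CPU that hit the RCU_IDLE_FLUSHES limit
 * during the current jiffy has rcu_dyntick_holdoff == jiffies, so
 * rcu_needs_cpu() returns 1 and the scheduling-clock tick stays on; one
 * jiffy later the comparison fails and dyntick-idle entry may be
 * attempted again.
 */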

/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}
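
/*
 * For example, at HZ == 1000, RCU_IDLE_GP_DELAY of 6 jiffies converts
 * to 6000 microseconds, so rcu_idle_gp_wait above becomes a
 * 6-millisecond ktime value (arithmetic assumed for illustration).
 */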

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then use
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		local_irq_restore(flags);
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu)) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
			      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		local_irq_restore(flags);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		local_irq_restore(flags);
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
		local_irq_save(flags);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		local_irq_save(flags);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		local_irq_save(flags);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		local_irq_restore(flags);
		trace_rcu_prep_idle("Callbacks drained");
	}
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */