// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}
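
/*
 * Illustrative sketch (not part of this file): the CPU-hotplug core is the
 * expected consumer of the two helpers above.  Bringing a CPU online looks
 * roughly like this, where __cpu_up() is the arch-specific bringup hook:
 *
 *	struct task_struct *idle = idle_thread_get(cpu);
 *
 *	if (IS_ERR(idle))
 *		return PTR_ERR(idle);	// no cached idle task was forked
 *	ret = __cpu_up(cpu, idle);	// arch code starts the CPU on "idle"
 *
 * The boot CPU never goes through idle_init() below, so early boot code
 * calls idle_thread_set_boot_cpu() to record the already-running idle task.
 */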

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop via kthread_should_stop().
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
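
/*
 * Illustrative sketch (not part of this file): as the loop above shows, a
 * client's ->thread_should_run() is called with preemption disabled and
 * should only check per-CPU state, while ->thread_fn() runs with preemption
 * enabled and does the actual work.  A minimal hypothetical callback pair,
 * assuming a per-CPU work flag named foo_work_pending:
 *
 *	static DEFINE_PER_CPU(bool, foo_work_pending);
 *
 *	static int foo_should_run(unsigned int cpu)
 *	{
 *		return this_cpu_read(foo_work_pending);
 *	}
 *
 *	static void foo_run(unsigned int cpu)
 *	{
 *		this_cpu_write(foo_work_pending, false);
 *		// ... process the pending per-CPU work here ...
 *	}
 */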

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	/*
	 * Park the thread so that it can start right on the CPU when the
	 * CPU becomes available.
	 */
	kthread_park(tsk);
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy the parked threads of offline cpus as well */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
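
/*
 * Illustrative sketch (not part of this file): a typical client provides a
 * per-CPU task pointer for ->store, fills in a descriptor and registers it
 * once; this code then creates, parks and unparks the threads as CPUs come
 * and go.  Hypothetical foo_* names, building on the callbacks sketched
 * after smpboot_thread_fn() above:
 *
 *	static DEFINE_PER_CPU(struct task_struct *, foo_thread);
 *
 *	static struct smp_hotplug_thread foo_threads = {
 *		.store			= &foo_thread,
 *		.thread_should_run	= foo_should_run,
 *		.thread_fn		= foo_run,
 *		.thread_comm		= "foo/%u",
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return smpboot_register_percpu_thread(&foo_threads);
 *	}
 *	early_initcall(foo_init);
 */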

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll the specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success.  Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out.  Return -EAGAIN if cpu_wait_death()
 * timed out and the CPU still hasn't gotten around to dying.  In
 * the latter two cases, the CPU might not be set up properly, but
 * it is up to the arch-specific code to decide.  Finally, -EIO
 * indicates an unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.
		 * The outgoing CPU completed its processing, but only
		 * after cpu_wait_death() timed out and reported the
		 * error. The caller is free to proceed, in which case
		 * the state will be reset properly by cpu_set_state_online().
		 * Proceeding despite this -EBUSY return makes sense
		 * for systems where the outgoing CPUs take themselves
		 * offline, with no post-death manipulation required from
		 * a surviving CPU.
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing.  This could happen on
		 * a virtualized system if the outgoing VCPU gets preempted
		 * for more than five seconds, and the user attempts to
		 * immediately online that same CPU.  Trying again later
		 * might return -EBUSY above, hence -EAGAIN.
		 */
		return -EAGAIN;

	default:

		/* Should not happen.  Famous last words. */
		return -EIO;
	}
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}
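
/*
 * Illustrative sketch (not part of this file): architectures that opt in to
 * this error checking bracket their bringup path with the two calls above.
 * Roughly, with a hypothetical arch_boot_secondary() kick:
 *
 *	// On the surviving CPU, before (re)starting the target CPU:
 *	ret = cpu_check_up_prepare(cpu);
 *	if (ret && ret != -EBUSY)
 *		return ret;		// e.g. -EAGAIN: CPU never finished dying
 *	arch_boot_secondary(cpu);
 *
 *	// On the incoming CPU, early in its secondary-start code:
 *	cpu_set_state_online(smp_processor_id());
 *
 * As noted above, both calls may be omitted entirely on architectures that
 * do no CPU-hotplug error checking.
 */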

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
 * timed out.  This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}
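
/*
 * Illustrative sketch (not part of this file): the two halves above pair up
 * across CPUs during a hot-remove.  Roughly, with hypothetical arch hooks:
 *
 *	// On the dying CPU, as the last step of its arch cpu-die path:
 *	(void)cpu_report_death();
 *	arch_halt_this_cpu();		// hypothetical: never returns
 *
 *	// On a surviving CPU, in the arch __cpu_die() implementation:
 *	if (!cpu_wait_death(cpu, 5))	// wait up to roughly five seconds
 *		pr_err("CPU %u did not die\n", cpu);
 *
 * A false return from cpu_wait_death() marks the state CPU_BROKEN; if the
 * CPU later finishes dying anyway, cpu_report_death() turns that into
 * CPU_DEAD_FROZEN, matching the -EAGAIN/-EBUSY cases that
 * cpu_check_up_prepare() handles on the next bringup attempt.
 */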

#endif /* #ifdef CONFIG_HOTPLUG_CPU */