1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
238498a67SThomas Gleixner /*
338498a67SThomas Gleixner * Common SMP CPU bringup/teardown functions
438498a67SThomas Gleixner */
5f97f8f06SThomas Gleixner #include <linux/cpu.h>
629d5e047SThomas Gleixner #include <linux/err.h>
729d5e047SThomas Gleixner #include <linux/smp.h>
88038dad7SPaul E. McKenney #include <linux/delay.h>
938498a67SThomas Gleixner #include <linux/init.h>
10f97f8f06SThomas Gleixner #include <linux/list.h>
11f97f8f06SThomas Gleixner #include <linux/slab.h>
1229d5e047SThomas Gleixner #include <linux/sched.h>
1329930025SIngo Molnar #include <linux/sched/task.h>
14f97f8f06SThomas Gleixner #include <linux/export.h>
1529d5e047SThomas Gleixner #include <linux/percpu.h>
16f97f8f06SThomas Gleixner #include <linux/kthread.h>
17f97f8f06SThomas Gleixner #include <linux/smpboot.h>
1838498a67SThomas Gleixner
1938498a67SThomas Gleixner #include "smpboot.h"
2038498a67SThomas Gleixner
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * Per-CPU cache of idle task structs. For the hotplug case we keep the
 * task structs around and reuse them when the CPU comes back online,
 * instead of forking a fresh idle task on every offline/online cycle.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);
idle_thread_get(unsigned int cpu)2829d5e047SThomas Gleixner struct task_struct *idle_thread_get(unsigned int cpu)
2929d5e047SThomas Gleixner {
300db0628dSPaul Gortmaker struct task_struct *tsk = per_cpu(idle_threads, cpu);
3129d5e047SThomas Gleixner
3229d5e047SThomas Gleixner if (!tsk)
3329d5e047SThomas Gleixner return ERR_PTR(-ENOMEM);
3429d5e047SThomas Gleixner return tsk;
353bb5d2eeSSuresh Siddha }
3629d5e047SThomas Gleixner
idle_thread_set_boot_cpu(void)3729d5e047SThomas Gleixner void __init idle_thread_set_boot_cpu(void)
3829d5e047SThomas Gleixner {
3929d5e047SThomas Gleixner per_cpu(idle_threads, smp_processor_id()) = current;
4029d5e047SThomas Gleixner }
4129d5e047SThomas Gleixner
4229d5e047SThomas Gleixner /**
4329d5e047SThomas Gleixner * idle_init - Initialize the idle thread for a cpu
444a70d2d9SSrivatsa S. Bhat * @cpu: The cpu for which the idle thread should be initialized
454a70d2d9SSrivatsa S. Bhat *
464a70d2d9SSrivatsa S. Bhat * Creates the thread if it does not exist.
474a70d2d9SSrivatsa S. Bhat */
idle_init(unsigned int cpu)484a70d2d9SSrivatsa S. Bhat static __always_inline void idle_init(unsigned int cpu)
494a70d2d9SSrivatsa S. Bhat {
50a1833a54SLinus Torvalds struct task_struct *tsk = per_cpu(idle_threads, cpu);
513bb5d2eeSSuresh Siddha
523bb5d2eeSSuresh Siddha if (!tsk) {
533bb5d2eeSSuresh Siddha tsk = fork_idle(cpu);
543bb5d2eeSSuresh Siddha if (IS_ERR(tsk))
553bb5d2eeSSuresh Siddha pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
563bb5d2eeSSuresh Siddha else
573bb5d2eeSSuresh Siddha per_cpu(idle_threads, cpu) = tsk;
583bb5d2eeSSuresh Siddha }
593bb5d2eeSSuresh Siddha }
603bb5d2eeSSuresh Siddha
613bb5d2eeSSuresh Siddha /**
623bb5d2eeSSuresh Siddha * idle_threads_init - Initialize idle threads for all cpus
6329d5e047SThomas Gleixner */
idle_threads_init(void)644a70d2d9SSrivatsa S. Bhat void __init idle_threads_init(void)
6529d5e047SThomas Gleixner {
663bb5d2eeSSuresh Siddha unsigned int cpu, boot_cpu;
6729d5e047SThomas Gleixner
68ee74d132SSrivatsa S. Bhat boot_cpu = smp_processor_id();
69ee74d132SSrivatsa S. Bhat
70ee74d132SSrivatsa S. Bhat for_each_possible_cpu(cpu) {
7129d5e047SThomas Gleixner if (cpu != boot_cpu)
723bb5d2eeSSuresh Siddha idle_init(cpu);
73ee74d132SSrivatsa S. Bhat }
743bb5d2eeSSuresh Siddha }
7529d5e047SThomas Gleixner #endif
7629d5e047SThomas Gleixner
/* All registered per-CPU hotplug thread descriptors. */
static LIST_HEAD(hotplug_threads);
/* Serializes access to hotplug_threads and per-CPU thread setup/teardown. */
static DEFINE_MUTEX(smpboot_threads_lock);

/* Per-thread instance data handed to smpboot_thread_fn() as its arg. */
struct smpboot_thread_data {
	unsigned int cpu;		/* CPU this thread instance serves */
	unsigned int status;		/* HP_THREAD_* lifecycle state */
	struct smp_hotplug_thread *ht;	/* registered descriptor */
};

/* Lifecycle states tracked in smpboot_thread_data::status. */
enum {
	HP_THREAD_NONE = 0,		/* ht->setup() not yet invoked */
	HP_THREAD_ACTIVE,		/* running normally */
	HP_THREAD_PARKED,		/* parked after ht->park() */
};
91f97f8f06SThomas Gleixner
/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data: thread data pointer (struct smpboot_thread_data, freed on stop)
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop; otherwise it loops
 * forever servicing ht->thread_fn().
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		/*
		 * Mark ourselves sleeping *before* checking the stop/park
		 * conditions so a concurrent wakeup cannot be lost between
		 * the check and schedule() below.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			/* td was allocated in __smpboot_create_thread() */
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* Only invoke park() once per active period */
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		/* Must still be bound to our CPU when doing real work */
		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			/* First pass: run the one-time setup callback */
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			/* Coming back from park: run the unpark callback */
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			/* Nothing to do: sleep until the next wakeup */
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
164f97f8f06SThomas Gleixner
/*
 * __smpboot_create_thread - Create and park the hotplug thread for @cpu.
 *
 * Returns 0 on success (including when the thread already exists),
 * -ENOMEM on allocation failure, or the kthread_create_on_cpu() error.
 * Caller must hold smpboot_threads_lock (all callers in this file do).
 */
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	/* Thread kept around from a previous hotplug cycle - reuse it */
	if (tsk)
		return 0;

	/* Allocate the control block on the target CPU's memory node */
	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	kthread_set_per_cpu(tsk, cpu);
	/*
	 * Park the thread so that it could start right on the CPU
	 * when it is available.
	 */
	kthread_park(tsk);
	/* Hold a task reference; dropped via kthread_stop_put() on destroy */
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}
2087d7e499fSThomas Gleixner
smpboot_create_threads(unsigned int cpu)209f2530dc7SThomas Gleixner int smpboot_create_threads(unsigned int cpu)
210f97f8f06SThomas Gleixner {
211f97f8f06SThomas Gleixner struct smp_hotplug_thread *cur;
212f97f8f06SThomas Gleixner int ret = 0;
213f97f8f06SThomas Gleixner
214f97f8f06SThomas Gleixner mutex_lock(&smpboot_threads_lock);
215f97f8f06SThomas Gleixner list_for_each_entry(cur, &hotplug_threads, list) {
216f97f8f06SThomas Gleixner ret = __smpboot_create_thread(cur, cpu);
217f97f8f06SThomas Gleixner if (ret)
218f97f8f06SThomas Gleixner break;
219f97f8f06SThomas Gleixner }
220f97f8f06SThomas Gleixner mutex_unlock(&smpboot_threads_lock);
221f97f8f06SThomas Gleixner return ret;
222f97f8f06SThomas Gleixner }
223f97f8f06SThomas Gleixner
smpboot_unpark_thread(struct smp_hotplug_thread * ht,unsigned int cpu)224f97f8f06SThomas Gleixner static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
225f97f8f06SThomas Gleixner {
226f97f8f06SThomas Gleixner struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
227f97f8f06SThomas Gleixner
228f97f8f06SThomas Gleixner if (!ht->selfparking)
229f97f8f06SThomas Gleixner kthread_unpark(tsk);
230f97f8f06SThomas Gleixner }
231f97f8f06SThomas Gleixner
smpboot_unpark_threads(unsigned int cpu)232c00166d8SOleg Nesterov int smpboot_unpark_threads(unsigned int cpu)
233f97f8f06SThomas Gleixner {
234f97f8f06SThomas Gleixner struct smp_hotplug_thread *cur;
235f97f8f06SThomas Gleixner
236931ef163SThomas Gleixner mutex_lock(&smpboot_threads_lock);
237f97f8f06SThomas Gleixner list_for_each_entry(cur, &hotplug_threads, list)
238f97f8f06SThomas Gleixner smpboot_unpark_thread(cur, cpu);
239f97f8f06SThomas Gleixner mutex_unlock(&smpboot_threads_lock);
240f97f8f06SThomas Gleixner return 0;
241f97f8f06SThomas Gleixner }
242f97f8f06SThomas Gleixner
smpboot_park_thread(struct smp_hotplug_thread * ht,unsigned int cpu)243f97f8f06SThomas Gleixner static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
244931ef163SThomas Gleixner {
245f97f8f06SThomas Gleixner struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
246f97f8f06SThomas Gleixner
247f97f8f06SThomas Gleixner if (tsk && !ht->selfparking)
248f97f8f06SThomas Gleixner kthread_park(tsk);
249f97f8f06SThomas Gleixner }
250f97f8f06SThomas Gleixner
smpboot_park_threads(unsigned int cpu)2517d7e499fSThomas Gleixner int smpboot_park_threads(unsigned int cpu)
252f97f8f06SThomas Gleixner {
253f97f8f06SThomas Gleixner struct smp_hotplug_thread *cur;
254f97f8f06SThomas Gleixner
255931ef163SThomas Gleixner mutex_lock(&smpboot_threads_lock);
256f97f8f06SThomas Gleixner list_for_each_entry_reverse(cur, &hotplug_threads, list)
257f97f8f06SThomas Gleixner smpboot_park_thread(cur, cpu);
258f97f8f06SThomas Gleixner mutex_unlock(&smpboot_threads_lock);
259f97f8f06SThomas Gleixner return 0;
260f97f8f06SThomas Gleixner }
261f97f8f06SThomas Gleixner
smpboot_destroy_threads(struct smp_hotplug_thread * ht)262f97f8f06SThomas Gleixner static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
263931ef163SThomas Gleixner {
264f97f8f06SThomas Gleixner unsigned int cpu;
265f97f8f06SThomas Gleixner
266f97f8f06SThomas Gleixner /* We need to destroy also the parked threads of offline cpus */
267f97f8f06SThomas Gleixner for_each_possible_cpu(cpu) {
268f97f8f06SThomas Gleixner struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
269f97f8f06SThomas Gleixner
270f97f8f06SThomas Gleixner if (tsk) {
271f97f8f06SThomas Gleixner kthread_stop_put(tsk);
272f97f8f06SThomas Gleixner *per_cpu_ptr(ht->store, cpu) = NULL;
273f97f8f06SThomas Gleixner }
274f97f8f06SThomas Gleixner }
275*6309727eSAndreas Gruenbacher }
276f97f8f06SThomas Gleixner
277f97f8f06SThomas Gleixner /**
278f97f8f06SThomas Gleixner * smpboot_register_percpu_thread - Register a per_cpu thread related
279f97f8f06SThomas Gleixner * to hotplug
280f97f8f06SThomas Gleixner * @plug_thread: Hotplug thread descriptor
281f97f8f06SThomas Gleixner *
282167a8867SPeter Zijlstra * Creates and starts the threads on all online cpus.
283230ec939SFrederic Weisbecker */
smpboot_register_percpu_thread(struct smp_hotplug_thread * plug_thread)284f97f8f06SThomas Gleixner int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
285f97f8f06SThomas Gleixner {
286f97f8f06SThomas Gleixner unsigned int cpu;
287f97f8f06SThomas Gleixner int ret = 0;
288167a8867SPeter Zijlstra
289f97f8f06SThomas Gleixner cpus_read_lock();
290f97f8f06SThomas Gleixner mutex_lock(&smpboot_threads_lock);
291f97f8f06SThomas Gleixner for_each_online_cpu(cpu) {
292f97f8f06SThomas Gleixner ret = __smpboot_create_thread(plug_thread, cpu);
293844d8787SSebastian Andrzej Siewior if (ret) {
294f97f8f06SThomas Gleixner smpboot_destroy_threads(plug_thread);
295f97f8f06SThomas Gleixner goto out;
296f97f8f06SThomas Gleixner }
297f97f8f06SThomas Gleixner smpboot_unpark_thread(plug_thread, cpu);
298f97f8f06SThomas Gleixner }
299f97f8f06SThomas Gleixner list_add(&plug_thread->list, &hotplug_threads);
300f97f8f06SThomas Gleixner out:
301f97f8f06SThomas Gleixner mutex_unlock(&smpboot_threads_lock);
302f97f8f06SThomas Gleixner cpus_read_unlock();
303f97f8f06SThomas Gleixner return ret;
304f97f8f06SThomas Gleixner }
305f97f8f06SThomas Gleixner EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
306844d8787SSebastian Andrzej Siewior
/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	cpus_read_lock();
	mutex_lock(&smpboot_threads_lock);
	/* Unlink first so hotplug callbacks no longer see this descriptor */
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
323f97f8f06SThomas Gleixner