// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
	unsigned long		caller;
	cpu_stop_fn_t		fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion; callers check @done for NULL before calling */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work,
					struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

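	/*
	 * Queue the work and wake the stopper in one non-preemptible
	 * section so that the wakeup happens in the same scheduling
	 * context as the queueing (see the comments in
	 * cpu_stop_queue_two_works() and queue_stop_cpus_work() for why
	 * this matters).
	 */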
	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
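
/*
 * Illustrative usage sketch (hypothetical: read_cpu_reg(), read_some_register()
 * and struct reg_req are made-up names, not part of this file).  The callback
 * runs on @cpu in stopper context and must not sleep:
 *
 *	static int read_cpu_reg(void *arg)
 *	{
 *		struct reg_req *req = arg;
 *
 *		req->val = read_some_register();
 *		return 0;
 *	}
 *
 *	err = stop_one_cpu(cpu, read_cpu_reg, &req);
 */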

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
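
/*
 * Illustrative walk-through of the handshake above (not from the original
 * source): with num_threads == 2, set_state(PREPARE) arms thread_ack = 2.
 * Each stopper spins in multi_cpu_stop(), notices the new state, performs the
 * per-state action and calls ack_state().  The second (last) ack drops
 * thread_ack to 0 and advances the state to DISABLE_IRQ; the same dance
 * repeats for RUN and EXIT, so no CPU can run @fn before every participating
 * CPU has disabled interrupts.
 */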

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
			/* Also suppress RCU CPU stall warnings. */
			rcu_momentary_eqs();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, one of the stoppers
	 * queued here could be woken up by another CPU and preempt us, and we
	 * would then never wake up the other stopper.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order, leading to system deadlock.
	 *
	 * Because we hold both locks, we cannot miss stop_cpus_in_progress
	 * if queue_stop_cpus_work() has queued a work on cpu1 but not yet
	 * on cpu2.
	 *
	 * The flag can be falsely true, but it is safe to spin until it is
	 * cleared; queue_stop_cpus_work() does everything under
	 * preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on @cpu1.
 *
 * Returns when both executions are complete.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
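
/*
 * Illustrative usage sketch (hypothetical: struct swap_ctx, swap_percpu_state()
 * and do_swap() are made-up names, not part of this file).  The callback runs
 * on @cpu1 with both CPUs stopped and interrupts disabled, so it must not
 * sleep:
 *
 *	static int swap_percpu_state(void *arg)
 *	{
 *		struct swap_ctx *ctx = arg;
 *
 *		return do_swap(ctx->src_cpu, ctx->dst_cpu);
 *	}
 *
 *	err = stop_two_cpus(ctx.src_cpu, ctx.dst_cpu, swap_percpu_state, &ctx);
 */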

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}
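
/*
 * Illustrative usage sketch (hypothetical: kick_cpu() and the per-cpu
 * kick_work buffer are made-up names, not part of this file).  Because the
 * call does not wait, @work_buf must outlive the request, e.g. by being
 * per-cpu or otherwise owned by the caller until @fn has started:
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);
 *
 *	static int kick_cpu(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	stop_one_cpu_nowait(cpu, kick_cpu, NULL, &per_cpu(kick_work, cpu));
 */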

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		work->caller = _RET_IP_;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
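
/*
 * stop_cpus() is internal to this file: its callers are
 * stop_machine_cpuslocked() and stop_core_cpuslocked() below, both of which
 * pass multi_cpu_stop() as @fn so that the MULTI_STOP_* state machine above
 * stops the selected CPUs in lockstep.
 */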

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		stopper->caller = work->caller;
		stopper->fn = fn;
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		stopper->fn = NULL;
		stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks; until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
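
/*
 * Illustrative usage sketch (hypothetical: struct patch_ctx, apply_patch()
 * and write_insns() are made-up names, not part of this file).  @fn runs
 * while every online CPU spins with interrupts disabled, so it must not
 * sleep or take sleeping locks; passing NULL for @cpus means @fn runs on
 * the first online CPU:
 *
 *	static int apply_patch(void *arg)
 *	{
 *		struct patch_ctx *ctx = arg;
 *
 *		return write_insns(ctx);
 *	}
 *
 *	err = stop_machine(apply_patch, &ctx, NULL);
 */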

#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);

	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = cpumask_weight(smt_mask),
		.active_cpus = smt_mask,
	};

	lockdep_assert_cpus_held();

	/* Set the initial state and stop all CPUs in @cpu's core. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(smt_mask, multi_cpu_stop, &msdata);
}
EXPORT_SYMBOL_GPL(stop_core_cpuslocked);
#endif
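
/*
 * Illustrative usage sketch (hypothetical: core_sync() is a made-up name, not
 * part of this file).  The caller must already hold the CPU hotplug read
 * lock; @fn then runs on every SMT sibling of @cpu with the whole core
 * stopped:
 *
 *	cpus_read_lock();
 *	err = stop_core_cpuslocked(cpu, core_sync, &ctx);
 *	cpus_read_unlock();
 */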

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				  const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}