1 // SPDX-License-Identifier: GPL-2.0
2 /* CPU control.
3 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 */
5 #include <linux/sched/mm.h>
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sched/hotplug.h>
12 #include <linux/sched/isolation.h>
13 #include <linux/sched/task.h>
14 #include <linux/sched/smt.h>
15 #include <linux/unistd.h>
16 #include <linux/cpu.h>
17 #include <linux/oom.h>
18 #include <linux/rcupdate.h>
19 #include <linux/delay.h>
20 #include <linux/export.h>
21 #include <linux/bug.h>
22 #include <linux/kthread.h>
23 #include <linux/stop_machine.h>
24 #include <linux/mutex.h>
25 #include <linux/gfp.h>
26 #include <linux/suspend.h>
27 #include <linux/lockdep.h>
28 #include <linux/tick.h>
29 #include <linux/irq.h>
30 #include <linux/nmi.h>
31 #include <linux/smpboot.h>
32 #include <linux/relay.h>
33 #include <linux/slab.h>
34 #include <linux/scs.h>
35 #include <linux/percpu-rwsem.h>
36 #include <linux/cpuset.h>
37 #include <linux/random.h>
38 #include <linux/cc_platform.h>
39 #include <linux/parser.h>
40
41 #include <trace/events/power.h>
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/cpuhp.h>
44
45 #include "smpboot.h"
46
47 /**
48 * struct cpuhp_cpu_state - Per cpu hotplug state storage
49 * @state: The current cpu state
50 * @target: The target state
51 * @fail: State at which to inject a callback failure (CPUHP_INVALID: none)
52 * @thread: Pointer to the hotplug thread
53 * @should_run: Thread should execute
54 * @rollback: Perform a rollback
55 * @single: Single callback invocation
56 * @bringup: Single callback bringup or teardown selector
57 * @node: Remote CPU node; for multi-instance, do a
58 * single entry callback for install/remove
59 * @last: For multi-instance rollback, remember how far we got
60 * @cb_state: The state for a single callback (install/uninstall)
61 * @result: Result of the operation
62 * @ap_sync_state: State for AP synchronization
63 * @done_up: Signal completion to the issuer of the task for cpu-up
64 * @done_down: Signal completion to the issuer of the task for cpu-down
65 */
66 struct cpuhp_cpu_state {
67 enum cpuhp_state state;
68 enum cpuhp_state target;
69 enum cpuhp_state fail;
70 #ifdef CONFIG_SMP
71 struct task_struct *thread;
72 bool should_run;
73 bool rollback;
74 bool single;
75 bool bringup;
76 struct hlist_node *node;
77 struct hlist_node *last;
78 enum cpuhp_state cb_state;
79 int result;
80 atomic_t ap_sync_state;
81 struct completion done_up;
82 struct completion done_down;
83 #endif
84 };
85
86 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
87 .fail = CPUHP_INVALID,
88 };
89
90 #ifdef CONFIG_SMP
91 cpumask_t cpus_booted_once_mask;
92 #endif
93
94 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
95 static struct lockdep_map cpuhp_state_up_map =
96 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
97 static struct lockdep_map cpuhp_state_down_map =
98 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
99
100
101 static inline void cpuhp_lock_acquire(bool bringup)
102 {
103 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
104 }
105
106 static inline void cpuhp_lock_release(bool bringup)
107 {
108 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
109 }
110 #else
111
112 static inline void cpuhp_lock_acquire(bool bringup) { }
113 static inline void cpuhp_lock_release(bool bringup) { }
114
115 #endif
116
117 /**
118 * struct cpuhp_step - Hotplug state machine step
119 * @name: Name of the step
120 * @startup: Startup function of the step
121 * @teardown: Teardown function of the step
122 * @cant_stop: Bringup/teardown can't be stopped at this step
123 * @multi_instance: State has multiple instances which get added afterwards
124 */
125 struct cpuhp_step {
126 const char *name;
127 union {
128 int (*single)(unsigned int cpu);
129 int (*multi)(unsigned int cpu,
130 struct hlist_node *node);
131 } startup;
132 union {
133 int (*single)(unsigned int cpu);
134 int (*multi)(unsigned int cpu,
135 struct hlist_node *node);
136 } teardown;
137 /* private: */
138 struct hlist_head list;
139 /* public: */
140 bool cant_stop;
141 bool multi_instance;
142 };
143
144 static DEFINE_MUTEX(cpuhp_state_mutex);
145 static struct cpuhp_step cpuhp_hp_states[];
146
147 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
148 {
149 return cpuhp_hp_states + state;
150 }
151
152 static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
153 {
154 return bringup ? !step->startup.single : !step->teardown.single;
155 }
156
157 /**
158 * cpuhp_invoke_callback - Invoke the callbacks for a given state
159 * @cpu: The cpu for which the callback should be invoked
160 * @state: The state to do callbacks for
161 * @bringup: True if the bringup callback should be invoked
162 * @node: For multi-instance, do a single entry callback for install/remove
163 * @lastp: For multi-instance rollback, remember how far we got
164 *
165 * Called from cpu hotplug and from the state register machinery.
166 *
167 * Return: %0 on success or a negative errno code
168 */
169 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
170 bool bringup, struct hlist_node *node,
171 struct hlist_node **lastp)
172 {
173 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
174 struct cpuhp_step *step = cpuhp_get_step(state);
175 int (*cbm)(unsigned int cpu, struct hlist_node *node);
176 int (*cb)(unsigned int cpu);
177 int ret, cnt;
178
179 if (st->fail == state) {
180 st->fail = CPUHP_INVALID;
181 return -EAGAIN;
182 }
183
184 if (cpuhp_step_empty(bringup, step)) {
185 WARN_ON_ONCE(1);
186 return 0;
187 }
188
189 if (!step->multi_instance) {
190 WARN_ON_ONCE(lastp && *lastp);
191 cb = bringup ? step->startup.single : step->teardown.single;
192
193 trace_cpuhp_enter(cpu, st->target, state, cb);
194 ret = cb(cpu);
195 trace_cpuhp_exit(cpu, st->state, state, ret);
196 return ret;
197 }
198 cbm = bringup ? step->startup.multi : step->teardown.multi;
199
200 /* Single invocation for instance add/remove */
201 if (node) {
202 WARN_ON_ONCE(lastp && *lastp);
203 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
204 ret = cbm(cpu, node);
205 trace_cpuhp_exit(cpu, st->state, state, ret);
206 return ret;
207 }
208
209 /* State transition. Invoke on all instances */
210 cnt = 0;
211 hlist_for_each(node, &step->list) {
212 if (lastp && node == *lastp)
213 break;
214
215 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
216 ret = cbm(cpu, node);
217 trace_cpuhp_exit(cpu, st->state, state, ret);
218 if (ret) {
219 if (!lastp)
220 goto err;
221
222 *lastp = node;
223 return ret;
224 }
225 cnt++;
226 }
227 if (lastp)
228 *lastp = NULL;
229 return 0;
230 err:
231 /* Rollback the instances if one failed */
232 cbm = !bringup ? step->startup.multi : step->teardown.multi;
233 if (!cbm)
234 return ret;
235
236 hlist_for_each(node, &step->list) {
237 if (!cnt--)
238 break;
239
240 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
241 ret = cbm(cpu, node);
242 trace_cpuhp_exit(cpu, st->state, state, ret);
243 /*
244 * Rollback must not fail!
245 */
246 WARN_ON_ONCE(ret);
247 }
248 return ret;
249 }
250
251 /*
252 * The former STARTING/DYING states are run with IRQs disabled and must not fail.
253 */
254 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
255 {
256 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
257 }
258
259 #ifdef CONFIG_SMP
260 static bool cpuhp_is_ap_state(enum cpuhp_state state)
261 {
262 /*
263 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
264 * purposes as that state is handled explicitly in cpu_down.
265 */
266 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
267 }
268
269 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
270 {
271 struct completion *done = bringup ? &st->done_up : &st->done_down;
272 wait_for_completion(done);
273 }
274
275 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
276 {
277 struct completion *done = bringup ? &st->done_up : &st->done_down;
278 complete(done);
279 }
280
281 /* Synchronization state management */
282 enum cpuhp_sync_state {
283 SYNC_STATE_DEAD,
284 SYNC_STATE_KICKED,
285 SYNC_STATE_SHOULD_DIE,
286 SYNC_STATE_ALIVE,
287 SYNC_STATE_SHOULD_ONLINE,
288 SYNC_STATE_ONLINE,
289 };
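/*
 * Progression as implemented below: with CONFIG_HOTPLUG_CORE_SYNC_FULL,
 * bringup goes KICKED (cpuhp_can_boot_ap()) -> ALIVE (cpuhp_ap_sync_alive())
 * -> SHOULD_ONLINE (cpuhp_bp_sync_alive()) -> ONLINE (cpuhp_online_idle()).
 * With CONFIG_HOTPLUG_CORE_SYNC_DEAD, teardown goes SHOULD_DIE
 * (cpuhp_bp_sync_dead()) -> DEAD (cpuhp_ap_report_dead()).
 */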
290
291 #ifdef CONFIG_HOTPLUG_CORE_SYNC
292 /**
293 * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
294 * @state: The synchronization state to set
295 *
296 * No synchronization point. Just update of the synchronization state, but implies
297 * a full barrier so that the AP changes are visible before the control CPU proceeds.
298 */
299 static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
300 {
301 atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
302
303 (void)atomic_xchg(st, state);
304 }
305
306 void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
307
308 static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
309 enum cpuhp_sync_state next_state)
310 {
311 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
312 ktime_t now, end, start = ktime_get();
313 int sync;
314
315 end = start + 10ULL * NSEC_PER_SEC;
316
317 sync = atomic_read(st);
318 while (1) {
319 if (sync == state) {
320 if (!atomic_try_cmpxchg(st, &sync, next_state))
321 continue;
322 return true;
323 }
324
325 now = ktime_get();
326 if (now > end) {
327 /* Timeout. Leave the state unchanged */
328 return false;
329 } else if (now - start < NSEC_PER_MSEC) {
330 /* Poll for one millisecond */
331 arch_cpuhp_sync_state_poll();
332 } else {
333 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
334 }
335 sync = atomic_read(st);
336 }
337 return true;
338 }
339 #else /* CONFIG_HOTPLUG_CORE_SYNC */
340 static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
341 #endif /* !CONFIG_HOTPLUG_CORE_SYNC */
342
343 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
344 /**
345 * cpuhp_ap_report_dead - Update synchronization state to DEAD
346 *
347 * No synchronization point. Just update of the synchronization state.
348 */
349 void cpuhp_ap_report_dead(void)
350 {
351 cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
352 }
353
354 void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
355
356 /*
357 * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
358 * because the AP cannot issue complete() at this stage.
359 */
360 static void cpuhp_bp_sync_dead(unsigned int cpu)
361 {
362 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
363 int sync = atomic_read(st);
364
365 do {
366 /* CPU can have reported dead already. Don't overwrite that! */
367 if (sync == SYNC_STATE_DEAD)
368 break;
369 } while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
370
371 if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
372 /* CPU reached dead state. Invoke the cleanup function */
373 arch_cpuhp_cleanup_dead_cpu(cpu);
374 return;
375 }
376
377 /* No further action possible. Emit message and give up. */
378 pr_err("CPU%u failed to report dead state\n", cpu);
379 }
380 #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
381 static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
382 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
383
384 #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
385 /**
386 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
387 *
388 * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
389 * for the BP to release it.
390 */
391 void cpuhp_ap_sync_alive(void)
392 {
393 atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
394
395 cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
396
397 /* Wait for the control CPU to release it. */
398 while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
399 cpu_relax();
400 }
401
402 static bool cpuhp_can_boot_ap(unsigned int cpu)
403 {
404 atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
405 int sync = atomic_read(st);
406
407 again:
408 switch (sync) {
409 case SYNC_STATE_DEAD:
410 /* CPU is properly dead */
411 break;
412 case SYNC_STATE_KICKED:
413 /* CPU did not come up in previous attempt */
414 break;
415 case SYNC_STATE_ALIVE:
416 /* CPU is stuck in cpuhp_ap_sync_alive(). */
417 break;
418 default:
419 /* CPU failed to report online or dead and is in limbo state. */
420 return false;
421 }
422
423 /* Prepare for booting */
424 if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
425 goto again;
426
427 return true;
428 }
429
430 void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
431
432 /*
433 * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
434 * because the AP cannot issue complete() so early in the bringup.
435 */
436 static int cpuhp_bp_sync_alive(unsigned int cpu)
437 {
438 int ret = 0;
439
440 if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
441 return 0;
442
443 if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
444 pr_err("CPU%u failed to report alive state\n", cpu);
445 ret = -EIO;
446 }
447
448 /* Let the architecture cleanup the kick alive mechanics. */
449 arch_cpuhp_cleanup_kick_cpu(cpu);
450 return ret;
451 }
452 #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
453 static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
454 static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
455 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
456
457 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
458 static DEFINE_MUTEX(cpu_add_remove_lock);
459 bool cpuhp_tasks_frozen;
460 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
461
462 /*
463 * The following two APIs (cpu_maps_update_begin/done) must be used when
464 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
465 */
466 void cpu_maps_update_begin(void)
467 {
468 mutex_lock(&cpu_add_remove_lock);
469 }
470
471 void cpu_maps_update_done(void)
472 {
473 mutex_unlock(&cpu_add_remove_lock);
474 }
475
476 /*
477 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
478 * Should always be manipulated under cpu_add_remove_lock
479 */
480 static int cpu_hotplug_disabled;
481
482 #ifdef CONFIG_HOTPLUG_CPU
483
484 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
485
486 static bool cpu_hotplug_offline_disabled __ro_after_init;
487
488 void cpus_read_lock(void)
489 {
490 percpu_down_read(&cpu_hotplug_lock);
491 }
492 EXPORT_SYMBOL_GPL(cpus_read_lock);
493
494 int cpus_read_trylock(void)
495 {
496 return percpu_down_read_trylock(&cpu_hotplug_lock);
497 }
498 EXPORT_SYMBOL_GPL(cpus_read_trylock);
499
500 void cpus_read_unlock(void)
501 {
502 percpu_up_read(&cpu_hotplug_lock);
503 }
504 EXPORT_SYMBOL_GPL(cpus_read_unlock);
505
506 void cpus_write_lock(void)
507 {
508 percpu_down_write(&cpu_hotplug_lock);
509 }
510
511 void cpus_write_unlock(void)
512 {
513 percpu_up_write(&cpu_hotplug_lock);
514 }
515
516 void lockdep_assert_cpus_held(void)
517 {
518 /*
519 * We can't have hotplug operations before userspace starts running,
520 * and some init codepaths will knowingly not take the hotplug lock.
521 * This is all valid, so mute lockdep until it makes sense to report
522 * unheld locks.
523 */
524 if (system_state < SYSTEM_RUNNING)
525 return;
526
527 percpu_rwsem_assert_held(&cpu_hotplug_lock);
528 }
529 EXPORT_SYMBOL_GPL(lockdep_assert_cpus_held);
530
531 #ifdef CONFIG_LOCKDEP
532 int lockdep_is_cpus_held(void)
533 {
534 return percpu_rwsem_is_held(&cpu_hotplug_lock);
535 }
536
537 int lockdep_is_cpus_write_held(void)
538 {
539 return percpu_rwsem_is_write_held(&cpu_hotplug_lock);
540 }
541 #endif
542
543 static void lockdep_acquire_cpus_lock(void)
544 {
545 rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
546 }
547
548 static void lockdep_release_cpus_lock(void)
549 {
550 rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
551 }
552
553 /* Declare CPU offlining not supported */
554 void cpu_hotplug_disable_offlining(void)
555 {
556 cpu_maps_update_begin();
557 cpu_hotplug_offline_disabled = true;
558 cpu_maps_update_done();
559 }
560
561 /*
562 * Wait for currently running CPU hotplug operations to complete (if any) and
563 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
564 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
565 * hotplug path before performing hotplug operations. So acquiring that lock
566 * guarantees mutual exclusion from any currently running hotplug operations.
567 */
568 void cpu_hotplug_disable(void)
569 {
570 cpu_maps_update_begin();
571 cpu_hotplug_disabled++;
572 cpu_maps_update_done();
573 }
574 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
575
576 static void __cpu_hotplug_enable(void)
577 {
578 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
579 return;
580 cpu_hotplug_disabled--;
581 }
582
583 void cpu_hotplug_enable(void)
584 {
585 cpu_maps_update_begin();
586 __cpu_hotplug_enable();
587 cpu_maps_update_done();
588 }
589 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
590
591 #else
592
593 static void lockdep_acquire_cpus_lock(void)
594 {
595 }
596
597 static void lockdep_release_cpus_lock(void)
598 {
599 }
600
601 #endif /* CONFIG_HOTPLUG_CPU */
602
603 /*
604 * Architectures that need SMT-specific errata handling during SMT hotplug
605 * should override this.
606 */
607 void __weak arch_smt_update(void) { }
608
609 #ifdef CONFIG_HOTPLUG_SMT
610
611 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
612 static unsigned int cpu_smt_max_threads __ro_after_init;
613 unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
614
615 void __init cpu_smt_disable(bool force)
616 {
617 if (!cpu_smt_possible())
618 return;
619
620 if (force) {
621 pr_info("SMT: Force disabled\n");
622 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
623 } else {
624 pr_info("SMT: disabled\n");
625 cpu_smt_control = CPU_SMT_DISABLED;
626 }
627 cpu_smt_num_threads = 1;
628 }
629
630 /*
631 * The decision whether SMT is supported can only be done after the full
632 * CPU identification. Called from architecture code.
633 */
634 void __init cpu_smt_set_num_threads(unsigned int num_threads,
635 unsigned int max_threads)
636 {
637 WARN_ON(!num_threads || (num_threads > max_threads));
638
639 if (max_threads == 1)
640 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
641
642 cpu_smt_max_threads = max_threads;
643
644 /*
645 * If SMT has been disabled via the kernel command line or SMT is
646 * not supported, set cpu_smt_num_threads to 1 for consistency.
647 * If enabled, take the architecture requested number of threads
648 * to bring up into account.
649 */
650 if (cpu_smt_control != CPU_SMT_ENABLED)
651 cpu_smt_num_threads = 1;
652 else if (num_threads < cpu_smt_num_threads)
653 cpu_smt_num_threads = num_threads;
654 }
655
656 static int __init smt_cmdline_disable(char *str)
657 {
658 cpu_smt_disable(str && !strcmp(str, "force"));
659 return 0;
660 }
661 early_param("nosmt", smt_cmdline_disable);
662
663 /*
664 * For architectures supporting partial SMT states, check if the thread is allowed.
665 * Otherwise this has already been checked through cpu_smt_max_threads when
666 * setting the SMT level.
667 */
668 static inline bool cpu_smt_thread_allowed(unsigned int cpu)
669 {
670 #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
671 return topology_smt_thread_allowed(cpu);
672 #else
673 return true;
674 #endif
675 }
676
677 static inline bool cpu_bootable(unsigned int cpu)
678 {
679 if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
680 return true;
681
682 /* All CPUs are bootable if controls are not configured */
683 if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
684 return true;
685
686 /* All CPUs are bootable if CPU is not SMT capable */
687 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
688 return true;
689
690 if (topology_is_primary_thread(cpu))
691 return true;
692
693 /*
694 * On x86 it's required to boot all logical CPUs at least once so
695 * that the init code can get a chance to set CR4.MCE on each
696 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
697 * core will shut down the machine.
698 */
699 return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
700 }
701
702 /* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
703 bool cpu_smt_possible(void)
704 {
705 return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
706 cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
707 }
708 EXPORT_SYMBOL_GPL(cpu_smt_possible);
709
710 #else
711 static inline bool cpu_bootable(unsigned int cpu) { return true; }
712 #endif
713
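/*
 * Record a new @target for @cpu: clear any pending rollback state, derive
 * the direction from the current state and update the cpu_dying mask
 * accordingly. Returns the previous state so callers can hand it to
 * cpuhp_reset_state() on failure.
 */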
714 static inline enum cpuhp_state
715 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
716 {
717 enum cpuhp_state prev_state = st->state;
718 bool bringup = st->state < target;
719
720 st->rollback = false;
721 st->last = NULL;
722
723 st->target = target;
724 st->single = false;
725 st->bringup = bringup;
726 if (cpu_dying(cpu) != !bringup)
727 set_cpu_dying(cpu, !bringup);
728
729 return prev_state;
730 }
731
732 static inline void
733 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
734 enum cpuhp_state prev_state)
735 {
736 bool bringup = !st->bringup;
737
738 st->target = prev_state;
739
740 /*
741 * Already rolling back. No need to invert the bringup value or to change
742 * the current state.
743 */
744 if (st->rollback)
745 return;
746
747 st->rollback = true;
748
749 /*
750 * If we have st->last we need to undo partial multi_instance of this
751 * state first. Otherwise start undo at the previous state.
752 */
753 if (!st->last) {
754 if (st->bringup)
755 st->state--;
756 else
757 st->state++;
758 }
759
760 st->bringup = bringup;
761 if (cpu_dying(cpu) != !bringup)
762 set_cpu_dying(cpu, !bringup);
763 }
764
765 /* Regular hotplug invocation of the AP hotplug thread */
766 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
767 {
768 if (!st->single && st->state == st->target)
769 return;
770
771 st->result = 0;
772 /*
773 * Make sure the above stores are visible before should_run becomes
774 * true. Paired with the smp_mb() in cpuhp_thread_fun().
775 */
776 smp_mb();
777 st->should_run = true;
778 wake_up_process(st->thread);
779 wait_for_ap_thread(st, st->bringup);
780 }
781
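/*
 * Set @target and let the AP hotplug thread work towards it. If the thread
 * reports a failure, reset to the previous state and kick the thread again
 * to perform the rollback.
 */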
782 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
783 enum cpuhp_state target)
784 {
785 enum cpuhp_state prev_state;
786 int ret;
787
788 prev_state = cpuhp_set_state(cpu, st, target);
789 __cpuhp_kick_ap(st);
790 if ((ret = st->result)) {
791 cpuhp_reset_state(cpu, st, prev_state);
792 __cpuhp_kick_ap(st);
793 }
794
795 return ret;
796 }
797
798 static int bringup_wait_for_ap_online(unsigned int cpu)
799 {
800 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
801
802 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
803 wait_for_ap_thread(st, true);
804 if (WARN_ON_ONCE((!cpu_online(cpu))))
805 return -ECANCELED;
806
807 /* Unpark the hotplug thread of the target cpu */
808 kthread_unpark(st->thread);
809
810 /*
811 * SMT soft disabling on X86 requires to bring the CPU out of the
812 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
813 * CPU marked itself as booted_once in notify_cpu_starting() so the
814 * cpu_bootable() check will now return false if this is not the
815 * primary sibling.
816 */
817 if (!cpu_bootable(cpu))
818 return -ECANCELED;
819 return 0;
820 }
821
822 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
823 static int cpuhp_kick_ap_alive(unsigned int cpu)
824 {
825 if (!cpuhp_can_boot_ap(cpu))
826 return -EAGAIN;
827
828 return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
829 }
830
831 static int cpuhp_bringup_ap(unsigned int cpu)
832 {
833 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
834 int ret;
835
836 /*
837 * Some architectures have to walk the irq descriptors to
838 * setup the vector space for the cpu which comes online.
839 * Prevent irq alloc/free across the bringup.
840 */
841 irq_lock_sparse();
842
843 ret = cpuhp_bp_sync_alive(cpu);
844 if (ret)
845 goto out_unlock;
846
847 ret = bringup_wait_for_ap_online(cpu);
848 if (ret)
849 goto out_unlock;
850
851 irq_unlock_sparse();
852
853 if (st->target <= CPUHP_AP_ONLINE_IDLE)
854 return 0;
855
856 return cpuhp_kick_ap(cpu, st, st->target);
857
858 out_unlock:
859 irq_unlock_sparse();
860 return ret;
861 }
862 #else
863 static int bringup_cpu(unsigned int cpu)
864 {
865 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
866 struct task_struct *idle = idle_thread_get(cpu);
867 int ret;
868
869 if (!cpuhp_can_boot_ap(cpu))
870 return -EAGAIN;
871
872 /*
873 * Some architectures have to walk the irq descriptors to
874 * setup the vector space for the cpu which comes online.
875 *
876 * Prevent irq alloc/free across the bringup by acquiring the
877 * sparse irq lock. Hold it until the upcoming CPU completes the
878 * startup in cpuhp_online_idle() which allows to avoid
879 * intermediate synchronization points in the architecture code.
880 */
881 irq_lock_sparse();
882
883 ret = __cpu_up(cpu, idle);
884 if (ret)
885 goto out_unlock;
886
887 ret = cpuhp_bp_sync_alive(cpu);
888 if (ret)
889 goto out_unlock;
890
891 ret = bringup_wait_for_ap_online(cpu);
892 if (ret)
893 goto out_unlock;
894
895 irq_unlock_sparse();
896
897 if (st->target <= CPUHP_AP_ONLINE_IDLE)
898 return 0;
899
900 return cpuhp_kick_ap(cpu, st, st->target);
901
902 out_unlock:
903 irq_unlock_sparse();
904 return ret;
905 }
906 #endif
907
908 static int finish_cpu(unsigned int cpu)
909 {
910 struct task_struct *idle = idle_thread_get(cpu);
911 struct mm_struct *mm = idle->active_mm;
912
913 /*
914 * sched_force_init_mm() ensured the use of &init_mm,
915 * drop that refcount now that the CPU has stopped.
916 */
917 WARN_ON(mm != &init_mm);
918 idle->active_mm = NULL;
919 mmdrop_lazy_tlb(mm);
920
921 return 0;
922 }
923
924 /*
925 * Hotplug state machine related functions
926 */
927
928 /*
929 * Get the next state to run. Empty ones will be skipped. Returns true if a
930 * state must be run.
931 *
932 * st->state will be modified ahead of time, to match state_to_run, as if it
933 * has already run.
934 */
935 static bool cpuhp_next_state(bool bringup,
936 enum cpuhp_state *state_to_run,
937 struct cpuhp_cpu_state *st,
938 enum cpuhp_state target)
939 {
940 do {
941 if (bringup) {
942 if (st->state >= target)
943 return false;
944
945 *state_to_run = ++st->state;
946 } else {
947 if (st->state <= target)
948 return false;
949
950 *state_to_run = st->state--;
951 }
952
953 if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
954 break;
955 } while (true);
956
957 return true;
958 }
959
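/*
 * Invoke the callbacks for all non-empty states between the current state
 * and @target. With @nofail set, failures are only warned about and the
 * walk continues; otherwise the first error stops the walk and is returned.
 */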
960 static int __cpuhp_invoke_callback_range(bool bringup,
961 unsigned int cpu,
962 struct cpuhp_cpu_state *st,
963 enum cpuhp_state target,
964 bool nofail)
965 {
966 enum cpuhp_state state;
967 int ret = 0;
968
969 while (cpuhp_next_state(bringup, &state, st, target)) {
970 int err;
971
972 err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
973 if (!err)
974 continue;
975
976 if (nofail) {
977 pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
978 cpu, bringup ? "UP" : "DOWN",
979 cpuhp_get_step(st->state)->name,
980 st->state, err);
981 ret = -1;
982 } else {
983 ret = err;
984 break;
985 }
986 }
987
988 return ret;
989 }
990
991 static inline int cpuhp_invoke_callback_range(bool bringup,
992 unsigned int cpu,
993 struct cpuhp_cpu_state *st,
994 enum cpuhp_state target)
995 {
996 return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
997 }
998
999 static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
1000 unsigned int cpu,
1001 struct cpuhp_cpu_state *st,
1002 enum cpuhp_state target)
1003 {
1004 __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
1005 }
1006
1007 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
1008 {
1009 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
1010 return true;
1011 /*
1012 * When CPU hotplug is disabled, then taking the CPU down is not
1013 * possible because takedown_cpu() and the architecture and
1014 * subsystem specific mechanisms are not available. So the CPU
1015 * which would be completely unplugged again needs to stay around
1016 * in the current state.
1017 */
1018 return st->state <= CPUHP_BRINGUP_CPU;
1019 }
1020
1021 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1022 enum cpuhp_state target)
1023 {
1024 enum cpuhp_state prev_state = st->state;
1025 int ret = 0;
1026
1027 ret = cpuhp_invoke_callback_range(true, cpu, st, target);
1028 if (ret) {
1029 pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
1030 ret, cpu, cpuhp_get_step(st->state)->name,
1031 st->state);
1032
1033 cpuhp_reset_state(cpu, st, prev_state);
1034 if (can_rollback_cpu(st))
1035 WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
1036 prev_state));
1037 }
1038 return ret;
1039 }
1040
1041 /*
1042 * The cpu hotplug threads manage the bringup and teardown of the cpus
1043 */
1044 static int cpuhp_should_run(unsigned int cpu)
1045 {
1046 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1047
1048 return st->should_run;
1049 }
1050
1051 /*
1052 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
1053 * callbacks when a state gets [un]installed at runtime.
1054 *
1055 * Each invocation of this function by the smpboot thread does a single AP
1056 * state callback.
1057 *
1058 * It has 3 modes of operation:
1059 * - single: runs st->cb_state
1060 * - up: runs ++st->state, while st->state < st->target
1061 * - down: runs st->state--, while st->state > st->target
1062 *
1063 * When complete or on error, should_run is cleared and the completion is fired.
1064 */
1065 static void cpuhp_thread_fun(unsigned int cpu)
1066 {
1067 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1068 bool bringup = st->bringup;
1069 enum cpuhp_state state;
1070
1071 if (WARN_ON_ONCE(!st->should_run))
1072 return;
1073
1074 /*
1075 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
1076 * that if we see ->should_run we also see the rest of the state.
1077 */
1078 smp_mb();
1079
1080 /*
1081 * The BP holds the hotplug lock, but we're now running on the AP,
1082 * ensure that anybody asserting the lock is held will actually find
1083 * it so.
1084 */
1085 lockdep_acquire_cpus_lock();
1086 cpuhp_lock_acquire(bringup);
1087
1088 if (st->single) {
1089 state = st->cb_state;
1090 st->should_run = false;
1091 } else {
1092 st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
1093 if (!st->should_run)
1094 goto end;
1095 }
1096
1097 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
1098
1099 if (cpuhp_is_atomic_state(state)) {
1100 local_irq_disable();
1101 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1102 local_irq_enable();
1103
1104 /*
1105 * STARTING/DYING must not fail!
1106 */
1107 WARN_ON_ONCE(st->result);
1108 } else {
1109 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1110 }
1111
1112 if (st->result) {
1113 /*
1114 * If we fail on a rollback, we're up a creek without a
1115 * paddle, no way forward, no way back. We lose, thanks for
1116 * playing.
1117 */
1118 WARN_ON_ONCE(st->rollback);
1119 st->should_run = false;
1120 }
1121
1122 end:
1123 cpuhp_lock_release(bringup);
1124 lockdep_release_cpus_lock();
1125
1126 if (!st->should_run)
1127 complete_ap_thread(st, bringup);
1128 }
1129
1130 /* Invoke a single callback on a remote cpu */
1131 static int
1132 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1133 struct hlist_node *node)
1134 {
1135 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1136 int ret;
1137
1138 if (!cpu_online(cpu))
1139 return 0;
1140
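/*
 * cpuhp_lock_acquire()/release() only operate on the lockdep maps declared
 * above; no real lock is taken here. This links the lock dependencies of
 * the up/down state callbacks to this call chain for lockdep.
 */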
1141 cpuhp_lock_acquire(false);
1142 cpuhp_lock_release(false);
1143
1144 cpuhp_lock_acquire(true);
1145 cpuhp_lock_release(true);
1146
1147 /*
1148 * If we are up and running, use the hotplug thread. For early calls
1149 * we invoke the thread function directly.
1150 */
1151 if (!st->thread)
1152 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1153
1154 st->rollback = false;
1155 st->last = NULL;
1156
1157 st->node = node;
1158 st->bringup = bringup;
1159 st->cb_state = state;
1160 st->single = true;
1161
1162 __cpuhp_kick_ap(st);
1163
1164 /*
1165 * If we failed and did a partial, do a rollback.
1166 */
1167 if ((ret = st->result) && st->last) {
1168 st->rollback = true;
1169 st->bringup = !bringup;
1170
1171 __cpuhp_kick_ap(st);
1172 }
1173
1174 /*
1175 * Clean up the leftovers so the next hotplug operation won't use stale
1176 * data.
1177 */
1178 st->node = st->last = NULL;
1179 return ret;
1180 }
1181
1182 static int cpuhp_kick_ap_work(unsigned int cpu)
1183 {
1184 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1185 enum cpuhp_state prev_state = st->state;
1186 int ret;
1187
1188 cpuhp_lock_acquire(false);
1189 cpuhp_lock_release(false);
1190
1191 cpuhp_lock_acquire(true);
1192 cpuhp_lock_release(true);
1193
1194 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
1195 ret = cpuhp_kick_ap(cpu, st, st->target);
1196 trace_cpuhp_exit(cpu, st->state, prev_state, ret);
1197
1198 return ret;
1199 }
1200
1201 static struct smp_hotplug_thread cpuhp_threads = {
1202 .store = &cpuhp_state.thread,
1203 .thread_should_run = cpuhp_should_run,
1204 .thread_fn = cpuhp_thread_fun,
1205 .thread_comm = "cpuhp/%u",
1206 .selfparking = true,
1207 };
1208
1209 static __init void cpuhp_init_state(void)
1210 {
1211 struct cpuhp_cpu_state *st;
1212 int cpu;
1213
1214 for_each_possible_cpu(cpu) {
1215 st = per_cpu_ptr(&cpuhp_state, cpu);
1216 init_completion(&st->done_up);
1217 init_completion(&st->done_down);
1218 }
1219 }
1220
1221 void __init cpuhp_threads_init(void)
1222 {
1223 cpuhp_init_state();
1224 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
1225 kthread_unpark(this_cpu_read(cpuhp_state.thread));
1226 }
1227
1228 #ifdef CONFIG_HOTPLUG_CPU
1229 #ifndef arch_clear_mm_cpumask_cpu
1230 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
1231 #endif
1232
1233 /**
1234 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1235 * @cpu: a CPU id
1236 *
1237 * This function walks all processes, finds a valid mm struct for each one and
1238 * then clears a corresponding bit in mm's cpumask. While this all sounds
1239 * trivial, there are various non-obvious corner cases, which this function
1240 * tries to solve in a safe manner.
1241 *
1242 * Also note that the function uses a somewhat relaxed locking scheme, so it may
1243 * be called only for an already offlined CPU.
1244 */
1245 void clear_tasks_mm_cpumask(int cpu)
1246 {
1247 struct task_struct *p;
1248
1249 /*
1250 * This function is called after the cpu is taken down and marked
1251 * offline, so it's not like new tasks will ever get this cpu set in
1252 * their mm mask. -- Peter Zijlstra
1253 * Thus, we may use rcu_read_lock() here, instead of grabbing
1254 * full-fledged tasklist_lock.
1255 */
1256 WARN_ON(cpu_online(cpu));
1257 rcu_read_lock();
1258 for_each_process(p) {
1259 struct task_struct *t;
1260
1261 /*
1262 * Main thread might exit, but other threads may still have
1263 * a valid mm. Find one.
1264 */
1265 t = find_lock_task_mm(p);
1266 if (!t)
1267 continue;
1268 arch_clear_mm_cpumask_cpu(cpu, t->mm);
1269 task_unlock(t);
1270 }
1271 rcu_read_unlock();
1272 }
1273
1274 /* Take this CPU down. */
1275 static int take_cpu_down(void *_param)
1276 {
1277 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1278 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1279 int err, cpu = smp_processor_id();
1280
1281 /* Ensure this CPU doesn't handle any more interrupts. */
1282 err = __cpu_disable();
1283 if (err < 0)
1284 return err;
1285
1286 /*
1287 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1288 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1289 */
1290 WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1291
1292 /*
1293 * Invoke the former CPU_DYING callbacks. DYING must not fail!
1294 */
1295 cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
1296
1297 /* Park the stopper thread */
1298 stop_machine_park(cpu);
1299 return 0;
1300 }
1301
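/*
 * Take @cpu fully offline from the control side: park its hotplug thread,
 * run take_cpu_down() on the dying CPU via stop_machine(), wait for it to
 * reach CPUHP_AP_IDLE_DEAD, then let the architecture kill it and migrate
 * its RCU callbacks.
 */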
1302 static int takedown_cpu(unsigned int cpu)
1303 {
1304 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1305 int err;
1306
1307 /* Park the smpboot threads */
1308 kthread_park(st->thread);
1309
1310 /*
1311 * Prevent irq alloc/free while the dying cpu reorganizes the
1312 * interrupt affinities.
1313 */
1314 irq_lock_sparse();
1315
1316 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1317 if (err) {
1318 /* CPU refused to die */
1319 irq_unlock_sparse();
1320 /* Unpark the hotplug thread so we can rollback there */
1321 kthread_unpark(st->thread);
1322 return err;
1323 }
1324 BUG_ON(cpu_online(cpu));
1325
1326 /*
1327 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1328 * all runnable tasks from the CPU, there's only the idle task left now
1329 * that the migration thread is done doing the stop_machine thing.
1330 *
1331 * Wait for the stop thread to go away.
1332 */
1333 wait_for_ap_thread(st, false);
1334 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1335
1336 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1337 irq_unlock_sparse();
1338
1339 hotplug_cpu__broadcast_tick_pull(cpu);
1340 /* This actually kills the CPU. */
1341 __cpu_die(cpu);
1342
1343 cpuhp_bp_sync_dead(cpu);
1344
1345 lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));
1346
1347 /*
1348 * Callbacks must be re-integrated right away to the RCU state machine.
1349 * Otherwise an RCU callback could block a further teardown function
1350 * waiting for its completion.
1351 */
1352 rcutree_migrate_callbacks(cpu);
1353
1354 return 0;
1355 }
1356
1357 static void cpuhp_complete_idle_dead(void *arg)
1358 {
1359 struct cpuhp_cpu_state *st = arg;
1360
1361 complete_ap_thread(st, false);
1362 }
1363
1364 void cpuhp_report_idle_dead(void)
1365 {
1366 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1367
1368 BUG_ON(st->state != CPUHP_AP_OFFLINE);
1369 tick_assert_timekeeping_handover();
1370 rcutree_report_cpu_dead();
1371 st->state = CPUHP_AP_IDLE_DEAD;
1372 /*
1373 * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
1374 * to an online cpu.
1375 */
1376 smp_call_function_single(cpumask_first(cpu_online_mask),
1377 cpuhp_complete_idle_dead, st, 0);
1378 }
1379
1380 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1381 enum cpuhp_state target)
1382 {
1383 enum cpuhp_state prev_state = st->state;
1384 int ret = 0;
1385
1386 ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1387 if (ret) {
1388 pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1389 ret, cpu, cpuhp_get_step(st->state)->name,
1390 st->state);
1391
1392 cpuhp_reset_state(cpu, st, prev_state);
1393
1394 if (st->state < prev_state)
1395 WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1396 prev_state));
1397 }
1398
1399 return ret;
1400 }
1401
1402 /* Requires cpu_add_remove_lock to be held */
1403 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1404 enum cpuhp_state target)
1405 {
1406 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1407 int prev_state, ret = 0;
1408
1409 if (num_online_cpus() == 1)
1410 return -EBUSY;
1411
1412 if (!cpu_present(cpu))
1413 return -EINVAL;
1414
1415 cpus_write_lock();
1416
1417 /*
1418 * Keep at least one housekeeping cpu online to avoid generating
1419 * an empty sched_domain span.
1420 */
1421 if (cpumask_any_and(cpu_online_mask,
1422 housekeeping_cpumask(HK_TYPE_DOMAIN)) >= nr_cpu_ids) {
1423 ret = -EBUSY;
1424 goto out;
1425 }
1426
1427 cpuhp_tasks_frozen = tasks_frozen;
1428
1429 prev_state = cpuhp_set_state(cpu, st, target);
1430 /*
1431 * If the current CPU state is in the range of the AP hotplug thread,
1432 * then we need to kick the thread.
1433 */
1434 if (st->state > CPUHP_TEARDOWN_CPU) {
1435 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1436 ret = cpuhp_kick_ap_work(cpu);
1437 /*
1438 * The AP side has done the error rollback already. Just
1439 * return the error code.
1440 */
1441 if (ret)
1442 goto out;
1443
1444 /*
1445 * We might have stopped still in the range of the AP hotplug
1446 * thread. Nothing to do anymore.
1447 */
1448 if (st->state > CPUHP_TEARDOWN_CPU)
1449 goto out;
1450
1451 st->target = target;
1452 }
1453 /*
1454 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1455 * to do the further cleanups.
1456 */
1457 ret = cpuhp_down_callbacks(cpu, st, target);
1458 if (ret && st->state < prev_state) {
1459 if (st->state == CPUHP_TEARDOWN_CPU) {
1460 cpuhp_reset_state(cpu, st, prev_state);
1461 __cpuhp_kick_ap(st);
1462 } else {
1463 WARN(1, "DEAD callback error for CPU%d", cpu);
1464 }
1465 }
1466
1467 out:
1468 cpus_write_unlock();
1469 arch_smt_update();
1470 return ret;
1471 }
1472
1473 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1474 {
1475 /*
1476 * If the platform does not support hotplug, report it explicitly to
1477 * differentiate it from a transient offlining failure.
1478 */
1479 if (cpu_hotplug_offline_disabled)
1480 return -EOPNOTSUPP;
1481 if (cpu_hotplug_disabled)
1482 return -EBUSY;
1483 return _cpu_down(cpu, 0, target);
1484 }
1485
1486 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1487 {
1488 int err;
1489
1490 cpu_maps_update_begin();
1491 err = cpu_down_maps_locked(cpu, target);
1492 cpu_maps_update_done();
1493 return err;
1494 }
1495
1496 /**
1497 * cpu_device_down - Bring down a cpu device
1498 * @dev: Pointer to the cpu device to offline
1499 *
1500 * This function is meant to be used by device core cpu subsystem only.
1501 *
1502 * Other subsystems should use remove_cpu() instead.
1503 *
1504 * Return: %0 on success or a negative errno code
1505 */
1506 int cpu_device_down(struct device *dev)
1507 {
1508 return cpu_down(dev->id, CPUHP_OFFLINE);
1509 }
1510
1511 int remove_cpu(unsigned int cpu)
1512 {
1513 int ret;
1514
1515 lock_device_hotplug();
1516 ret = device_offline(get_cpu_device(cpu));
1517 unlock_device_hotplug();
1518
1519 return ret;
1520 }
1521 EXPORT_SYMBOL_GPL(remove_cpu);
1522
1523 void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1524 {
1525 unsigned int cpu;
1526 int error;
1527
1528 cpu_maps_update_begin();
1529
1530 /*
1531 * Make certain the cpu I'm about to reboot on is online.
1532 *
1533 * This is in line with what migrate_to_reboot_cpu() already does.
1534 */
1535 if (!cpu_online(primary_cpu))
1536 primary_cpu = cpumask_first(cpu_online_mask);
1537
1538 for_each_online_cpu(cpu) {
1539 if (cpu == primary_cpu)
1540 continue;
1541
1542 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1543 if (error) {
1544 pr_err("Failed to offline CPU%d - error=%d\n",
1545 cpu, error);
1546 break;
1547 }
1548 }
1549
1550 /*
1551 * Ensure all but the reboot CPU are offline.
1552 */
1553 BUG_ON(num_online_cpus() > 1);
1554
1555 /*
1556 * Make sure the CPUs won't be enabled by someone else after this
1557 * point. Kexec will reboot to a new kernel shortly resetting
1558 * everything along the way.
1559 */
1560 cpu_hotplug_disabled++;
1561
1562 cpu_maps_update_done();
1563 }
1564
1565 #else
1566 #define takedown_cpu NULL
1567 #endif /*CONFIG_HOTPLUG_CPU*/
1568
1569 /**
1570 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1571 * @cpu: cpu that just started
1572 *
1573 * It must be called by the arch code on the new cpu, before the new cpu
1574 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1575 */
1576 void notify_cpu_starting(unsigned int cpu)
1577 {
1578 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1579 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1580
1581 rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1582 cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1583
1584 /*
1585 * STARTING must not fail!
1586 */
1587 cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
1588 }
1589
1590 /*
1591 * Called from the idle task. Wake up the controlling task which brings the
1592 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1593 * online bringup to the hotplug thread.
1594 */
1595 void cpuhp_online_idle(enum cpuhp_state state)
1596 {
1597 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1598
1599 /* Happens for the boot cpu */
1600 if (state != CPUHP_AP_ONLINE_IDLE)
1601 return;
1602
1603 cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
1604
1605 /*
1606 * Unpark the stopper thread before we start the idle loop (and start
1607 * scheduling); this ensures the stopper task is always available.
1608 */
1609 stop_machine_unpark(smp_processor_id());
1610
1611 st->state = CPUHP_AP_ONLINE_IDLE;
1612 complete_ap_thread(st, true);
1613 }
1614
1615 /* Requires cpu_add_remove_lock to be held */
1616 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1617 {
1618 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1619 struct task_struct *idle;
1620 int ret = 0;
1621
1622 cpus_write_lock();
1623
1624 if (!cpu_present(cpu)) {
1625 ret = -EINVAL;
1626 goto out;
1627 }
1628
1629 /*
1630 * The caller of cpu_up() might have raced with another
1631 * caller. Nothing to do.
1632 */
1633 if (st->state >= target)
1634 goto out;
1635
1636 if (st->state == CPUHP_OFFLINE) {
1637 /* Let it fail before we try to bring the cpu up */
1638 idle = idle_thread_get(cpu);
1639 if (IS_ERR(idle)) {
1640 ret = PTR_ERR(idle);
1641 goto out;
1642 }
1643
1644 /*
1645 * Reset stale stack state from the last time this CPU was online.
1646 */
1647 scs_task_reset(idle);
1648 kasan_unpoison_task_stack(idle);
1649 }
1650
1651 cpuhp_tasks_frozen = tasks_frozen;
1652
1653 cpuhp_set_state(cpu, st, target);
1654 /*
1655 * If the current CPU state is in the range of the AP hotplug thread,
1656 * then we need to kick the thread once more.
1657 */
1658 if (st->state > CPUHP_BRINGUP_CPU) {
1659 ret = cpuhp_kick_ap_work(cpu);
1660 /*
1661 * The AP side has done the error rollback already. Just
1662 * return the error code.
1663 */
1664 if (ret)
1665 goto out;
1666 }
1667
1668 /*
1669 * Try to reach the target state. We max out on the BP at
1670 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1671 * responsible for bringing it up to the target state.
1672 */
1673 target = min((int)target, CPUHP_BRINGUP_CPU);
1674 ret = cpuhp_up_callbacks(cpu, st, target);
1675 out:
1676 cpus_write_unlock();
1677 arch_smt_update();
1678 return ret;
1679 }
1680
1681 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1682 {
1683 int err = 0;
1684
1685 if (!cpu_possible(cpu)) {
1686 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1687 cpu);
1688 return -EINVAL;
1689 }
1690
1691 err = try_online_node(cpu_to_node(cpu));
1692 if (err)
1693 return err;
1694
1695 cpu_maps_update_begin();
1696
1697 if (cpu_hotplug_disabled) {
1698 err = -EBUSY;
1699 goto out;
1700 }
1701 if (!cpu_bootable(cpu)) {
1702 err = -EPERM;
1703 goto out;
1704 }
1705
1706 err = _cpu_up(cpu, 0, target);
1707 out:
1708 cpu_maps_update_done();
1709 return err;
1710 }
1711
1712 /**
1713 * cpu_device_up - Bring up a cpu device
1714 * @dev: Pointer to the cpu device to online
1715 *
1716 * This function is meant to be used by device core cpu subsystem only.
1717 *
1718 * Other subsystems should use add_cpu() instead.
1719 *
1720 * Return: %0 on success or a negative errno code
1721 */
1722 int cpu_device_up(struct device *dev)
1723 {
1724 return cpu_up(dev->id, CPUHP_ONLINE);
1725 }
1726
1727 int add_cpu(unsigned int cpu)
1728 {
1729 int ret;
1730
1731 lock_device_hotplug();
1732 ret = device_online(get_cpu_device(cpu));
1733 unlock_device_hotplug();
1734
1735 return ret;
1736 }
1737 EXPORT_SYMBOL_GPL(add_cpu);
1738
1739 /**
1740 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1741 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1742 *
1743 * On some architectures like arm64, we can hibernate on any CPU, but on
1744 * wake-up, the CPU we hibernated on might be offline as a side effect of
1745 * using maxcpus= for example.
1746 *
1747 * Return: %0 on success or a negative errno code
1748 */
1749 int bringup_hibernate_cpu(unsigned int sleep_cpu)
1750 {
1751 int ret;
1752
1753 if (!cpu_online(sleep_cpu)) {
1754 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1755 ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1756 if (ret) {
1757 pr_err("Failed to bring hibernate-CPU up!\n");
1758 return ret;
1759 }
1760 }
1761 return 0;
1762 }
1763
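/*
 * Bring up to @ncpus CPUs from @mask to @target. A CPU which fails to come
 * up is rolled back to CPUHP_OFFLINE where rollback is possible.
 */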
1764 static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
1765 enum cpuhp_state target)
1766 {
1767 unsigned int cpu;
1768
1769 for_each_cpu(cpu, mask) {
1770 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1771
1772 if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
1773 /*
1774 * If this failed then cpu_up() might have only
1775 * rolled back to CPUHP_BP_KICK_AP for the final
1776 * online. Clean it up. NOOP if already rolled back.
1777 */
1778 WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
1779 }
1780
1781 if (!--ncpus)
1782 break;
1783 }
1784 }
1785
1786 #ifdef CONFIG_HOTPLUG_PARALLEL
1787 static bool __cpuhp_parallel_bringup __ro_after_init = true;
1788
1789 static int __init parallel_bringup_parse_param(char *arg)
1790 {
1791 return kstrtobool(arg, &__cpuhp_parallel_bringup);
1792 }
1793 early_param("cpuhp.parallel", parallel_bringup_parse_param);
1794
1795 #ifdef CONFIG_HOTPLUG_SMT
1796 static inline bool cpuhp_smt_aware(void)
1797 {
1798 return cpu_smt_max_threads > 1;
1799 }
1800
1801 static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
1802 {
1803 return cpu_primary_thread_mask;
1804 }
1805 #else
1806 static inline bool cpuhp_smt_aware(void)
1807 {
1808 return false;
1809 }
1810 static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
1811 {
1812 return cpu_none_mask;
1813 }
1814 #endif
1815
1816 bool __weak arch_cpuhp_init_parallel_bringup(void)
1817 {
1818 return true;
1819 }
1820
1821 /*
1822 * On architectures which have enabled parallel bringup this invokes all BP
1823 * prepare states for each of the to be onlined APs first. The last state
1824 * sends the startup IPI to the APs. The APs proceed through the low level
1825 * bringup code in parallel and then wait for the control CPU to release
1826 * them one by one for the final onlining procedure.
1827 *
1828 * This avoids waiting for each AP to respond to the startup IPI in
1829 * CPUHP_BRINGUP_CPU.
1830 */
1831 static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
1832 {
1833 const struct cpumask *mask = cpu_present_mask;
1834
1835 if (__cpuhp_parallel_bringup)
1836 __cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
1837 if (!__cpuhp_parallel_bringup)
1838 return false;
1839
1840 if (cpuhp_smt_aware()) {
1841 const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
1842 static struct cpumask tmp_mask __initdata;
1843
1844 /*
1845 * X86 requires that SMT siblings are not stopped while
1846 * the primary thread does a microcode update, for various
1847 * reasons. Bring the primary threads up first.
1848 */
1849 cpumask_and(&tmp_mask, mask, pmask);
1850 cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
1851 cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
1852 /* Account for the online CPUs */
1853 ncpus -= num_online_cpus();
1854 if (!ncpus)
1855 return true;
1856 /* Create the mask for secondary CPUs */
1857 cpumask_andnot(&tmp_mask, mask, pmask);
1858 mask = &tmp_mask;
1859 }
1860
1861 /* Bring the not-yet started CPUs up */
1862 cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
1863 cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
1864 return true;
1865 }
1866 #else
1867 static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
1868 #endif /* CONFIG_HOTPLUG_PARALLEL */
1869
1870 void __init bringup_nonboot_cpus(unsigned int max_cpus)
1871 {
1872 if (!max_cpus)
1873 return;
1874
1875 /* Try parallel bringup optimization if enabled */
1876 if (cpuhp_bringup_cpus_parallel(max_cpus))
1877 return;
1878
1879 /* Full per CPU serialized bringup */
1880 cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
1881 }
1882
1883 #ifdef CONFIG_PM_SLEEP_SMP
1884 static cpumask_var_t frozen_cpus;
1885
freeze_secondary_cpus(int primary)1886 int freeze_secondary_cpus(int primary)
1887 {
1888 int cpu, error = 0;
1889
1890 cpu_maps_update_begin();
1891 if (primary == -1) {
1892 primary = cpumask_first(cpu_online_mask);
1893 if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1894 primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1895 } else {
1896 if (!cpu_online(primary))
1897 primary = cpumask_first(cpu_online_mask);
1898 }
1899
1900 /*
1901 * We take down all of the non-boot CPUs in one shot to avoid races
1902 * with userspace trying to use CPU hotplug at the same time.
1903 */
1904 cpumask_clear(frozen_cpus);
1905
1906 pr_info("Disabling non-boot CPUs ...\n");
1907 for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
1908 if (!cpu_online(cpu) || cpu == primary)
1909 continue;
1910
1911 if (pm_wakeup_pending()) {
1912 pr_info("Wakeup pending. Abort CPU freeze\n");
1913 error = -EBUSY;
1914 break;
1915 }
1916
1917 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1918 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1919 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1920 if (!error)
1921 cpumask_set_cpu(cpu, frozen_cpus);
1922 else {
1923 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1924 break;
1925 }
1926 }
1927
1928 if (!error)
1929 BUG_ON(num_online_cpus() > 1);
1930 else
1931 pr_err("Non-boot CPUs are not disabled\n");
1932
1933 /*
1934 * Make sure the CPUs won't be enabled by someone else. We need to do
1935 * this even in case of failure as all freeze_secondary_cpus() users are
1936 * supposed to do thaw_secondary_cpus() on the failure path.
1937 */
1938 cpu_hotplug_disabled++;
1939
1940 cpu_maps_update_done();
1941 return error;
1942 }
1943
arch_thaw_secondary_cpus_begin(void)1944 void __weak arch_thaw_secondary_cpus_begin(void)
1945 {
1946 }
1947
arch_thaw_secondary_cpus_end(void)1948 void __weak arch_thaw_secondary_cpus_end(void)
1949 {
1950 }
1951
thaw_secondary_cpus(void)1952 void thaw_secondary_cpus(void)
1953 {
1954 int cpu, error;
1955
1956 /* Allow everyone to use the CPU hotplug again */
1957 cpu_maps_update_begin();
1958 __cpu_hotplug_enable();
1959 if (cpumask_empty(frozen_cpus))
1960 goto out;
1961
1962 pr_info("Enabling non-boot CPUs ...\n");
1963
1964 arch_thaw_secondary_cpus_begin();
1965
1966 for_each_cpu(cpu, frozen_cpus) {
1967 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1968 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1969 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1970 if (!error) {
1971 pr_info("CPU%d is up\n", cpu);
1972 continue;
1973 }
1974 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1975 }
1976
1977 arch_thaw_secondary_cpus_end();
1978
1979 cpumask_clear(frozen_cpus);
1980 out:
1981 cpu_maps_update_done();
1982 }
1983
alloc_frozen_cpus(void)1984 static int __init alloc_frozen_cpus(void)
1985 {
1986 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1987 return -ENOMEM;
1988 return 0;
1989 }
1990 core_initcall(alloc_frozen_cpus);
1991
1992 /*
1993 * When callbacks for CPU hotplug notifications are being executed, we must
1994 * ensure that the state of the system with respect to the tasks being frozen
1995 * or not, as reported by the notification, remains unchanged *throughout the
1996 * duration* of the execution of the callbacks.
1997 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1998 *
1999 * This synchronization is implemented by mutually excluding regular CPU
2000 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
2001 * Hibernate notifications.
2002 */
2003 static int
cpu_hotplug_pm_callback(struct notifier_block * nb,unsigned long action,void * ptr)2004 cpu_hotplug_pm_callback(struct notifier_block *nb,
2005 unsigned long action, void *ptr)
2006 {
2007 switch (action) {
2008
2009 case PM_SUSPEND_PREPARE:
2010 case PM_HIBERNATION_PREPARE:
2011 cpu_hotplug_disable();
2012 break;
2013
2014 case PM_POST_SUSPEND:
2015 case PM_POST_HIBERNATION:
2016 cpu_hotplug_enable();
2017 break;
2018
2019 default:
2020 return NOTIFY_DONE;
2021 }
2022
2023 return NOTIFY_OK;
2024 }
2025
2026
cpu_hotplug_pm_sync_init(void)2027 static int __init cpu_hotplug_pm_sync_init(void)
2028 {
2029 /*
2030 * cpu_hotplug_pm_callback has higher priority than the x86
2031 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
2032 * having disabled CPU hotplug to avoid hotplug races.
2033 */
2034 pm_notifier(cpu_hotplug_pm_callback, 0);
2035 return 0;
2036 }
2037 core_initcall(cpu_hotplug_pm_sync_init);
2038
2039 #endif /* CONFIG_PM_SLEEP_SMP */
2040
2041 int __boot_cpu_id;
2042
2043 #endif /* CONFIG_SMP */
2044
2045 /* Boot processor state steps */
2046 static struct cpuhp_step cpuhp_hp_states[] = {
2047 [CPUHP_OFFLINE] = {
2048 .name = "offline",
2049 .startup.single = NULL,
2050 .teardown.single = NULL,
2051 },
2052 #ifdef CONFIG_SMP
2053 [CPUHP_CREATE_THREADS]= {
2054 .name = "threads:prepare",
2055 .startup.single = smpboot_create_threads,
2056 .teardown.single = NULL,
2057 .cant_stop = true,
2058 },
2059 [CPUHP_RANDOM_PREPARE] = {
2060 .name = "random:prepare",
2061 .startup.single = random_prepare_cpu,
2062 .teardown.single = NULL,
2063 },
2064 [CPUHP_WORKQUEUE_PREP] = {
2065 .name = "workqueue:prepare",
2066 .startup.single = workqueue_prepare_cpu,
2067 .teardown.single = NULL,
2068 },
2069 [CPUHP_HRTIMERS_PREPARE] = {
2070 .name = "hrtimers:prepare",
2071 .startup.single = hrtimers_prepare_cpu,
2072 .teardown.single = NULL,
2073 },
2074 [CPUHP_SMPCFD_PREPARE] = {
2075 .name = "smpcfd:prepare",
2076 .startup.single = smpcfd_prepare_cpu,
2077 .teardown.single = smpcfd_dead_cpu,
2078 },
2079 [CPUHP_RELAY_PREPARE] = {
2080 .name = "relay:prepare",
2081 .startup.single = relay_prepare_cpu,
2082 .teardown.single = NULL,
2083 },
2084 [CPUHP_RCUTREE_PREP] = {
2085 .name = "RCU/tree:prepare",
2086 .startup.single = rcutree_prepare_cpu,
2087 .teardown.single = rcutree_dead_cpu,
2088 },
2089 /*
2090 * On the tear-down path, timers_dead_cpu() must be invoked
2091 * before blk_mq_queue_reinit_notify() from notify_dead(),
2092 * otherwise an RCU stall occurs.
2093 */
2094 [CPUHP_TIMERS_PREPARE] = {
2095 .name = "timers:prepare",
2096 .startup.single = timers_prepare_cpu,
2097 .teardown.single = timers_dead_cpu,
2098 },
2099
2100 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
2101 /*
2102 * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until
2103 * the next step releases it.
2104 */
2105 [CPUHP_BP_KICK_AP] = {
2106 .name = "cpu:kick_ap",
2107 .startup.single = cpuhp_kick_ap_alive,
2108 },
2109
2110 /*
2111 * Waits for the AP to reach cpuhp_ap_sync_alive() and then
2112 * releases it for the complete bringup.
2113 */
2114 [CPUHP_BRINGUP_CPU] = {
2115 .name = "cpu:bringup",
2116 .startup.single = cpuhp_bringup_ap,
2117 .teardown.single = finish_cpu,
2118 .cant_stop = true,
2119 },
2120 #else
2121 /*
2122 * All-in-one CPU bringup state which includes the kick alive.
2123 */
2124 [CPUHP_BRINGUP_CPU] = {
2125 .name = "cpu:bringup",
2126 .startup.single = bringup_cpu,
2127 .teardown.single = finish_cpu,
2128 .cant_stop = true,
2129 },
2130 #endif
2131 /* Final state before CPU kills itself */
2132 [CPUHP_AP_IDLE_DEAD] = {
2133 .name = "idle:dead",
2134 },
2135 /*
2136 * Last state before CPU enters the idle loop to die. Transient state
2137 * for synchronization.
2138 */
2139 [CPUHP_AP_OFFLINE] = {
2140 .name = "ap:offline",
2141 .cant_stop = true,
2142 },
2143 /* First state is scheduler control. Interrupts are disabled */
2144 [CPUHP_AP_SCHED_STARTING] = {
2145 .name = "sched:starting",
2146 .startup.single = sched_cpu_starting,
2147 .teardown.single = sched_cpu_dying,
2148 },
2149 [CPUHP_AP_RCUTREE_DYING] = {
2150 .name = "RCU/tree:dying",
2151 .startup.single = NULL,
2152 .teardown.single = rcutree_dying_cpu,
2153 },
2154 [CPUHP_AP_SMPCFD_DYING] = {
2155 .name = "smpcfd:dying",
2156 .startup.single = NULL,
2157 .teardown.single = smpcfd_dying_cpu,
2158 },
2159 [CPUHP_AP_HRTIMERS_DYING] = {
2160 .name = "hrtimers:dying",
2161 .startup.single = hrtimers_cpu_starting,
2162 .teardown.single = hrtimers_cpu_dying,
2163 },
2164 [CPUHP_AP_TICK_DYING] = {
2165 .name = "tick:dying",
2166 .startup.single = NULL,
2167 .teardown.single = tick_cpu_dying,
2168 },
2169 /* Entry state on starting. Interrupts enabled from here on. Transient
2170 * state for synchronization */
2171 [CPUHP_AP_ONLINE] = {
2172 .name = "ap:online",
2173 },
2174 /*
2175 * Handled on the control processor until the plugged processor manages
2176 * this itself.
2177 */
2178 [CPUHP_TEARDOWN_CPU] = {
2179 .name = "cpu:teardown",
2180 .startup.single = NULL,
2181 .teardown.single = takedown_cpu,
2182 .cant_stop = true,
2183 },
2184
2185 [CPUHP_AP_SCHED_WAIT_EMPTY] = {
2186 .name = "sched:waitempty",
2187 .startup.single = NULL,
2188 .teardown.single = sched_cpu_wait_empty,
2189 },
2190
2191 /* Handle smpboot threads park/unpark */
2192 [CPUHP_AP_SMPBOOT_THREADS] = {
2193 .name = "smpboot/threads:online",
2194 .startup.single = smpboot_unpark_threads,
2195 .teardown.single = smpboot_park_threads,
2196 },
2197 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
2198 .name = "irq/affinity:online",
2199 .startup.single = irq_affinity_online_cpu,
2200 .teardown.single = NULL,
2201 },
2202 [CPUHP_AP_PERF_ONLINE] = {
2203 .name = "perf:online",
2204 .startup.single = perf_event_init_cpu,
2205 .teardown.single = perf_event_exit_cpu,
2206 },
2207 [CPUHP_AP_WATCHDOG_ONLINE] = {
2208 .name = "lockup_detector:online",
2209 .startup.single = lockup_detector_online_cpu,
2210 .teardown.single = lockup_detector_offline_cpu,
2211 },
2212 [CPUHP_AP_WORKQUEUE_ONLINE] = {
2213 .name = "workqueue:online",
2214 .startup.single = workqueue_online_cpu,
2215 .teardown.single = workqueue_offline_cpu,
2216 },
2217 [CPUHP_AP_RANDOM_ONLINE] = {
2218 .name = "random:online",
2219 .startup.single = random_online_cpu,
2220 .teardown.single = NULL,
2221 },
2222 [CPUHP_AP_RCUTREE_ONLINE] = {
2223 .name = "RCU/tree:online",
2224 .startup.single = rcutree_online_cpu,
2225 .teardown.single = rcutree_offline_cpu,
2226 },
2227 #endif
2228 /*
2229 * The dynamically registered state space is here
2230 */
2231
2232 #ifdef CONFIG_SMP
2233 /* Last state is scheduler control setting the cpu active */
2234 [CPUHP_AP_ACTIVE] = {
2235 .name = "sched:active",
2236 .startup.single = sched_cpu_activate,
2237 .teardown.single = sched_cpu_deactivate,
2238 },
2239 #endif
2240
2241 /* CPU is fully up and running. */
2242 [CPUHP_ONLINE] = {
2243 .name = "online",
2244 .startup.single = NULL,
2245 .teardown.single = NULL,
2246 },
2247 };
2248
2249 /* Sanity check for callbacks */
cpuhp_cb_check(enum cpuhp_state state)2250 static int cpuhp_cb_check(enum cpuhp_state state)
2251 {
2252 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
2253 return -EINVAL;
2254 return 0;
2255 }
2256
2257 /*
2258 * Returns a free slot for dynamic state assignment in the requested range.
2259 * The states are protected by the cpuhp_state_mutex and an empty slot is
2260 * identified by having no name assigned.
2261 */
cpuhp_reserve_state(enum cpuhp_state state)2262 static int cpuhp_reserve_state(enum cpuhp_state state)
2263 {
2264 enum cpuhp_state i, end;
2265 struct cpuhp_step *step;
2266
2267 switch (state) {
2268 case CPUHP_AP_ONLINE_DYN:
2269 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
2270 end = CPUHP_AP_ONLINE_DYN_END;
2271 break;
2272 case CPUHP_BP_PREPARE_DYN:
2273 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
2274 end = CPUHP_BP_PREPARE_DYN_END;
2275 break;
2276 default:
2277 return -EINVAL;
2278 }
2279
2280 for (i = state; i <= end; i++, step++) {
2281 if (!step->name)
2282 return i;
2283 }
2284 WARN(1, "No more dynamic states available for CPU hotplug\n");
2285 return -ENOSPC;
2286 }
2287
cpuhp_store_callbacks(enum cpuhp_state state,const char * name,int (* startup)(unsigned int cpu),int (* teardown)(unsigned int cpu),bool multi_instance)2288 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
2289 int (*startup)(unsigned int cpu),
2290 int (*teardown)(unsigned int cpu),
2291 bool multi_instance)
2292 {
2293 /* (Un)Install the callbacks for further cpu hotplug operations */
2294 struct cpuhp_step *sp;
2295 int ret = 0;
2296
2297 /*
2298 * If name is NULL, then the state gets removed.
2299 *
2300 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
2301 * the first allocation from these dynamic ranges, so the removal
2302 * would trigger a new allocation and clear the wrong (already
2303 * empty) state, leaving the callbacks of the to-be-cleared state
2304 * dangling, which causes wreckage on the next hotplug operation.
2305 */
2306 if (name && (state == CPUHP_AP_ONLINE_DYN ||
2307 state == CPUHP_BP_PREPARE_DYN)) {
2308 ret = cpuhp_reserve_state(state);
2309 if (ret < 0)
2310 return ret;
2311 state = ret;
2312 }
2313 sp = cpuhp_get_step(state);
2314 if (name && sp->name)
2315 return -EBUSY;
2316
2317 sp->startup.single = startup;
2318 sp->teardown.single = teardown;
2319 sp->name = name;
2320 sp->multi_instance = multi_instance;
2321 INIT_HLIST_HEAD(&sp->list);
2322 return ret;
2323 }
2324
cpuhp_get_teardown_cb(enum cpuhp_state state)2325 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
2326 {
2327 return cpuhp_get_step(state)->teardown.single;
2328 }
2329
2330 /*
2331 * Call the startup/teardown function for a step either on the AP or
2332 * on the current CPU.
2333 */
cpuhp_issue_call(int cpu,enum cpuhp_state state,bool bringup,struct hlist_node * node)2334 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2335 struct hlist_node *node)
2336 {
2337 struct cpuhp_step *sp = cpuhp_get_step(state);
2338 int ret;
2339
2340 /*
2341 * If there's nothing to do, we're done.
2342 * Relies on the union for multi_instance.
2343 */
2344 if (cpuhp_step_empty(bringup, sp))
2345 return 0;
2346 /*
2347 * The non-AP-bound callbacks can fail on bringup. On teardown,
2348 * e.g. module removal, we crash for now.
2349 */
2350 #ifdef CONFIG_SMP
2351 if (cpuhp_is_ap_state(state))
2352 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
2353 else
2354 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2355 #else
2356 if (cpuhp_is_atomic_state(state)) {
2357 guard(irqsave)();
2358 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2359 /* STARTING/DYING must not fail! */
2360 WARN_ON_ONCE(ret);
2361 } else {
2362 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2363 }
2364 #endif
2365 BUG_ON(ret && !bringup);
2366 return ret;
2367 }
2368
2369 /*
2370 * Called from __cpuhp_setup_state on a recoverable failure.
2371 *
2372 * Note: The teardown callbacks for rollback are not allowed to fail!
2373 */
cpuhp_rollback_install(int failedcpu,enum cpuhp_state state,struct hlist_node * node)2374 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2375 struct hlist_node *node)
2376 {
2377 int cpu;
2378
2379 /* Roll back the already executed steps on the other cpus */
2380 for_each_present_cpu(cpu) {
2381 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2382 int cpustate = st->state;
2383
2384 if (cpu >= failedcpu)
2385 break;
2386
2387 /* Did we invoke the startup call on that cpu ? */
2388 if (cpustate >= state)
2389 cpuhp_issue_call(cpu, state, false, node);
2390 }
2391 }
2392
__cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,struct hlist_node * node,bool invoke)2393 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
2394 struct hlist_node *node,
2395 bool invoke)
2396 {
2397 struct cpuhp_step *sp;
2398 int cpu;
2399 int ret;
2400
2401 lockdep_assert_cpus_held();
2402
2403 sp = cpuhp_get_step(state);
2404 if (sp->multi_instance == false)
2405 return -EINVAL;
2406
2407 mutex_lock(&cpuhp_state_mutex);
2408
2409 if (!invoke || !sp->startup.multi)
2410 goto add_node;
2411
2412 /*
2413 * Try to call the startup callback for each present cpu
2414 * depending on the hotplug state of the cpu.
2415 */
2416 for_each_present_cpu(cpu) {
2417 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2418 int cpustate = st->state;
2419
2420 if (cpustate < state)
2421 continue;
2422
2423 ret = cpuhp_issue_call(cpu, state, true, node);
2424 if (ret) {
2425 if (sp->teardown.multi)
2426 cpuhp_rollback_install(cpu, state, node);
2427 goto unlock;
2428 }
2429 }
2430 add_node:
2431 ret = 0;
2432 hlist_add_head(node, &sp->list);
2433 unlock:
2434 mutex_unlock(&cpuhp_state_mutex);
2435 return ret;
2436 }
2437
__cpuhp_state_add_instance(enum cpuhp_state state,struct hlist_node * node,bool invoke)2438 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2439 bool invoke)
2440 {
2441 int ret;
2442
2443 cpus_read_lock();
2444 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2445 cpus_read_unlock();
2446 return ret;
2447 }
2448 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
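
/*
 * Minimal usage sketch for the multi-instance API (illustrative only; struct
 * foo and foo_inst are hypothetical and not part of this file). The state
 * must have been set up with multi_instance == true, with the per-instance
 * callbacks stored in the .multi members:
 *
 *	struct foo {
 *		struct hlist_node node;
 *		...
 *	};
 *	static struct foo foo_inst;
 *
 *	ret = __cpuhp_state_add_instance(state, &foo_inst.node, true);
 *
 * With invoke == true the startup.multi callback runs for every present CPU
 * that has already reached @state before the node is added to the list.
 */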
2449
2450 /**
2451 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
2452 * @state: The state to setup
2453 * @name: Name of the step
2454 * @invoke: If true, the startup function is invoked for cpus where
2455 * cpu state >= @state
2456 * @startup: startup callback function
2457 * @teardown: teardown callback function
2458 * @multi_instance: State is set up for multiple instances which get
2459 * added afterwards.
2460 *
2461 * The caller needs to hold cpus read locked while calling this function.
2462 * Return:
2463 * On success:
2464 * Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
2465 * 0 for all other states
2466 * On failure: proper (negative) error code
2467 */
__cpuhp_setup_state_cpuslocked(enum cpuhp_state state,const char * name,bool invoke,int (* startup)(unsigned int cpu),int (* teardown)(unsigned int cpu),bool multi_instance)2468 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2469 const char *name, bool invoke,
2470 int (*startup)(unsigned int cpu),
2471 int (*teardown)(unsigned int cpu),
2472 bool multi_instance)
2473 {
2474 int cpu, ret = 0;
2475 bool dynstate;
2476
2477 lockdep_assert_cpus_held();
2478
2479 if (cpuhp_cb_check(state) || !name)
2480 return -EINVAL;
2481
2482 mutex_lock(&cpuhp_state_mutex);
2483
2484 ret = cpuhp_store_callbacks(state, name, startup, teardown,
2485 multi_instance);
2486
2487 dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
2488 if (ret > 0 && dynstate) {
2489 state = ret;
2490 ret = 0;
2491 }
2492
2493 if (ret || !invoke || !startup)
2494 goto out;
2495
2496 /*
2497 * Try to call the startup callback for each present cpu
2498 * depending on the hotplug state of the cpu.
2499 */
2500 for_each_present_cpu(cpu) {
2501 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2502 int cpustate = st->state;
2503
2504 if (cpustate < state)
2505 continue;
2506
2507 ret = cpuhp_issue_call(cpu, state, true, NULL);
2508 if (ret) {
2509 if (teardown)
2510 cpuhp_rollback_install(cpu, state, NULL);
2511 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2512 goto out;
2513 }
2514 }
2515 out:
2516 mutex_unlock(&cpuhp_state_mutex);
2517 /*
2518 * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
2519 * return the dynamically allocated state in case of success.
2520 */
2521 if (!ret && dynstate)
2522 return state;
2523 return ret;
2524 }
2525 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2526
__cpuhp_setup_state(enum cpuhp_state state,const char * name,bool invoke,int (* startup)(unsigned int cpu),int (* teardown)(unsigned int cpu),bool multi_instance)2527 int __cpuhp_setup_state(enum cpuhp_state state,
2528 const char *name, bool invoke,
2529 int (*startup)(unsigned int cpu),
2530 int (*teardown)(unsigned int cpu),
2531 bool multi_instance)
2532 {
2533 int ret;
2534
2535 cpus_read_lock();
2536 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2537 teardown, multi_instance);
2538 cpus_read_unlock();
2539 return ret;
2540 }
2541 EXPORT_SYMBOL(__cpuhp_setup_state);
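
/*
 * Minimal registration sketch (illustrative only; mydrv_online and
 * mydrv_offline are hypothetical callbacks, not part of this file). Most
 * callers go through the cpuhp_setup_state*() convenience wrappers in
 * <linux/cpuhotplug.h>, which boil down to a call like:
 *
 *	ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online", true,
 *				  mydrv_online, mydrv_offline, false);
 *	if (ret < 0)
 *		return ret;
 *	dyn_state = ret;
 *
 * For CPUHP_AP_ONLINE_DYN the positive return value is the dynamically
 * reserved state and must be passed to __cpuhp_remove_state(dyn_state, true)
 * when the callbacks are torn down again.
 */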
2542
__cpuhp_state_remove_instance(enum cpuhp_state state,struct hlist_node * node,bool invoke)2543 int __cpuhp_state_remove_instance(enum cpuhp_state state,
2544 struct hlist_node *node, bool invoke)
2545 {
2546 struct cpuhp_step *sp = cpuhp_get_step(state);
2547 int cpu;
2548
2549 BUG_ON(cpuhp_cb_check(state));
2550
2551 if (!sp->multi_instance)
2552 return -EINVAL;
2553
2554 cpus_read_lock();
2555 mutex_lock(&cpuhp_state_mutex);
2556
2557 if (!invoke || !cpuhp_get_teardown_cb(state))
2558 goto remove;
2559 /*
2560 * Call the teardown callback for each present cpu depending
2561 * on the hotplug state of the cpu. This function is not
2562 * allowed to fail currently!
2563 */
2564 for_each_present_cpu(cpu) {
2565 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2566 int cpustate = st->state;
2567
2568 if (cpustate >= state)
2569 cpuhp_issue_call(cpu, state, false, node);
2570 }
2571
2572 remove:
2573 hlist_del(node);
2574 mutex_unlock(&cpuhp_state_mutex);
2575 cpus_read_unlock();
2576
2577 return 0;
2578 }
2579 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2580
2581 /**
2582 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2583 * @state: The state to remove
2584 * @invoke: If true, the teardown function is invoked for cpus where
2585 * cpu state >= @state
2586 *
2587 * The caller needs to hold cpus read locked while calling this function.
2588 * The teardown callback is currently not allowed to fail. Think
2589 * about module removal!
2590 */
__cpuhp_remove_state_cpuslocked(enum cpuhp_state state,bool invoke)2591 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2592 {
2593 struct cpuhp_step *sp = cpuhp_get_step(state);
2594 int cpu;
2595
2596 BUG_ON(cpuhp_cb_check(state));
2597
2598 lockdep_assert_cpus_held();
2599
2600 mutex_lock(&cpuhp_state_mutex);
2601 if (sp->multi_instance) {
2602 WARN(!hlist_empty(&sp->list),
2603 "Error: Removing state %d which has instances left.\n",
2604 state);
2605 goto remove;
2606 }
2607
2608 if (!invoke || !cpuhp_get_teardown_cb(state))
2609 goto remove;
2610
2611 /*
2612 * Call the teardown callback for each present cpu depending
2613 * on the hotplug state of the cpu. This function is not
2614 * allowed to fail currently!
2615 */
2616 for_each_present_cpu(cpu) {
2617 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2618 int cpustate = st->state;
2619
2620 if (cpustate >= state)
2621 cpuhp_issue_call(cpu, state, false, NULL);
2622 }
2623 remove:
2624 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2625 mutex_unlock(&cpuhp_state_mutex);
2626 }
2627 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2628
__cpuhp_remove_state(enum cpuhp_state state,bool invoke)2629 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2630 {
2631 cpus_read_lock();
2632 __cpuhp_remove_state_cpuslocked(state, invoke);
2633 cpus_read_unlock();
2634 }
2635 EXPORT_SYMBOL(__cpuhp_remove_state);
2636
2637 #ifdef CONFIG_HOTPLUG_SMT
cpuhp_offline_cpu_device(unsigned int cpu)2638 static void cpuhp_offline_cpu_device(unsigned int cpu)
2639 {
2640 struct device *dev = get_cpu_device(cpu);
2641
2642 dev->offline = true;
2643 /* Tell user space about the state change */
2644 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2645 }
2646
cpuhp_online_cpu_device(unsigned int cpu)2647 static void cpuhp_online_cpu_device(unsigned int cpu)
2648 {
2649 struct device *dev = get_cpu_device(cpu);
2650
2651 dev->offline = false;
2652 /* Tell user space about the state change */
2653 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2654 }
2655
cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)2656 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2657 {
2658 int cpu, ret = 0;
2659
2660 cpu_maps_update_begin();
2661 for_each_online_cpu(cpu) {
2662 if (topology_is_primary_thread(cpu))
2663 continue;
2664 /*
2665 * Disable can be called with CPU_SMT_ENABLED when changing
2666 * from a higher to a lower number of SMT threads per core.
2667 */
2668 if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
2669 continue;
2670 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2671 if (ret)
2672 break;
2673 /*
2674 * As this needs to hold the cpu maps lock it's impossible
2675 * to call device_offline() because that ends up calling
2676 * cpu_down(), which takes the cpu maps lock. The lock needs
2677 * to be held because this might race against in-kernel
2678 * abusers of the hotplug machinery (thermal management).
2679 *
2680 * So nothing would update device:offline state. That would
2681 * leave the sysfs entry stale and prevent onlining after
2682 * smt control has been changed to 'off' again. This is
2683 * called under the sysfs hotplug lock, so it is properly
2684 * serialized against the regular offline usage.
2685 */
2686 cpuhp_offline_cpu_device(cpu);
2687 }
2688 if (!ret)
2689 cpu_smt_control = ctrlval;
2690 cpu_maps_update_done();
2691 return ret;
2692 }
2693
2694 /* Check if the core a CPU belongs to is online */
2695 #if !defined(topology_is_core_online)
topology_is_core_online(unsigned int cpu)2696 static inline bool topology_is_core_online(unsigned int cpu)
2697 {
2698 return true;
2699 }
2700 #endif
2701
cpuhp_smt_enable(void)2702 int cpuhp_smt_enable(void)
2703 {
2704 int cpu, ret = 0;
2705
2706 cpu_maps_update_begin();
2707 cpu_smt_control = CPU_SMT_ENABLED;
2708 for_each_present_cpu(cpu) {
2709 /* Skip online CPUs and CPUs on offline nodes */
2710 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2711 continue;
2712 if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
2713 continue;
2714 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2715 if (ret)
2716 break;
2717 /* See comment in cpuhp_smt_disable() */
2718 cpuhp_online_cpu_device(cpu);
2719 }
2720 cpu_maps_update_done();
2721 return ret;
2722 }
2723 #endif
2724
2725 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
state_show(struct device * dev,struct device_attribute * attr,char * buf)2726 static ssize_t state_show(struct device *dev,
2727 struct device_attribute *attr, char *buf)
2728 {
2729 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2730
2731 return sprintf(buf, "%d\n", st->state);
2732 }
2733 static DEVICE_ATTR_RO(state);
2734
target_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2735 static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2736 const char *buf, size_t count)
2737 {
2738 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2739 struct cpuhp_step *sp;
2740 int target, ret;
2741
2742 ret = kstrtoint(buf, 10, &target);
2743 if (ret)
2744 return ret;
2745
2746 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2747 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2748 return -EINVAL;
2749 #else
2750 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2751 return -EINVAL;
2752 #endif
2753
2754 ret = lock_device_hotplug_sysfs();
2755 if (ret)
2756 return ret;
2757
2758 mutex_lock(&cpuhp_state_mutex);
2759 sp = cpuhp_get_step(target);
2760 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2761 mutex_unlock(&cpuhp_state_mutex);
2762 if (ret)
2763 goto out;
2764
2765 if (st->state < target)
2766 ret = cpu_up(dev->id, target);
2767 else if (st->state > target)
2768 ret = cpu_down(dev->id, target);
2769 else if (WARN_ON(st->target != target))
2770 st->target = target;
2771 out:
2772 unlock_device_hotplug();
2773 return ret ? ret : count;
2774 }
2775
target_show(struct device * dev,struct device_attribute * attr,char * buf)2776 static ssize_t target_show(struct device *dev,
2777 struct device_attribute *attr, char *buf)
2778 {
2779 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2780
2781 return sprintf(buf, "%d\n", st->target);
2782 }
2783 static DEVICE_ATTR_RW(target);
2784
fail_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2785 static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
2786 const char *buf, size_t count)
2787 {
2788 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2789 struct cpuhp_step *sp;
2790 int fail, ret;
2791
2792 ret = kstrtoint(buf, 10, &fail);
2793 if (ret)
2794 return ret;
2795
2796 if (fail == CPUHP_INVALID) {
2797 st->fail = fail;
2798 return count;
2799 }
2800
2801 if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2802 return -EINVAL;
2803
2804 /*
2805 * Cannot fail STARTING/DYING callbacks.
2806 */
2807 if (cpuhp_is_atomic_state(fail))
2808 return -EINVAL;
2809
2810 /*
2811 * DEAD callbacks cannot fail...
2812 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2813 * triggers the STARTING callbacks, so a failure in this state would
2814 * hinder rollback.
2815 */
2816 if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2817 return -EINVAL;
2818
2819 /*
2820 * Cannot fail anything that doesn't have callbacks.
2821 */
2822 mutex_lock(&cpuhp_state_mutex);
2823 sp = cpuhp_get_step(fail);
2824 if (!sp->startup.single && !sp->teardown.single)
2825 ret = -EINVAL;
2826 mutex_unlock(&cpuhp_state_mutex);
2827 if (ret)
2828 return ret;
2829
2830 st->fail = fail;
2831
2832 return count;
2833 }
2834
fail_show(struct device * dev,struct device_attribute * attr,char * buf)2835 static ssize_t fail_show(struct device *dev,
2836 struct device_attribute *attr, char *buf)
2837 {
2838 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2839
2840 return sprintf(buf, "%d\n", st->fail);
2841 }
2842
2843 static DEVICE_ATTR_RW(fail);
2844
2845 static struct attribute *cpuhp_cpu_attrs[] = {
2846 &dev_attr_state.attr,
2847 &dev_attr_target.attr,
2848 &dev_attr_fail.attr,
2849 NULL
2850 };
2851
2852 static const struct attribute_group cpuhp_cpu_attr_group = {
2853 .attrs = cpuhp_cpu_attrs,
2854 .name = "hotplug",
2855 };
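
/*
 * These attributes surface the per-CPU hotplug state in sysfs, e.g.
 * (illustrative shell transcript, assuming a standard sysfs mount):
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *	# echo <state> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *
 * Writing "target" moves the CPU towards the given state via cpu_up()/
 * cpu_down() (0 == CPUHP_OFFLINE, i.e. offline the CPU); writing "fail" arms
 * failure injection for that state on the next hotplug operation, subject to
 * the checks in fail_store() above, and writing CPUHP_INVALID (-1) disarms it
 * again. The state numbers can be listed via the global "states" attribute
 * defined below.
 */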
2856
states_show(struct device * dev,struct device_attribute * attr,char * buf)2857 static ssize_t states_show(struct device *dev,
2858 struct device_attribute *attr, char *buf)
2859 {
2860 ssize_t cur, res = 0;
2861 int i;
2862
2863 mutex_lock(&cpuhp_state_mutex);
2864 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2865 struct cpuhp_step *sp = cpuhp_get_step(i);
2866
2867 if (sp->name) {
2868 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2869 buf += cur;
2870 res += cur;
2871 }
2872 }
2873 mutex_unlock(&cpuhp_state_mutex);
2874 return res;
2875 }
2876 static DEVICE_ATTR_RO(states);
2877
2878 static struct attribute *cpuhp_cpu_root_attrs[] = {
2879 &dev_attr_states.attr,
2880 NULL
2881 };
2882
2883 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2884 .attrs = cpuhp_cpu_root_attrs,
2885 .name = "hotplug",
2886 };
2887
2888 #ifdef CONFIG_HOTPLUG_SMT
2889
cpu_smt_num_threads_valid(unsigned int threads)2890 static bool cpu_smt_num_threads_valid(unsigned int threads)
2891 {
2892 if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
2893 return threads >= 1 && threads <= cpu_smt_max_threads;
2894 return threads == 1 || threads == cpu_smt_max_threads;
2895 }
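
/*
 * Example: with cpu_smt_max_threads == 4, kernels with
 * CONFIG_SMT_NUM_THREADS_DYNAMIC accept 1, 2, 3 or 4 threads here, while all
 * others only accept the two extremes, 1 (SMT off) and 4 (SMT fully on).
 */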
2896
2897 static ssize_t
__store_smt_control(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2898 __store_smt_control(struct device *dev, struct device_attribute *attr,
2899 const char *buf, size_t count)
2900 {
2901 int ctrlval, ret, num_threads, orig_threads;
2902 bool force_off;
2903
2904 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2905 return -EPERM;
2906
2907 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2908 return -ENODEV;
2909
2910 if (sysfs_streq(buf, "on")) {
2911 ctrlval = CPU_SMT_ENABLED;
2912 num_threads = cpu_smt_max_threads;
2913 } else if (sysfs_streq(buf, "off")) {
2914 ctrlval = CPU_SMT_DISABLED;
2915 num_threads = 1;
2916 } else if (sysfs_streq(buf, "forceoff")) {
2917 ctrlval = CPU_SMT_FORCE_DISABLED;
2918 num_threads = 1;
2919 } else if (kstrtoint(buf, 10, &num_threads) == 0) {
2920 if (num_threads == 1)
2921 ctrlval = CPU_SMT_DISABLED;
2922 else if (cpu_smt_num_threads_valid(num_threads))
2923 ctrlval = CPU_SMT_ENABLED;
2924 else
2925 return -EINVAL;
2926 } else {
2927 return -EINVAL;
2928 }
2929
2930 ret = lock_device_hotplug_sysfs();
2931 if (ret)
2932 return ret;
2933
2934 orig_threads = cpu_smt_num_threads;
2935 cpu_smt_num_threads = num_threads;
2936
2937 force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
2938
2939 if (num_threads > orig_threads)
2940 ret = cpuhp_smt_enable();
2941 else if (num_threads < orig_threads || force_off)
2942 ret = cpuhp_smt_disable(ctrlval);
2943
2944 unlock_device_hotplug();
2945 return ret ? ret : count;
2946 }
2947
2948 #else /* !CONFIG_HOTPLUG_SMT */
2949 static ssize_t
__store_smt_control(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2950 __store_smt_control(struct device *dev, struct device_attribute *attr,
2951 const char *buf, size_t count)
2952 {
2953 return -ENODEV;
2954 }
2955 #endif /* CONFIG_HOTPLUG_SMT */
2956
2957 static const char *smt_states[] = {
2958 [CPU_SMT_ENABLED] = "on",
2959 [CPU_SMT_DISABLED] = "off",
2960 [CPU_SMT_FORCE_DISABLED] = "forceoff",
2961 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
2962 [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
2963 };
2964
control_show(struct device * dev,struct device_attribute * attr,char * buf)2965 static ssize_t control_show(struct device *dev,
2966 struct device_attribute *attr, char *buf)
2967 {
2968 const char *state = smt_states[cpu_smt_control];
2969
2970 #ifdef CONFIG_HOTPLUG_SMT
2971 /*
2972 * If SMT is enabled but not all threads are enabled then show the
2973 * number of threads. If all threads are enabled show "on". Otherwise
2974 * show the state name.
2975 */
2976 if (cpu_smt_control == CPU_SMT_ENABLED &&
2977 cpu_smt_num_threads != cpu_smt_max_threads)
2978 return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
2979 #endif
2980
2981 return sysfs_emit(buf, "%s\n", state);
2982 }
2983
control_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2984 static ssize_t control_store(struct device *dev, struct device_attribute *attr,
2985 const char *buf, size_t count)
2986 {
2987 return __store_smt_control(dev, attr, buf, count);
2988 }
2989 static DEVICE_ATTR_RW(control);
2990
active_show(struct device * dev,struct device_attribute * attr,char * buf)2991 static ssize_t active_show(struct device *dev,
2992 struct device_attribute *attr, char *buf)
2993 {
2994 return sysfs_emit(buf, "%d\n", sched_smt_active());
2995 }
2996 static DEVICE_ATTR_RO(active);
2997
2998 static struct attribute *cpuhp_smt_attrs[] = {
2999 &dev_attr_control.attr,
3000 &dev_attr_active.attr,
3001 NULL
3002 };
3003
3004 static const struct attribute_group cpuhp_smt_attr_group = {
3005 .attrs = cpuhp_smt_attrs,
3006 .name = "smt",
3007 };
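
/*
 * The group above appears under /sys/devices/system/cpu/smt/, e.g.
 * (illustrative shell transcript):
 *
 *	# cat /sys/devices/system/cpu/smt/control
 *	on
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# cat /sys/devices/system/cpu/smt/active
 *	0
 *
 * Where supported, a plain thread count (e.g. "2") can be written to
 * "control" instead of on/off/forceoff; see __store_smt_control() above.
 */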
3008
cpu_smt_sysfs_init(void)3009 static int __init cpu_smt_sysfs_init(void)
3010 {
3011 struct device *dev_root;
3012 int ret = -ENODEV;
3013
3014 dev_root = bus_get_dev_root(&cpu_subsys);
3015 if (dev_root) {
3016 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
3017 put_device(dev_root);
3018 }
3019 return ret;
3020 }
3021
cpuhp_sysfs_init(void)3022 static int __init cpuhp_sysfs_init(void)
3023 {
3024 struct device *dev_root;
3025 int cpu, ret;
3026
3027 ret = cpu_smt_sysfs_init();
3028 if (ret)
3029 return ret;
3030
3031 dev_root = bus_get_dev_root(&cpu_subsys);
3032 if (dev_root) {
3033 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
3034 put_device(dev_root);
3035 if (ret)
3036 return ret;
3037 }
3038
3039 for_each_possible_cpu(cpu) {
3040 struct device *dev = get_cpu_device(cpu);
3041
3042 if (!dev)
3043 continue;
3044 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
3045 if (ret)
3046 return ret;
3047 }
3048 return 0;
3049 }
3050 device_initcall(cpuhp_sysfs_init);
3051 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
3052
3053 /*
3054 * cpu_bit_bitmap[] is a special, "compressed" data structure that
3055 * represents, for every nr, the NR_CPUS-bit binary value 1<<nr.
3056 *
3057 * It is used by cpumask_of() to get a constant address to a CPU
3058 * mask value that has a single bit set only.
3059 */
3060
3061 /* cpu_bit_bitmap[0] is empty - so we can back into it */
3062 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
3063 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3064 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3065 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3066
3067 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3068
3069 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
3070 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
3071 #if BITS_PER_LONG > 32
3072 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
3073 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
3074 #endif
3075 };
3076 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
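
/*
 * Illustrative sketch of how cpumask_of() (via get_cpu_mask() in
 * <linux/cpumask.h>) indexes this table: it takes the row whose first word
 * has bit (cpu % BITS_PER_LONG) set and steps back (cpu / BITS_PER_LONG)
 * longs, roughly:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Read as a cpumask, the result has exactly bit 'cpu' set. The empty row 0
 * is what makes stepping backwards safe.
 */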
3077
3078 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
3079 EXPORT_SYMBOL(cpu_all_bits);
3080
3081 #ifdef CONFIG_INIT_ALL_POSSIBLE
3082 struct cpumask __cpu_possible_mask __ro_after_init
3083 = {CPU_BITS_ALL};
3084 unsigned int __num_possible_cpus __ro_after_init = NR_CPUS;
3085 #else
3086 struct cpumask __cpu_possible_mask __ro_after_init;
3087 unsigned int __num_possible_cpus __ro_after_init;
3088 #endif
3089 EXPORT_SYMBOL(__cpu_possible_mask);
3090 EXPORT_SYMBOL(__num_possible_cpus);
3091
3092 struct cpumask __cpu_online_mask __read_mostly;
3093 EXPORT_SYMBOL(__cpu_online_mask);
3094
3095 struct cpumask __cpu_enabled_mask __read_mostly;
3096 EXPORT_SYMBOL(__cpu_enabled_mask);
3097
3098 struct cpumask __cpu_present_mask __read_mostly;
3099 EXPORT_SYMBOL(__cpu_present_mask);
3100
3101 struct cpumask __cpu_active_mask __read_mostly;
3102 EXPORT_SYMBOL(__cpu_active_mask);
3103
3104 struct cpumask __cpu_dying_mask __read_mostly;
3105 EXPORT_SYMBOL(__cpu_dying_mask);
3106
3107 atomic_t __num_online_cpus __read_mostly;
3108 EXPORT_SYMBOL(__num_online_cpus);
3109
init_cpu_present(const struct cpumask * src)3110 void init_cpu_present(const struct cpumask *src)
3111 {
3112 cpumask_copy(&__cpu_present_mask, src);
3113 }
3114
init_cpu_possible(const struct cpumask * src)3115 void init_cpu_possible(const struct cpumask *src)
3116 {
3117 cpumask_copy(&__cpu_possible_mask, src);
3118 __num_possible_cpus = cpumask_weight(&__cpu_possible_mask);
3119 }
3120
set_cpu_online(unsigned int cpu,bool online)3121 void set_cpu_online(unsigned int cpu, bool online)
3122 {
3123 /*
3124 * atomic_inc/dec() is required to handle the horrid abuse of this
3125 * function by the reboot and kexec code which invoke it from
3126 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
3127 * regular CPU hotplug is properly serialized.
3128 *
3129 * Note that the fact that __num_online_cpus is of type atomic_t
3130 * does not protect readers which are not serialized against
3131 * concurrent hotplug operations.
3132 */
3133 if (online) {
3134 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
3135 atomic_inc(&__num_online_cpus);
3136 } else {
3137 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
3138 atomic_dec(&__num_online_cpus);
3139 }
3140 }
3141
3142 /*
3143 * This should be marked __init, but there is a boatload of call sites
3144 * which need to be fixed up to do so. Sigh...
3145 */
set_cpu_possible(unsigned int cpu,bool possible)3146 void set_cpu_possible(unsigned int cpu, bool possible)
3147 {
3148 if (possible) {
3149 if (!cpumask_test_and_set_cpu(cpu, &__cpu_possible_mask))
3150 __num_possible_cpus++;
3151 } else {
3152 if (cpumask_test_and_clear_cpu(cpu, &__cpu_possible_mask))
3153 __num_possible_cpus--;
3154 }
3155 }
3156
3157 /*
3158 * Activate the first processor.
3159 */
boot_cpu_init(void)3160 void __init boot_cpu_init(void)
3161 {
3162 int cpu = smp_processor_id();
3163
3164 /* Mark the boot cpu "present", "online" etc. for the SMP and UP cases */
3165 set_cpu_online(cpu, true);
3166 set_cpu_active(cpu, true);
3167 set_cpu_present(cpu, true);
3168 set_cpu_possible(cpu, true);
3169
3170 #ifdef CONFIG_SMP
3171 __boot_cpu_id = cpu;
3172 #endif
3173 }
3174
3175 /*
3176 * Must be called _AFTER_ setting up the per_cpu areas
3177 */
boot_cpu_hotplug_init(void)3178 void __init boot_cpu_hotplug_init(void)
3179 {
3180 #ifdef CONFIG_SMP
3181 cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
3182 atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
3183 #endif
3184 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
3185 this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
3186 }
3187
3188 #ifdef CONFIG_CPU_MITIGATIONS
3189 /*
3190 * All except the cross-thread attack vector are mitigated by default.
3191 * Cross-thread mitigation often requires disabling SMT, which is expensive,
3192 * so cross-thread mitigations are only partially enabled by default.
3193 *
3194 * Guest-to-Host and Guest-to-Guest vectors are only needed if KVM support is
3195 * present.
3196 */
3197 static bool attack_vectors[NR_CPU_ATTACK_VECTORS] __ro_after_init = {
3198 [CPU_MITIGATE_USER_KERNEL] = true,
3199 [CPU_MITIGATE_USER_USER] = true,
3200 [CPU_MITIGATE_GUEST_HOST] = IS_ENABLED(CONFIG_KVM),
3201 [CPU_MITIGATE_GUEST_GUEST] = IS_ENABLED(CONFIG_KVM),
3202 };
3203
cpu_attack_vector_mitigated(enum cpu_attack_vectors v)3204 bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
3205 {
3206 if (v < NR_CPU_ATTACK_VECTORS)
3207 return attack_vectors[v];
3208
3209 WARN_ONCE(1, "Invalid attack vector %d\n", v);
3210 return false;
3211 }
3212
3213 /*
3214 * There are three global options: 'off', 'auto' and 'auto,nosmt'. These may
3215 * optionally be combined with attack-vector disables, which follow them.
3216 *
3217 * Examples:
3218 * mitigations=auto,no_user_kernel,no_user_user,no_cross_thread
3219 * mitigations=auto,nosmt,no_guest_host,no_guest_guest
3220 *
3221 * mitigations=off is equivalent to disabling all attack vectors.
3222 */
3223 enum cpu_mitigations {
3224 CPU_MITIGATIONS_OFF,
3225 CPU_MITIGATIONS_AUTO,
3226 CPU_MITIGATIONS_AUTO_NOSMT,
3227 };
3228
3229 enum {
3230 NO_USER_KERNEL,
3231 NO_USER_USER,
3232 NO_GUEST_HOST,
3233 NO_GUEST_GUEST,
3234 NO_CROSS_THREAD,
3235 NR_VECTOR_PARAMS,
3236 };
3237
3238 enum smt_mitigations smt_mitigations __ro_after_init = SMT_MITIGATIONS_AUTO;
3239 static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
3240
3241 static const match_table_t global_mitigations = {
3242 { CPU_MITIGATIONS_AUTO_NOSMT, "auto,nosmt"},
3243 { CPU_MITIGATIONS_AUTO, "auto"},
3244 { CPU_MITIGATIONS_OFF, "off"},
3245 };
3246
3247 static const match_table_t vector_mitigations = {
3248 { NO_USER_KERNEL, "no_user_kernel"},
3249 { NO_USER_USER, "no_user_user"},
3250 { NO_GUEST_HOST, "no_guest_host"},
3251 { NO_GUEST_GUEST, "no_guest_guest"},
3252 { NO_CROSS_THREAD, "no_cross_thread"},
3253 { NR_VECTOR_PARAMS, NULL},
3254 };
3255
mitigations_parse_global_opt(char * arg)3256 static int __init mitigations_parse_global_opt(char *arg)
3257 {
3258 int i;
3259
3260 for (i = 0; i < ARRAY_SIZE(global_mitigations); i++) {
3261 const char *pattern = global_mitigations[i].pattern;
3262
3263 if (!strncmp(arg, pattern, strlen(pattern))) {
3264 cpu_mitigations = global_mitigations[i].token;
3265 return strlen(pattern);
3266 }
3267 }
3268
3269 return 0;
3270 }
3271
mitigations_parse_cmdline(char * arg)3272 static int __init mitigations_parse_cmdline(char *arg)
3273 {
3274 char *s, *p;
3275 int len;
3276
3277 len = mitigations_parse_global_opt(arg);
3278
3279 if (cpu_mitigations_off()) {
3280 memset(attack_vectors, 0, sizeof(attack_vectors));
3281 smt_mitigations = SMT_MITIGATIONS_OFF;
3282 } else if (cpu_mitigations_auto_nosmt()) {
3283 smt_mitigations = SMT_MITIGATIONS_ON;
3284 }
3285
3286 p = arg + len;
3287
3288 if (!*p)
3289 return 0;
3290
3291 /* Attack vector controls may come after the ',' */
3292 if (*p++ != ',' || !IS_ENABLED(CONFIG_ARCH_HAS_CPU_ATTACK_VECTORS)) {
3293 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", arg);
3294 return 0;
3295 }
3296
3297 while ((s = strsep(&p, ",")) != NULL) {
3298 switch (match_token(s, vector_mitigations, NULL)) {
3299 case NO_USER_KERNEL:
3300 attack_vectors[CPU_MITIGATE_USER_KERNEL] = false;
3301 break;
3302 case NO_USER_USER:
3303 attack_vectors[CPU_MITIGATE_USER_USER] = false;
3304 break;
3305 case NO_GUEST_HOST:
3306 attack_vectors[CPU_MITIGATE_GUEST_HOST] = false;
3307 break;
3308 case NO_GUEST_GUEST:
3309 attack_vectors[CPU_MITIGATE_GUEST_GUEST] = false;
3310 break;
3311 case NO_CROSS_THREAD:
3312 smt_mitigations = SMT_MITIGATIONS_OFF;
3313 break;
3314 default:
3315 pr_crit("Unsupported mitigations options %s\n", s);
3316 return 0;
3317 }
3318 }
3319
3320 return 0;
3321 }
3322
3323 /* mitigations=off */
cpu_mitigations_off(void)3324 bool cpu_mitigations_off(void)
3325 {
3326 return cpu_mitigations == CPU_MITIGATIONS_OFF;
3327 }
3328 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
3329
3330 /* mitigations=auto,nosmt */
cpu_mitigations_auto_nosmt(void)3331 bool cpu_mitigations_auto_nosmt(void)
3332 {
3333 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
3334 }
3335 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
3336 #else
mitigations_parse_cmdline(char * arg)3337 static int __init mitigations_parse_cmdline(char *arg)
3338 {
3339 pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
3340 return 0;
3341 }
3342 #endif
3343 early_param("mitigations", mitigations_parse_cmdline);
3344