Lines matching +full:multi +full:- +full:attr

The hits below are quoted from the Linux kernel's CPU-hotplug core, kernel/cpu.c. Each entry shows the source line number, the matching line, and, where the match sits inside a definition, the enclosing function ("in foo()") or the role of the match ("argument", "member").

36 #include <linux/percpu-rwsem.h>
48 * struct cpuhp_cpu_state - Per cpu hotplug state storage
58 * @node: Remote CPU node; for multi-instance, do a single entry callback for install/remove
60 * @last: For multi-instance rollback, remember how far we got
64 * @done_up: Signal completion to the issuer of the task for cpu-up
65 * @done_down: Signal completion to the issuer of the task for cpu-down
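
For orientation, the per-CPU bookkeeping structure these kernel-doc fragments belong to looks approximately like this in kernel/cpu.c (a sketch reconstructed from the matched lines and the upstream source; exact members vary by kernel version and config):

struct cpuhp_cpu_state {
        enum cpuhp_state        state;          /* current state */
        enum cpuhp_state        target;         /* target state */
        enum cpuhp_state        fail;           /* state at which to inject a failure */
#ifdef CONFIG_SMP
        struct task_struct      *thread;        /* per-CPU hotplug thread */
        bool                    should_run;
        bool                    rollback;
        bool                    single;
        bool                    bringup;
        struct hlist_node       *node;          /* multi-instance: single entry install/remove */
        struct hlist_node       *last;          /* multi-instance rollback position */
        enum cpuhp_state        cb_state;
        int                     result;
        atomic_t                ap_sync_state;  /* AP/control-CPU synchronization */
        struct completion       done_up;        /* signals the issuer of a cpu-up task */
        struct completion       done_down;      /* signals the issuer of a cpu-down task */
#endif
};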
97 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
99 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
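
These two static lockdep maps model the whole bringup and teardown sequences as pseudo-locks, so lockdep can catch dependency inversions against them. The helpers that take and drop the maps are tiny; they read approximately:

static inline void cpuhp_lock_acquire(bool bringup)
{
        lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
        lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}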
119 * struct cpuhp_step - Hotplug state machine step
130 int (*multi)(unsigned int cpu, struct hlist_node *node); member
135 int (*multi)(unsigned int cpu, struct hlist_node *node); member
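
The two declarations above (lines 130 and 135) are the multi-instance members of the startup and teardown unions in struct cpuhp_step; the full definition is approximately:

struct cpuhp_step {
        const char              *name;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } startup;
        union {
                int             (*single)(unsigned int cpu);
                int             (*multi)(unsigned int cpu,
                                         struct hlist_node *node);
        } teardown;
        /* private: */
        struct hlist_head       list;
        /* public: */
        bool                    cant_stop;
        bool                    multi_instance;
};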
155 return bringup ? !step->startup.single : !step->teardown.single; in cpuhp_step_empty()
159 * cpuhp_invoke_callback - Invoke the callbacks for a given state
163 * @node: For multi-instance, do a single entry callback for install/remove
164 * @lastp: For multi-instance rollback, remember how far we got
180 if (st->fail == state) { in cpuhp_invoke_callback()
181 st->fail = CPUHP_INVALID; in cpuhp_invoke_callback()
182 return -EAGAIN; in cpuhp_invoke_callback()
190 if (!step->multi_instance) { in cpuhp_invoke_callback()
192 cb = bringup ? step->startup.single : step->teardown.single; in cpuhp_invoke_callback()
194 trace_cpuhp_enter(cpu, st->target, state, cb); in cpuhp_invoke_callback()
196 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
199 cbm = bringup ? step->startup.multi : step->teardown.multi; in cpuhp_invoke_callback()
204 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
206 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
212 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
216 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
218 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
233 cbm = !bringup ? step->startup.multi : step->teardown.multi; in cpuhp_invoke_callback()
237 hlist_for_each(node, &step->list) { in cpuhp_invoke_callback()
238 if (!cnt--) in cpuhp_invoke_callback()
241 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); in cpuhp_invoke_callback()
243 trace_cpuhp_exit(cpu, st->state, state, ret); in cpuhp_invoke_callback()
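
Pieced together, the dispatch in cpuhp_invoke_callback() reduces to the shape below (a condensed sketch of the fragments above; tracing, WARNs, and the lastp bookkeeping are elided). Note how the rollback path at line 233 deliberately selects the opposite callback direction:

if (!step->multi_instance) {
        /* Plain per-state callback. */
        cb = bringup ? step->startup.single : step->teardown.single;
        ret = cb(cpu);
} else if (node) {
        /* Install/remove of one specific instance. */
        cbm = bringup ? step->startup.multi : step->teardown.multi;
        ret = cbm(cpu, node);
} else {
        /* State transition: invoke all registered instances, counting
         * successes so a failure can be undone with the opposite
         * callback, stopping after cnt entries (the "if (!cnt--)"). */
        cbm = bringup ? step->startup.multi : step->teardown.multi;
        cnt = 0;
        hlist_for_each(node, &step->list) {
                ret = cbm(cpu, node);
                if (ret)
                        goto err;
                cnt++;
        }
}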
264 struct completion *done = bringup ? &st->done_up : &st->done_down; in wait_for_ap_thread()
270 struct completion *done = bringup ? &st->done_up : &st->done_down; in complete_ap_thread()
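
wait_for_ap_thread() and complete_ap_thread() differ only in which completion they select; in full they are approximately:

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
        struct completion *done = bringup ? &st->done_up : &st->done_down;

        wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
        struct completion *done = bringup ? &st->done_up : &st->done_down;

        complete(done);
}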
294 * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
330 } else if (now - start < NSEC_PER_MSEC) { in cpuhp_wait_for_sync_state()
346 * cpuhp_ap_report_dead - Update synchronization state to DEAD
387 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
446 ret = -EIO; in cpuhp_bp_sync_alive()
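
The AP/control-CPU synchronization is a small atomic state machine kept in ap_sync_state. The update side is a plain exchange whose implied full barrier publishes the AP's progress before the control CPU polls it, as in cpuhp_wait_for_sync_state() at line 330. A sketch, assuming the upstream member layout:

static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
{
        atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);

        /* No handshake; atomic_xchg() implies a full barrier. */
        (void)atomic_xchg(st, state);
}

void cpuhp_ap_report_dead(void)
{
        cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
}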
478 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
565 cpu_hotplug_disabled--; in __cpu_hotplug_enable()
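
cpu_hotplug_disabled is a nesting counter: cpu_hotplug_disable() increments it under the CPU maps mutex and cpu_hotplug_enable() decrements it via __cpu_hotplug_enable() (line 565), so stacked users compose. A minimal usage sketch with a hypothetical caller:

static void mydrv_stable_topology_work(void)
{
        cpu_hotplug_disable();
        /* No CPU can be onlined or offlined in this window;
         * cpu_up()/cpu_down() callers get -EBUSY instead. */
        cpu_hotplug_enable();
}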
589 * Architectures that need SMT-specific errata handling during SMT hotplug should override this.
702 enum cpuhp_state prev_state = st->state; in cpuhp_set_state()
703 bool bringup = st->state < target; in cpuhp_set_state()
705 st->rollback = false; in cpuhp_set_state()
706 st->last = NULL; in cpuhp_set_state()
708 st->target = target; in cpuhp_set_state()
709 st->single = false; in cpuhp_set_state()
710 st->bringup = bringup; in cpuhp_set_state()
721 bool bringup = !st->bringup; in cpuhp_reset_state()
723 st->target = prev_state; in cpuhp_reset_state()
729 if (st->rollback) in cpuhp_reset_state()
732 st->rollback = true; in cpuhp_reset_state()
735 * If we have st->last we need to undo partial multi_instance of this state first. Otherwise start undo at the previous state. in cpuhp_reset_state()
738 if (!st->last) { in cpuhp_reset_state()
739 if (st->bringup) in cpuhp_reset_state()
740 st->state--; in cpuhp_reset_state()
742 st->state++; in cpuhp_reset_state()
745 st->bringup = bringup; in cpuhp_reset_state()
753 if (!st->single && st->state == st->target) in __cpuhp_kick_ap()
756 st->result = 0; in __cpuhp_kick_ap()
762 st->should_run = true; in __cpuhp_kick_ap()
763 wake_up_process(st->thread); in __cpuhp_kick_ap()
764 wait_for_ap_thread(st, st->bringup); in __cpuhp_kick_ap()
775 if ((ret = st->result)) { in cpuhp_kick_ap()
790 return -ECANCELED; in bringup_wait_for_ap_online()
793 kthread_unpark(st->thread); in bringup_wait_for_ap_online()
803 return -ECANCELED; in bringup_wait_for_ap_online()
811 return -EAGAIN; in cpuhp_kick_ap_alive()
838 if (st->target <= CPUHP_AP_ONLINE_IDLE) in cpuhp_bringup_ap()
841 return cpuhp_kick_ap(cpu, st, st->target); in cpuhp_bringup_ap()
855 return -EAGAIN; in bringup_cpu()
882 if (st->target <= CPUHP_AP_ONLINE_IDLE) in bringup_cpu()
885 return cpuhp_kick_ap(cpu, st, st->target); in bringup_cpu()
896 struct mm_struct *mm = idle->active_mm; in finish_cpu()
903 idle->active_mm = &init_mm; in finish_cpu()
916 * st->state will be modified ahead of time, to match state_to_run, as if it has already run.
926 if (st->state >= target) in cpuhp_next_state()
929 *state_to_run = ++st->state; in cpuhp_next_state()
931 if (st->state <= target) in cpuhp_next_state()
934 *state_to_run = st->state--; in cpuhp_next_state()
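
Combining the fragments at lines 926-934, the stepping helper advances st->state one state toward target per call, skipping states with no installed callbacks via cpuhp_step_empty() (line 155). Approximately:

static bool cpuhp_next_state(bool bringup,
                             enum cpuhp_state *state_to_run,
                             struct cpuhp_cpu_state *st,
                             enum cpuhp_state target)
{
        do {
                if (bringup) {
                        if (st->state >= target)
                                return false;
                        *state_to_run = ++st->state;
                } else {
                        if (st->state <= target)
                                return false;
                        *state_to_run = st->state--;
                }
        } while (cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)));

        return true;
}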
963 cpuhp_get_step(st->state)->name, in __cpuhp_invoke_callback_range()
964 st->state, err); in __cpuhp_invoke_callback_range()
965 ret = -1; in __cpuhp_invoke_callback_range()
1002 return st->state <= CPUHP_BRINGUP_CPU; in can_rollback_cpu()
1008 enum cpuhp_state prev_state = st->state; in cpuhp_up_callbacks()
1014 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_up_callbacks()
1015 st->state); in cpuhp_up_callbacks()
1032 return st->should_run; in cpuhp_should_run()
1043 * - single: runs st->cb_state
1044 * - up: runs ++st->state, while st->state < st->target
1045 * - down: runs st->state--, while st->state > st->target
1052 bool bringup = st->bringup; in cpuhp_thread_fun()
1055 if (WARN_ON_ONCE(!st->should_run)) in cpuhp_thread_fun()
1059 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures in cpuhp_thread_fun()
1060 * that if we see ->should_run we also see the rest of the state. in cpuhp_thread_fun()
1072 if (st->single) { in cpuhp_thread_fun()
1073 state = st->cb_state; in cpuhp_thread_fun()
1074 st->should_run = false; in cpuhp_thread_fun()
1076 st->should_run = cpuhp_next_state(bringup, &state, st, st->target); in cpuhp_thread_fun()
1077 if (!st->should_run) in cpuhp_thread_fun()
1085 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
1091 WARN_ON_ONCE(st->result); in cpuhp_thread_fun()
1093 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); in cpuhp_thread_fun()
1096 if (st->result) { in cpuhp_thread_fun()
1102 WARN_ON_ONCE(st->rollback); in cpuhp_thread_fun()
1103 st->should_run = false; in cpuhp_thread_fun()
1110 if (!st->should_run) in cpuhp_thread_fun()
1135 if (!st->thread) in cpuhp_invoke_ap_callback()
1138 st->rollback = false; in cpuhp_invoke_ap_callback()
1139 st->last = NULL; in cpuhp_invoke_ap_callback()
1141 st->node = node; in cpuhp_invoke_ap_callback()
1142 st->bringup = bringup; in cpuhp_invoke_ap_callback()
1143 st->cb_state = state; in cpuhp_invoke_ap_callback()
1144 st->single = true; in cpuhp_invoke_ap_callback()
1151 if ((ret = st->result) && st->last) { in cpuhp_invoke_ap_callback()
1152 st->rollback = true; in cpuhp_invoke_ap_callback()
1153 st->bringup = !bringup; in cpuhp_invoke_ap_callback()
1162 st->node = st->last = NULL; in cpuhp_invoke_ap_callback()
1169 enum cpuhp_state prev_state = st->state; in cpuhp_kick_ap_work()
1178 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); in cpuhp_kick_ap_work()
1179 ret = cpuhp_kick_ap(cpu, st, st->target); in cpuhp_kick_ap_work()
1180 trace_cpuhp_exit(cpu, st->state, prev_state, ret); in cpuhp_kick_ap_work()
1200 init_completion(&st->done_up); in cpuhp_init_state()
1201 init_completion(&st->done_down); in cpuhp_init_state()
1264 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1269 * trivial, there are various non-obvious corner cases, which this function tries to solve in a safe manner.
1282 * their mm mask. -- Peter Zijlstra in clear_tasks_mm_cpumask()
1284 * full-fledged tasklist_lock. in clear_tasks_mm_cpumask()
1298 arch_clear_mm_cpumask_cpu(cpu, t->mm); in clear_tasks_mm_cpumask()
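
The body of clear_tasks_mm_cpumask() walks every process under rcu_read_lock() (safe precisely because the CPU is already offline, per the comment above) and uses find_lock_task_mm() to locate a thread whose mm is still valid. Approximately:

void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        WARN_ON(cpu_online(cpu));

        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /* The main thread may have exited; another thread of
                 * the process may still hold a valid mm. */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                arch_clear_mm_cpumask_cpu(cpu, t->mm);
                task_unlock(t);
        }
        rcu_read_unlock();
}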
1308 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); in take_cpu_down()
1318 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going down, that the current state is CPUHP_TEARDOWN_CPU - 1. in take_cpu_down()
1320 WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1)); in take_cpu_down()
1342 kthread_park(st->thread); in takedown_cpu()
1358 kthread_unpark(st->thread); in takedown_cpu()
1371 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); in takedown_cpu()
1385 * Callbacks must be re-integrated right away to the RCU state machine. in takedown_cpu()
1405 BUG_ON(st->state != CPUHP_AP_OFFLINE); in cpuhp_report_idle_dead()
1407 st->state = CPUHP_AP_IDLE_DEAD; in cpuhp_report_idle_dead()
1419 enum cpuhp_state prev_state = st->state; in cpuhp_down_callbacks()
1425 ret, cpu, cpuhp_get_step(st->state)->name, in cpuhp_down_callbacks()
1426 st->state); in cpuhp_down_callbacks()
1430 if (st->state < prev_state) in cpuhp_down_callbacks()
1446 return -EBUSY; in _cpu_down()
1449 return -EINVAL; in _cpu_down()
1460 if (st->state > CPUHP_TEARDOWN_CPU) { in _cpu_down()
1461 st->target = max((int)target, CPUHP_TEARDOWN_CPU); in _cpu_down()
1474 if (st->state > CPUHP_TEARDOWN_CPU) in _cpu_down()
1477 st->target = target; in _cpu_down()
1484 if (ret && st->state < prev_state) { in _cpu_down()
1485 if (st->state == CPUHP_TEARDOWN_CPU) { in _cpu_down()
1514 return _cpu_down(work->cpu, 0, work->target); in __cpu_down_maps_locked()
1526 return -EOPNOTSUPP; in cpu_down_maps_locked()
1528 return -EBUSY; in cpu_down_maps_locked()
1532 * Ensure that the control task does not run on the to-be-offlined CPU to prevent a deadlock against cfs_b->period_timer. in cpu_down_maps_locked()
1540 return -EBUSY; in cpu_down_maps_locked()
1554 * cpu_device_down - Bring down a cpu device
1565 return cpu_down(dev->id, CPUHP_OFFLINE); in cpu_device_down()
1601 pr_err("Failed to offline CPU%d - error=%d", in smp_shutdown_nonboot_cpus()
1627 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1636 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); in notify_cpu_starting()
1668 st->state = CPUHP_AP_ONLINE_IDLE; in cpuhp_online_idle()
1682 ret = -EINVAL; in _cpu_up()
1690 if (st->state >= target) in _cpu_up()
1693 if (st->state == CPUHP_OFFLINE) { in _cpu_up()
1715 if (st->state > CPUHP_BRINGUP_CPU) { in _cpu_up()
1744 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", in cpu_up()
1746 return -EINVAL; in cpu_up()
1756 err = -EBUSY; in cpu_up()
1760 err = -EPERM; in cpu_up()
1771 * cpu_device_up - Bring up a cpu device
1782 return cpu_up(dev->id, CPUHP_ONLINE); in cpu_device_up()
1798 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1815 pr_err("Failed to bring hibernate-CPU up!\n"); in bringup_hibernate_cpu()
1839 if (!--ncpus) in cpuhp_bringup_mask()
1895 ncpus -= num_online_cpus(); in cpuhp_bringup_cpus_parallel()
1903 /* Bring the not-yet started CPUs up */ in cpuhp_bringup_cpus_parallel()
1930 if (primary == -1) { in freeze_secondary_cpus()
1940 * We take down all of the non-boot CPUs in one shot to avoid races in freeze_secondary_cpus()
1945 pr_info("Disabling non-boot CPUs ...\n"); in freeze_secondary_cpus()
1952 error = -EBUSY; in freeze_secondary_cpus()
1970 pr_err("Non-boot CPUs are not disabled\n"); in freeze_secondary_cpus()
2001 pr_info("Enabling non-boot CPUs ...\n"); in thaw_secondary_cpus()
2026 return -ENOMEM; in alloc_frozen_cpus()
2134 * On the tear-down path, timers_dead_cpu() must be invoked
2166 * All-in-one CPU bringup state which includes the kick alive.
2293 return -EINVAL; in cpuhp_cb_check()
2317 return -EINVAL; in cpuhp_reserve_state()
2321 if (!step->name) in cpuhp_reserve_state()
2325 return -ENOSPC; in cpuhp_reserve_state()
2354 if (name && sp->name) in cpuhp_store_callbacks()
2355 return -EBUSY; in cpuhp_store_callbacks()
2357 sp->startup.single = startup; in cpuhp_store_callbacks()
2358 sp->teardown.single = teardown; in cpuhp_store_callbacks()
2359 sp->name = name; in cpuhp_store_callbacks()
2360 sp->multi_instance = multi_instance; in cpuhp_store_callbacks()
2361 INIT_HLIST_HEAD(&sp->list); in cpuhp_store_callbacks()
2367 return cpuhp_get_step(state)->teardown.single; in cpuhp_get_teardown_cb()
2415 int cpustate = st->state; in cpuhp_rollback_install()
2437 if (sp->multi_instance == false) in __cpuhp_state_add_instance_cpuslocked()
2438 return -EINVAL; in __cpuhp_state_add_instance_cpuslocked()
2442 if (!invoke || !sp->startup.multi) in __cpuhp_state_add_instance_cpuslocked()
2451 int cpustate = st->state; in __cpuhp_state_add_instance_cpuslocked()
2458 if (sp->teardown.multi) in __cpuhp_state_add_instance_cpuslocked()
2465 hlist_add_head(node, &sp->list); in __cpuhp_state_add_instance_cpuslocked()
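
The add-instance path links the caller's hlist_node into sp->list (line 2465) and, when invoked with callbacks enabled, runs the startup.multi callback on each online CPU. A hedged usage sketch of the public wrappers; the mydrv_* names are hypothetical, while cpuhp_setup_state_multi() and cpuhp_state_add_instance() are the real entry points:

struct mydrv_instance {
        struct hlist_node node;         /* linked into the state's list */
};

static enum cpuhp_state mydrv_state;

static int mydrv_online(unsigned int cpu, struct hlist_node *n)
{
        struct mydrv_instance *inst =
                hlist_entry(n, struct mydrv_instance, node);

        /* Per-CPU setup for this one instance. */
        (void)inst;
        return 0;
}

static int mydrv_offline(unsigned int cpu, struct hlist_node *n)
{
        /* Per-CPU teardown for this one instance. */
        return 0;
}

static int mydrv_register(struct mydrv_instance *inst)
{
        int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                          mydrv_online, mydrv_offline);
        if (ret < 0)
                return ret;
        mydrv_state = ret;

        /* Runs mydrv_online(cpu, &inst->node) on every online CPU. */
        return cpuhp_state_add_instance(mydrv_state, &inst->node);
}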
2484 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
2513 return -EINVAL; in __cpuhp_setup_state_cpuslocked()
2535 int cpustate = st->state; in __cpuhp_setup_state_cpuslocked()
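
For the common single-instance case, drivers reach this function through the cpuhp_setup_state() wrapper rather than calling the cpuslocked variant directly. A minimal sketch with hypothetical mydrv callbacks:

static int mydrv_cpu_online(unsigned int cpu)
{
        /* Enable per-CPU resources. */
        return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
        /* Quiesce per-CPU resources. */
        return 0;
}

static int __init mydrv_init(void)
{
        /* For CPUHP_AP_ONLINE_DYN a positive return is the reserved
         * dynamic state number; the online callback has already been
         * run on every currently online CPU. */
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                    mydrv_cpu_online, mydrv_cpu_offline);
        return ret < 0 ? ret : 0;
}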
2584 if (!sp->multi_instance) in __cpuhp_state_remove_instance()
2585 return -EINVAL; in __cpuhp_state_remove_instance()
2599 int cpustate = st->state; in __cpuhp_state_remove_instance()
2615 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2634 if (sp->multi_instance) { in __cpuhp_remove_state_cpuslocked()
2635 WARN(!hlist_empty(&sp->list), in __cpuhp_remove_state_cpuslocked()
2651 int cpustate = st->state; in __cpuhp_remove_state_cpuslocked()
2675 dev->offline = true; in cpuhp_offline_cpu_device()
2677 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); in cpuhp_offline_cpu_device()
2684 dev->offline = false; in cpuhp_online_cpu_device()
2686 kobject_uevent(&dev->kobj, KOBJ_ONLINE); in cpuhp_online_cpu_device()
2752 struct device_attribute *attr, char *buf) in state_show() argument
2754 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in state_show()
2756 return sprintf(buf, "%d\n", st->state); in state_show()
2760 static ssize_t target_store(struct device *dev, struct device_attribute *attr, in target_store() argument
2763 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in target_store()
2773 return -EINVAL; in target_store()
2776 return -EINVAL; in target_store()
2785 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; in target_store()
2790 if (st->state < target) in target_store()
2791 ret = cpu_up(dev->id, target); in target_store()
2792 else if (st->state > target) in target_store()
2793 ret = cpu_down(dev->id, target); in target_store()
2794 else if (WARN_ON(st->target != target)) in target_store()
2795 st->target = target; in target_store()
2802 struct device_attribute *attr, char *buf) in target_show() argument
2804 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in target_show()
2806 return sprintf(buf, "%d\n", st->target); in target_show()
2810 static ssize_t fail_store(struct device *dev, struct device_attribute *attr, in fail_store() argument
2813 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in fail_store()
2822 st->fail = fail; in fail_store()
2827 return -EINVAL; in fail_store()
2833 return -EINVAL; in fail_store()
2841 if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU) in fail_store()
2842 return -EINVAL; in fail_store()
2849 if (!sp->startup.single && !sp->teardown.single) in fail_store()
2850 ret = -EINVAL; in fail_store()
2855 st->fail = fail; in fail_store()
2861 struct device_attribute *attr, char *buf) in fail_show() argument
2863 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); in fail_show()
2865 return sprintf(buf, "%d\n", st->fail); in fail_show()
2871 &dev_attr_state.attr,
2872 &dev_attr_target.attr,
2873 &dev_attr_fail.attr,
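
The three &dev_attr_*.attr entries come from DEVICE_ATTR_* macros applied to the show/store functions above, and the array terminates an attribute group exposed under /sys/devices/system/cpu/cpuN/hotplug/. Approximately:

static DEVICE_ATTR_RO(state);
static DEVICE_ATTR_RW(target);
static DEVICE_ATTR_RW(fail);

static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_target.attr,
        &dev_attr_fail.attr,
        NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
        .attrs = cpuhp_cpu_attrs,
        .name  = "hotplug",
};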
2884 struct device_attribute *attr, char *buf) in states_show() argument
2893 if (sp->name) { in states_show()
2894 cur = sprintf(buf, "%3d: %s\n", i, sp->name); in states_show()
2905 &dev_attr_states.attr,
2925 __store_smt_control(struct device *dev, struct device_attribute *attr, in __store_smt_control() argument
2932 return -EPERM; in __store_smt_control()
2935 return -ENODEV; in __store_smt_control()
2952 return -EINVAL; in __store_smt_control()
2954 return -EINVAL; in __store_smt_control()
2977 __store_smt_control(struct device *dev, struct device_attribute *attr, in __store_smt_control() argument
2980 return -ENODEV; in __store_smt_control()
2993 struct device_attribute *attr, char *buf) in control_show() argument
3008 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state); in control_show()
3011 static ssize_t control_store(struct device *dev, struct device_attribute *attr, in control_store() argument
3014 return __store_smt_control(dev, attr, buf, count); in control_store()
3019 struct device_attribute *attr, char *buf) in active_show() argument
3021 return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active()); in active_show()
3026 &dev_attr_control.attr,
3027 &dev_attr_active.attr,
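
The SMT control attributes follow the same pattern and surface as /sys/devices/system/cpu/smt/{control,active}; approximately:

static DEVICE_ATTR_RW(control);
static DEVICE_ATTR_RO(active);

static struct attribute *cpuhp_smt_attrs[] = {
        &dev_attr_control.attr,
        &dev_attr_active.attr,
        NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
        .attrs = cpuhp_smt_attrs,
        .name  = "smt",
};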
3040 int ret = -ENODEV; in cpu_smt_sysfs_init()
3044 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group); in cpu_smt_sysfs_init()
3061 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group); in cpuhp_sysfs_init()
3072 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group); in cpuhp_sysfs_init()
3089 /* cpu_bit_bitmap[0] is empty - so we can back into it */