Lines matching full:cpu (identifier search results, apparently over the s390 SMP code in arch/s390/kernel/smp.c). Each entry shows the file's own line number, the matching source line, and the enclosing function; trailing tags such as "argument", "local", and "member" indicate how the identifier is used on that line.

13 * We work with logical cpu numbering everywhere we can. The only
14 * functions using the real cpu address (got from STAP) are the sigp
16 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
17 * used e.g. to find the idle task belonging to a logical cpu. Every array
18 * in the kernel is sorted by the logical cpu number and not by the physical
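The comment fragments above (lines that did not match "cpu" are elided by the search) describe the file's central convention: logical CPU numbers everywhere, physical CPU addresses only at the sigp boundary. A minimal sketch of that mapping, assuming the usual s390 declarations (the "logical cpu to cpu address" comment is matched verbatim at line 58 below; the element type here is my assumption):

        /* Sketch, not the literal file contents. */
        int __cpu_logical_map[NR_CPUS];         /* logical cpu to cpu address */

        static inline int cpu_logical_map(int cpu)
        {
                return __cpu_logical_map[cpu];
        }

Every per-CPU array in the kernel is indexed by the logical number; only the sigp helpers translate to the physical address at the last moment.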
23 #define KMSG_COMPONENT "cpu"
37 #include <linux/cpu.h>
55 #include <asm/cpu.h>
58 /* logical cpu to cpu address */
74 static DEFINE_PER_CPU(struct cpu, cpu_devices);
78 static int raw_cpu_stopped(int cpu) in raw_cpu_stopped() argument
82 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { in raw_cpu_stopped()
94 static inline int cpu_stopped(int cpu) in cpu_stopped() argument
96 return raw_cpu_stopped(cpu_logical_map(cpu)); in cpu_stopped()
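Reconstructed from the matched lines, the stopped-state probe in full. The status mask is an assumption based on the sigp sense status layout (0x40 stopped, 0x10 check stop):

        static int raw_cpu_stopped(int cpu)
        {
                u32 status;

                /* sense the physical cpu address; a status word is stored
                 * only when the condition code says so */
                switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
                case sigp_status_stored:
                        if (status & 0x50)      /* stopped or check stop */
                                return 1;
                        break;
                default:
                        break;
                }
                return 0;
        }

        static inline int cpu_stopped(int cpu)
        {
                /* callers pass logical numbers; translate first */
                return raw_cpu_stopped(cpu_logical_map(cpu));
        }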
100 * Ensure that PSW restart is done on an online CPU
104 int cpu; in smp_restart_with_online_cpu() local
106 for_each_online_cpu(cpu) { in smp_restart_with_online_cpu()
107 if (stap() == __cpu_logical_map[cpu]) { in smp_restart_with_online_cpu()
113 /* We are not online: Do PSW restart on an online CPU */ in smp_restart_with_online_cpu()
114 while (sigp(cpu, sigp_restart) == sigp_busy) in smp_restart_with_online_cpu()
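A sketch built around the matched lines: if the executing CPU (its physical address comes from stap()) is found in the online map, the PSW restart can proceed locally; otherwise an online CPU is asked to perform it via sigp. The tail that stops the calling CPU is an assumption about the elided lines:

        void smp_restart_with_online_cpu(void)
        {
                int cpu;

                for_each_online_cpu(cpu) {
                        if (stap() == __cpu_logical_map[cpu]) {
                                /* We are online (the real body re-enables
                                 * DAT here, elided) */
                                return;
                        }
                }
                /* We are not online: Do PSW restart on an online CPU */
                while (sigp(cpu, sigp_restart) == sigp_busy)
                        cpu_relax();
                /* assumed tail: stop ourself and never return */
                while (raw_sigp(stap(), sigp_stop) == sigp_busy)
                        cpu_relax();
                for (;;)
                        ;
        }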
166 int cpu; in smp_send_stop() local
182 for_each_cpu(cpu, &cpumask) { in smp_send_stop()
184 &lowcore_ptr[cpu]->ext_call_fast); in smp_send_stop()
185 while (sigp(cpu, sigp_emergency_signal) == sigp_busy && in smp_send_stop()
190 for_each_cpu(cpu, &cpumask) in smp_send_stop()
191 if (cpu_stopped(cpu)) in smp_send_stop()
192 cpumask_clear_cpu(cpu, &cpumask); in smp_send_stop()
200 for_each_cpu(cpu, &cpumask) { in smp_send_stop()
201 while (sigp(cpu, sigp_stop) == sigp_busy) in smp_send_stop()
203 while (!cpu_stopped(cpu)) in smp_send_stop()
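A condensed sketch of smp_send_stop() around the matched lines: a best-effort emergency signal first, so targets can drain pending work, then an unconditional sigp_stop with a wait for each target to report stopped. The ec_stop_cpu bit name, the oops_in_progress gate, and the timeout arithmetic are assumptions filled in around the fragments:

        void smp_send_stop(void)
        {
                cpumask_t cpumask;
                int cpu;
                u64 end;

                cpumask_copy(&cpumask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &cpumask);

                if (oops_in_progress) {
                        /* assumed: give targets a bounded window to finish
                         * outstanding interrupts before the hard stop */
                        end = get_clock() + (1000000UL << 12);
                        for_each_cpu(cpu, &cpumask) {
                                set_bit(ec_stop_cpu, (unsigned long *)
                                        &lowcore_ptr[cpu]->ext_call_fast);
                                while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
                                       get_clock() < end)
                                        cpu_relax();
                        }
                        while (get_clock() < end) {
                                for_each_cpu(cpu, &cpumask)
                                        if (cpu_stopped(cpu))
                                                cpumask_clear_cpu(cpu, &cpumask);
                                if (cpumask_empty(&cpumask))
                                        break;
                                cpu_relax();
                        }
                }

                /* hard stop whatever is still running */
                for_each_cpu(cpu, &cpumask) {
                        while (sigp(cpu, sigp_stop) == sigp_busy)
                                cpu_relax();
                        while (!cpu_stopped(cpu))
                                cpu_relax();
                }
        }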
242 * Send an external call sigp to another cpu and return without waiting
245 static void smp_ext_bitcall(int cpu, int sig) in smp_ext_bitcall() argument
250 * Set signaling bit in lowcore of target cpu and kick it in smp_ext_bitcall()
252 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); in smp_ext_bitcall()
254 order = smp_vcpu_scheduled(cpu) ? in smp_ext_bitcall()
256 if (sigp(cpu, order) != sigp_busy) in smp_ext_bitcall()
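The bitcall sender reconstructed from the fragments: set the request bit in the target's lowcore, then kick it with whichever sigp order fits, retrying while the target is busy:

        static void smp_ext_bitcall(int cpu, int sig)
        {
                int order;

                /*
                 * Set signaling bit in lowcore of target cpu and kick it
                 */
                set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
                while (1) {
                        /* prefer external call when the vcpu is scheduled,
                         * emergency signal otherwise (matches the ?: above) */
                        order = smp_vcpu_scheduled(cpu) ?
                                sigp_external_call : sigp_emergency_signal;
                        if (sigp(cpu, order) != sigp_busy)
                                break;
                        udelay(10);
                }
        }

The sender never waits for completion; the target picks the bit out of ext_call_fast in its external interrupt handler.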
264 int cpu; in arch_send_call_function_ipi_mask() local
266 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
267 smp_ext_bitcall(cpu, ec_call_function); in arch_send_call_function_ipi_mask()
270 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
272 smp_ext_bitcall(cpu, ec_call_function_single); in arch_send_call_function_single_ipi()
277 * this function sends a 'purge tlb' signal to another CPU.
292 * this function sends a 'reschedule' IPI to another CPU.
296 void smp_send_reschedule(int cpu) in smp_send_reschedule() argument
298 smp_ext_bitcall(cpu, ec_schedule); in smp_send_reschedule()
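All three cross-CPU notifications in the matches funnel through smp_ext_bitcall() with a different ec_* bit; taken together:

        void arch_send_call_function_ipi_mask(const struct cpumask *mask)
        {
                int cpu;

                for_each_cpu(cpu, mask)
                        smp_ext_bitcall(cpu, ec_call_function);
        }

        void arch_send_call_function_single_ipi(int cpu)
        {
                smp_ext_bitcall(cpu, ec_call_function_single);
        }

        void smp_send_reschedule(int cpu)
        {
                smp_ext_bitcall(cpu, ec_schedule);
        }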
354 static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) in smp_get_save_area() argument
360 if (cpu >= NR_CPUS) { in smp_get_save_area()
361 pr_warning("CPU %i exceeds the maximum %i and is excluded from " in smp_get_save_area()
362 "the dump\n", cpu, NR_CPUS - 1); in smp_get_save_area()
365 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); in smp_get_save_area()
368 memcpy_real(zfcpdump_save_areas[cpu], in smp_get_save_area()
378 static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } in smp_get_save_area() argument
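A sketch of the zfcpdump save-area capture, assuming CONFIG_ZFCPDUMP guards the real body (the empty stub at line 378 is the non-ZFCPDUMP variant). The stop-and-store order, the NULL check, and the copy's source address are assumptions:

        #ifdef CONFIG_ZFCPDUMP
        static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
        {
                if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                        return;
                if (cpu >= NR_CPUS) {
                        pr_warning("CPU %i exceeds the maximum %i and is excluded "
                                   "from the dump\n", cpu, NR_CPUS - 1);
                        return;
                }
                zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area),
                                                   GFP_KERNEL);
                if (!zfcpdump_save_areas[cpu])  /* assumed check */
                        return;
                while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
                        cpu_relax();
                memcpy_real(zfcpdump_save_areas[cpu],
                            (void *)(unsigned long) SAVE_AREA_BASE, /* assumed */
                            sizeof(struct save_area));
        }
        #else
        static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
        #endif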
384 int cpu; in cpu_known() local
386 for_each_present_cpu(cpu) { in cpu_known()
387 if (__cpu_logical_map[cpu] == cpu_id) in cpu_known()
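cpu_known() is almost fully determined by the two matched lines: a physical CPU address is "known" if some present logical CPU already maps to it:

        static int cpu_known(int cpu_id)
        {
                int cpu;

                for_each_present_cpu(cpu) {
                        if (__cpu_logical_map[cpu] == cpu_id)
                                return 1;
                }
                return 0;
        }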
419 int cpu_id, logical_cpu, cpu; in smp_rescan_cpus_sclp() local
431 for (cpu = 0; cpu < info->combined; cpu++) { in smp_rescan_cpus_sclp()
432 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) in smp_rescan_cpus_sclp()
434 cpu_id = info->cpu[cpu].address; in smp_rescan_cpus_sclp()
440 if (cpu >= info->configured) in smp_rescan_cpus_sclp()
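A sketch of the SCLP-driven rescan around the matched lines: walk the SCLP CPU list, skip wrong-type and already-known addresses, assign each new address the next free logical number, and mark entries at or past info->configured as standby. Allocation and error handling are filled in from common kernel patterns:

        static int smp_rescan_cpus_sclp(cpumask_t avail)
        {
                struct sclp_cpu_info *info;
                int cpu_id, logical_cpu, cpu;
                int rc;

                logical_cpu = cpumask_first(&avail);
                if (logical_cpu >= nr_cpu_ids)
                        return 0;
                info = kmalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
                rc = sclp_get_cpu_info(info);
                if (rc)
                        goto out;
                for (cpu = 0; cpu < info->combined; cpu++) {
                        if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
                                continue;
                        cpu_id = info->cpu[cpu].address;
                        if (cpu_known(cpu_id))
                                continue;
                        __cpu_logical_map[logical_cpu] = cpu_id;
                        set_cpu_present(logical_cpu, true);
                        if (cpu >= info->configured)
                                smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
                        else
                                smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
                        logical_cpu = cpumask_next(logical_cpu, &avail);
                        if (logical_cpu >= nr_cpu_ids)
                                break;
                }
        out:
                kfree(info);
                return rc;
        }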
466 unsigned int cpu, c_cpus, s_cpus; in smp_detect_cpus() local
491 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { in smp_detect_cpus()
492 if (cpu == boot_cpu_addr) in smp_detect_cpus()
494 if (!raw_cpu_stopped(cpu)) in smp_detect_cpus()
496 smp_get_save_area(c_cpus, cpu); in smp_detect_cpus()
503 for (cpu = 0; cpu < info->combined; cpu++) { in smp_detect_cpus()
504 if (info->cpu[cpu].address == boot_cpu_addr) { in smp_detect_cpus()
505 smp_cpu_type = info->cpu[cpu].type; in smp_detect_cpus()
511 for (cpu = 0; cpu < info->combined; cpu++) { in smp_detect_cpus()
512 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) in smp_detect_cpus()
514 cpu_addr = info->cpu[cpu].address; in smp_detect_cpus()
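A condensed sketch of CPU detection from the matched loops: fall back to probing every possible address with sigp sense when SCLP is unavailable, otherwise learn the boot CPU's type from its SCLP entry and classify the rest as configured (stopped) or standby. Bookkeeping details and the final rescan are elided:

        static void __init smp_detect_cpus(void)
        {
                unsigned int cpu, c_cpus, s_cpus;
                struct sclp_cpu_info *info;
                u16 boot_cpu_addr, cpu_addr;

                c_cpus = 1;     /* the boot cpu */
                s_cpus = 0;
                boot_cpu_addr = __cpu_logical_map[0];
                info = kmalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        panic("smp_detect_cpus failed to allocate memory\n");

                /* fallback: probe every possible address via sigp sense */
                if (sclp_get_cpu_info(info)) {
                        for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
                                if (cpu == boot_cpu_addr)
                                        continue;
                                if (!raw_cpu_stopped(cpu))
                                        continue;
                                smp_get_save_area(c_cpus, cpu);
                                c_cpus++;
                        }
                        goto out;
                }

                /* learn our cpu type from the boot cpu's SCLP entry */
                if (info->has_cpu_type) {
                        for (cpu = 0; cpu < info->combined; cpu++) {
                                if (info->cpu[cpu].address == boot_cpu_addr) {
                                        smp_cpu_type = info->cpu[cpu].type;
                                        break;
                                }
                        }
                }

                /* count configured (stopped) vs. standby CPUs */
                for (cpu = 0; cpu < info->combined; cpu++) {
                        if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
                                continue;
                        cpu_addr = info->cpu[cpu].address;
                        if (cpu_addr == boot_cpu_addr)
                                continue;
                        if (!raw_cpu_stopped(cpu_addr)) {
                                s_cpus++;
                                continue;
                        }
                        smp_get_save_area(c_cpus, cpu_addr);
                        c_cpus++;
                }
        out:
                kfree(info);
                pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
        }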
554 * Wait until the cpu which brought this one up marked it in start_secondary()
569 int cpu; member
577 c_idle->idle = fork_idle(c_idle->cpu); in smp_fork_idle()
581 static int __cpuinit smp_alloc_lowcore(int cpu) in smp_alloc_lowcore() argument
613 if (vdso_alloc_per_cpu(cpu, lowcore)) in smp_alloc_lowcore()
616 lowcore_ptr[cpu] = lowcore; in smp_alloc_lowcore()
626 static void smp_free_lowcore(int cpu) in smp_free_lowcore() argument
630 lowcore = lowcore_ptr[cpu]; in smp_free_lowcore()
635 vdso_free_per_cpu(cpu, lowcore); in smp_free_lowcore()
640 lowcore_ptr[cpu] = NULL; in smp_free_lowcore()
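A sketch of per-CPU lowcore management around the matched lines. The lowcore must live below 2GB, hence GFP_DMA; the stack setup is elided and the 512-byte copy split is an assumption:

        static int __cpuinit smp_alloc_lowcore(int cpu)
        {
                struct _lowcore *lowcore;

                lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
                if (!lowcore)
                        return -ENOMEM;
                /* seed from the boot cpu's lowcore, clear the rest */
                memcpy(lowcore, &S390_lowcore, 512);
                memset((char *) lowcore + 512, 0, sizeof(*lowcore) - 512);
                /* ... async/panic stack setup elided ... */
                if (vdso_alloc_per_cpu(cpu, lowcore))
                        goto out;
                lowcore_ptr[cpu] = lowcore;
                return 0;
        out:
                free_pages((unsigned long) lowcore, LC_ORDER);
                return -ENOMEM;
        }

        static void smp_free_lowcore(int cpu)
        {
                struct _lowcore *lowcore;

                lowcore = lowcore_ptr[cpu];
                vdso_free_per_cpu(cpu, lowcore);
                /* ... stack freeing elided ... */
                free_pages((unsigned long) lowcore, LC_ORDER);
                lowcore_ptr[cpu] = NULL;
        }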
644 int __cpuinit __cpu_up(unsigned int cpu) in __cpu_up() argument
653 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) in __cpu_up()
655 idle = current_set[cpu]; in __cpu_up()
659 c_idle.cpu = cpu; in __cpu_up()
665 current_set[cpu] = c_idle.idle; in __cpu_up()
667 init_idle(idle, cpu); in __cpu_up()
668 if (smp_alloc_lowcore(cpu)) in __cpu_up()
671 ccode = sigp(cpu, sigp_initial_cpu_reset); in __cpu_up()
678 lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; in __cpu_up()
679 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) in __cpu_up()
682 cpu_lowcore = lowcore_ptr[cpu]; in __cpu_up()
697 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; in __cpu_up()
699 cpu_lowcore->cpu_nr = cpu; in __cpu_up()
707 while (sigp(cpu, sigp_restart) == sigp_busy) in __cpu_up()
710 while (!cpu_online(cpu)) in __cpu_up()
715 smp_free_lowcore(cpu); in __cpu_up()
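The bring-up path, condensed around the matched lines (including the create_idle member at line 569): fork the idle task on a workqueue if none exists yet, reset the target, point it at its freshly built lowcore with sigp_set_prefix, then restart it and wait for it to come online. The elided middle section sets up stacks, page tables, and the restart PSW:

        struct create_idle {
                struct work_struct work;
                struct task_struct *idle;
                struct completion done;
                int cpu;
        };

        static void __cpuinit smp_fork_idle(struct work_struct *work)
        {
                struct create_idle *c_idle;

                c_idle = container_of(work, struct create_idle, work);
                c_idle->idle = fork_idle(c_idle->cpu);
                complete(&c_idle->done);
        }

        int __cpuinit __cpu_up(unsigned int cpu)
        {
                struct _lowcore *cpu_lowcore;
                struct create_idle c_idle;
                struct task_struct *idle;
                u32 lowcore;
                int ccode;

                if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
                        return -EIO;
                idle = current_set[cpu];
                if (!idle) {
                        /* fork the idle task in keventd's context */
                        init_completion(&c_idle.done);
                        INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
                        c_idle.cpu = cpu;
                        schedule_work(&c_idle.work);
                        wait_for_completion(&c_idle.done);
                        if (IS_ERR(c_idle.idle))
                                return PTR_ERR(c_idle.idle);
                        idle = c_idle.idle;
                        current_set[cpu] = c_idle.idle;
                }
                init_idle(idle, cpu);
                if (smp_alloc_lowcore(cpu))
                        return -ENOMEM;
                ccode = sigp(cpu, sigp_initial_cpu_reset);
                if (ccode == sigp_busy)
                        udelay(10);
                if (ccode == sigp_not_operational)
                        goto err_out;
                /* point the target at its new lowcore */
                lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
                while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
                        udelay(10);

                cpu_lowcore = lowcore_ptr[cpu];
                /* ... stack pointers, page tables, restart PSW elided ... */
                cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
                cpu_lowcore->cpu_nr = cpu;

                while (sigp(cpu, sigp_restart) == sigp_busy)
                        udelay(10);
                while (!cpu_online(cpu))
                        cpu_relax();
                return 0;

        err_out:
                smp_free_lowcore(cpu);
                return -EIO;
        }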
721 int pcpus, cpu; in setup_possible_cpus() local
725 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) in setup_possible_cpus()
726 set_cpu_possible(cpu, true); in setup_possible_cpus()
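setup_possible_cpus() is essentially fully visible in the matches; it parses the possible_cpus= early parameter and marks that many CPUs possible:

        static int __init setup_possible_cpus(char *s)
        {
                int pcpus, cpu;

                pcpus = simple_strtoul(s, NULL, 0);
                init_cpu_possible(cpumask_of(0));
                for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
                        set_cpu_possible(cpu, true);
                return 0;
        }
        early_param("possible_cpus", setup_possible_cpus);

Booting with possible_cpus=4 makes logical CPUs 0 through 3 possible, independent of how many are present at boot.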
736 int cpu = smp_processor_id(); in __cpu_disable() local
738 set_cpu_online(cpu, false); in __cpu_disable()
740 /* Disable pfault pseudo page faults on this cpu. */ in __cpu_disable()
765 void __cpu_die(unsigned int cpu) in __cpu_die() argument
767 /* Wait until target cpu is down */ in __cpu_die()
768 while (!cpu_stopped(cpu)) in __cpu_die()
770 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) in __cpu_die()
772 smp_free_lowcore(cpu); in __cpu_die()
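The hot-unplug pair sketched around the matched lines: __cpu_disable() takes the CPU out of the online mask and shuts down per-CPU facilities, __cpu_die() waits for the stopped state, clears the dead CPU's prefix register, and frees its lowcore:

        int __cpu_disable(void)
        {
                int cpu = smp_processor_id();

                set_cpu_online(cpu, false);

                /* Disable pfault pseudo page faults on this cpu. */
                pfault_fini();

                /* ... disabling external interrupt subclasses elided ... */
                return 0;
        }

        void __cpu_die(unsigned int cpu)
        {
                /* Wait until target cpu is down */
                while (!cpu_stopped(cpu))
                        cpu_relax();
                /* clear the dead cpu's prefix register before reuse */
                while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
                        udelay(10);
                smp_free_lowcore(cpu);
        }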
835 current_thread_info()->cpu = 0; in smp_prepare_boot_cpu()
881 int cpu = dev->id; in cpu_configure_store() local
893 /* disallow configuration changes of online cpus and cpu 0 */ in cpu_configure_store()
894 if (cpu_online(cpu) || cpu == 0) in cpu_configure_store()
899 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { in cpu_configure_store()
900 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); in cpu_configure_store()
902 smp_cpu_state[cpu] = CPU_STATE_STANDBY; in cpu_configure_store()
903 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); in cpu_configure_store()
909 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { in cpu_configure_store()
910 rc = sclp_cpu_configure(__cpu_logical_map[cpu]); in cpu_configure_store()
912 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; in cpu_configure_store()
913 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); in cpu_configure_store()
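A sketch of the sysfs configure handler: writing 0 deconfigures a standby-capable CPU through SCLP, writing 1 configures it, and both reset the polarization to unknown. Online CPUs and CPU 0 are rejected. The signature, parsing, and locking around the matched lines are assumptions based on the sysdev-era conventions this file appears to use:

        static ssize_t cpu_configure_store(struct sys_device *dev,
                                           struct sysdev_attribute *attr,
                                           const char *buf, size_t count)
        {
                int cpu = dev->id;
                int val, rc;
                char delim;

                if (sscanf(buf, "%d %c", &val, &delim) != 1)
                        return -EINVAL;
                if (val != 0 && val != 1)
                        return -EINVAL;

                get_online_cpus();
                mutex_lock(&smp_cpu_state_mutex);
                rc = -EBUSY;
                /* disallow configuration changes of online cpus and cpu 0 */
                if (cpu_online(cpu) || cpu == 0)
                        goto out;
                rc = 0;
                switch (val) {
                case 0:
                        if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
                                rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
                                if (!rc) {
                                        smp_cpu_state[cpu] = CPU_STATE_STANDBY;
                                        cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                                }
                        }
                        break;
                case 1:
                        if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
                                rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
                                if (!rc) {
                                        smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
                                        cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                                }
                        }
                        break;
                }
        out:
                mutex_unlock(&smp_cpu_state_mutex);
                put_online_cpus();
                return rc ? rc : count;
        }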
1023 unsigned int cpu = (unsigned int)(long)hcpu; in smp_cpu_notify() local
1024 struct cpu *c = &per_cpu(cpu_devices, cpu); in smp_cpu_notify()
1032 idle = &per_cpu(s390_idle, cpu); in smp_cpu_notify()
1048 static int __devinit smp_add_present_cpu(int cpu) in smp_add_present_cpu() argument
1050 struct cpu *c = &per_cpu(cpu_devices, cpu); in smp_add_present_cpu()
1055 rc = register_cpu(c, cpu); in smp_add_present_cpu()
1061 if (cpu_online(cpu)) { in smp_add_present_cpu()
1072 if (cpu_online(cpu)) in smp_add_present_cpu()
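A minimal sketch of how a present CPU gets its device registered; the two cpu_online() matches suggest online CPUs additionally get an online-only attribute group, which is assumed here:

        static int __devinit smp_add_present_cpu(int cpu)
        {
                struct cpu *c = &per_cpu(cpu_devices, cpu);
                int rc;

                c->hotpluggable = 1;
                rc = register_cpu(c, cpu);
                if (rc)
                        return rc;
                /* ... base sysfs attributes elided ... */
                if (cpu_online(cpu)) {
                        /* assumed: online-only attribute group */
                        rc = sysfs_create_group(&c->sysdev.kobj,
                                                &cpu_online_attr_group);
                        if (rc)
                                goto out_unregister;
                }
                return 0;

        out_unregister:
                unregister_cpu(c);
                return rc;
        }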
1089 int cpu; in smp_rescan_cpus() local
1099 for_each_cpu(cpu, &newcpus) { in smp_rescan_cpus()
1100 rc = smp_add_present_cpu(cpu); in smp_rescan_cpus()
1102 set_cpu_present(cpu, false); in smp_rescan_cpus()
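The rescan entry point sketched from the matches: snapshot the present mask, run the internal rescan, then register a device for every CPU that newly became present, rolling back the present bit if registration fails:

        int __ref smp_rescan_cpus(void)
        {
                cpumask_t newcpus;
                int cpu;
                int rc;

                get_online_cpus();
                mutex_lock(&smp_cpu_state_mutex);
                cpumask_copy(&newcpus, cpu_present_mask);
                rc = __smp_rescan_cpus();
                if (rc)
                        goto out;
                /* keep only the CPUs that became present during the rescan */
                cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
                for_each_cpu(cpu, &newcpus) {
                        rc = smp_add_present_cpu(cpu);
                        if (rc)
                                set_cpu_present(cpu, false);
                }
                rc = 0;
        out:
                mutex_unlock(&smp_cpu_state_mutex);
                put_online_cpus();
                return rc;
        }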
1128 int cpu, rc; in s390_smp_init() local
1136 for_each_present_cpu(cpu) { in s390_smp_init()
1137 rc = smp_add_present_cpu(cpu); in s390_smp_init()