Lines Matching +full:cpu +full:- +full:core

1 // SPDX-License-Identifier: GPL-2.0-or-later
7 #include <linux/cpu.h>
17 #include <asm/mips-cps.h>
20 #include <asm/pm-cps.h>
22 #include <asm/smp-cps.h>
38 static unsigned core_vpe_count(unsigned int cluster, unsigned core) in core_vpe_count() argument
43 return mips_cps_numvps(cluster, core); in core_vpe_count()
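These matches appear to come from the MIPS CPS SMP code (arch/mips/kernel/smp-cps.c). The helper above reports how many Virtual Processors (VPEs) a given core in a given cluster exposes, using the Coherence Manager via mips_cps_numvps(). A minimal sketch of the idea, not the file's verbatim body; the cpu_has_mipsmt / cpu_has_vp guards are assumptions drawn from related MIPS code rather than from the matched lines:

        /* Sketch: one VPE per core unless MT ASE or MIPSr6 VP threading is usable */
        static unsigned int sketch_core_vpe_count(unsigned int cluster, unsigned int core)
        {
                if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) &&
                    (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
                        return 1;

                /* Ask the CM how many VPs this core implements */
                return mips_cps_numvps(cluster, core);
        }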
69 /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ in cps_smp_setup()
73 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { in cps_smp_setup()
86 /* Indicate present CPUs (CPU being synonymous with VPE) */ in cps_smp_setup()
97 /* Core 0 is powered up (we're running on it) */ in cps_smp_setup()
100 /* Initialise core 0 */ in cps_smp_setup()
103 /* Make core 0 coherent with everything */ in cps_smp_setup()
112 /* If we have an FPU, enroll ourselves in the FPU-full mask */ in cps_smp_setup()
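The cps_smp_setup() matches show the enumeration pass: every VPE of every core becomes a logical CPU, the VPE count of cluster 0 core 0 seeds smp_num_siblings, and core 0 (the boot core) is marked powered up and made coherent. A hedged sketch of how such an enumeration loop can be structured, assuming a mips_cps_numcores() helper alongside the mips_cps_numvps() seen above; this is an illustration, not the file's exact loop:

        unsigned int ncores, core_vpes, nvpes = 0, c, v;

        /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
        smp_num_siblings = core_vpe_count(0, 0);

        ncores = mips_cps_numcores(0);
        for (c = 0; c < ncores; c++) {
                core_vpes = core_vpe_count(0, c);

                for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
                        /* Indicate present CPUs (CPU being synonymous with VPE) */
                        set_cpu_possible(nvpes + v, true);
                        set_cpu_present(nvpes + v, true);
                }
                nvpes += core_vpes;
        }

        /* Core 0 is powered up (we're running on it) */
        bitmap_set(core_power, 0, 1);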
126 /* Detect whether the CCA is unsuited to multi-core SMP */ in cps_prepare_cpus()
131 /* The CCA is coherent, multi-core is fine */ in cps_prepare_cpus()
136 /* CCA is not coherent, multi-core is not usable */ in cps_prepare_cpus()
140 /* Warn the user if the CCA prevents multi-core */ in cps_prepare_cpus()
152 pr_warn("Using only one core due to %s%s%s\n", in cps_prepare_cpus()
167 (void *)entry_code - (void *)&mips_cps_core_entry); in cps_prepare_cpus()
170 /* Allocate core boot configuration structs */ in cps_prepare_cpus()
192 /* Mark this CPU as booted */ in cps_prepare_cpus()
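The cps_prepare_cpus() matches revolve around one question: is the boot Cache Coherency Attribute (CCA) coherent enough for additional cores to join in? If it is not, or if dcache aliasing is possible, the code warns and limits itself to a single core. A rough sketch of that decision, assuming the usual Config.K0 encodings where 0x4 (CWBE) and 0x5 (CWB) are the coherent write-back attributes; the exact case values are an assumption, not taken from the matched lines:

        unsigned int cca = read_c0_config() & CONF_CM_CMASK;
        bool cca_unsuitable;

        /* Detect whether the CCA is unsuited to multi-core SMP */
        switch (cca) {
        case 0x4: /* CWBE */
        case 0x5: /* CWB */
                /* The CCA is coherent, multi-core is fine */
                cca_unsuitable = false;
                break;
        default:
                /* CCA is not coherent, multi-core is not usable */
                cca_unsuitable = true;
                break;
        }

        /* Warn the user if the CCA prevents multi-core */
        if (cca_unsuitable || cpu_has_dc_aliases)
                pr_warn("Using only one core due to %s%s%s\n",
                        cca_unsuitable ? "unsuitable CCA" : "",
                        (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
                        cpu_has_dc_aliases ? "dcache aliasing" : "");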
214 static void boot_core(unsigned int core, unsigned int vpe_id) in boot_core() argument
219 /* Select the appropriate core */ in boot_core()
220 mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); in boot_core()
231 /* Ensure the core can access the GCRs */ in boot_core()
232 set_gcr_access(1 << core); in boot_core()
235 /* Reset the core */ in boot_core()
236 mips_cpc_lock_other(core); in boot_core()
245 * core leaves reset. in boot_core()
258 /* U6 == coherent execution, ie. the core is up */ in boot_core()
264 timeout--; in boot_core()
269 pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n", in boot_core()
270 core, stat); in boot_core()
276 /* Take the core out of reset */ in boot_core()
282 /* The core is now powered up */ in boot_core()
283 bitmap_set(core_power, core, 1); in boot_core()
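boot_core() reaches the target core through the CM/CPC "other" register blocks: lock the other core, grant it GCR access, pull it through reset via the Cluster Power Controller, then poll its STAT_CONF sequencer state until it reports U6 (coherent execution). A hedged sketch of the polling part only; the register and field names follow asm/mips-cpc.h as I understand it and should be treated as assumptions:

        unsigned int timeout = 100;
        u32 stat, seq_state;

        for (;;) {
                stat = read_cpc_co_stat_conf();
                seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
                seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

                /* U6 == coherent execution, ie. the core is up */
                if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
                        break;

                /* Delay a little while before we start warning */
                if (timeout) {
                        timeout--;
                        mdelay(10);
                        continue;
                }

                pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
                        core, stat);
                mdelay(1000);
        }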
288 unsigned core = cpu_core(&current_cpu_data); in remote_vpe_boot() local
289 struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; in remote_vpe_boot()
294 static int cps_boot_secondary(int cpu, struct task_struct *idle) in cps_boot_secondary() argument
296 unsigned core = cpu_core(&cpu_data[cpu]); in cps_boot_secondary() local
297 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); in cps_boot_secondary()
298 struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; in cps_boot_secondary()
299 struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; in cps_boot_secondary()
305 if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data)) in cps_boot_secondary()
306 return -ENOSYS; in cps_boot_secondary()
308 vpe_cfg->pc = (unsigned long)&smp_bootstrap; in cps_boot_secondary()
309 vpe_cfg->sp = __KSTK_TOS(idle); in cps_boot_secondary()
310 vpe_cfg->gp = (unsigned long)task_thread_info(idle); in cps_boot_secondary()
312 atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); in cps_boot_secondary()
316 if (!test_bit(core, core_power)) { in cps_boot_secondary()
317 /* Boot a VPE on a powered down core */ in cps_boot_secondary()
318 boot_core(core, vpe_id); in cps_boot_secondary()
323 mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); in cps_boot_secondary()
329 if (!cpus_are_siblings(cpu, smp_processor_id())) { in cps_boot_secondary()
330 /* Boot a VPE on another powered up core */ in cps_boot_secondary()
332 if (!cpus_are_siblings(cpu, remote)) in cps_boot_secondary()
338 pr_crit("No online CPU in core %u to start CPU%d\n", in cps_boot_secondary()
339 core, cpu); in cps_boot_secondary()
346 panic("Failed to call remote CPU\n"); in cps_boot_secondary()
352 /* Boot a VPE on this core */ in cps_boot_secondary()
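cps_boot_secondary() records the new CPU's entry point, stack and gp in its vpe_boot_config, then takes one of three paths: power up the whole core with boot_core() if it is currently off, ask an already-online CPU in the target core to start the VPE (remote_vpe_boot() via smp_call_function_single()) if the target sits in a different powered-up core, or start the VPE directly when it shares the caller's core. A condensed, hedged sketch of that decision; locals such as core, vpe_id, cpu and core_cfg are those shown in the matches above, and mips_cps_boot_vpes() is assumed from related CPS code rather than taken from the matched lines:

        unsigned int remote;

        if (!test_bit(core, core_power)) {
                /* Boot a VPE on a powered down core */
                boot_core(core, vpe_id);
                return 0;
        }

        if (!cpus_are_siblings(cpu, smp_processor_id())) {
                /* Boot a VPE on another powered up core */
                for_each_online_cpu(remote) {
                        if (cpus_are_siblings(cpu, remote))
                                break;
                }
                if (remote >= nr_cpu_ids) {
                        pr_crit("No online CPU in core %u to start CPU%d\n",
                                core, cpu);
                        return -ENODEV;
                }
                if (smp_call_function_single(remote, remote_vpe_boot, NULL, 1))
                        panic("Failed to call remote CPU\n");
                return 0;
        }

        /* Boot a VPE on this core */
        mips_cps_boot_vpes(core_cfg, vpe_id);
        return 0;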
361 /* Disable MT - we only want to run 1 TC per VPE */ in cps_init_secondary()
389 /* If we have an FPU, enroll ourselves in the FPU-full mask */ in cps_smp_finish()
406 unsigned int cpu, core, vpe_id; in cps_shutdown_this_cpu() local
408 cpu = smp_processor_id(); in cps_shutdown_this_cpu()
409 core = cpu_core(&cpu_data[cpu]); in cps_shutdown_this_cpu()
412 vpe_id = cpu_vpe_id(&cpu_data[cpu]); in cps_shutdown_this_cpu()
414 pr_debug("Halting core %d VP%d\n", core, vpe_id); in cps_shutdown_this_cpu()
426 pr_debug("Gating power to core %d\n", core); in cps_shutdown_this_cpu()
427 /* Power down the core */ in cps_shutdown_this_cpu()
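cps_shutdown_this_cpu() has two exits: when a sibling VPE keeps the core alive it only halts the current VP/TC, otherwise it gates power to the whole core through the CPS power-management code (pm-cps, included at the top of the file). A hedged sketch of that split, assuming a death-mode argument along the lines of CPU_DEATH_HALT vs CPU_DEATH_POWER; only the power-gating call is taken as given, via cps_pm_enter_state() from asm/pm-cps.h:

        if (death == CPU_DEATH_HALT) {
                /* A sibling VPE keeps the core alive: halt only this VP/TC */
                pr_debug("Halting core %d VP%d\n", core, vpe_id);
                /* (MT TC-halt / VP-stop details omitted in this sketch) */
        } else {
                pr_debug("Gating power to core %d\n", core);
                /* Power down the core */
                cps_pm_enter_state(CPS_PM_POWER_GATED);
        }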
450 unsigned cpu = smp_processor_id(); in cps_cpu_disable() local
453 if (!cpu) in cps_cpu_disable()
454 return -EBUSY; in cps_cpu_disable()
457 return -EINVAL; in cps_cpu_disable()
460 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); in cps_cpu_disable()
462 set_cpu_online(cpu, false); in cps_cpu_disable()
473 unsigned int cpu; in play_dead() local
477 cpu = smp_processor_id(); in play_dead()
480 pr_debug("CPU%d going offline\n", cpu); in play_dead()
483 /* Look for another online VPE within the core */ in play_dead()
485 if (!cpus_are_siblings(cpu, cpu_death_sibling)) in play_dead()
489 * There is an online VPE within the core. Just halt in play_dead()
490 * this TC and leave the core alone. in play_dead()
497 /* This CPU has chosen its way out */ in play_dead()
503 panic("Failed to offline CPU %u", cpu); in play_dead()
508 unsigned cpu = (unsigned long)ptr_cpu; in wait_for_sibling_halt() local
509 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); in wait_for_sibling_halt()
521 static void cps_cpu_die(unsigned int cpu) in cps_cpu_die() argument
523 unsigned core = cpu_core(&cpu_data[cpu]); in cps_cpu_die() local
524 unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); in cps_cpu_die()
529 /* Wait for the cpu to choose its way out */ in cps_cpu_die()
530 if (!cpu_wait_death(cpu, 5)) { in cps_cpu_die()
531 pr_err("CPU%u: didn't offline\n", cpu); in cps_cpu_die()
536 * Now wait for the CPU to actually offline. Without doing this that in cps_cpu_die()
539 * - Onlining the CPU again. in cps_cpu_die()
540 * - Powering down the core if another VPE within it is offlined. in cps_cpu_die()
541 * - A sibling VPE entering a non-coherent state. in cps_cpu_die()
543 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing in cps_cpu_die()
548 * Wait for the core to enter a powered down or clock gated in cps_cpu_die()
550 * in which case the CPC will refuse to power down the core. in cps_cpu_die()
554 mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); in cps_cpu_die()
555 mips_cpc_lock_other(core); in cps_cpu_die()
568 * The core ought to have powered down, but didn't & in cps_cpu_die()
575 * the hope that the core is doing nothing harmful & in cps_cpu_die()
579 "CPU%u hasn't powered down, seq. state %u\n", in cps_cpu_die()
580 cpu, stat)) in cps_cpu_die()
584 /* Indicate the core is powered off */ in cps_cpu_die()
585 bitmap_clear(core_power, core, 1); in cps_cpu_die()
588 * Have a CPU with access to the offlined CPUs registers wait in cps_cpu_die()
593 (void *)(unsigned long)cpu, 1); in cps_cpu_die()
595 panic("Failed to call remote sibling CPU\n"); in cps_cpu_die()
598 mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); in cps_cpu_die()
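cps_cpu_die() runs on a surviving CPU: it waits for the dying CPU to pick its way out, then either polls the CPC sequencer until the dead core is genuinely powered down or clock gated (a JTAG probe can keep it merely clock gated), or, when a sibling VPE is only halting, asks a CPU that can see the offlined CPU's registers to wait for the halt via wait_for_sibling_halt(). A hedged sketch of the power-down poll; the exact set of "down" sequencer states (D0/D2/U2) is an assumption:

        unsigned int stat;

        do {
                mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                mips_cpc_lock_other(core);
                stat = read_cpc_co_stat_conf();
                stat &= CPC_Cx_STAT_CONF_SEQSTATE;
                stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
                mips_cpc_unlock_other();
                mips_cm_unlock_other();

                /* Loop until the core is powered down or clock gated */
        } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
                 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
                 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

        /* Indicate the core is powered off */
        bitmap_clear(core_power, core, 1);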
634 return -ENODEV; in register_cps_smp_ops()
637 /* check we have a GIC - we need one for IPIs */ in register_cps_smp_ops()
640 return -ENODEV; in register_cps_smp_ops()
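Finally, register_cps_smp_ops() refuses to register the CPS SMP ops unless the platform actually has a Coherence Manager and a GIC for IPIs, returning -ENODEV otherwise. A hedged sketch of those checks; the GIC presence test through the GCR GIC status register reflects my reading of asm/mips-cm.h and may differ from the file's exact form:

        int register_cps_smp_ops(void)
        {
                if (!mips_cm_present()) {
                        pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
                        return -ENODEV;
                }

                /* check we have a GIC - we need one for IPIs */
                if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
                        pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
                        return -ENODEV;
                }

                register_smp_ops(&cps_smp_ops);
                return 0;
        }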