/*
 * Secondary cpu support
 *
 * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <libcflat.h>
#include <asm/thread_info.h>
#include <asm/spinlock.h>
#include <asm/cpumask.h>
#include <asm/barrier.h>
#include <asm/mmu.h>
#include <asm/psci.h>
#include <asm/smp.h>

bool cpu0_calls_idle;

cpumask_t cpu_present_mask;
cpumask_t cpu_online_mask;
cpumask_t cpu_idle_mask;

struct secondary_data {
	void *stack;            /* must be first member of struct */
	secondary_entry_fn entry;
};
struct secondary_data secondary_data;
static struct spinlock lock;

secondary_entry_fn secondary_cinit(void)
{
	struct thread_info *ti = current_thread_info();
	secondary_entry_fn entry;

	thread_info_init(ti, 0);
	mmu_mark_enabled(ti->cpu);

	/*
	 * Save secondary_data.entry locally to avoid opening a race
	 * window between marking ourselves online and calling it.
	 */
	entry = secondary_data.entry;
	set_cpu_online(ti->cpu, true);
	sev();

	/*
	 * Return to the assembly stub, allowing entry to be called
	 * from there with an empty stack.
	 */
	return entry;
}

static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	int ret;

	secondary_data.stack = thread_stack_alloc();
	secondary_data.entry = entry;
	mmu_mark_disabled(cpu);
	ret = cpu_psci_cpu_boot(cpu);
	assert(ret == 0);

	while (!cpu_online(cpu))
		wfe();
}

void smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	spin_lock(&lock);
	assert_msg(!cpu_online(cpu), "CPU%d already booted once", cpu);
	__smp_boot_secondary(cpu, entry);
	spin_unlock(&lock);
}

typedef void (*on_cpu_func)(void *);
struct on_cpu_info {
	on_cpu_func func;
	void *data;
	cpumask_t waiters;
};
static struct on_cpu_info on_cpu_info[NR_CPUS];

static void __deadlock_check(int cpu, const cpumask_t *waiters, bool *found)
{
	int i;

	for_each_cpu(i, waiters) {
		if (i == cpu) {
			printf("CPU%d", cpu);
			*found = true;
			return;
		}
		__deadlock_check(cpu, &on_cpu_info[i].waiters, found);
		if (*found) {
			printf(" <=> CPU%d", i);
			return;
		}
	}
}

/*
 * Before 'me' starts waiting on 'cpu', make sure 'cpu' isn't already
 * (transitively) waiting on 'me', which would deadlock both.
 */
static void deadlock_check(int me, int cpu)
{
	bool found = false;

	__deadlock_check(cpu, &on_cpu_info[me].waiters, &found);
	if (found) {
		printf(" <=> CPU%d deadlock detected\n", me);
		assert(0);
	}
}

static void cpu_wait(int cpu)
{
	int me = smp_processor_id();

	if (cpu == me)
		return;

	cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
	deadlock_check(me, cpu);
	while (!cpu_idle(cpu))
		wfe();
	cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}

/*
 * Idle loop: advertise this CPU as idle, wait for work posted by
 * on_cpu_async(), run it, then go idle again.
 */
void do_idle(void)
{
	int cpu = smp_processor_id();

	if (cpu == 0)
		cpu0_calls_idle = true;

	set_cpu_idle(cpu, true);
	sev();

	for (;;) {
		while (cpu_idle(cpu))
			wfe();
		smp_rmb();
		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
		on_cpu_info[cpu].func = NULL;
		smp_wmb();
		set_cpu_idle(cpu, true);
		sev();
	}
}

void on_cpu_async(int cpu, void (*func)(void *data), void *data)
{
	if (cpu == smp_processor_id()) {
		func(data);
		return;
	}

	assert_msg(cpu != 0 || cpu0_calls_idle,
		   "Waiting on CPU0, which is unlikely to idle. "
		   "If this is intended, set cpu0_calls_idle=1");

	spin_lock(&lock);
	if (!cpu_online(cpu))
		__smp_boot_secondary(cpu, do_idle);
	spin_unlock(&lock);

	/* Wait until any previously posted function has been consumed. */
	for (;;) {
		cpu_wait(cpu);
		spin_lock(&lock);
		if ((volatile void *)on_cpu_info[cpu].func == NULL)
			break;
		spin_unlock(&lock);
	}
	on_cpu_info[cpu].func = func;
	on_cpu_info[cpu].data = data;
	spin_unlock(&lock);
	set_cpu_idle(cpu, false);
	sev();
}

void on_cpu(int cpu, void (*func)(void *data), void *data)
{
	on_cpu_async(cpu, func, data);
	cpu_wait(cpu);
}

void on_cpus(void (*func)(void))
{
	int cpu, me = smp_processor_id();

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		on_cpu_async(cpu, (on_cpu_func)func, NULL);
	}
	func();

	/*
	 * Register as a waiter on each remaining CPU (for deadlock
	 * detection), then wait for them all to go idle again.
	 */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
		deadlock_check(me, cpu);
	}
	while (cpumask_weight(&cpu_idle_mask) < nr_cpus - 1)
		wfe();
	for_each_present_cpu(cpu)
		cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}