/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */
#include <libcflat.h>
#include <bitops.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"

static struct cpu *cpus;
static struct spinlock lock;

extern void smp_cpu_setup_state(void);

static void check_idx(uint16_t idx)
{
	assert(idx < smp_query_num_cpus());
}

int smp_query_num_cpus(void)
{
	return sclp_get_cpu_num();
}

int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp(cpus[idx].addr, order, parm, status);
}

int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp_retry(cpus[idx].addr, order, parm, status);
}

struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

struct cpu *smp_cpu_from_idx(uint16_t idx)
{
	check_idx(idx);
	return &cpus[idx];
}

uint16_t smp_cpu_addr(uint16_t idx)
{
	check_idx(idx);
	return cpus[idx].addr;
}

bool smp_cpu_stopped(uint16_t idx)
{
	uint32_t status;

	if (smp_sigp(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP | SIGP_STATUS_STOPPED));
}

bool smp_sense_running_status(uint16_t idx)
{
	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* The status stored condition code is equivalent to the cpu not running. */
	return false;
}

static int smp_cpu_stop_nolock(uint16_t idx, bool store)
{
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	/* Refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	if (smp_sigp_retry(idx, order, 0, NULL))
		return -1;

	while (!smp_cpu_stopped(idx))
		mb();
	/* idx has already been checked by the smp_* functions called above */
	cpus[idx].active = false;
	return 0;
}

int smp_cpu_stop(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_stop_store_status(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, true);
	spin_unlock(&lock);
	return rc;
}

static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
{
	int rc;

	check_idx(idx);
	if (psw) {
		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu, so we don't have a race between a running cpu
	 * and the restart in the test that checks if the cpu is
	 * running after the restart.
	 */
	smp_cpu_stop_nolock(idx, false);
	rc = smp_sigp(idx, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(idx))
		mb();
	cpus[idx].active = true;
	return 0;
}

int smp_cpu_restart(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, NULL);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_start(uint16_t idx, struct psw psw)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, &psw);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_destroy(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	if (!rc) {
		free_pages(cpus[idx].lowcore);
		free_pages(cpus[idx].stack);
		cpus[idx].lowcore = (void *)-1UL;
		cpus[idx].stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
{
	struct lowcore *lc;

	if (cpus[idx].active)
		return -1;

	smp_sigp_retry(idx, SIGP_INITIAL_CPU_RESET, 0, NULL);

	/* The lowcore is two pages and has to be below 2 GB */
	lc = alloc_pages_flags(1, AREA_DMA31);
	cpus[idx].lowcore = lc;
	smp_sigp_retry(idx, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception PSWs. */
	memcpy(lc, cpus[0].lowcore, 512);

	/* Set up a 16k stack */
	cpus[idx].stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	lc->sw_int_psw.mask = psw.mask;
	lc->sw_int_psw.addr = psw.addr;
	lc->sw_int_grs[14] = psw.addr;
	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
	lc->restart_new_psw.mask = PSW_MASK_64;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);

	/* Start processing */
	smp_cpu_restart_nolock(idx, NULL);
	/* Wait until the cpu has finished setup and started the provided psw */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();

	return 0;
}

int smp_cpu_setup(uint16_t idx, struct psw psw)
{
	int rc = -1;

	spin_lock(&lock);
	if (cpus) {
		check_idx(idx);
		rc = smp_cpu_setup_nolock(idx, psw);
	}
	spin_unlock(&lock);
	return rc;
}

/*
 * Disregarding state, stop all cpus that once were online, except for
 * the calling cpu.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	int num = smp_query_num_cpus();

	spin_lock(&lock);
	for (; i < num; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot CPU */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	int num = smp_query_num_cpus();
	unsigned short cpu0_addr = stap();
	struct CPUEntry *entry = sclp_get_cpu_entries();

	spin_lock(&lock);
	if (num > 1)
		printf("SMP: Initializing, found %d cpus\n", num);

	cpus = calloc(num, sizeof(*cpus));
	for (i = 0; i < num; i++) {
		cpus[i].addr = entry[i].address;
		cpus[i].active = false;
		/*
		 * Fill in the boot CPU. If the boot CPU is not at index 0,
		 * swap it with the one at index 0. This guarantees that the
		 * boot CPU will always have index 0. If the boot CPU was
		 * already at index 0, a few extra useless assignments are
		 * performed, but everything will work OK.
		 * Notice that there is no guarantee that the list of CPUs
		 * returned by the Read SCP Info command is in any
		 * particular order, or that its order will stay consistent
		 * across multiple invocations.
		 */
		if (entry[i].address == cpu0_addr) {
			cpus[i].addr = cpus[0].addr;
			cpus[0].addr = cpu0_addr;
			cpus[0].stack = stackptr;
			cpus[0].lowcore = (void *)0;
			cpus[0].active = true;
		}
	}
	spin_unlock(&lock);
}
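
/*
 * Usage sketch (illustrative only, not compiled into this library):
 * a test typically brings a secondary CPU online with a payload
 * function and waits for it to report back. The names test_func and
 * done are hypothetical; extract_psw_mask() from asm/arch_def.h is
 * assumed to return the caller's current PSW mask.
 *
 *	static volatile int done;
 *
 *	static void test_func(void)
 *	{
 *		done = 1;		// signal the boot CPU
 *		while (1)
 *			;		// spin until stopped
 *	}
 *
 *	// in the test's main(), which runs on the boot CPU (index 0):
 *	struct psw new_psw = {
 *		.mask = extract_psw_mask(),
 *		.addr = (uint64_t)test_func,
 *	};
 *	smp_cpu_setup(1, new_psw);	// reset, set up and start CPU 1
 *	while (!done)
 *		mb();			// wait for the payload to run
 *	smp_cpu_stop(1);		// CPU 1 counts as inactive again
 */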