/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */
#include <libcflat.h>
#include <bitops.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"

static struct cpu *cpus;
static struct spinlock lock;

extern void smp_cpu_setup_state(void);

static void check_idx(uint16_t idx)
{
	assert(idx < smp_query_num_cpus());
}

int smp_query_num_cpus(void)
{
	return sclp_get_cpu_num();
}

int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp(cpus[idx].addr, order, parm, status);
}

int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp_retry(cpus[idx].addr, order, parm, status);
}

struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

struct cpu *smp_cpu_from_idx(uint16_t idx)
{
	check_idx(idx);
	return &cpus[idx];
}

uint16_t smp_cpu_addr(uint16_t idx)
{
	check_idx(idx);
	return cpus[idx].addr;
}

bool smp_cpu_stopped(uint16_t idx)
{
	uint32_t status;

	if (smp_sigp_retry(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

bool smp_sense_running_status(uint16_t idx)
{
	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* Status stored condition code is equivalent to cpu not running. */
	return false;
}

static int smp_cpu_stop_nolock(uint16_t idx, bool store)
{
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	if (smp_sigp_retry(idx, order, 0, NULL))
		return -1;

	while (!smp_cpu_stopped(idx))
		mb();
	/* idx has already been checked by the smp_* functions called above */
	cpus[idx].active = false;
	return 0;
}

int smp_cpu_stop(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	spin_unlock(&lock);
	return rc;
}
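
/*
 * Usage sketch (illustration only, not used by this file): a test stopping
 * the secondary CPU at index 1 and checking that the stop took effect.
 * report() is the libcflat result helper; the test is assumed to have
 * brought CPU 1 online earlier with smp_cpu_setup().
 *
 *	if (!smp_cpu_stop(1))
 *		report(smp_cpu_stopped(1), "cpu 1 stopped");
 */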

/*
 * Functionally equivalent to smp_cpu_stop(), but without the parts that
 * wait for the CPU to actually stop or retry busy SIGP orders.
 * Used to see if KVM itself serializes the orders correctly.
 */
int smp_cpu_stop_nowait(uint16_t idx)
{
	check_idx(idx);

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	spin_lock(&lock);

	/* Don't suppress a CC2 with sigp_retry() */
	if (sigp(cpus[idx].addr, SIGP_STOP, 0, NULL)) {
		spin_unlock(&lock);
		return -1;
	}

	cpus[idx].active = false;
	spin_unlock(&lock);

	return 0;
}

int smp_cpu_stop_store_status(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, true);
	spin_unlock(&lock);
	return rc;
}

static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
{
	int rc;

	check_idx(idx);
	if (psw) {
		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu, so we don't have a race between a running cpu
	 * and the restart in the test that checks if the cpu is
	 * running after the restart.
	 */
	smp_cpu_stop_nolock(idx, false);
	rc = smp_sigp(idx, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(idx))
		mb();
	cpus[idx].active = true;
	return 0;
}

int smp_cpu_restart(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, NULL);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_start(uint16_t idx, struct psw psw)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, &psw);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_destroy(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	if (!rc) {
		free_pages(cpus[idx].lowcore);
		free_pages(cpus[idx].stack);
		cpus[idx].lowcore = (void *)-1UL;
		cpus[idx].stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
{
	struct lowcore *lc;

	if (cpus[idx].active)
		return -1;

	smp_sigp_retry(idx, SIGP_INITIAL_CPU_RESET, 0, NULL);

	lc = alloc_pages_flags(1, AREA_DMA31);
	cpus[idx].lowcore = lc;
	smp_sigp_retry(idx, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpus[0].lowcore, 512);

	/* Setup stack */
	cpus[idx].stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	lc->sw_int_psw.mask = psw.mask;
	lc->sw_int_psw.addr = psw.addr;
	lc->sw_int_grs[14] = psw.addr;
	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
	lc->restart_new_psw.mask = PSW_MASK_64;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);

	/* Start processing */
	smp_cpu_restart_nolock(idx, NULL);
	/* Wait until the cpu has finished setup and started the provided psw */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();

	return 0;
}

int smp_cpu_setup(uint16_t idx, struct psw psw)
{
	int rc = -1;

	spin_lock(&lock);
	if (cpus) {
		check_idx(idx);
		rc = smp_cpu_setup_nolock(idx, psw);
	}
	spin_unlock(&lock);
	return rc;
}
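
/*
 * Usage sketch (illustration only): bringing a secondary CPU online from a
 * test. test_func and the secondary_ran flag are assumptions made for this
 * example; PSW_MASK_64 is the same mask this file uses for the restart PSW.
 *
 *	static volatile int secondary_ran;
 *
 *	static void test_func(void)
 *	{
 *		secondary_ran = 1;
 *	}
 *
 * Then, on the boot CPU:
 *
 *	struct psw psw = { .mask = PSW_MASK_64, .addr = (uint64_t)test_func };
 *
 *	smp_cpu_setup(1, psw);
 *	while (!secondary_ran)
 *		mb();
 */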

/*
 * Disregarding their current state, stop all cpus that once were online,
 * except for the calling cpu.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	int num = smp_query_num_cpus();

	spin_lock(&lock);
	for (; i < num; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot cpu */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	int num = smp_query_num_cpus();
	unsigned short cpu0_addr = stap();
	struct CPUEntry *entry = sclp_get_cpu_entries();

	spin_lock(&lock);
	if (num > 1)
		printf("SMP: Initializing, found %d cpus\n", num);

	cpus = calloc(num, sizeof(*cpus));
	for (i = 0; i < num; i++) {
		cpus[i].addr = entry[i].address;
		cpus[i].active = false;
		/*
		 * Fill in the boot CPU. If the boot CPU is not at index 0,
		 * swap it with the one at index 0. This guarantees that the
		 * boot CPU will always have index 0. If the boot CPU was
		 * already at index 0, a few extra useless assignments are
		 * performed, but everything will work ok.
		 * Notice that there is no guarantee that the list of CPUs
		 * returned by the Read SCP Info command is in any
		 * particular order, or that its order will stay consistent
		 * across multiple invocations.
		 */
		if (entry[i].address == cpu0_addr) {
			cpus[i].addr = cpus[0].addr;
			cpus[0].addr = cpu0_addr;
			cpus[0].stack = stackptr;
			cpus[0].lowcore = (void *)0;
			cpus[0].active = true;
		}
	}
	spin_unlock(&lock);
}
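
/*
 * Usage sketch (illustration only): after smp_setup() the boot CPU is
 * guaranteed to sit at index 0, so a test can enumerate the remaining CPUs
 * by index and look up their SIGP addresses only when needed:
 *
 *	int i, num = smp_query_num_cpus();
 *
 *	for (i = 1; i < num; i++)
 *		printf("cpu %d: address %d, %s\n", i, smp_cpu_addr(i),
 *		       smp_cpu_stopped(i) ? "stopped" : "running");
 */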