/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2.
 */
#include <libcflat.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"

static char cpu_info_buffer[PAGE_SIZE] __attribute__((__aligned__(4096)));
static struct cpu *cpus;
static struct cpu *cpu0;
static struct spinlock lock;

extern void smp_cpu_setup_state(void);

int smp_query_num_cpus(void)
{
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	return info->nr_configured;
}

struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

bool smp_cpu_stopped(uint16_t addr)
{
	uint32_t status;

	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP | SIGP_STATUS_STOPPED));
}

bool smp_sense_running_status(uint16_t addr)
{
	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* Status stored condition code is equivalent to cpu not running. */
	return false;
}

static int smp_cpu_stop_nolock(uint16_t addr, bool store)
{
	struct cpu *cpu;
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	cpu = smp_cpu_from_addr(addr);
	if (!cpu || cpu == cpu0)
		return -1;

	if (sigp_retry(addr, order, 0, NULL))
		return -1;

	while (!smp_cpu_stopped(addr))
		mb();
	cpu->active = false;
	return 0;
}

int smp_cpu_stop(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, false);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_stop_store_status(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, true);
	spin_unlock(&lock);
	return rc;
}

static int smp_cpu_restart_nolock(uint16_t addr, struct psw *psw)
{
	int rc;
	struct cpu *cpu = smp_cpu_from_addr(addr);

	if (!cpu)
		return -1;
	if (psw) {
		cpu->lowcore->restart_new_psw.mask = psw->mask;
		cpu->lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu, so we don't have a race between a running cpu
	 * and the restart in the test that checks if the cpu is
	 * running after the restart.
	 */
	smp_cpu_stop_nolock(addr, false);
	rc = sigp(addr, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(addr))
		mb();
	cpu->active = true;
	return 0;
}

int smp_cpu_restart(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(addr, NULL);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_start(uint16_t addr, struct psw psw)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(addr, &psw);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_destroy(uint16_t addr)
{
	struct cpu *cpu;
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, false);
	if (!rc) {
		cpu = smp_cpu_from_addr(addr);
		free_pages(cpu->lowcore);
		free_pages(cpu->stack);
		cpu->lowcore = (void *)-1UL;
		cpu->stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_setup(uint16_t addr, struct psw psw)
{
	struct lowcore *lc;
	struct cpu *cpu;
	int rc = -1;

	spin_lock(&lock);

	if (!cpus)
		goto out;

	cpu = smp_cpu_from_addr(addr);

	if (!cpu || cpu->active)
		goto out;

	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);

	/* Allocate a fresh lowcore below 2 GB and install it as the prefix. */
	lc = alloc_pages_area(AREA_DMA31, 1);
	cpu->lowcore = lc;
	memset(lc, 0, PAGE_SIZE * 2);
	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpu0->lowcore, 512);

	/* Setup stack */
	cpu->stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	cpu->lowcore->sw_int_psw.mask = psw.mask;
	cpu->lowcore->sw_int_psw.addr = psw.addr;
	cpu->lowcore->sw_int_grs[14] = psw.addr;
	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
	/* Restart PSW: 64-bit addressing mode, entering the setup stub. */
	lc->restart_new_psw.mask = 0x0000000180000000UL;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	/* CR0 with the AFP-register-control bit set. */
	lc->sw_int_crs[0] = 0x0000000000040000UL;

	/* Start processing */
	smp_cpu_restart_nolock(addr, NULL);
	/* Wait until the cpu has finished setup and started the provided psw. */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();
	rc = 0;
out:
	spin_unlock(&lock);
	return rc;
}

/*
 * Disregarding state, stop all cpus that once were online except for
 * the calling cpu.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	spin_lock(&lock);
	for (; i < info->nr_configured; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot cpu. */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	unsigned short cpu0_addr = stap();
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	spin_lock(&lock);
	sclp_mark_busy();
	info->h.length = PAGE_SIZE;
	sclp_service_call(SCLP_READ_CPU_INFO, cpu_info_buffer);

	if (smp_query_num_cpus() > 1)
		printf("SMP: Initializing, found %d cpus\n", info->nr_configured);

	/* One full struct cpu per configured cpu, hence sizeof(*cpus). */
	cpus = calloc(info->nr_configured, sizeof(*cpus));
	for (i = 0; i < info->nr_configured; i++) {
		cpus[i].addr = info->entries[i].address;
		cpus[i].active = false;
		if (info->entries[i].address == cpu0_addr) {
			cpu0 = &cpus[i];
			cpu0->stack = stackptr;
			/* The boot cpu's prefix is 0, so its lowcore sits at absolute 0. */
			cpu0->lowcore = (void *)0;
			cpu0->active = true;
		}
	}
	spin_unlock(&lock);
}
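
/*
 * Usage sketch (illustration only, not part of this library): a test built
 * on these helpers typically brings a secondary cpu up and down as below.
 * The cpu address 1, the test_func name, and the use of extract_psw_mask()
 * for the PSW mask are assumptions for the example, not requirements of
 * the API.
 *
 *	static void test_func(void)
 *	{
 *		... runs on the secondary cpu ...
 *	}
 *
 *	struct psw psw = {
 *		.mask = extract_psw_mask(),
 *		.addr = (unsigned long)test_func,
 *	};
 *
 *	smp_setup();		// query SCLP and build the cpu array
 *	smp_cpu_setup(1, psw);	// reset cpu 1 and start it at test_func
 *	...
 *	smp_cpu_stop(1);	// or smp_cpu_destroy(1) to also free lowcore/stack
 *	smp_teardown();		// stop every remaining secondary cpu
 */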