/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2.
 */
#include <libcflat.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"

static char cpu_info_buffer[PAGE_SIZE] __attribute__((__aligned__(4096)));
static struct cpu *cpus;
static struct cpu *cpu0;
static struct spinlock lock;

extern void smp_cpu_setup_state(void);

int smp_query_num_cpus(void)
{
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	return info->nr_configured;
}

struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

bool smp_cpu_stopped(uint16_t addr)
{
	uint32_t status;

	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

bool smp_cpu_running(uint16_t addr)
{
	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* Status stored condition code is equivalent to cpu not running. */
	return false;
}

static int smp_cpu_stop_nolock(uint16_t addr, bool store)
{
	struct cpu *cpu;
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	cpu = smp_cpu_from_addr(addr);
	if (!cpu || cpu == cpu0)
		return -1;

	if (sigp_retry(addr, order, 0, NULL))
		return -1;

	/* Wait until the cpu has actually entered the stopped state. */
	while (!smp_cpu_stopped(addr))
		mb();
	cpu->active = false;
	return 0;
}

int smp_cpu_stop(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, false);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_stop_store_status(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, true);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_restart(uint16_t addr)
{
	int rc = -1;
	struct cpu *cpu;

	spin_lock(&lock);
	cpu = smp_cpu_from_addr(addr);
	if (cpu) {
		rc = sigp(addr, SIGP_RESTART, 0, NULL);
		cpu->active = true;
	}
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_start(uint16_t addr, struct psw psw)
{
	int rc = -1;
	struct cpu *cpu;
	struct lowcore *lc;

	spin_lock(&lock);
	cpu = smp_cpu_from_addr(addr);
	if (cpu) {
		lc = cpu->lowcore;
		lc->restart_new_psw.mask = psw.mask;
		lc->restart_new_psw.addr = psw.addr;
		rc = sigp(addr, SIGP_RESTART, 0, NULL);
	}
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_destroy(uint16_t addr)
{
	struct cpu *cpu;
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, false);
	if (!rc) {
		cpu = smp_cpu_from_addr(addr);
		free_pages(cpu->lowcore, 2 * PAGE_SIZE);
		free_pages(cpu->stack, 4 * PAGE_SIZE);
		cpu->lowcore = (void *)-1UL;
		cpu->stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_setup(uint16_t addr, struct psw psw)
{
	struct lowcore *lc;
	struct cpu *cpu;
	int rc = -1;

	spin_lock(&lock);

	if (!cpus)
		goto out;

	cpu = smp_cpu_from_addr(addr);

	if (!cpu || cpu->active)
		goto out;

	/* Put the target cpu into a defined state before setting it up. */
	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);

	lc = alloc_pages(1);
	cpu->lowcore = lc;
	memset(lc, 0, PAGE_SIZE * 2);
	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpu0->lowcore, 512);

	/* Set up the stack. */
	cpu->stack = (uint64_t *)alloc_pages(2);

	/* Pass the entry point in gr 14 and the stack pointer in gr 15. */
	cpu->lowcore->sw_int_grs[14] = psw.addr;
	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
	/* Start without DAT and any other mask bits. */
	lc->restart_new_psw.mask = 0x0000000180000000UL;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = 0x0000000000040000UL;

	/* Start processing */
	rc = sigp_retry(cpu->addr, SIGP_RESTART, 0, NULL);
	if (!rc)
		cpu->active = true;

out:
	spin_unlock(&lock);
	return rc;
}

/*
 * Disregarding state, stop all cpus that once were online except for
 * the calling cpu.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	spin_lock(&lock);
	for (; i < info->nr_configured; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot cpu */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	unsigned short cpu0_addr = stap();
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	spin_lock(&lock);
	/* Retrieve the list of configured cpus via SCLP READ CPU INFO. */
	sclp_mark_busy();
	info->h.length = PAGE_SIZE;
	sclp_service_call(SCLP_READ_CPU_INFO, cpu_info_buffer);

	if (smp_query_num_cpus() > 1)
		printf("SMP: Initializing, found %d cpus\n", info->nr_configured);

	cpus = calloc(info->nr_configured, sizeof(*cpus));
	for (i = 0; i < info->nr_configured; i++) {
		cpus[i].addr = info->entries[i].address;
		cpus[i].active = false;
		if (info->entries[i].address == cpu0_addr) {
			cpu0 = &cpus[i];
			cpu0->stack = stackptr;
			cpu0->lowcore = (void *)0;
			cpu0->active = true;
		}
	}
	spin_unlock(&lock);
}