/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */
#include <libcflat.h>
#include <bitops.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"
static struct cpu *cpus;
static struct spinlock lock;

extern void smp_cpu_setup_state(void);

static void check_idx(uint16_t idx)
{
	assert(idx < smp_query_num_cpus());
}

int smp_query_num_cpus(void)
{
	return sclp_get_cpu_num();
}

struct lowcore *smp_get_lowcore(uint16_t idx)
{
	if (THIS_CPU->idx == idx)
		return &lowcore;

	check_idx(idx);
	return cpus[idx].lowcore;
}

int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp_retry(cpus[idx].addr, order, parm, status);
}

struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

struct cpu *smp_cpu_from_idx(uint16_t idx)
{
	check_idx(idx);
	return &cpus[idx];
}

uint16_t smp_cpu_addr(uint16_t idx)
{
	check_idx(idx);
	return cpus[idx].addr;
}

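/*
 * A cpu is reported as stopped only if SIGP SENSE returns the "status
 * stored" condition and the stored status has the stopped or check-stop
 * bit set.
 */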
bool smp_cpu_stopped(uint16_t idx)
{
	uint32_t status;

	if (smp_sigp(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

bool smp_sense_running_status(uint16_t idx)
{
	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* Status stored condition code is equivalent to cpu not running. */
	return false;
}

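/*
 * Stop the cpu at @idx, optionally with SIGP STOP AND STORE STATUS, and
 * busy-wait until it is reported as stopped. The boot CPU (idx 0) is never
 * stopped. Callers must hold the lock.
 */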
static int smp_cpu_stop_nolock(uint16_t idx, bool store)
{
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	if (smp_sigp(idx, order, 0, NULL))
		return -1;

	while (!smp_cpu_stopped(idx))
		mb();
	/* idx has already been checked by the smp_* functions called above */
	cpus[idx].active = false;
	return 0;
}

int smp_cpu_stop(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	spin_unlock(&lock);
	return rc;
}

/*
 * Functionally equivalent to smp_cpu_stop(), but without the waiting
 * and serialization that smp_cpu_stop() performs itself.
 * Used to check whether KVM itself serializes correctly.
 */
int smp_cpu_stop_nowait(uint16_t idx)
{
	check_idx(idx);

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	spin_lock(&lock);

	/* Don't suppress a CC2 with sigp_retry() */
	if (sigp(cpus[idx].addr, SIGP_STOP, 0, NULL)) {
		spin_unlock(&lock);
		return -1;
	}

	cpus[idx].active = false;
	spin_unlock(&lock);

	return 0;
}

int smp_cpu_stop_store_status(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, true);
	spin_unlock(&lock);
	return rc;
}

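/*
 * Restart the cpu at @idx. If @psw is non-NULL, the restart new PSW in the
 * target's lowcore is updated first, so the cpu resumes at @psw->addr with
 * @psw->mask. Callers must hold the lock.
 */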
static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
{
	int rc;

	check_idx(idx);
	if (psw) {
		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu first, so a test that checks whether the cpu is
	 * running after the restart does not race with a cpu that was
	 * already running.
	 */
	smp_cpu_stop_nolock(idx, false);
	rc = smp_sigp(idx, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(idx))
		mb();
	cpus[idx].active = true;
	return 0;
}

int smp_cpu_restart(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, NULL);
	spin_unlock(&lock);
	return rc;
}

/*
 * Functionally equivalent to smp_cpu_restart(), but without the waiting
 * and serialization otherwise done here in the test library.
 * Used to check whether KVM itself serializes correctly.
 */
int smp_cpu_restart_nowait(uint16_t idx)
{
	check_idx(idx);

	spin_lock(&lock);

	/* Don't suppress a CC2 with sigp_retry() */
	if (sigp(cpus[idx].addr, SIGP_RESTART, 0, NULL)) {
		spin_unlock(&lock);
		return -1;
	}

	cpus[idx].active = true;

	spin_unlock(&lock);

	return 0;
}

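/*
 * Start the cpu at @idx at the given @psw. Unlike smp_cpu_restart(), the
 * caller chooses where execution continues; internally the cpu is stopped
 * and restarted via smp_cpu_restart_nolock() with the new PSW.
 */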
int smp_cpu_start(uint16_t idx, struct psw psw)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, &psw);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_destroy(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	if (!rc) {
		free_pages(cpus[idx].lowcore);
		free_pages(cpus[idx].stack);
		cpus[idx].lowcore = (void *)-1UL;
		cpus[idx].stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

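/*
 * Prepare the cpu at @idx for use by a test: perform an initial cpu reset,
 * allocate a private lowcore below 2 GiB and install it as the cpu's prefix
 * area, copy the exception new PSWs from the boot cpu's lowcore, and set up
 * a stack plus a restart PSW pointing at smp_cpu_setup_state. The setup code
 * is expected to load the prepared state and continue at the provided @psw;
 * the wait loop at the end polls the restart new PSW for that hand-over.
 * Callers must hold the lock.
 */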
static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
{
	struct lowcore *lc;

	if (cpus[idx].active)
		return -1;

	smp_sigp(idx, SIGP_INITIAL_CPU_RESET, 0, NULL);

	lc = alloc_pages_flags(1, AREA_DMA31);
	cpus[idx].lowcore = lc;
	smp_sigp(idx, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpus[0].lowcore, 512);
	lc->this_cpu = &cpus[idx];

	/* Setup stack */
	cpus[idx].stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	lc->sw_int_psw.mask = psw.mask;
	lc->sw_int_psw.addr = psw.addr;
	lc->sw_int_grs[14] = psw.addr;
	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
	lc->restart_new_psw.mask = PSW_MASK_64;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);

	/* Start processing */
	smp_cpu_restart_nolock(idx, NULL);
	/* Wait until the cpu has finished setup and started the provided psw */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();

	return 0;
}

int smp_cpu_setup(uint16_t idx, struct psw psw)
{
	int rc = -1;

	spin_lock(&lock);
	if (cpus) {
		check_idx(idx);
		rc = smp_cpu_setup_nolock(idx, psw);
	}
	spin_unlock(&lock);
	return rc;
}
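
/*
 * Typical usage from a test, as an illustrative sketch only (not code from
 * this file): "test_func" and "testflag" are hypothetical test symbols, and
 * extract_psw_mask() is assumed to provide the caller's current PSW mask.
 *
 *	struct psw psw = {
 *		.mask = extract_psw_mask(),
 *		.addr = (uint64_t)test_func,
 *	};
 *
 *	smp_cpu_setup(1, psw);		// bring the cpu at idx 1 online at test_func
 *	while (!testflag)		// wait for the secondary cpu to make progress
 *		mb();
 *	smp_cpu_stop(1);		// stop it again once the test is done
 */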

/*
 * Stop all cpus that were once brought online, regardless of their current
 * state, except for the calling cpu.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	int num = smp_query_num_cpus();

	spin_lock(&lock);
	for (; i < num; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot CPU */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	int num = smp_query_num_cpus();
	unsigned short cpu0_addr = stap();
	struct CPUEntry *entry = sclp_get_cpu_entries();

	spin_lock(&lock);
	if (num > 1)
		printf("SMP: Initializing, found %d cpus\n", num);

	cpus = calloc(num, sizeof(*cpus));
	for (i = 0; i < num; i++) {
		cpus[i].addr = entry[i].address;
		cpus[i].active = false;
		cpus[i].idx = i;
		/*
		 * Fill in the boot CPU. If the boot CPU is not at index 0,
		 * swap it with the one at index 0. This guarantees that the
		 * boot CPU will always have index 0. If the boot CPU was
		 * already at index 0, a few extra useless assignments are
		 * performed, but everything will work ok.
		 * Notice that there is no guarantee that the list of CPUs
		 * returned by the Read SCP Info command is in any
		 * particular order, or that its order will stay consistent
		 * across multiple invocations.
		 */
		if (entry[i].address == cpu0_addr) {
			cpus[i].addr = cpus[0].addr;
			cpus[0].addr = cpu0_addr;
			cpus[0].stack = stackptr;
			cpus[0].lowcore = (void *)0;
			cpus[0].active = true;
			THIS_CPU = &cpus[0];
		}
	}
	spin_unlock(&lock);
}