/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */
#include <libcflat.h>
#include <bitops.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"

static struct cpu *cpus;
static struct spinlock lock;

extern void smp_cpu_setup_state(void);

static void check_idx(uint16_t idx)
{
	assert(idx < smp_query_num_cpus());
}

int smp_query_num_cpus(void)
{
	return sclp_get_cpu_num();
}

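/*
 * Signal the CPU at logical index idx with the given SIGP order.
 * smp_sigp_retry() additionally retries the order as long as the
 * addressed CPU is busy (condition code 2).
 */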
int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp(cpus[idx].addr, order, parm, status);
}

int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp_retry(cpus[idx].addr, order, parm, status);
}

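/* Find the cpu structure for the given physical CPU address; NULL if none. */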
struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

struct cpu *smp_cpu_from_idx(uint16_t idx)
{
	check_idx(idx);
	return &cpus[idx];
}

uint16_t smp_cpu_addr(uint16_t idx)
{
	check_idx(idx);
	return cpus[idx].addr;
}

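/*
 * Return true if the CPU at logical index idx is in the stopped or
 * check-stop state, as reported by SIGP SENSE.
 */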
bool smp_cpu_stopped(uint16_t idx)
{
	uint32_t status;

	if (smp_sigp_retry(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP | SIGP_STATUS_STOPPED));
}

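/* Report whether the CPU at logical index idx is currently running. */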
bool smp_sense_running_status(uint16_t idx)
{
	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* Status stored condition code is equivalent to cpu not running. */
	return false;
}

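/*
 * Stop the CPU at logical index idx, optionally storing its status, and
 * wait until it has actually stopped. Refuses to stop the boot CPU.
 * The caller must hold the lock.
 */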
static int smp_cpu_stop_nolock(uint16_t idx, bool store)
{
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	if (smp_sigp_retry(idx, order, 0, NULL))
		return -1;

	while (!smp_cpu_stopped(idx))
		mb();
	/* idx has already been checked by the smp_* functions called above */
	cpus[idx].active = false;
	return 0;
}

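/* Stop the CPU at logical index idx and wait until it has stopped. */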
int smp_cpu_stop(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	spin_unlock(&lock);
	return rc;
}

/*
 * Functionally equivalent to smp_cpu_stop(), but without retrying the
 * SIGP order or waiting for the CPU to actually stop.
 * Used to check whether KVM itself serializes the orders correctly.
 */
int smp_cpu_stop_nowait(uint16_t idx)
{
	check_idx(idx);

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	spin_lock(&lock);

	/* Don't suppress a CC2 with sigp_retry() */
	if (sigp(cpus[idx].addr, SIGP_STOP, 0, NULL)) {
		spin_unlock(&lock);
		return -1;
	}

	cpus[idx].active = false;
	spin_unlock(&lock);

	return 0;
}

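/*
 * Stop the CPU at logical index idx, store its status in its absolute
 * lowcore and wait until it has stopped.
 */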
int smp_cpu_stop_store_status(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, true);
	spin_unlock(&lock);
	return rc;
}

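/*
 * Restart the CPU at logical index idx. If a PSW is provided, it is
 * written to the CPU's restart new PSW first. The CPU is stopped before
 * the restart order is sent, and the function only returns once the CPU
 * is running again. The caller must hold the lock.
 */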
static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
{
	int rc;

	check_idx(idx);
	if (psw) {
		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu first, so a test that checks whether the cpu is
	 * running after the restart cannot race against a cpu that was
	 * already running before the restart took effect.
	 */
	smp_cpu_stop_nolock(idx, false);
	rc = smp_sigp(idx, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(idx))
		mb();
	cpus[idx].active = true;
	return 0;
}

int smp_cpu_restart(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, NULL);
	spin_unlock(&lock);
	return rc;
}

/*
 * Functionally equivalent to smp_cpu_restart(), but without retrying the
 * SIGP order or waiting for the CPU to actually start running.
 * Used to check whether KVM itself serializes the orders correctly.
 */
int smp_cpu_restart_nowait(uint16_t idx)
{
	check_idx(idx);

	spin_lock(&lock);

	/* Don't suppress a CC2 with sigp_retry() */
	if (sigp(cpus[idx].addr, SIGP_RESTART, 0, NULL)) {
		spin_unlock(&lock);
		return -1;
	}

	cpus[idx].active = true;

	spin_unlock(&lock);

	return 0;
}

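/* Start the CPU at logical index idx at the given PSW. */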
int smp_cpu_start(uint16_t idx, struct psw psw)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, &psw);
	spin_unlock(&lock);
	return rc;
}

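/*
 * Stop the CPU at logical index idx and free its lowcore and stack.
 * The freed pointers are poisoned so that stale uses are easier to spot.
 */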
int smp_cpu_destroy(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	if (!rc) {
		free_pages(cpus[idx].lowcore);
		free_pages(cpus[idx].stack);
		cpus[idx].lowcore = (void *)-1UL;
		cpus[idx].stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

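/*
 * Bring up the CPU at logical index idx: perform an initial CPU reset,
 * allocate and populate a fresh lowcore (prefix area) and stack, point
 * the restart new PSW at the early setup code and restart the CPU.
 * Returns only once the CPU has started executing the provided PSW.
 * The caller must hold the lock.
 */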
static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
{
	struct lowcore *lc;

	if (cpus[idx].active)
		return -1;

	smp_sigp_retry(idx, SIGP_INITIAL_CPU_RESET, 0, NULL);

	lc = alloc_pages_flags(1, AREA_DMA31);
	cpus[idx].lowcore = lc;
	smp_sigp_retry(idx, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpus[0].lowcore, 512);

	/* Setup stack */
	cpus[idx].stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	lc->sw_int_psw.mask = psw.mask;
	lc->sw_int_psw.addr = psw.addr;
	lc->sw_int_grs[14] = psw.addr;
	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
	lc->restart_new_psw.mask = PSW_MASK_64;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);

	/* Start processing */
	smp_cpu_restart_nolock(idx, NULL);
	/* Wait until the cpu has finished setup and started the provided psw */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();

	return 0;
}

int smp_cpu_setup(uint16_t idx, struct psw psw)
{
	int rc = -1;

	spin_lock(&lock);
	if (cpus) {
		check_idx(idx);
		rc = smp_cpu_setup_nolock(idx, psw);
	}
	spin_unlock(&lock);
	return rc;
}
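
/*
 * Example usage (an illustrative sketch, not code used by this file):
 * a test brings up a secondary CPU by handing smp_cpu_setup() the PSW
 * the new CPU should start with; test_func is a hypothetical entry
 * point provided by the test and index 1 is the first secondary CPU:
 *
 *	struct psw psw = {
 *		.mask = PSW_MASK_64,
 *		.addr = (uint64_t)test_func,
 *	};
 *
 *	smp_cpu_setup(1, psw);
 *	...
 *	smp_cpu_destroy(1);
 */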

/*
 * Disregarding their current state, stop all CPUs that were once brought
 * online, except for the calling CPU.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	int num = smp_query_num_cpus();

	spin_lock(&lock);
	for (; i < num; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot CPU */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	int num = smp_query_num_cpus();
	unsigned short cpu0_addr = stap();
	struct CPUEntry *entry = sclp_get_cpu_entries();

	spin_lock(&lock);
	if (num > 1)
		printf("SMP: Initializing, found %d cpus\n", num);

	cpus = calloc(num, sizeof(*cpus));
	for (i = 0; i < num; i++) {
		cpus[i].addr = entry[i].address;
		cpus[i].active = false;
		/*
		 * Fill in the boot CPU. If the boot CPU is not at index 0,
		 * swap it with the one at index 0. This guarantees that the
		 * boot CPU will always have index 0. If the boot CPU was
		 * already at index 0, a few extra useless assignments are
		 * performed, but everything will work ok.
		 * Notice that there is no guarantee that the list of CPUs
		 * returned by the Read SCP Info command is in any
		 * particular order, or that its order will stay consistent
		 * across multiple invocations.
		 */
		if (entry[i].address == cpu0_addr) {
			cpus[i].addr = cpus[0].addr;
			cpus[0].addr = cpu0_addr;
			cpus[0].stack = stackptr;
			cpus[0].lowcore = (void *)0;
			cpus[0].active = true;
		}
	}
	spin_unlock(&lock);
}
355