xref: /kvm-unit-tests/lib/s390x/smp.c (revision c604fa931a1cb70c3649ac1b7223178fc79eab6a)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */
#include <libcflat.h>
#include <bitops.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"

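/*
 * Per-cpu state for all cpus reported by SCLP, indexed by logical cpu
 * index; the boot CPU is always index 0. The array and the active
 * flags are protected by lock.
 */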
static struct cpu *cpus;
static struct spinlock lock;

extern void smp_cpu_setup_state(void);

static void check_idx(uint16_t idx)
{
	assert(idx < smp_query_num_cpus());
}

int smp_query_num_cpus(void)
{
	return sclp_get_cpu_num();
}

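/*
 * Send a SIGP order to the cpu at index idx; busy conditions are
 * retried by sigp_retry() and the status word, if any, is returned
 * via *status.
 */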
int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
{
	check_idx(idx);
	return sigp_retry(cpus[idx].addr, order, parm, status);
}

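/* Translate between cpu addresses used by SIGP and logical cpu indexes. */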
struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

struct cpu *smp_cpu_from_idx(uint16_t idx)
{
	check_idx(idx);
	return &cpus[idx];
}

uint16_t smp_cpu_addr(uint16_t idx)
{
	check_idx(idx);
	return cpus[idx].addr;
}

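/* Return true if SIGP SENSE reports the cpu as stopped or check-stopped. */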
bool smp_cpu_stopped(uint16_t idx)
{
	uint32_t status;

	if (smp_sigp(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

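/*
 * Return true if the cpu at index idx is currently running. A status
 * stored condition code from SIGP SENSE RUNNING means the cpu is not
 * running.
 */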
bool smp_sense_running_status(uint16_t idx)
{
	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* Status stored condition code is equivalent to cpu not running. */
	return false;
}

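/*
 * Stop the cpu at index idx, optionally storing its status, and wait
 * until it is fully stopped. The boot CPU (index 0) is never stopped.
 * The caller must hold the lock.
 */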
static int smp_cpu_stop_nolock(uint16_t idx, bool store)
{
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	if (smp_sigp(idx, order, 0, NULL))
		return -1;

	while (!smp_cpu_stopped(idx))
		mb();
	/* idx has already been checked by the smp_* functions called above */
	cpus[idx].active = false;
	return 0;
}

int smp_cpu_stop(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	spin_unlock(&lock);
	return rc;
}

/*
 * Functionally equivalent to smp_cpu_stop(), but without waiting for
 * the cpu to actually stop and without retrying busy SIGP conditions.
 * Used to check whether KVM itself serializes the orders correctly.
 */
int smp_cpu_stop_nowait(uint16_t idx)
{
	check_idx(idx);

	/* refuse to work on the boot CPU */
	if (idx == 0)
		return -1;

	spin_lock(&lock);

	/* Don't suppress a CC2 with sigp_retry() */
	if (sigp(cpus[idx].addr, SIGP_STOP, 0, NULL)) {
		spin_unlock(&lock);
		return -1;
	}

	cpus[idx].active = false;
	spin_unlock(&lock);

	return 0;
}

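/*
 * Stop the cpu at index idx and have it store its architected status
 * (SIGP STOP AND STORE STATUS).
 */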
int smp_cpu_stop_store_status(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, true);
	spin_unlock(&lock);
	return rc;
}

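/*
 * Restart the cpu at index idx. If a PSW is provided, it is installed
 * as the restart new PSW first, so the cpu resumes execution there.
 * The caller must hold the lock.
 */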
static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
{
	int rc;

	check_idx(idx);
	if (psw) {
		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu, so we don't have a race between a running cpu
	 * and the restart in the test that checks if the cpu is
	 * running after the restart.
	 */
	smp_cpu_stop_nolock(idx, false);
	rc = smp_sigp(idx, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(idx))
		mb();
	cpus[idx].active = true;
	return 0;
}

int smp_cpu_restart(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, NULL);
	spin_unlock(&lock);
	return rc;
}

/*
 * Functionally equivalent to smp_cpu_restart(), but without waiting
 * for the restart to take effect and without retrying busy SIGP
 * conditions. Used to check whether KVM itself serializes the orders
 * correctly.
 */
int smp_cpu_restart_nowait(uint16_t idx)
{
	check_idx(idx);

	spin_lock(&lock);

	/* Don't suppress a CC2 with sigp_retry() */
	if (sigp(cpus[idx].addr, SIGP_RESTART, 0, NULL)) {
		spin_unlock(&lock);
		return -1;
	}

	cpus[idx].active = true;

	spin_unlock(&lock);

	return 0;
}

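/*
 * Start the cpu at index idx with the given PSW: the PSW is installed
 * as the restart new PSW and the cpu is restarted there.
 */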
int smp_cpu_start(uint16_t idx, struct psw psw)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(idx, &psw);
	spin_unlock(&lock);
	return rc;
}

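/*
 * Stop the cpu at index idx and free its lowcore and stack. A new
 * smp_cpu_setup() is needed before the cpu can be used again.
 */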
int smp_cpu_destroy(uint16_t idx)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(idx, false);
	if (!rc) {
		free_pages(cpus[idx].lowcore);
		free_pages(cpus[idx].stack);
		cpus[idx].lowcore = (void *)-1UL;
		cpus[idx].stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

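/*
 * Prepare the cpu at index idx for use: perform an initial CPU reset,
 * allocate and set its prefix (lowcore) and stack, copy the exception
 * new PSWs from the boot CPU's lowcore, and restart it into
 * smp_cpu_setup_state(), which eventually continues with the provided
 * PSW. The caller must hold the lock.
 */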
static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
{
	struct lowcore *lc;

	if (cpus[idx].active)
		return -1;

	smp_sigp(idx, SIGP_INITIAL_CPU_RESET, 0, NULL);

	lc = alloc_pages_flags(1, AREA_DMA31);
	cpus[idx].lowcore = lc;
	smp_sigp(idx, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpus[0].lowcore, 512);

	/* Setup stack */
	cpus[idx].stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	lc->sw_int_psw.mask = psw.mask;
	lc->sw_int_psw.addr = psw.addr;
	lc->sw_int_grs[14] = psw.addr;
	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
	lc->restart_new_psw.mask = PSW_MASK_64;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);

	/* Start processing */
	smp_cpu_restart_nolock(idx, NULL);
	/* Wait until the cpu has finished setup and started the provided psw */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();

	return 0;
}

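/*
 * Bring up the cpu at index idx and have it start executing with the
 * given PSW. A caller would typically do something like the following,
 * with worker_fn being a hypothetical test function:
 *
 *	struct psw psw = { extract_psw_mask(), (uint64_t)worker_fn };
 *
 *	smp_cpu_setup(1, psw);
 */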
int smp_cpu_setup(uint16_t idx, struct psw psw)
{
	int rc = -1;

	spin_lock(&lock);
	if (cpus) {
		check_idx(idx);
		rc = smp_cpu_setup_nolock(idx, psw);
	}
	spin_unlock(&lock);
	return rc;
}

/*
 * Regardless of their current state, stop all cpus that once were
 * online, except for the calling cpu.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	int num = smp_query_num_cpus();

	spin_lock(&lock);
	for (; i < num; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot CPU */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	int num = smp_query_num_cpus();
	unsigned short cpu0_addr = stap();
	struct CPUEntry *entry = sclp_get_cpu_entries();

	spin_lock(&lock);
	if (num > 1)
		printf("SMP: Initializing, found %d cpus\n", num);

	cpus = calloc(num, sizeof(*cpus));
	for (i = 0; i < num; i++) {
		cpus[i].addr = entry[i].address;
		cpus[i].active = false;
		/*
		 * Fill in the boot CPU. If the boot CPU is not at index 0,
		 * swap it with the one at index 0. This guarantees that the
		 * boot CPU will always have index 0. If the boot CPU was
		 * already at index 0, a few extra useless assignments are
		 * performed, but everything will work ok.
		 * Notice that there is no guarantee that the list of CPUs
		 * returned by the Read SCP Info command is in any
		 * particular order, or that its order will stay consistent
		 * across multiple invocations.
		 */
		if (entry[i].address == cpu0_addr) {
			cpus[i].addr = cpus[0].addr;
			cpus[0].addr = cpu0_addr;
			cpus[0].stack = stackptr;
			cpus[0].lowcore = (void *)0;
			cpus[0].active = true;
		}
	}
	spin_unlock(&lock);
}