xref: /kvm-unit-tests/lib/s390x/smp.c (revision 2c96b77ec9d3b1fcec7525174e23a6240ee05949)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * s390x smp
4  * Based on Linux's arch/s390/kernel/smp.c and
5  * arch/s390/include/asm/sigp.h
6  *
7  * Copyright (c) 2019 IBM Corp
8  *
9  * Authors:
10  *  Janosch Frank <frankja@linux.ibm.com>
11  */
12 #include <libcflat.h>
13 #include <bitops.h>
14 #include <asm/arch_def.h>
15 #include <asm/sigp.h>
16 #include <asm/page.h>
17 #include <asm/barrier.h>
18 #include <asm/spinlock.h>
19 #include <asm/asm-offsets.h>
20 
21 #include <alloc.h>
22 #include <alloc_page.h>
23 
24 #include "smp.h"
25 #include "sclp.h"
26 
27 static struct cpu *cpus;
28 static struct cpu *cpu0;
29 static struct spinlock lock;
30 
31 extern void smp_cpu_setup_state(void);
32 
/* Return the number of cpus reported by SCLP. */
int smp_query_num_cpus(void)
{
	int num = sclp_get_cpu_num();

	return num;
}
37 
38 struct cpu *smp_cpu_from_addr(uint16_t addr)
39 {
40 	int i, num = smp_query_num_cpus();
41 
42 	for (i = 0; i < num; i++) {
43 		if (cpus[i].addr == addr)
44 			return &cpus[i];
45 	}
46 	return NULL;
47 }
48 
49 bool smp_cpu_stopped(uint16_t addr)
50 {
51 	uint32_t status;
52 
53 	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
54 		return false;
55 	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
56 }
57 
58 bool smp_sense_running_status(uint16_t addr)
59 {
60 	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
61 		return true;
62 	/* Status stored condition code is equivalent to cpu not running. */
63 	return false;
64 }
65 
66 static int smp_cpu_stop_nolock(uint16_t addr, bool store)
67 {
68 	struct cpu *cpu;
69 	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;
70 
71 	cpu = smp_cpu_from_addr(addr);
72 	if (!cpu || cpu == cpu0)
73 		return -1;
74 
75 	if (sigp_retry(addr, order, 0, NULL))
76 		return -1;
77 
78 	while (!smp_cpu_stopped(addr))
79 		mb();
80 	cpu->active = false;
81 	return 0;
82 }
83 
84 int smp_cpu_stop(uint16_t addr)
85 {
86 	int rc;
87 
88 	spin_lock(&lock);
89 	rc = smp_cpu_stop_nolock(addr, false);
90 	spin_unlock(&lock);
91 	return rc;
92 }
93 
94 int smp_cpu_stop_store_status(uint16_t addr)
95 {
96 	int rc;
97 
98 	spin_lock(&lock);
99 	rc = smp_cpu_stop_nolock(addr, true);
100 	spin_unlock(&lock);
101 	return rc;
102 }
103 
/*
 * Restart the cpu at the given address, optionally installing a new
 * restart PSW first, and wait until the cpu is running again.
 * Caller holds the lock. Returns 0 on success, -1 for an unknown
 * cpu, or the non-zero SIGP condition code on a rejected restart.
 */
static int smp_cpu_restart_nolock(uint16_t addr, struct psw *psw)
{
	int rc;
	struct cpu *cpu = smp_cpu_from_addr(addr);

	if (!cpu)
		return -1;
	if (psw) {
		cpu->lowcore->restart_new_psw.mask = psw->mask;
		cpu->lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu, so we don't have a race between a running cpu
	 * and the restart in the test that checks if the cpu is
	 * running after the restart.
	 */
	/*
	 * NOTE(review): the stop's return value is deliberately not
	 * checked — it fails for cpu0 and for unknown cpus, and the
	 * restart below is presumably still meaningful then; confirm.
	 */
	smp_cpu_stop_nolock(addr, false);
	rc = sigp(addr, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(addr))
		mb();
	cpu->active = true;
	return 0;
}
133 
134 int smp_cpu_restart(uint16_t addr)
135 {
136 	int rc;
137 
138 	spin_lock(&lock);
139 	rc = smp_cpu_restart_nolock(addr, NULL);
140 	spin_unlock(&lock);
141 	return rc;
142 }
143 
144 int smp_cpu_start(uint16_t addr, struct psw psw)
145 {
146 	int rc;
147 
148 	spin_lock(&lock);
149 	rc = smp_cpu_restart_nolock(addr, &psw);
150 	spin_unlock(&lock);
151 	return rc;
152 }
153 
154 int smp_cpu_destroy(uint16_t addr)
155 {
156 	struct cpu *cpu;
157 	int rc;
158 
159 	spin_lock(&lock);
160 	rc = smp_cpu_stop_nolock(addr, false);
161 	if (!rc) {
162 		cpu = smp_cpu_from_addr(addr);
163 		free_pages(cpu->lowcore);
164 		free_pages(cpu->stack);
165 		cpu->lowcore = (void *)-1UL;
166 		cpu->stack = (void *)-1UL;
167 	}
168 	spin_unlock(&lock);
169 	return rc;
170 }
171 
/*
 * Reset, initialize and start the cpu at the given address so it ends
 * up executing at the provided PSW. Allocates a fresh lowcore (prefix
 * area) and a stack for the cpu. Returns 0 on success, -1 when smp is
 * not set up yet, the cpu is unknown, or it is already active.
 */
int smp_cpu_setup(uint16_t addr, struct psw psw)
{
	struct lowcore *lc;
	struct cpu *cpu;
	int rc = -1;

	spin_lock(&lock);

	/* smp_setup() has not run yet. */
	if (!cpus)
		goto out;

	cpu = smp_cpu_from_addr(addr);

	if (!cpu || cpu->active)
		goto out;

	/* Put the cpu into a clean architectural state first. */
	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);

	/* Two pages below 2 GiB: the prefix area must be 31-bit addressable. */
	lc = alloc_pages_flags(1, AREA_DMA31);
	cpu->lowcore = lc;
	memset(lc, 0, PAGE_SIZE * 2);
	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpu0->lowcore, 512);

	/* Setup stack */
	cpu->stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	cpu->lowcore->sw_int_psw.mask = psw.mask;
	cpu->lowcore->sw_int_psw.addr = psw.addr;
	/* r14 = return address, r15 = stack pointer (top of the 4 pages). */
	cpu->lowcore->sw_int_grs[14] = psw.addr;
	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
	/* The cpu first runs the assembly setup stub on restart. */
	lc->restart_new_psw.mask = PSW_MASK_64;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);

	/* Start processing */
	smp_cpu_restart_nolock(addr, NULL);
	/*
	 * Wait until the cpu has finished setup and started the provided psw.
	 * NOTE(review): this presumably relies on smp_cpu_setup_state
	 * rewriting restart_new_psw.addr to psw.addr — confirm in the
	 * assembly stub.
	 */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();
	rc = 0;
out:
	spin_unlock(&lock);
	return rc;
}
220 
221 /*
222  * Disregarding state, stop all cpus that once were online except for
223  * calling cpu.
224  */
225 void smp_teardown(void)
226 {
227 	int i = 0;
228 	uint16_t this_cpu = stap();
229 	int num = smp_query_num_cpus();
230 
231 	spin_lock(&lock);
232 	for (; i < num; i++) {
233 		if (cpus[i].active &&
234 		    cpus[i].addr != this_cpu) {
235 			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
236 		}
237 	}
238 	spin_unlock(&lock);
239 }
240 
/* Expected to be called from the boot cpu */
242 extern uint64_t *stackptr;
243 void smp_setup(void)
244 {
245 	int i = 0;
246 	int num = smp_query_num_cpus();
247 	unsigned short cpu0_addr = stap();
248 	struct CPUEntry *entry = sclp_get_cpu_entries();
249 
250 	spin_lock(&lock);
251 	if (num > 1)
252 		printf("SMP: Initializing, found %d cpus\n", num);
253 
254 	cpus = calloc(num, sizeof(cpus));
255 	for (i = 0; i < num; i++) {
256 		cpus[i].addr = entry[i].address;
257 		cpus[i].active = false;
258 		if (entry[i].address == cpu0_addr) {
259 			cpu0 = &cpus[i];
260 			cpu0->stack = stackptr;
261 			cpu0->lowcore = (void *)0;
262 			cpu0->active = true;
263 		}
264 	}
265 	spin_unlock(&lock);
266 }
267