xref: /kvm-unit-tests/lib/s390x/smp.c (revision fdab948bc134fb9989a8265380a55e809879418e)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * s390x smp
4  * Based on Linux's arch/s390/kernel/smp.c and
5  * arch/s390/include/asm/sigp.h
6  *
7  * Copyright (c) 2019 IBM Corp
8  *
9  * Authors:
10  *  Janosch Frank <frankja@linux.ibm.com>
11  */
12 #include <libcflat.h>
13 #include <asm/arch_def.h>
14 #include <asm/sigp.h>
15 #include <asm/page.h>
16 #include <asm/barrier.h>
17 #include <asm/spinlock.h>
18 #include <asm/asm-offsets.h>
19 
20 #include <alloc.h>
21 #include <alloc_page.h>
22 
23 #include "smp.h"
24 #include "sclp.h"
25 
/* Per-CPU bookkeeping array, one entry per SCLP-reported CPU; allocated in smp_setup(). */
static struct cpu *cpus;
/* Entry in cpus[] whose address matched stap() during smp_setup(), i.e. the boot CPU. */
static struct cpu *cpu0;
/* Serializes all mutation of cpus[] and of the per-CPU active/lowcore/stack fields. */
static struct spinlock lock;

/* Entry point installed as the restart PSW address for secondary CPUs (see smp_cpu_setup()). */
extern void smp_cpu_setup_state(void);
31 
/* Return the number of CPUs reported by SCLP. */
int smp_query_num_cpus(void)
{
	int num_cpus = sclp_get_cpu_num();

	return num_cpus;
}
36 
37 struct cpu *smp_cpu_from_addr(uint16_t addr)
38 {
39 	int i, num = smp_query_num_cpus();
40 
41 	for (i = 0; i < num; i++) {
42 		if (cpus[i].addr == addr)
43 			return &cpus[i];
44 	}
45 	return NULL;
46 }
47 
48 bool smp_cpu_stopped(uint16_t addr)
49 {
50 	uint32_t status;
51 
52 	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
53 		return false;
54 	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
55 }
56 
57 bool smp_sense_running_status(uint16_t addr)
58 {
59 	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
60 		return true;
61 	/* Status stored condition code is equivalent to cpu not running. */
62 	return false;
63 }
64 
65 static int smp_cpu_stop_nolock(uint16_t addr, bool store)
66 {
67 	struct cpu *cpu;
68 	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;
69 
70 	cpu = smp_cpu_from_addr(addr);
71 	if (!cpu || cpu == cpu0)
72 		return -1;
73 
74 	if (sigp_retry(addr, order, 0, NULL))
75 		return -1;
76 
77 	while (!smp_cpu_stopped(addr))
78 		mb();
79 	cpu->active = false;
80 	return 0;
81 }
82 
83 int smp_cpu_stop(uint16_t addr)
84 {
85 	int rc;
86 
87 	spin_lock(&lock);
88 	rc = smp_cpu_stop_nolock(addr, false);
89 	spin_unlock(&lock);
90 	return rc;
91 }
92 
93 int smp_cpu_stop_store_status(uint16_t addr)
94 {
95 	int rc;
96 
97 	spin_lock(&lock);
98 	rc = smp_cpu_stop_nolock(addr, true);
99 	spin_unlock(&lock);
100 	return rc;
101 }
102 
/*
 * Restart a CPU, optionally installing a new restart PSW first; caller
 * holds the lock.  Returns 0 once the CPU is running, -1 for an unknown
 * CPU, or the SIGP RESTART condition code on failure.
 */
static int smp_cpu_restart_nolock(uint16_t addr, struct psw *psw)
{
	int rc;
	struct cpu *cpu = smp_cpu_from_addr(addr);

	if (!cpu)
		return -1;
	/* If a PSW was supplied, the restarted CPU begins execution there. */
	if (psw) {
		cpu->lowcore->restart_new_psw.mask = psw->mask;
		cpu->lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu, so we don't have a race between a running cpu
	 * and the restart in the test that checks if the cpu is
	 * running after the restart.
	 */
	smp_cpu_stop_nolock(addr, false);
	rc = sigp(addr, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(addr))
		mb();
	cpu->active = true;
	return 0;
}
132 
133 int smp_cpu_restart(uint16_t addr)
134 {
135 	int rc;
136 
137 	spin_lock(&lock);
138 	rc = smp_cpu_restart_nolock(addr, NULL);
139 	spin_unlock(&lock);
140 	return rc;
141 }
142 
143 int smp_cpu_start(uint16_t addr, struct psw psw)
144 {
145 	int rc;
146 
147 	spin_lock(&lock);
148 	rc = smp_cpu_restart_nolock(addr, &psw);
149 	spin_unlock(&lock);
150 	return rc;
151 }
152 
153 int smp_cpu_destroy(uint16_t addr)
154 {
155 	struct cpu *cpu;
156 	int rc;
157 
158 	spin_lock(&lock);
159 	rc = smp_cpu_stop_nolock(addr, false);
160 	if (!rc) {
161 		cpu = smp_cpu_from_addr(addr);
162 		free_pages(cpu->lowcore);
163 		free_pages(cpu->stack);
164 		cpu->lowcore = (void *)-1UL;
165 		cpu->stack = (void *)-1UL;
166 	}
167 	spin_unlock(&lock);
168 	return rc;
169 }
170 
171 int smp_cpu_setup(uint16_t addr, struct psw psw)
172 {
173 	struct lowcore *lc;
174 	struct cpu *cpu;
175 	int rc = -1;
176 
177 	spin_lock(&lock);
178 
179 	if (!cpus)
180 		goto out;
181 
182 	cpu = smp_cpu_from_addr(addr);
183 
184 	if (!cpu || cpu->active)
185 		goto out;
186 
187 	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);
188 
189 	lc = alloc_pages_flags(1, AREA_DMA31);
190 	cpu->lowcore = lc;
191 	memset(lc, 0, PAGE_SIZE * 2);
192 	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);
193 
194 	/* Copy all exception psws. */
195 	memcpy(lc, cpu0->lowcore, 512);
196 
197 	/* Setup stack */
198 	cpu->stack = (uint64_t *)alloc_pages(2);
199 
200 	/* Start without DAT and any other mask bits. */
201 	cpu->lowcore->sw_int_psw.mask = psw.mask;
202 	cpu->lowcore->sw_int_psw.addr = psw.addr;
203 	cpu->lowcore->sw_int_grs[14] = psw.addr;
204 	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
205 	lc->restart_new_psw.mask = 0x0000000180000000UL;
206 	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
207 	lc->sw_int_crs[0] = 0x0000000000040000UL;
208 
209 	/* Start processing */
210 	smp_cpu_restart_nolock(addr, NULL);
211 	/* Wait until the cpu has finished setup and started the provided psw */
212 	while (lc->restart_new_psw.addr != psw.addr)
213 		mb();
214 out:
215 	spin_unlock(&lock);
216 	return rc;
217 }
218 
219 /*
220  * Disregarding state, stop all cpus that once were online except for
221  * calling cpu.
222  */
223 void smp_teardown(void)
224 {
225 	int i = 0;
226 	uint16_t this_cpu = stap();
227 	int num = smp_query_num_cpus();
228 
229 	spin_lock(&lock);
230 	for (; i < num; i++) {
231 		if (cpus[i].active &&
232 		    cpus[i].addr != this_cpu) {
233 			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
234 		}
235 	}
236 	spin_unlock(&lock);
237 }
238 
239 /*Expected to be called from boot cpu */
240 extern uint64_t *stackptr;
241 void smp_setup(void)
242 {
243 	int i = 0;
244 	int num = smp_query_num_cpus();
245 	unsigned short cpu0_addr = stap();
246 	struct CPUEntry *entry = sclp_get_cpu_entries();
247 
248 	spin_lock(&lock);
249 	if (num > 1)
250 		printf("SMP: Initializing, found %d cpus\n", num);
251 
252 	cpus = calloc(num, sizeof(cpus));
253 	for (i = 0; i < num; i++) {
254 		cpus[i].addr = entry[i].address;
255 		cpus[i].active = false;
256 		if (entry[i].address == cpu0_addr) {
257 			cpu0 = &cpus[i];
258 			cpu0->stack = stackptr;
259 			cpu0->lowcore = (void *)0;
260 			cpu0->active = true;
261 		}
262 	}
263 	spin_unlock(&lock);
264 }
265