xref: /kvm-unit-tests/lib/s390x/smp.c (revision 6c9f99df2fa51f58bd6a8b6775810b7f249bd0d7)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * s390x smp
 * Based on Linux's arch/s390/kernel/smp.c and
 * arch/s390/include/asm/sigp.h
 *
 * Copyright (c) 2019 IBM Corp
 *
 * Authors:
 *  Janosch Frank <frankja@linux.ibm.com>
 */
#include <libcflat.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/spinlock.h>
#include <asm/asm-offsets.h>

#include <alloc.h>
#include <alloc_page.h>

#include "smp.h"
#include "sclp.h"

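/*
 * Buffer for the SCLP READ CPU INFO response and the cpu list built from
 * it by smp_setup(). cpu0 points at the boot cpu's entry; accesses to the
 * cpu array are serialized by the spinlock.
 */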
static char cpu_info_buffer[PAGE_SIZE] __attribute__((__aligned__(4096)));
static struct cpu *cpus;
static struct cpu *cpu0;
static struct spinlock lock;

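/* Assembly entry point used as the restart PSW address for newly set up cpus. */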
extern void smp_cpu_setup_state(void);

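/*
 * Return the number of configured cpus from the SCLP READ CPU INFO data
 * cached by smp_setup().
 */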
int smp_query_num_cpus(void)
{
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;
	return info->nr_configured;
}

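/* Find the tracking structure for the cpu with the given address. */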
struct cpu *smp_cpu_from_addr(uint16_t addr)
{
	int i, num = smp_query_num_cpus();

	for (i = 0; i < num; i++) {
		if (cpus[i].addr == addr)
			return &cpus[i];
	}
	return NULL;
}

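/*
 * A cpu is considered stopped when SIGP SENSE stores a status with the
 * stopped or check-stop bit set.
 */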
bool smp_cpu_stopped(uint16_t addr)
{
	uint32_t status;

	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
		return false;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

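/* Report whether the cpu is running according to SIGP SENSE RUNNING STATUS. */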
bool smp_sense_running_status(uint16_t addr)
{
	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
		return true;
	/* Status stored condition code is equivalent to cpu not running. */
	return false;
}

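/*
 * Stop a cpu and wait until it is fully stopped. Refuses to stop the boot
 * cpu or an unknown cpu address. Caller must hold the lock.
 */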
static int smp_cpu_stop_nolock(uint16_t addr, bool store)
{
	struct cpu *cpu;
	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;

	cpu = smp_cpu_from_addr(addr);
	if (!cpu || cpu == cpu0)
		return -1;

	if (sigp_retry(addr, order, 0, NULL))
		return -1;

	while (!smp_cpu_stopped(addr))
		mb();
	cpu->active = false;
	return 0;
}

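/* Lock-protected wrappers around smp_cpu_stop_nolock(). */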
int smp_cpu_stop(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, false);
	spin_unlock(&lock);
	return rc;
}

int smp_cpu_stop_store_status(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, true);
	spin_unlock(&lock);
	return rc;
}

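/*
 * (Re)start a cpu, optionally installing a new restart PSW first, and wait
 * until it is running. Caller must hold the lock.
 */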
static int smp_cpu_restart_nolock(uint16_t addr, struct psw *psw)
{
	int rc;
	struct cpu *cpu = smp_cpu_from_addr(addr);

	if (!cpu)
		return -1;
	if (psw) {
		cpu->lowcore->restart_new_psw.mask = psw->mask;
		cpu->lowcore->restart_new_psw.addr = psw->addr;
	}
	/*
	 * Stop the cpu first, so a test that checks whether the cpu is
	 * running after the restart does not race with a cpu that is
	 * already running.
	 */
	smp_cpu_stop_nolock(addr, false);
	rc = sigp(addr, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/*
	 * The order has been accepted, but the actual restart may not
	 * have been performed yet, so wait until the cpu is running.
	 */
	while (smp_cpu_stopped(addr))
		mb();
	cpu->active = true;
	return 0;
}

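/* Restart an already set-up cpu without changing its restart PSW. */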
int smp_cpu_restart(uint16_t addr)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(addr, NULL);
	spin_unlock(&lock);
	return rc;
}

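/* Start a cpu by installing the given PSW as its restart PSW and restarting it. */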
int smp_cpu_start(uint16_t addr, struct psw psw)
{
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_restart_nolock(addr, &psw);
	spin_unlock(&lock);
	return rc;
}

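/* Stop a cpu, free its lowcore and stack, and poison the stale pointers. */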
int smp_cpu_destroy(uint16_t addr)
{
	struct cpu *cpu;
	int rc;

	spin_lock(&lock);
	rc = smp_cpu_stop_nolock(addr, false);
	if (!rc) {
		cpu = smp_cpu_from_addr(addr);
		free_pages(cpu->lowcore);
		free_pages(cpu->stack);
		cpu->lowcore = (void *)-1UL;
		cpu->stack = (void *)-1UL;
	}
	spin_unlock(&lock);
	return rc;
}

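/*
 * Reset a cpu, allocate its lowcore and stack, and start it with the given
 * PSW. Fails if the cpu is unknown or already active.
 */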
int smp_cpu_setup(uint16_t addr, struct psw psw)
{
	struct lowcore *lc;
	struct cpu *cpu;
	int rc = -1;

	spin_lock(&lock);

	if (!cpus)
		goto out;

	cpu = smp_cpu_from_addr(addr);

	if (!cpu || cpu->active)
		goto out;

	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);

	lc = alloc_pages_flags(1, AREA_DMA31);
	cpu->lowcore = lc;
	memset(lc, 0, PAGE_SIZE * 2);
	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long)lc, NULL);

	/* Copy all exception psws. */
	memcpy(lc, cpu0->lowcore, 512);

	/* Set up the stack. */
	cpu->stack = (uint64_t *)alloc_pages(2);

	/* Start without DAT and any other mask bits. */
	cpu->lowcore->sw_int_psw.mask = psw.mask;
	cpu->lowcore->sw_int_psw.addr = psw.addr;
	cpu->lowcore->sw_int_grs[14] = psw.addr;
	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
	lc->restart_new_psw.mask = 0x0000000180000000UL;
	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
	lc->sw_int_crs[0] = 0x0000000000040000UL;

	/* Start processing */
	smp_cpu_restart_nolock(addr, NULL);
	/* Wait until the cpu has finished setup and started the provided psw */
	while (lc->restart_new_psw.addr != psw.addr)
		mb();
	rc = 0;
out:
	spin_unlock(&lock);
	return rc;
}

/*
 * Disregarding state, stop all cpus that once were online except for
 * the calling cpu.
 */
void smp_teardown(void)
{
	int i = 0;
	uint16_t this_cpu = stap();
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	spin_lock(&lock);
	for (; i < info->nr_configured; i++) {
		if (cpus[i].active &&
		    cpus[i].addr != this_cpu) {
			sigp_retry(cpus[i].addr, SIGP_STOP, 0, NULL);
		}
	}
	spin_unlock(&lock);
}

/* Expected to be called from the boot cpu. */
extern uint64_t *stackptr;
void smp_setup(void)
{
	int i = 0;
	unsigned short cpu0_addr = stap();
	struct ReadCpuInfo *info = (void *)cpu_info_buffer;

	spin_lock(&lock);
	sclp_mark_busy();
	info->h.length = PAGE_SIZE;
	sclp_service_call(SCLP_READ_CPU_INFO, cpu_info_buffer);

	if (smp_query_num_cpus() > 1)
		printf("SMP: Initializing, found %d cpus\n", info->nr_configured);

	cpus = calloc(info->nr_configured, sizeof(*cpus));
	for (i = 0; i < info->nr_configured; i++) {
		cpus[i].addr = info->entries[i].address;
		cpus[i].active = false;
		if (info->entries[i].address == cpu0_addr) {
			cpu0 = &cpus[i];
			cpu0->stack = stackptr;
			cpu0->lowcore = (void *)0;
			cpu0->active = true;
		}
	}
	spin_unlock(&lock);
}