xref: /kvm-unit-tests/s390x/smp.c (revision 4c8a99ca02252d4a2bee43f4558fe47ce5ab7ec0)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Tests sigp emulation
4  *
5  * Copyright 2019 IBM Corp.
6  *
7  * Authors:
8  *    Janosch Frank <frankja@linux.ibm.com>
9  */
10 #include <libcflat.h>
11 #include <asm/asm-offsets.h>
12 #include <asm/interrupt.h>
13 #include <asm/page.h>
14 #include <asm/facility.h>
15 #include <asm-generic/barrier.h>
16 #include <asm/sigp.h>
17 
18 #include <smp.h>
19 #include <uv.h>
20 #include <alloc_page.h>
21 
/* Flag used to hand-shake between the boot CPU and CPU 1. */
static int testflag = 0;
/* CPU address guaranteed not to exist (SIGP only uses the low 16 bits). */
#define INVALID_CPU_ADDRESS -4711
/* An order code that is not assigned to any SIGP order. */
#define INVALID_ORDER_CODE 0xFF
/* One SIGP order plus the report message describing the expected outcome. */
struct sigp_invalid_cases {
	int order;
	char message[100];
};
/* Orders sent to a nonexistent CPU address; all must complete with CC 3. */
static const struct sigp_invalid_cases cases_invalid_cpu_addr[] = {
	{ SIGP_STOP,                  "stop with invalid CPU address" },
	{ SIGP_START,                 "start with invalid CPU address" },
	{ SIGP_CPU_RESET,             "reset with invalid CPU address" },
	{ SIGP_COND_EMERGENCY_SIGNAL, "conditional emcall with invalid CPU address" },
	{ SIGP_EMERGENCY_SIGNAL,      "emcall with invalid CPU address" },
	{ SIGP_EXTERNAL_CALL,         "ecall with invalid CPU address" },
	{ INVALID_ORDER_CODE,         "invalid order code and CPU address" },
	{ SIGP_SENSE,                 "sense with invalid CPU address" },
	{ SIGP_STOP_AND_STORE_STATUS, "stop and store status with invalid CPU address" },
};
/* Invalid orders sent to an existing CPU; must complete with CC 1. */
static const struct sigp_invalid_cases cases_valid_cpu_addr[] = {
	{ INVALID_ORDER_CODE,         "invalid order code" },
};
43 
/* Prefix value CPU 1 stores via stpx in stpx_and_set_flag(). */
static uint32_t cpu1_prefix;

/* Describes one SIGP "call" order (emergency/external call) test case. */
struct sigp_call_cases {
	char name[20];			/* report prefix for this case */
	int call;			/* SIGP order code to send */
	uint16_t ext_int_expected_type;	/* expected external interrupt code */
	unsigned int cr0_bit;		/* CR0 subclass-mask bit to open */
	bool supports_pv;		/* whether testable as PV guest */
};
static const struct sigp_call_cases cases_sigp_call[] = {
	{ "emcall",      SIGP_EMERGENCY_SIGNAL,      0x1201, CTL0_EMERGENCY_SIGNAL, true },
	{ "cond emcall", SIGP_COND_EMERGENCY_SIGNAL, 0x1201, CTL0_EMERGENCY_SIGNAL, false },
	{ "ecall",       SIGP_EXTERNAL_CALL,         0x1202, CTL0_EXTERNAL_CALL,    true },
};
/* Case currently being run; also read by the secondary CPU's handlers. */
static const struct sigp_call_cases *current_sigp_call_case;
59 
60 static void test_invalid(void)
61 {
62 	const struct sigp_invalid_cases *c;
63 	uint32_t status;
64 	int cc;
65 	int i;
66 
67 	report_prefix_push("invalid parameters");
68 
69 	for (i = 0; i < ARRAY_SIZE(cases_invalid_cpu_addr); i++) {
70 		c = &cases_invalid_cpu_addr[i];
71 		cc = sigp(INVALID_CPU_ADDRESS, c->order, 0, &status);
72 		report(cc == 3, "%s", c->message);
73 	}
74 
75 	for (i = 0; i < ARRAY_SIZE(cases_valid_cpu_addr); i++) {
76 		c = &cases_valid_cpu_addr[i];
77 		cc = smp_sigp(1, c->order, 0, &status);
78 		report(cc == 1, "%s", c->message);
79 	}
80 
81 	report_prefix_pop();
82 }
83 
84 static void wait_for_flag(void)
85 {
86 	while (!testflag)
87 		mb();
88 }
89 
90 static void set_flag(int val)
91 {
92 	mb();
93 	testflag = val;
94 	mb();
95 }
96 
/* Minimal payload for the secondary CPU: just signal that it ran. */
static void test_func(void)
{
	set_flag(1);
}
101 
102 static void test_start(void)
103 {
104 	struct psw psw;
105 	psw.mask = extract_psw_mask();
106 	psw.addr = (unsigned long)test_func;
107 
108 	set_flag(0);
109 	smp_cpu_start(1, psw);
110 	wait_for_flag();
111 	report_pass("start");
112 }
113 
/*
 * Check that SIGP restart works on a stopped CPU as well as on a
 * running one, and that the restart new PSW is used as entry point.
 */
static void test_restart(void)
{
	struct cpu *cpu = smp_cpu_from_idx(1);
	struct lowcore *lc = cpu->lowcore;
	int rc;

	report_prefix_push("restart");
	report_prefix_push("stopped");

	/* The restart interrupt enters through the restart new PSW. */
	lc->restart_new_psw.mask = extract_psw_mask();
	lc->restart_new_psw.addr = (unsigned long)test_func;

	/* Make sure cpu is stopped */
	smp_cpu_stop(1);
	set_flag(0);
	rc = smp_cpu_restart_nowait(1);
	report(!rc, "return code");
	report(!smp_cpu_stopped(1), "cpu started");
	wait_for_flag();
	report_pass("test flag");

	report_prefix_pop();
	report_prefix_push("running");

	/*
	 * Wait until cpu 1 has set the flag because it executed the
	 * restart function.
	 */
	set_flag(0);
	rc = smp_cpu_restart_nowait(1);
	report(!rc, "return code");
	report(!smp_cpu_stopped(1), "cpu started");
	wait_for_flag();
	report_pass("test flag");

	report_prefix_pop();
	report_prefix_pop();
}
152 
/*
 * Check that SIGP stop works, and that stopping an already stopped
 * CPU also succeeds.
 */
static void test_stop(void)
{
	report_prefix_push("stop");

	report(!smp_cpu_stop_nowait(1), "return code");
	report(smp_cpu_stopped(1), "cpu stopped");

	report_prefix_push("stop stopped CPU");
	report(!smp_cpu_stop_nowait(1), "return code");
	report(smp_cpu_stopped(1), "cpu stopped");
	report_prefix_pop();

	report_prefix_pop();
}
171 
172 static void test_stop_store_status(void)
173 {
174 	struct cpu *cpu = smp_cpu_from_idx(1);
175 
176 	report_prefix_push("stop store status");
177 	report_prefix_push("running");
178 	smp_cpu_restart(1);
179 	lowcore.prefix_sa = 0;
180 	lowcore.grs_sa[15] = 0;
181 	smp_cpu_stop_store_status(1);
182 	mb();
183 	report(smp_cpu_stopped(1), "cpu stopped");
184 	report(lowcore.prefix_sa == (uint32_t)(uintptr_t)cpu->lowcore, "prefix");
185 	report(lowcore.grs_sa[15], "stack");
186 	report_prefix_pop();
187 
188 	report_prefix_push("stopped");
189 	lowcore.prefix_sa = 0;
190 	lowcore.grs_sa[15] = 0;
191 	smp_cpu_stop_store_status(1);
192 	mb();
193 	report(smp_cpu_stopped(1), "cpu stopped");
194 	report(lowcore.prefix_sa == (uint32_t)(uintptr_t)cpu->lowcore, "prefix");
195 	report(lowcore.grs_sa[15], "stack");
196 	report_prefix_pop();
197 
198 	report_prefix_pop();
199 }
200 
/*
 * Test SIGP store-status-at-address: CC 3 for an invalid CPU address,
 * "incorrect state" while the target is running, and an actual status
 * write once the target is stopped.
 */
static void test_store_status(void)
{
	/* Two pages: the status area plus an all-zero page for comparison. */
	struct cpu_status *status = alloc_pages_flags(1, AREA_DMA31);
	uint32_t r;
	int cc;

	report_prefix_push("store status at address");
	memset(status, 0, PAGE_SIZE * 2);

	report_prefix_push("invalid CPU address");
	cc = sigp(INVALID_CPU_ADDRESS, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, &r);
	report(cc == 3, "returned with CC = 3");
	report_prefix_pop();

	report_prefix_push("running");
	smp_cpu_restart(1);
	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, &r);
	report(r == SIGP_STATUS_INCORRECT_STATE, "incorrect state");
	/* The status page must still equal the untouched zero page. */
	report(!memcmp(status, (void *)status + PAGE_SIZE, PAGE_SIZE),
	       "status not written");
	report_prefix_pop();

	memset(status, 0, PAGE_SIZE);
	report_prefix_push("stopped");
	smp_cpu_stop(1);
	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);
	/* The order completes asynchronously; poll for the stored prefix. */
	while (!status->prefix) { mb(); }
	report_pass("status written");
	free_pages(status);
	report_prefix_pop();
	smp_cpu_stop(1);

	report_prefix_pop();
}
235 
/* Endless loop; used as a trap entry point that must never report. */
static void loop(void)
{
	while (1)
		;
}
241 
/* Store this CPU's prefix register into cpu1_prefix, then signal. */
static void stpx_and_set_flag(void)
{
	asm volatile (
		"	stpx %[prefix]\n"
		: [prefix] "=Q" (cpu1_prefix)
		:
		:
	);

	set_flag(1);
}
253 
254 static void test_set_prefix(void)
255 {
256 	struct lowcore *new_lc = alloc_pages_flags(1, AREA_DMA31);
257 	struct cpu *cpu1 = smp_cpu_from_idx(1);
258 	uint32_t status = 0;
259 	struct psw new_psw;
260 	int cc;
261 
262 	report_prefix_push("set prefix");
263 
264 	assert(new_lc);
265 
266 	memcpy(new_lc, cpu1->lowcore, sizeof(struct lowcore));
267 	new_lc->restart_new_psw.addr = (unsigned long)loop;
268 
269 	report_prefix_push("running");
270 	set_flag(0);
271 	new_psw.addr = (unsigned long)stpx_and_set_flag;
272 	new_psw.mask = extract_psw_mask();
273 	smp_cpu_start(1, new_psw);
274 	wait_for_flag();
275 	cpu1_prefix = 0xFFFFFFFF;
276 
277 	cc = smp_sigp(1, SIGP_SET_PREFIX, (unsigned long)new_lc, &status);
278 	report(cc == 1, "CC = 1");
279 	report(status == SIGP_STATUS_INCORRECT_STATE, "status = INCORRECT_STATE");
280 
281 	/*
282 	 * If the prefix of the other CPU was changed it will enter an endless
283 	 * loop. Otherwise, it should eventually set the flag.
284 	 */
285 	smp_cpu_stop(1);
286 	set_flag(0);
287 	smp_cpu_restart(1);
288 	wait_for_flag();
289 	report(cpu1_prefix == (uint64_t)cpu1->lowcore, "prefix unchanged");
290 
291 	report_prefix_pop();
292 
293 	report_prefix_push("invalid CPU address");
294 
295 	cc = sigp(INVALID_CPU_ADDRESS, SIGP_SET_PREFIX, (unsigned long)new_lc, &status);
296 	report(cc == 3, "CC = 3");
297 
298 	report_prefix_pop();
299 
300 	free_pages(new_lc);
301 
302 	report_prefix_pop();
303 
304 }
305 
/*
 * Runs on the secondary CPU: enable the call subclass, wait for the
 * expected external interrupt code, then acknowledge via the flag.
 */
static void call_received(void)
{
	expect_ext_int();
	ctl_set_bit(0, current_sigp_call_case->cr0_bit);
	/* make sure conditional emergency is accepted by disabling IO interrupts */
	psw_mask_clear_and_set_bits(PSW_MASK_IO, PSW_MASK_EXT);

	/* Indicate that we're ready to receive the call */
	set_flag(1);

	while (lowcore.ext_int_code != current_sigp_call_case->ext_int_expected_type)
		mb();
	report_pass("received");

	ctl_clear_bit(0, current_sigp_call_case->cr0_bit);

	/* Indicate that we're done */
	set_flag(1);
}
325 
/*
 * Send each SIGP call order to a running CPU 1 and verify the matching
 * external interrupt is delivered (call_received runs on CPU 1).
 */
static void test_calls(void)
{
	int i;
	struct psw psw;

	for (i = 0; i < ARRAY_SIZE(cases_sigp_call); i++) {
		current_sigp_call_case = &cases_sigp_call[i];

		report_prefix_push(current_sigp_call_case->name);
		if (!current_sigp_call_case->supports_pv && uv_os_is_guest()) {
			report_skip("Not supported under PV");
			report_prefix_pop();
			continue;
		}

		set_flag(0);
		psw.mask = extract_psw_mask();
		psw.addr = (unsigned long)call_received;
		smp_cpu_start(1, psw);

		/* Wait until the receiver has finished setup */
		wait_for_flag();
		set_flag(0);

		smp_sigp(1, current_sigp_call_case->call, 0, NULL);

		/* Wait until the receiver has handled the call */
		wait_for_flag();
		smp_cpu_stop(1);
		report_prefix_pop();
	}
}
358 
/* External-interrupt cleanup hook used by the "call in wait" tests. */
static void call_in_wait_ext_int_fixup(struct stack_frame_int *stack)
{
	/* Clear wait bit so we don't immediately wait again after the fixup */
	lowcore.ext_old_psw.mask &= ~PSW_MASK_WAIT;
}
364 
/*
 * Runs on the secondary CPU: open the call subclass and install the
 * wait-bit fixup before the main CPU puts us into wait.
 */
static void call_in_wait_setup(void)
{
	expect_ext_int();
	ctl_set_bit(0, current_sigp_call_case->cr0_bit);
	register_ext_cleanup_func(call_in_wait_ext_int_fixup);

	set_flag(1);
}
373 
/* Runs after the wait PSW is interrupted: check the interrupt code. */
static void call_in_wait_received(void)
{
	report(lowcore.ext_int_code == current_sigp_call_case->ext_int_expected_type, "received");

	set_flag(1);
}
380 
/* Runs on the secondary CPU: undo what call_in_wait_setup enabled. */
static void call_in_wait_cleanup(void)
{
	ctl_clear_bit(0, current_sigp_call_case->cr0_bit);
	register_ext_cleanup_func(NULL);

	set_flag(1);
}
388 
/*
 * Send each SIGP call order to CPU 1 while it sits in an enabled wait
 * PSW, and verify the call wakes it with the expected interrupt code.
 */
static void test_calls_in_wait(void)
{
	int i;
	struct psw psw;

	report_prefix_push("psw wait");
	for (i = 0; i < ARRAY_SIZE(cases_sigp_call); i++) {
		current_sigp_call_case = &cases_sigp_call[i];

		report_prefix_push(current_sigp_call_case->name);
		if (!current_sigp_call_case->supports_pv && uv_os_is_guest()) {
			report_skip("Not supported under PV");
			report_prefix_pop();
			continue;
		}

		/* Let the secondary CPU setup the external mask and the external interrupt cleanup function */
		set_flag(0);
		psw.mask = extract_psw_mask();
		psw.addr = (unsigned long)call_in_wait_setup;
		smp_cpu_start(1, psw);

		/* Wait until the receiver has finished setup */
		wait_for_flag();
		set_flag(0);

		/*
		 * To avoid races, we need to know that the secondary CPU has entered wait,
		 * but the architecture provides no way to check whether the secondary CPU
		 * is in wait.
		 *
		 * But since a waiting CPU is considered operating, simply stop the CPU, set
		 * up the restart new PSW mask in wait, send the restart interrupt and then
		 * wait until the CPU becomes operating (done by smp_cpu_start).
		 */
		smp_cpu_stop(1);
		psw.mask = extract_psw_mask() | PSW_MASK_EXT | PSW_MASK_WAIT;
		psw.addr = (unsigned long)call_in_wait_received;
		smp_cpu_start(1, psw);

		smp_sigp(1, current_sigp_call_case->call, 0, NULL);

		/* Wait until the receiver has handled the call */
		wait_for_flag();
		smp_cpu_stop(1);
		set_flag(0);

		/*
		 * Now clean up the mess we have left behind. If the cleanup
		 * were part of call_in_wait_received we would not get a chance
		 * to catch an interrupt that is presented twice since we would
		 * disable the external call on the first interrupt.
		 */
		psw.mask = extract_psw_mask();
		psw.addr = (unsigned long)call_in_wait_cleanup;
		smp_cpu_start(1, psw);

		/* Wait until the cleanup has been completed */
		wait_for_flag();
		smp_cpu_stop(1);

		report_prefix_pop();
	}
	report_prefix_pop();
}
454 
/*
 * Check SIGP sense-running-status: the current CPU must report as
 * running, and a stopped CPU must eventually report as not running.
 */
static void test_sense_running(void)
{
	report_prefix_push("sense_running");

	/* We (CPU0) are obviously running while executing this test. */
	report(smp_sense_running_status(0), "CPU0 sense claims running");

	/* Stop the target CPU (CPU1) to speed up the not-running case. */
	smp_cpu_stop(1);

	/* Poll until we observe a not-running indication at least once. */
	while (smp_sense_running_status(1))
		;
	report_pass("CPU1 sense claims not running");

	report_prefix_pop();
}
467 
/* Used to dirty registers of cpu #1 before it is reset */
static void test_func_initial(void)
{
	/* Dirty the FPC and a few control registers so the reset test can
	 * verify they are cleared/re-initialized afterwards. */
	asm volatile("sfpc %0" :: "d" (0x11));
	lctlg(1, 0x42000UL);
	lctlg(7, 0x43000UL);
	lctlg(13, 0x44000UL);
	set_flag(1);
}
477 
/*
 * Test SIGP initial CPU reset: registers dirtied by test_func_initial
 * must be cleared, CR0/CR14 must be back at their architected initial
 * values, and the CPU must end up stopped.
 */
static void test_reset_initial(void)
{
	struct cpu_status *status = alloc_pages_flags(0, AREA_DMA31);
	struct psw psw;
	int i;

	psw.mask = extract_psw_mask();
	psw.addr = (unsigned long)test_func_initial;

	report_prefix_push("reset initial");
	set_flag(0);
	smp_cpu_start(1, psw);
	wait_for_flag();

	smp_sigp(1, SIGP_INITIAL_CPU_RESET, 0, NULL);
	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);

	report_prefix_push("clear");
	report(!status->psw.mask && !status->psw.addr, "psw");
	report(!status->prefix, "prefix");
	report(!status->fpc, "fpc");
	report(!status->cputm, "cpu timer");
	report(!status->todpr, "todpr");
	/* CR0 and CR14 have non-zero initial values and are checked below. */
	for (i = 1; i <= 13; i++) {
		report(status->crs[i] == 0, "cr%d == 0", i);
	}
	report(status->crs[15] == 0, "cr15 == 0");
	report_prefix_pop();

	report_prefix_push("initialized");
	report(status->crs[0] == 0xE0UL, "cr0 == 0xE0");
	report(status->crs[14] == 0xC2000000UL, "cr14 == 0xC2000000");
	report_prefix_pop();

	report(smp_cpu_stopped(1), "cpu stopped");
	free_pages(status);
	report_prefix_pop();
}
516 
/*
 * Runs on the secondary CPU after a CPU reset: enabling ecall/emcall
 * must not deliver the pre-reset pending calls, or we never get here.
 */
static void test_local_ints(void)
{
	/* Open masks for ecall and emcall */
	ctl_set_bit(0, CTL0_EXTERNAL_CALL);
	ctl_set_bit(0, CTL0_EMERGENCY_SIGNAL);
	psw_mask_set_bits(PSW_MASK_EXT);
	set_flag(1);
}
525 
/*
 * Test SIGP CPU reset: the target must end up stopped and any pending
 * emergency/external calls must be cleared by the reset.
 */
static void test_reset(void)
{
	struct psw psw;

	psw.mask = extract_psw_mask();
	psw.addr = (unsigned long)test_func;

	report_prefix_push("cpu reset");
	/* Queue calls on CPU 1 that the reset is expected to discard. */
	smp_sigp(1, SIGP_EMERGENCY_SIGNAL, 0, NULL);
	smp_sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
	smp_cpu_start(1, psw);

	smp_sigp(1, SIGP_CPU_RESET, 0, NULL);
	report(smp_cpu_stopped(1), "cpu stopped");

	set_flag(0);
	psw.addr = (unsigned long)test_local_ints;
	smp_cpu_start(1, psw);
	wait_for_flag();
	report_pass("local interrupts cleared");
	report_prefix_pop();
}
548 
549 int main(void)
550 {
551 	struct psw psw;
552 	report_prefix_push("smp");
553 
554 	if (smp_query_num_cpus() == 1) {
555 		report_skip("need at least 2 cpus for this test");
556 		goto done;
557 	}
558 
559 	/* Setting up the cpu to give it a stack and lowcore */
560 	psw.mask = extract_psw_mask();
561 	psw.addr = (unsigned long)test_func;
562 	smp_cpu_setup(1, psw);
563 	smp_cpu_stop(1);
564 
565 	test_start();
566 	test_invalid();
567 	test_restart();
568 	test_stop();
569 	test_stop_store_status();
570 	test_store_status();
571 	test_set_prefix();
572 	test_calls();
573 	test_calls_in_wait();
574 	test_sense_running();
575 	test_reset();
576 	test_reset_initial();
577 	smp_cpu_destroy(1);
578 
579 done:
580 	report_prefix_pop();
581 	return report_summary();
582 }
583