1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Tests sigp emulation
4 *
5 * Copyright 2019 IBM Corp.
6 *
7 * Authors:
8 * Janosch Frank <frankja@linux.ibm.com>
9 */
10 #include <libcflat.h>
11 #include <asm/asm-offsets.h>
12 #include <asm/interrupt.h>
13 #include <asm/page.h>
14 #include <asm/facility.h>
15 #include <asm-generic/barrier.h>
16 #include <asm/sigp.h>
17
18 #include <smp.h>
19 #include <uv.h>
20 #include <alloc_page.h>
21
/* Hand-shake flag between CPU 0 (test driver) and CPU 1 (test target) */
static int testflag = 0;
/* A CPU address that cannot exist; used to provoke condition code 3 */
#define INVALID_CPU_ADDRESS -4711
/* 0xFF is not an assigned SIGP order code */
#define INVALID_ORDER_CODE 0xFF
/* One SIGP order plus the report message describing the case */
struct sigp_invalid_cases {
	int order;
	char message[100];
};
/* Orders sent to INVALID_CPU_ADDRESS; all are expected to return CC 3 */
static const struct sigp_invalid_cases cases_invalid_cpu_addr[] = {
	{ SIGP_STOP, "stop with invalid CPU address" },
	{ SIGP_START, "start with invalid CPU address" },
	{ SIGP_CPU_RESET, "reset with invalid CPU address" },
	{ SIGP_COND_EMERGENCY_SIGNAL, "conditional emcall with invalid CPU address" },
	{ SIGP_EMERGENCY_SIGNAL, "emcall with invalid CPU address" },
	{ SIGP_EXTERNAL_CALL, "ecall with invalid CPU address" },
	{ INVALID_ORDER_CODE, "invalid order code and CPU address" },
	{ SIGP_SENSE, "sense with invalid CPU address" },
	{ SIGP_STOP_AND_STORE_STATUS, "stop and store status with invalid CPU address" },
};
/* Orders sent to the valid CPU 1; these are expected to return CC 1 */
static const struct sigp_invalid_cases cases_valid_cpu_addr[] = {
	{ INVALID_ORDER_CODE, "invalid order code" },
};

/* Prefix register value stored by CPU 1 in stpx_and_set_flag() */
static uint32_t cpu1_prefix;

/* Parameters of one emergency-signal/external-call test case */
struct sigp_call_cases {
	char name[20];			/* report prefix for the case */
	int call;			/* SIGP order to send */
	uint16_t ext_int_expected_type;	/* expected external interrupt code */
	unsigned int cr0_bit;		/* CR0 bit that enables this interrupt */
	bool supports_pv;		/* runnable as a protected-virt guest? */
};
static const struct sigp_call_cases cases_sigp_call[] = {
	{ "emcall", SIGP_EMERGENCY_SIGNAL, 0x1201, CTL0_EMERGENCY_SIGNAL, true },
	{ "cond emcall", SIGP_COND_EMERGENCY_SIGNAL, 0x1201, CTL0_EMERGENCY_SIGNAL, false },
	{ "ecall", SIGP_EXTERNAL_CALL, 0x1202, CTL0_EXTERNAL_CALL, true },
};
/* Case currently being exercised; read by the functions running on CPU 1 */
static const struct sigp_call_cases *current_sigp_call_case;
59
test_invalid(void)60 static void test_invalid(void)
61 {
62 const struct sigp_invalid_cases *c;
63 uint32_t status;
64 int cc;
65 int i;
66
67 report_prefix_push("invalid parameters");
68
69 for (i = 0; i < ARRAY_SIZE(cases_invalid_cpu_addr); i++) {
70 c = &cases_invalid_cpu_addr[i];
71 cc = sigp(INVALID_CPU_ADDRESS, c->order, 0, &status);
72 report(cc == 3, "%s", c->message);
73 }
74
75 for (i = 0; i < ARRAY_SIZE(cases_valid_cpu_addr); i++) {
76 c = &cases_valid_cpu_addr[i];
77 cc = smp_sigp(1, c->order, 0, &status);
78 report(cc == 1, "%s", c->message);
79 }
80
81 report_prefix_pop();
82 }
83
/* Spin until the other CPU publishes the flag; mb() forces a re-read. */
static void wait_for_flag(void)
{
	while (!testflag)
		mb();
}
89
/*
 * Publish a new flag value; the barriers order the store against the
 * memory accesses performed before and after the hand-shake.
 */
static void set_flag(int val)
{
	mb();
	testflag = val;
	mb();
}
96
/* Minimal payload for a started/restarted CPU: just signal completion. */
static void test_func(void)
{
	set_flag(1);
}
101
/* Start CPU 1 and wait until it has executed test_func(). */
static void test_start(void)
{
	set_flag(0);
	smp_cpu_start(1, PSW_WITH_CUR_MASK(test_func));
	wait_for_flag();
	report_pass("start");
}
109
/* Check the restart order against a stopped and against a running CPU 1. */
static void test_restart(void)
{
	struct cpu *cpu = smp_cpu_from_idx(1);
	struct lowcore *lc = cpu->lowcore;
	int rc;

	report_prefix_push("restart");
	report_prefix_push("stopped");

	/* The restart interrupt will enter test_func() via this PSW */
	lc->restart_new_psw = PSW_WITH_CUR_MASK(test_func);

	/* Make sure cpu is stopped */
	smp_cpu_stop(1);
	set_flag(0);
	rc = smp_cpu_restart_nowait(1);
	report(!rc, "return code");
	report(!smp_cpu_stopped(1), "cpu started");
	wait_for_flag();
	report_pass("test flag");

	report_prefix_pop();
	report_prefix_push("running");

	/*
	 * Wait until cpu 1 has set the flag because it executed the
	 * restart function.
	 */
	set_flag(0);
	rc = smp_cpu_restart_nowait(1);
	report(!rc, "return code");
	report(!smp_cpu_stopped(1), "cpu started");
	wait_for_flag();
	report_pass("test flag");

	report_prefix_pop();
	report_prefix_pop();
}
147
/* Check the stop order, including stopping an already stopped CPU. */
static void test_stop(void)
{
	int rc;

	report_prefix_push("stop");

	rc = smp_cpu_stop_nowait(1);
	report(!rc, "return code");
	report(smp_cpu_stopped(1), "cpu stopped");

	/* Stopping a second time must also succeed and keep the CPU stopped */
	report_prefix_push("stop stopped CPU");
	rc = smp_cpu_stop_nowait(1);
	report(!rc, "return code");
	report(smp_cpu_stopped(1), "cpu stopped");
	report_prefix_pop();

	report_prefix_pop();
}
166
/*
 * Check stop-and-store-status on a running and on an already stopped
 * CPU 1.  The stored prefix and GR15 (stack pointer) are checked in
 * the save areas of the absolute lowcore.
 */
static void test_stop_store_status(void)
{
	struct cpu *cpu = smp_cpu_from_idx(1);

	report_prefix_push("stop store status");
	report_prefix_push("running");
	smp_cpu_restart(1);
	/* Clear the save areas so the store is detectable */
	lowcore.prefix_sa = 0;
	lowcore.grs_sa[15] = 0;
	smp_cpu_stop_store_status(1);
	mb();
	report(smp_cpu_stopped(1), "cpu stopped");
	report(lowcore.prefix_sa == (uint32_t)(uintptr_t)cpu->lowcore, "prefix");
	report(lowcore.grs_sa[15], "stack");
	report_prefix_pop();

	report_prefix_push("stopped");
	lowcore.prefix_sa = 0;
	lowcore.grs_sa[15] = 0;
	smp_cpu_stop_store_status(1);
	mb();
	report(smp_cpu_stopped(1), "cpu stopped");
	report(lowcore.prefix_sa == (uint32_t)(uintptr_t)cpu->lowcore, "prefix");
	report(lowcore.grs_sa[15], "stack");
	report_prefix_pop();

	report_prefix_pop();
}
195
/*
 * Check store-status-at-address: CC 3 for an invalid CPU address,
 * "incorrect state" with no data written while the target is running,
 * and a successful store once the target is stopped.
 */
static void test_store_status(void)
{
	struct cpu_status *status = alloc_pages_flags(1, AREA_DMA31);
	uint32_t r;
	int cc;

	report_prefix_push("store status at address");
	/* Two zeroed pages: page 0 is the target, page 1 stays zero for comparison */
	memset(status, 0, PAGE_SIZE * 2);

	report_prefix_push("invalid CPU address");
	cc = sigp(INVALID_CPU_ADDRESS, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, &r);
	report(cc == 3, "returned with CC = 3");
	report_prefix_pop();

	report_prefix_push("running");
	smp_cpu_restart(1);
	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, &r);
	report(r == SIGP_STATUS_INCORRECT_STATE, "incorrect state");
	/* Target page must still equal the untouched second page */
	report(!memcmp(status, (void *)status + PAGE_SIZE, PAGE_SIZE),
	       "status not written");
	report_prefix_pop();

	memset(status, 0, PAGE_SIZE);
	report_prefix_push("stopped");
	smp_cpu_stop(1);
	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);
	/* The prefix field becomes non-zero once the status has arrived */
	while (!status->prefix) { mb(); }
	report_pass("status written");
	free_pages(status);
	report_prefix_pop();
	smp_cpu_stop(1);

	report_prefix_pop();
}
230
/* Endless loop: a landing pad that must never make visible progress. */
static void loop(void)
{
	for (;;)
		;
}
236
/* Store this CPU's prefix register into cpu1_prefix, then signal CPU 0. */
static void stpx_and_set_flag(void)
{
	asm volatile (
		"	stpx %[prefix]\n"
		: [prefix] "=Q" (cpu1_prefix)
		:
		:
	);

	set_flag(1);
}
248
/*
 * Check the set-prefix order: it must be rejected with CC 1 and
 * "incorrect state" while the target CPU is running (the prefix must
 * stay unchanged), and return CC 3 for an invalid CPU address.
 */
static void test_set_prefix(void)
{
	struct lowcore *new_lc = alloc_pages_flags(1, AREA_DMA31);
	struct cpu *cpu1 = smp_cpu_from_idx(1);
	uint32_t status = 0;
	int cc;

	report_prefix_push("set prefix");

	assert(new_lc);

	/*
	 * Copy of CPU 1's lowcore whose restart PSW points at an endless
	 * loop: a later restart would hang instead of setting the flag
	 * if the prefix had wrongly been changed to new_lc.
	 */
	memcpy(new_lc, cpu1->lowcore, sizeof(struct lowcore));
	new_lc->restart_new_psw.addr = (unsigned long)loop;

	report_prefix_push("running");
	set_flag(0);
	smp_cpu_start(1, PSW_WITH_CUR_MASK(stpx_and_set_flag));
	wait_for_flag();
	cpu1_prefix = 0xFFFFFFFF;

	cc = smp_sigp(1, SIGP_SET_PREFIX, (unsigned long)new_lc, &status);
	report(cc == 1, "CC = 1");
	report(status == SIGP_STATUS_INCORRECT_STATE, "status = INCORRECT_STATE");

	/*
	 * If the prefix of the other CPU was changed it will enter an endless
	 * loop. Otherwise, it should eventually set the flag.
	 */
	smp_cpu_stop(1);
	set_flag(0);
	smp_cpu_restart(1);
	wait_for_flag();
	report(cpu1_prefix == (uint64_t)cpu1->lowcore, "prefix unchanged");

	report_prefix_pop();

	report_prefix_push("invalid CPU address");

	cc = sigp(INVALID_CPU_ADDRESS, SIGP_SET_PREFIX, (unsigned long)new_lc, &status);
	report(cc == 3, "CC = 3");

	report_prefix_pop();

	free_pages(new_lc);

	report_prefix_pop();

}
297
/*
 * Runs on CPU 1: enable the external interrupt for the current call
 * case, tell CPU 0 we are ready, then busy-wait for the interrupt and
 * acknowledge it via the flag.
 */
static void call_received(void)
{
	expect_ext_int();
	ctl_set_bit(0, current_sigp_call_case->cr0_bit);
	/* make sure conditional emergency is accepted by disabling IO interrupts */
	psw_mask_clear_and_set_bits(PSW_MASK_IO, PSW_MASK_EXT);

	/* Indicate that we're ready to receive the call */
	set_flag(1);

	/* The interrupt handler records the code in the lowcore */
	while (lowcore.ext_int_code != current_sigp_call_case->ext_int_expected_type)
		mb();
	report_pass("received");

	ctl_clear_bit(0, current_sigp_call_case->cr0_bit);

	/* Indicate that we're done */
	set_flag(1);
}
317
test_calls(void)318 static void test_calls(void)
319 {
320 int i;
321
322 for (i = 0; i < ARRAY_SIZE(cases_sigp_call); i++) {
323 current_sigp_call_case = &cases_sigp_call[i];
324
325 report_prefix_push(current_sigp_call_case->name);
326 if (!current_sigp_call_case->supports_pv && uv_os_is_guest()) {
327 report_skip("Not supported under PV");
328 report_prefix_pop();
329 continue;
330 }
331
332 set_flag(0);
333 smp_cpu_start(1, PSW_WITH_CUR_MASK(call_received));
334
335 /* Wait until the receiver has finished setup */
336 wait_for_flag();
337 set_flag(0);
338
339 smp_sigp(1, current_sigp_call_case->call, 0, NULL);
340
341 /* Wait until the receiver has handled the call */
342 wait_for_flag();
343 smp_cpu_stop(1);
344 report_prefix_pop();
345 }
346 }
347
/* Cleanup hook run on CPU 1 after the external interrupt is handled. */
static void call_in_wait_ext_int_fixup(struct stack_frame_int *stack)
{
	/* Clear wait bit so we don't immediately wait again after the fixup */
	lowcore.ext_old_psw.mask &= ~PSW_MASK_WAIT;
}
353
/* Runs on CPU 1: prepare to receive the call while in enabled wait. */
static void call_in_wait_setup(void)
{
	expect_ext_int();
	ctl_set_bit(0, current_sigp_call_case->cr0_bit);
	register_ext_cleanup_func(call_in_wait_ext_int_fixup);

	/* Tell CPU 0 that the setup is complete */
	set_flag(1);
}
362
/* Runs on CPU 1 after leaving wait: verify the expected interrupt code. */
static void call_in_wait_received(void)
{
	report(lowcore.ext_int_code == current_sigp_call_case->ext_int_expected_type, "received");

	set_flag(1);
}
369
/* Runs on CPU 1: undo the state set up by call_in_wait_setup(). */
static void call_in_wait_cleanup(void)
{
	ctl_clear_bit(0, current_sigp_call_case->cr0_bit);
	register_ext_cleanup_func(NULL);

	set_flag(1);
}
377
/*
 * Check that emergency/external calls are delivered to a CPU sitting
 * in enabled wait.  The inline comments explain how the race between
 * "CPU 1 entered wait" and "send the call" is avoided, and why the
 * cleanup runs as a separate function on CPU 1.
 */
static void test_calls_in_wait(void)
{
	int i;

	report_prefix_push("psw wait");
	for (i = 0; i < ARRAY_SIZE(cases_sigp_call); i++) {
		current_sigp_call_case = &cases_sigp_call[i];

		report_prefix_push(current_sigp_call_case->name);
		if (!current_sigp_call_case->supports_pv && uv_os_is_guest()) {
			report_skip("Not supported under PV");
			report_prefix_pop();
			continue;
		}

		/* Let the secondary CPU setup the external mask and the external interrupt cleanup function */
		set_flag(0);
		smp_cpu_start(1, PSW_WITH_CUR_MASK(call_in_wait_setup));

		/* Wait until the receiver has finished setup */
		wait_for_flag();
		set_flag(0);

		/*
		 * To avoid races, we need to know that the secondary CPU has entered wait,
		 * but the architecture provides no way to check whether the secondary CPU
		 * is in wait.
		 *
		 * But since a waiting CPU is considered operating, simply stop the CPU, set
		 * up the restart new PSW mask in wait, send the restart interrupt and then
		 * wait until the CPU becomes operating (done by smp_cpu_start).
		 */
		smp_cpu_stop(1);
		smp_cpu_start(1, PSW(extract_psw_mask() | PSW_MASK_EXT | PSW_MASK_WAIT, call_in_wait_received));

		smp_sigp(1, current_sigp_call_case->call, 0, NULL);

		/* Wait until the receiver has handled the call */
		wait_for_flag();
		smp_cpu_stop(1);
		set_flag(0);

		/*
		 * Now clean up the mess we have left behind. If the cleanup
		 * were part of call_in_wait_received we would not get a chance
		 * to catch an interrupt that is presented twice since we would
		 * disable the external call on the first interrupt.
		 */
		smp_cpu_start(1, PSW_WITH_CUR_MASK(call_in_wait_cleanup));

		/* Wait until the cleanup has been completed */
		wait_for_flag();
		smp_cpu_stop(1);

		report_prefix_pop();
	}
	report_prefix_pop();
}
436
/*
 * Check the sense-running-status order: the CPU executing this code
 * must sense as running, and a stopped CPU 1 must eventually sense as
 * not running.
 */
static void test_sense_running(void)
{
	report_prefix_push("sense_running");
	/* CPU0 is executing this code, so it must report itself running */
	report(smp_sense_running_status(0), "CPU0 sense claims running");
	/* Stopping CPU1 makes the not-running indication arrive quickly */
	smp_cpu_stop(1);
	/* Poll until at least one not-running indication is observed */
	while (smp_sense_running_status(1))
		;
	report_pass("CPU1 sense claims not running");
	report_prefix_pop();
}
449
/* Used to dirty registers of cpu #1 before it is reset */
static void test_func_initial(void)
{
	/* Non-default FPC and control register values that the initial
	 * CPU reset is expected to wipe (checked in test_reset_initial) */
	asm volatile("sfpc %0" :: "d" (0x11));
	lctlg(1, 0x42000UL);
	lctlg(7, 0x43000UL);
	lctlg(13, 0x44000UL);
	set_flag(1);
}
459
/*
 * Check the initial-CPU-reset order: registers dirtied by
 * test_func_initial() must be cleared, cr0/cr14 must hold the expected
 * reset values, and the CPU must end up stopped.  The state is read
 * back via store-status-at-address.
 */
static void test_reset_initial(void)
{
	struct cpu_status *status = alloc_pages_flags(0, AREA_DMA31);
	int i;

	report_prefix_push("reset initial");
	set_flag(0);
	smp_cpu_start(1, PSW_WITH_CUR_MASK(test_func_initial));
	wait_for_flag();

	smp_sigp(1, SIGP_INITIAL_CPU_RESET, 0, NULL);
	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);

	report_prefix_push("clear");
	report(!status->psw.mask && !status->psw.addr, "psw");
	report(!status->prefix, "prefix");
	report(!status->fpc, "fpc");
	report(!status->cputm, "cpu timer");
	report(!status->todpr, "todpr");
	/* cr1..cr13 and cr15 must be completely cleared by the reset */
	for (i = 1; i <= 13; i++) {
		report(status->crs[i] == 0, "cr%d == 0", i);
	}
	report(status->crs[15] == 0, "cr15 == 0");
	report_prefix_pop();

	report_prefix_push("initialized");
	/* cr0 and cr14 are expected to carry these post-reset values */
	report(status->crs[0] == 0xE0UL, "cr0 == 0xE0");
	report(status->crs[14] == 0xC2000000UL, "cr14 == 0xC2000000");
	report_prefix_pop();

	report(smp_cpu_stopped(1), "cpu stopped");
	free_pages(status);
	report_prefix_pop();
}
494
/*
 * Runs on CPU 1 after the CPU reset in test_reset(): enable ecall and
 * emcall.  Reaching set_flag(1) shows the calls queued before the
 * reset were discarded rather than delivered here.
 */
static void test_local_ints(void)
{
	/* Open masks for ecall and emcall */
	ctl_set_bit(0, CTL0_EXTERNAL_CALL);
	ctl_set_bit(0, CTL0_EMERGENCY_SIGNAL);
	psw_mask_set_bits(PSW_MASK_EXT);
	set_flag(1);
}
503
/*
 * Check the CPU-reset order: the CPU is reported stopped afterwards,
 * and the emergency/external calls queued beforehand are cleared.
 */
static void test_reset(void)
{
	report_prefix_push("cpu reset");
	/* Queue interrupts that the reset is expected to discard */
	smp_sigp(1, SIGP_EMERGENCY_SIGNAL, 0, NULL);
	smp_sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
	smp_cpu_start(1, PSW_WITH_CUR_MASK(test_func));

	smp_sigp(1, SIGP_CPU_RESET, 0, NULL);
	report(smp_cpu_stopped(1), "cpu stopped");

	set_flag(0);
	smp_cpu_start(1, PSW_WITH_CUR_MASK(test_local_ints));
	wait_for_flag();
	report_pass("local interrupts cleared");
	report_prefix_pop();
}
520
/*
 * Test driver: needs at least two CPUs.  CPU 1 is set up once (stack
 * and lowcore) and then reused, stopped, reset and restarted by the
 * individual tests.
 */
int main(void)
{
	report_prefix_push("smp");

	if (smp_query_num_cpus() == 1) {
		report_skip("need at least 2 cpus for this test");
		goto done;
	}

	/* Setting up the cpu to give it a stack and lowcore */
	smp_cpu_setup(1, PSW_WITH_CUR_MASK(test_func));
	smp_cpu_stop(1);

	test_start();
	test_invalid();
	test_restart();
	test_stop();
	test_stop_store_status();
	test_store_status();
	test_set_prefix();
	test_calls();
	test_calls_in_wait();
	test_sense_running();
	test_reset();
	test_reset_initial();
	smp_cpu_destroy(1);

done:
	report_prefix_pop();
	return report_summary();
}
552