xref: /kvm-unit-tests/riscv/sbi.c (revision 2ffe016ff1c9603792507e74d89615014ffb5e74)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SBI verification
4  *
5  * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
6  */
7 #include <libcflat.h>
8 #include <alloc_page.h>
9 #include <cpumask.h>
10 #include <limits.h>
11 #include <memregions.h>
12 #include <on-cpus.h>
13 #include <rand.h>
14 #include <setjmp.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <vmalloc.h>
18 
19 #include <asm/barrier.h>
20 #include <asm/csr.h>
21 #include <asm/delay.h>
22 #include <asm/io.h>
23 #include <asm/mmu.h>
24 #include <asm/processor.h>
25 #include <asm/sbi.h>
26 #include <asm/smp.h>
27 #include <asm/timer.h>
28 
29 #include "sbi-tests.h"
30 
31 #define	HIGH_ADDR_BOUNDARY	((phys_addr_t)1 << 32)
32 
/* Absolute value for long; wraps the compiler builtin semantics (UB on LONG_MIN). */
static long __labs(long a)
{
	return a < 0 ? -a : a;
}
37 
/* Print usage; the expected values for the checks come in via environ variables. */
static void help(void)
{
	puts("Test SBI\n");
	puts("An environ must be provided where expected values are given.\n");
}
43 
/* Issue an SBI Base extension call with a single argument. */
static struct sbiret sbi_base(int fid, unsigned long arg0)
{
	return sbi_ecall(SBI_EXT_BASE, fid, arg0, 0, 0, 0, 0, 0);
}
48 
/*
 * DBCN console write: @num_bytes starting at the physical address given by
 * the @base_addr_lo/@base_addr_hi pair (see split_phys_addr()).
 */
static struct sbiret sbi_dbcn_write(unsigned long num_bytes, unsigned long base_addr_lo,
				    unsigned long base_addr_hi)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			 num_bytes, base_addr_lo, base_addr_hi, 0, 0, 0);
}
55 
/* DBCN console write byte: emit a single byte on the debug console. */
static struct sbiret sbi_dbcn_write_byte(uint8_t byte)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
}
60 
/*
 * SUSP system suspend (funcid 0). On success the call does not return here;
 * the hart resumes at @resume_addr with @opaque made available to the resume
 * code (see sbi_susp_resume()).
 */
static struct sbiret sbi_system_suspend(uint32_t sleep_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
}
65 
/* on_cpu() callback: merely running it brings the target hart online. */
static void start_cpu(void *data)
{
	/* nothing to do */
}
70 
/* on_cpu*() callback: stop the calling hart; sbi_hart_stop() must not return. */
static void stop_cpu(void *data)
{
	struct sbiret ret = sbi_hart_stop();
	/* Only reached if the stop failed. */
	assert_msg(0, "cpu%d failed to stop with sbiret.error %ld", smp_processor_id(), ret.error);
}
76 
77 static int rand_online_cpu(prng_state *ps)
78 {
79 	int cpu, me = smp_processor_id();
80 
81 	for (;;) {
82 		cpu = prng32(ps) % nr_cpus;
83 		cpu = cpumask_next(cpu - 1, &cpu_present_mask);
84 		if (cpu != nr_cpus && cpu != me && cpu_present(cpu))
85 			break;
86 	}
87 
88 	return cpu;
89 }
90 
91 static void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo)
92 {
93 	*lo = (unsigned long)paddr;
94 	*hi = 0;
95 	if (__riscv_xlen == 32)
96 		*hi = (unsigned long)(paddr >> 32);
97 }
98 
99 static bool check_addr(phys_addr_t start, phys_addr_t size)
100 {
101 	struct mem_region *r = memregions_find(start);
102 	return r && r->end - start >= size && r->flags == MR_F_UNUSED;
103 }
104 
105 static phys_addr_t get_highest_addr(void)
106 {
107 	phys_addr_t highest_end = 0;
108 	struct mem_region *r;
109 
110 	for (r = mem_regions; r->end; ++r) {
111 		if (r->end > highest_end)
112 			highest_end = r->end;
113 	}
114 
115 	return highest_end - 1;
116 }
117 
/* Return true if environment variable @env is set to 1, y, or Y. */
static bool env_enabled(const char *env)
{
	const char *val = getenv(env);

	if (!val)
		return false;

	return val[0] == '1' || val[0] == 'y' || val[0] == 'Y';
}
124 
/* Return true if @env is set; otherwise report a skip and return false. */
static bool env_or_skip(const char *env)
{
	if (getenv(env))
		return true;

	report_skip("missing %s environment variable", env);
	return false;
}
134 
135 static bool get_invalid_addr(phys_addr_t *paddr, bool allow_default)
136 {
137 	if (env_enabled("INVALID_ADDR_AUTO")) {
138 		*paddr = get_highest_addr() + 1;
139 		return true;
140 	} else if (allow_default && !getenv("INVALID_ADDR")) {
141 		*paddr = -1ul;
142 		return true;
143 	} else if (env_or_skip("INVALID_ADDR")) {
144 		*paddr = strtoull(getenv("INVALID_ADDR"), NULL, 0);
145 		return true;
146 	}
147 
148 	return false;
149 }
150 
151 static void gen_report(struct sbiret *ret,
152 		       long expected_error, long expected_value)
153 {
154 	bool check_error = ret->error == expected_error;
155 	bool check_value = ret->value == expected_value;
156 
157 	if (!check_error || !check_value)
158 		report_info("expected (error: %ld, value: %ld), received: (error: %ld, value %ld)",
159 			    expected_error, expected_value, ret->error, ret->value);
160 
161 	report(check_error, "expected sbi.error");
162 	report(check_value, "expected sbi.value");
163 }
164 
165 static void check_base(void)
166 {
167 	struct sbiret ret;
168 	long expected;
169 
170 	report_prefix_push("base");
171 
172 	ret = sbi_base(SBI_EXT_BASE_GET_SPEC_VERSION, 0);
173 
174 	report_prefix_push("spec_version");
175 	if (env_or_skip("SBI_SPEC_VERSION")) {
176 		expected = (long)strtoul(getenv("SBI_SPEC_VERSION"), NULL, 0);
177 		assert_msg(!(expected & BIT(31)), "SBI spec version bit 31 must be zero");
178 		assert_msg(__riscv_xlen == 32 || !(expected >> 32), "SBI spec version bits greater than 31 must be zero");
179 		gen_report(&ret, 0, expected);
180 	}
181 	report_prefix_pop();
182 
183 	ret.value &= 0x7ffffffful;
184 
185 	if (ret.error || ret.value < 2) {
186 		report_skip("SBI spec version 0.2 or higher required");
187 		return;
188 	}
189 
190 	report_prefix_push("impl_id");
191 	if (env_or_skip("SBI_IMPL_ID")) {
192 		expected = (long)strtoul(getenv("SBI_IMPL_ID"), NULL, 0);
193 		ret = sbi_base(SBI_EXT_BASE_GET_IMP_ID, 0);
194 		gen_report(&ret, 0, expected);
195 	}
196 	report_prefix_pop();
197 
198 	report_prefix_push("impl_version");
199 	if (env_or_skip("SBI_IMPL_VERSION")) {
200 		expected = (long)strtoul(getenv("SBI_IMPL_VERSION"), NULL, 0);
201 		ret = sbi_base(SBI_EXT_BASE_GET_IMP_VERSION, 0);
202 		gen_report(&ret, 0, expected);
203 	}
204 	report_prefix_pop();
205 
206 	report_prefix_push("probe_ext");
207 	expected = getenv("SBI_PROBE_EXT") ? (long)strtoul(getenv("SBI_PROBE_EXT"), NULL, 0) : 1;
208 	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, SBI_EXT_BASE);
209 	gen_report(&ret, 0, expected);
210 	report_prefix_push("unavailable");
211 	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, 0xb000000);
212 	gen_report(&ret, 0, 0);
213 	report_prefix_popn(2);
214 
215 	report_prefix_push("mvendorid");
216 	if (env_or_skip("MVENDORID")) {
217 		expected = (long)strtoul(getenv("MVENDORID"), NULL, 0);
218 		assert(__riscv_xlen == 32 || !(expected >> 32));
219 		ret = sbi_base(SBI_EXT_BASE_GET_MVENDORID, 0);
220 		gen_report(&ret, 0, expected);
221 	}
222 	report_prefix_pop();
223 
224 	report_prefix_push("marchid");
225 	if (env_or_skip("MARCHID")) {
226 		expected = (long)strtoul(getenv("MARCHID"), NULL, 0);
227 		ret = sbi_base(SBI_EXT_BASE_GET_MARCHID, 0);
228 		gen_report(&ret, 0, expected);
229 	}
230 	report_prefix_pop();
231 
232 	report_prefix_push("mimpid");
233 	if (env_or_skip("MIMPID")) {
234 		expected = (long)strtoul(getenv("MIMPID"), NULL, 0);
235 		ret = sbi_base(SBI_EXT_BASE_GET_MIMPID, 0);
236 		gen_report(&ret, 0, expected);
237 	}
238 	report_prefix_popn(2);
239 }
240 
/* State shared between timer_irq_handler() and timer_check_set_timer(). */
struct timer_info {
	bool timer_works;		/* handler ran at least once */
	bool mask_timer_irq;		/* handler should mask the irq instead of cancelling the timer */
	bool timer_irq_set;		/* SIP.TIP was set on handler entry */
	bool timer_irq_cleared;		/* SIP.TIP was clear after the handler's mask/cancel action */
	unsigned long timer_irq_count;	/* handler invocation count (saturates at ULONG_MAX) */
};

static struct timer_info timer_info;
250 
/* Return true if the supervisor timer interrupt is pending (SIP.TIP). */
static bool timer_irq_pending(void)
{
	return csr_read(CSR_SIP) & IP_TIP;
}
255 
/*
 * Timer irq handler for the set_timer tests: records that the interrupt
 * fired, that TIP was set on entry, and that TIP is clear again after
 * either masking the irq (mask_timer_irq) or pushing the timer out to
 * ULONG_MAX.
 */
static void timer_irq_handler(struct pt_regs *regs)
{
	timer_info.timer_works = true;

	/* Saturate rather than wrap so the "exactly once" check stays meaningful. */
	if (timer_info.timer_irq_count < ULONG_MAX)
		++timer_info.timer_irq_count;

	if (timer_irq_pending())
		timer_info.timer_irq_set = true;

	/* Stop further interrupts: either mask the irq or cancel the timer. */
	if (timer_info.mask_timer_irq)
		timer_irq_disable();
	else
		sbi_set_timer(ULONG_MAX);

	if (!timer_irq_pending())
		timer_info.timer_irq_cleared = true;
}
274 
/*
 * Program the timer SBI_TIMER_DELAY usecs out (default 200ms) and verify
 * the interrupt arrives within SBI_TIMER_MARGIN (default 200ms) of the
 * deadline, fires exactly once, and sets/clears TIP as expected. With
 * @mask_timer_irq the handler masks the irq instead of cancelling the
 * timer.
 */
static void timer_check_set_timer(bool mask_timer_irq)
{
	struct sbiret ret;
	unsigned long begin, end, duration;
	const char *mask_test_str = mask_timer_irq ? " for mask irq test" : "";
	unsigned long d = getenv("SBI_TIMER_DELAY") ? strtol(getenv("SBI_TIMER_DELAY"), NULL, 0) : 200000;
	unsigned long margin = getenv("SBI_TIMER_MARGIN") ? strtol(getenv("SBI_TIMER_MARGIN"), NULL, 0) : 200000;

	/* Environment values are usecs; the timer compare works in cycles. */
	d = usec_to_cycles(d);
	margin = usec_to_cycles(margin);

	/* Reset shared state for this run; only mask_timer_irq carries over. */
	timer_info = (struct timer_info){ .mask_timer_irq = mask_timer_irq };
	begin = timer_get_cycles();
	ret = sbi_set_timer(begin + d);

	report(!ret.error, "set timer%s", mask_test_str);
	if (ret.error)
		report_info("set timer%s failed with %ld\n", mask_test_str, ret.error);

	/* Spin until the handler fires or the deadline + margin passes. */
	while ((end = timer_get_cycles()) <= (begin + d + margin) && !timer_info.timer_works)
		cpu_relax();

	report(timer_info.timer_works, "timer interrupt received%s", mask_test_str);
	report(timer_info.timer_irq_set, "pending timer interrupt bit set in irq handler%s", mask_test_str);

	/* In the mask case TIP is expected to stay pending, so only check here. */
	if (!mask_timer_irq) {
		report(timer_info.timer_irq_set && timer_info.timer_irq_cleared,
		       "pending timer interrupt bit cleared by setting timer to -1");
	}

	if (timer_info.timer_works) {
		duration = end - begin;
		report(duration >= d && duration <= (d + margin), "timer delay honored%s", mask_test_str);
	}

	report(timer_info.timer_irq_count == 1, "timer interrupt received exactly once%s", mask_test_str);
}
312 
/*
 * Exercise the TIME extension: normal and masked set_timer runs, plus the
 * immediate-expiry (timer = 0) and cancel-while-masked behaviors.
 */
static void check_time(void)
{
	bool pending;

	report_prefix_push("time");

	if (!sbi_probe(SBI_EXT_TIME)) {
		report_skip("time extension not available");
		report_prefix_pop();
		return;
	}

	report_prefix_push("set_timer");

	install_irq_handler(IRQ_S_TIMER, timer_irq_handler);
	local_irq_enable();
	timer_irq_enable();

	timer_check_set_timer(false);

	/* The masked run left SIE.TIE clear; only rerun if the bit stuck when set. */
	if (csr_read(CSR_SIE) & IE_TIE)
		timer_check_set_timer(true);
	else
		report_skip("timer irq enable bit is not writable, skipping mask irq test");

	/* With the irq masked, an already-expired timer should leave TIP pending... */
	timer_irq_disable();
	sbi_set_timer(0);
	pending = timer_irq_pending();
	report(pending, "timer immediately pending by setting timer to 0");
	/* ...and pushing the timer out again should clear it. */
	sbi_set_timer(ULONG_MAX);
	if (pending)
		report(!timer_irq_pending(), "pending timer cleared while masked");
	else
		report_skip("timer is not pending, skipping timer cleared while masked test");

	local_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);

	report_prefix_popn(2);
}
353 
/* Per-cpu IPI bookkeeping: set by the irq handlers on the receiving cpus,
 * consumed by ipi_hart_check() on the sender. */
static bool ipi_received[NR_CPUS];
static bool ipi_timeout[NR_CPUS];
static cpumask_t ipi_done;
357 
/* Timer irq handler used as a watchdog while a hart waits for an IPI. */
static void ipi_timeout_handler(struct pt_regs *regs)
{
	timer_stop();
	ipi_timeout[smp_processor_id()] = true;
}
363 
/* Software (IPI) irq handler: ack the IPI and record receipt for this cpu. */
static void ipi_irq_handler(struct pt_regs *regs)
{
	ipi_ack();
	ipi_received[smp_processor_id()] = true;
}
369 
/*
 * Runs on each IPI target cpu: arm a watchdog timer for @data (the timeout
 * passed through on_cpu_async()), spin until either the IPI or the timeout
 * handler fires, then tear down and signal completion via ipi_done.
 */
static void ipi_hart_wait(void *data)
{
	unsigned long timeout = (unsigned long)data;
	int me = smp_processor_id();

	install_irq_handler(IRQ_S_SOFT, ipi_irq_handler);
	install_irq_handler(IRQ_S_TIMER, ipi_timeout_handler);
	local_ipi_enable();
	timer_irq_enable();
	local_irq_enable();

	timer_start(timeout);
	/* Either handler ends the wait; READ_ONCE defeats hoisting out of the loop. */
	while (!READ_ONCE(ipi_received[me]) && !READ_ONCE(ipi_timeout[me]))
		cpu_relax();
	local_irq_disable();
	timer_stop();
	local_ipi_disable();
	timer_irq_disable();

	/* Publish completion; the sender spins on ipi_done. */
	cpumask_set_cpu(me, &ipi_done);
}
391 
392 static void ipi_hart_check(cpumask_t *mask)
393 {
394 	int cpu;
395 
396 	for_each_cpu(cpu, mask) {
397 		if (ipi_timeout[cpu]) {
398 			const char *rec = ipi_received[cpu] ? "but was still received"
399 							    : "and has still not been received";
400 			report_fail("ipi timed out on cpu%d %s", cpu, rec);
401 		}
402 
403 		ipi_timeout[cpu] = false;
404 		ipi_received[cpu] = false;
405 	}
406 }
407 
/*
 * Exercise the IPI extension: a single random target, two targets packed
 * into one hart_mask, a broadcast to all other present cpus, and invalid
 * hartid parameters.
 */
static void check_ipi(void)
{
	unsigned long d = getenv("SBI_IPI_TIMEOUT") ? strtol(getenv("SBI_IPI_TIMEOUT"), NULL, 0) : 200000;
	int nr_cpus_present = cpumask_weight(&cpu_present_mask);
	int me = smp_processor_id();
	unsigned long max_hartid = 0;
	unsigned long hartid1, hartid2;
	cpumask_t ipi_receivers;
	static prng_state ps;
	struct sbiret ret;
	int cpu, cpu2;

	/* Fixed seed so failing runs are reproducible. */
	ps = prng_init(0xDEADBEEF);

	report_prefix_push("ipi");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("ipi extension not available");
		report_prefix_pop();
		return;
	}

	if (nr_cpus_present < 2) {
		report_skip("At least 2 cpus required");
		report_prefix_pop();
		return;
	}

	/* One IPI to a single randomly chosen hart. */
	report_prefix_push("random hart");
	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpu = rand_online_cpu(&ps);
	cpumask_set_cpu(cpu, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_cpu(cpu);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	/* Two harts addressed by a single hart_mask word. */
	report_prefix_push("two in hart_mask");

	if (nr_cpus_present < 3) {
		report_skip("3 cpus required");
		goto end_two;
	}

	/*
	 * Find a second hart whose hartid is close enough to the first that
	 * both fit in one BITS_PER_LONG-wide hart_mask.
	 */
	cpu = rand_online_cpu(&ps);
	hartid1 = cpus[cpu].hartid;
	hartid2 = 0;
	for_each_present_cpu(cpu2) {
		if (cpu2 == cpu || cpu2 == me)
			continue;
		hartid2 = cpus[cpu2].hartid;
		if (__labs(hartid2 - hartid1) < BITS_PER_LONG)
			break;
	}
	if (cpu2 == nr_cpus) {
		report_skip("hartids are too sparse");
		goto end_two;
	}

	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpumask_set_cpu(cpu, &ipi_receivers);
	cpumask_set_cpu(cpu2, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	on_cpu_async(cpu2, ipi_hart_wait, (void *)d);
	/* Base is the lower hartid; bit 0 plus the distance bit cover both. */
	ret = sbi_send_ipi((1UL << __labs(hartid2 - hartid1)) | 1UL, hartid1 < hartid2 ? hartid1 : hartid2);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
end_two:
	report_prefix_pop();

	/* Broadcast to every present cpu except ourselves. */
	report_prefix_push("broadcast");
	cpumask_clear(&ipi_done);
	cpumask_copy(&ipi_receivers, &cpu_present_mask);
	cpumask_clear_cpu(me, &ipi_receivers);
	on_cpumask_async(&ipi_receivers, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_broadcast();
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("invalid parameters");

	for_each_present_cpu(cpu) {
		if (cpus[cpu].hartid > max_hartid)
			max_hartid = cpus[cpu].hartid;
	}

	/* Try the next higher hartid than the max; kfail: known to pass on some impls. */
	ret = sbi_send_ipi(2, max_hartid);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask got expected error (%ld)", ret.error);
	ret = sbi_send_ipi(1, max_hartid + 1);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask_base got expected error (%ld)", ret.error);

	report_prefix_pop();

	report_prefix_pop();
}
514 
515 #define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
516 #define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')
517 
/*
 * Write @num_bytes of @s via DBCN console write, looping on partial writes
 * (ret.value holds the number of bytes consumed by each call). @xfail
 * marks writes expected to fail, e.g. highmem buffers when the
 * implementation doesn't support them.
 */
static void dbcn_write_test(const char *s, unsigned long num_bytes, bool xfail)
{
	unsigned long base_addr_lo, base_addr_hi;
	phys_addr_t paddr = virt_to_phys((void *)s);
	int num_calls = 0;
	struct sbiret ret;

	split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);

	do {
		ret = sbi_dbcn_write(num_bytes, base_addr_lo, base_addr_hi);
		/* Advance past whatever this call consumed and retry the rest. */
		num_bytes -= ret.value;
		paddr += ret.value;
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		num_calls++;
	} while (num_bytes != 0 && ret.error == SBI_SUCCESS);

	report_xfail(xfail, ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report_info("%d sbi calls made", num_calls);
}
538 
/*
 * Map the page(s) at @page_addr — which must be page aligned, at or above
 * the 4G boundary, and backed by an unused memory region — then copy the
 * test string to @page_offset within the mapping and write it via DBCN.
 * Two pages are mapped when the buffer straddles a page boundary.
 */
static void dbcn_high_write_test(const char *s, unsigned long num_bytes,
				 phys_addr_t page_addr, size_t page_offset,
				 bool highmem_supported)
{
	int nr_pages = page_offset ? 2 : 1;
	void *vaddr;

	if (page_addr != PAGE_ALIGN(page_addr) || page_addr + PAGE_SIZE < HIGH_ADDR_BOUNDARY ||
	    !check_addr(page_addr, nr_pages * PAGE_SIZE)) {
		report_skip("Memory above 4G required");
		return;
	}

	vaddr = alloc_vpages(nr_pages);

	for (int i = 0; i < nr_pages; ++i)
		install_page(current_pgtable(), page_addr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE);
	memcpy(vaddr + page_offset, DBCN_WRITE_TEST_STRING, num_bytes);
	/* Without highmem support the write is expected to fail. */
	dbcn_write_test(vaddr + page_offset, num_bytes, !highmem_supported);
}
559 
560 /*
561  * Only the write functionality is tested here. There's no easy way to
562  * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
563  */
/*
 * Only the write functionality is tested here. There's no easy way to
 * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
 */
static void check_dbcn(void)
{
	unsigned long num_bytes = strlen(DBCN_WRITE_TEST_STRING);
	unsigned long base_addr_lo, base_addr_hi;
	bool highmem_supported = true;
	phys_addr_t paddr;
	struct sbiret ret;
	char *buf;

	report_prefix_push("dbcn");

	if (!sbi_probe(SBI_EXT_DBCN)) {
		report_skip("DBCN extension unavailable");
		report_prefix_pop();
		return;
	}

	report_prefix_push("write");

	/* Simple whole-string write. */
	dbcn_write_test(DBCN_WRITE_TEST_STRING, num_bytes, false);

	assert(num_bytes < PAGE_SIZE);

	/* Buffer straddling a (low-memory) page boundary. */
	report_prefix_push("page boundary");
	buf = alloc_pages(1);
	memcpy(&buf[PAGE_SIZE - num_bytes / 2], DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(&buf[PAGE_SIZE - num_bytes / 2], num_bytes, false);
	report_prefix_pop();

	if (env_enabled("SBI_HIGHMEM_NOT_SUPPORTED"))
		highmem_supported = false;

	/* Buffer straddling the 4G boundary. */
	report_prefix_push("high boundary");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_BOUNDARY"))
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes,
				     HIGH_ADDR_BOUNDARY - PAGE_SIZE, PAGE_SIZE - num_bytes / 2,
				     highmem_supported);
	else
		report_skip("user disabled");
	report_prefix_pop();

	/* Buffer entirely above the 4G boundary. */
	report_prefix_push("high page");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_PAGE")) {
		paddr = getenv("HIGH_PAGE") ? strtoull(getenv("HIGH_PAGE"), NULL, 0) : HIGH_ADDR_BOUNDARY;
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes, paddr, 0, highmem_supported);
	} else {
		report_skip("user disabled");
	}
	report_prefix_pop();

	/* Bytes are read from memory and written to the console */
	report_prefix_push("invalid parameter");
	if (get_invalid_addr(&paddr, false)) {
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		ret = sbi_dbcn_write(1, base_addr_lo, base_addr_hi);
		report(ret.error == SBI_ERR_INVALID_PARAM, "address (error=%ld)", ret.error);
	}
	report_prefix_popn(2);
	report_prefix_push("write_byte");

	puts("DBCN_WRITE_BYTE TEST BYTE: ");
	ret = sbi_dbcn_write_byte(DBCN_WRITE_BYTE_TEST_BYTE);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	/* Only the low byte of the argument should be written. */
	puts("DBCN_WRITE_BYTE TEST WORD: "); /* still expect 'a' in the output */
	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, 0x64636261, 0, 0, 0, 0, 0);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	report_prefix_popn(2);
}
638 
639 void sbi_susp_resume(unsigned long hartid, unsigned long opaque);
640 jmp_buf sbi_susp_jmp;
641 
/* Parameters and expectations for one system-suspend test case. */
struct susp_params {
	unsigned long sleep_type;	/* 0 is suspend-to-ram */
	unsigned long resume_addr;	/* physical address execution resumes at */
	unsigned long opaque;		/* physical address of the ctx[] handed to the resume code */
	bool returns;			/* the call is expected to return (with an error) */
	struct sbiret ret;		/* expected return when @returns is true */
};
649 
/*
 * Common suspend setup: fill @params for a successful suspend-to-ram and
 * stop every hart other than the caller (system suspend expects only one
 * hart running). Asserts the final STARTED/STOPPED states.
 */
static bool susp_basic_prep(unsigned long ctx[], struct susp_params *params)
{
	int cpu, me = smp_processor_id();
	struct sbiret ret;
	cpumask_t mask;

	memset(params, 0, sizeof(*params));
	params->sleep_type = 0; /* suspend-to-ram */
	params->resume_addr = virt_to_phys(sbi_susp_resume);
	params->opaque = virt_to_phys(ctx);
	params->returns = false;

	/* Ask every other present hart to stop itself. */
	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &mask);
	on_cpumask_async(&mask, stop_cpu, NULL);

	/* Wait up to 1s for all harts to stop */
	for (int i = 0; i < 100; i++) {
		int count = 1;	/* the caller itself */

		udelay(10000);

		for_each_present_cpu(cpu) {
			if (cpu == me)
				continue;
			ret = sbi_hart_get_status(cpus[cpu].hartid);
			if (!ret.error && ret.value == SBI_EXT_HSM_STOPPED)
				++count;
		}
		if (count == cpumask_weight(&cpu_present_mask))
			break;
	}

	/* Hard-fail if any hart is in an unexpected state. */
	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		if (cpu == me) {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STARTED,
				   "cpu%d is not started", cpu);
		} else {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STOPPED,
				   "cpu%d is not stopped", cpu);
		}
	}

	return true;
}
696 
697 static void susp_basic_check(unsigned long ctx[], struct susp_params *params)
698 {
699 	if (ctx[SBI_SUSP_RESULTS_IDX] == SBI_SUSP_TEST_MASK) {
700 		report_pass("suspend and resume");
701 	} else {
702 		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SATP))
703 			report_fail("SATP set to zero on resume");
704 		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SIE))
705 			report_fail("sstatus.SIE clear on resume");
706 		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_HARTID))
707 			report_fail("a0 is hartid on resume");
708 	}
709 }
710 
711 static bool susp_type_prep(unsigned long ctx[], struct susp_params *params)
712 {
713 	bool r;
714 
715 	r = susp_basic_prep(ctx, params);
716 	assert(r);
717 	params->sleep_type = 1;
718 	params->returns = true;
719 	params->ret.error = SBI_ERR_INVALID_PARAM;
720 
721 	return true;
722 }
723 
724 static bool susp_badaddr_prep(unsigned long ctx[], struct susp_params *params)
725 {
726 	phys_addr_t badaddr;
727 	bool r;
728 
729 	if (!get_invalid_addr(&badaddr, false))
730 		return false;
731 
732 	r = susp_basic_prep(ctx, params);
733 	assert(r);
734 	params->resume_addr = badaddr;
735 	params->returns = true;
736 	params->ret.error = SBI_ERR_INVALID_ADDRESS;
737 
738 	return true;
739 }
740 
/*
 * Prepare a suspend attempt with a second hart deliberately left running;
 * the call is expected to return SBI_ERR_DENIED.
 */
static bool susp_one_prep(unsigned long ctx[], struct susp_params *params)
{
	int started = 0, cpu, me = smp_processor_id();
	struct sbiret ret;
	bool r;

	if (cpumask_weight(&cpu_present_mask) < 2) {
		report_skip("At least 2 cpus required");
		return false;
	}

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->returns = true;
	params->ret.error = SBI_ERR_DENIED;

	/* Pick the first present cpu that isn't us... */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		break;
	}

	/* ...and restart it (susp_basic_prep stopped all the others). */
	on_cpu(cpu, start_cpu, NULL);

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		assert_msg(!ret.error, "HSM get status failed for cpu%d", cpu);
		if (ret.value == SBI_EXT_HSM_STARTED)
			started++;
	}

	/* Exactly us plus the one restarted hart. */
	assert(started == 2);

	return true;
}
776 
/*
 * Exercise the SUSP extension. Each test has a prep function that builds
 * the suspend parameters and expectations, and optionally a check function
 * run after a successful resume. The resume path (sbi_susp_resume, defined
 * elsewhere) is expected to longjmp back here with the test number.
 */
static void check_susp(void)
{
	/* CSR snapshot the resume code can compare/restore against. */
	unsigned long csrs[] = {
		[SBI_CSR_SSTATUS_IDX] = csr_read(CSR_SSTATUS),
		[SBI_CSR_SIE_IDX] = csr_read(CSR_SIE),
		[SBI_CSR_STVEC_IDX] = csr_read(CSR_STVEC),
		[SBI_CSR_SSCRATCH_IDX] = csr_read(CSR_SSCRATCH),
		[SBI_CSR_SATP_IDX] = csr_read(CSR_SATP),
	};
	/* Context handed to the resume code via the suspend opaque argument. */
	unsigned long ctx[] = {
		[SBI_SUSP_MAGIC_IDX] = SBI_SUSP_MAGIC,
		[SBI_SUSP_CSRS_IDX] = (unsigned long)csrs,
		[SBI_SUSP_HARTID_IDX] = current_thread_info()->hartid,
		[SBI_SUSP_TESTNUM_IDX] = 0,
		[SBI_SUSP_RESULTS_IDX] = 0,
	};
	enum {
#define SUSP_FIRST_TESTNUM 1
		SUSP_BASIC = SUSP_FIRST_TESTNUM,
		SUSP_TYPE,
		SUSP_BAD_ADDR,
		SUSP_ONE_ONLINE,
		NR_SUSP_TESTS,
	};
	struct susp_test {
		const char *name;
		bool (*prep)(unsigned long ctx[], struct susp_params *params);
		void (*check)(unsigned long ctx[], struct susp_params *params);
	} susp_tests[] = {
		[SUSP_BASIC]		= { "basic",		susp_basic_prep,	susp_basic_check,	},
		[SUSP_TYPE]		= { "sleep_type",	susp_type_prep,					},
		[SUSP_BAD_ADDR]		= { "bad addr",		susp_badaddr_prep,				},
		[SUSP_ONE_ONLINE]	= { "one cpu online",	susp_one_prep,					},
	};
	struct susp_params params;
	struct sbiret ret;
	int testnum, i;

	/* Quiesce interrupts before suspending. */
	local_irq_disable();
	timer_stop();

	report_prefix_push("susp");

	/* SUSP only defines function id 0. */
	ret = sbi_ecall(SBI_EXT_SUSP, 1, 0, 0, 0, 0, 0, 0);
	report(ret.error == SBI_ERR_NOT_SUPPORTED, "funcid != 0 not supported");

	for (i = SUSP_FIRST_TESTNUM; i < NR_SUSP_TESTS; i++) {
		report_prefix_push(susp_tests[i].name);

		ctx[SBI_SUSP_TESTNUM_IDX] = i;
		ctx[SBI_SUSP_RESULTS_IDX] = 0;

		assert(susp_tests[i].prep);
		if (!susp_tests[i].prep(ctx, &params)) {
			report_prefix_pop();
			continue;
		}

		/* setjmp returns 0 here; the resume code longjmps back with testnum. */
		if ((testnum = setjmp(sbi_susp_jmp)) == 0) {
			ret = sbi_system_suspend(params.sleep_type, params.resume_addr, params.opaque);

			/* The call returned, i.e. no suspend happened. */
			if (!params.returns && ret.error == SBI_ERR_NOT_SUPPORTED) {
				report_skip("SUSP not supported?");
				report_prefix_popn(2);
				return;
			} else if (!params.returns) {
				report_fail("unexpected return with error: %ld, value: %ld", ret.error, ret.value);
			} else {
				report(ret.error == params.ret.error, "expected sbi.error");
				if (ret.error != params.ret.error)
					report_info("expected error %ld, received %ld", params.ret.error, ret.error);
			}

			report_prefix_pop();
			continue;
		}
		assert(testnum == i);

		if (susp_tests[i].check)
			susp_tests[i].check(ctx, &params);

		report_prefix_pop();
	}

	report_prefix_pop();
}
863 
/* Entry point: "-h" prints usage, otherwise run every SBI test group. */
int main(int argc, char **argv)
{
	if (argc > 1 && strcmp(argv[1], "-h") == 0) {
		help();
		exit(0);
	}

	report_prefix_push("sbi");

	check_base();
	check_time();
	check_ipi();
	check_dbcn();
	check_susp();

	return report_summary();
}
880