// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI verification
 *
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <cpumask.h>
#include <limits.h>
#include <memregions.h>
#include <on-cpus.h>
#include <rand.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <vmalloc.h>

#include <asm/barrier.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define	HIGH_ADDR_BOUNDARY	((phys_addr_t)1 << 32)

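/* The test library doesn't provide labs(), so wrap the compiler builtin. */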
static long __labs(long a)
{
	return __builtin_labs(a);
}

static void help(void)
{
	puts("Test SBI\n");
	puts("An environment providing the expected values must be supplied.\n");
}

static struct sbiret sbi_base(int fid, unsigned long arg0)
{
	return sbi_ecall(SBI_EXT_BASE, fid, arg0, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write(unsigned long num_bytes, unsigned long base_addr_lo,
				    unsigned long base_addr_hi)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			 num_bytes, base_addr_lo, base_addr_hi, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write_byte(uint8_t byte)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_hart_suspend(uint32_t suspend_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, resume_addr, opaque, 0, 0, 0);
}

static struct sbiret sbi_system_suspend(uint32_t sleep_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
}

static void start_cpu(void *data)
{
	/* nothing to do */
}

static void stop_cpu(void *data)
{
	struct sbiret ret = sbi_hart_stop();
	assert_msg(0, "cpu%d (hartid = %lx) failed to stop with sbiret.error %ld",
		   smp_processor_id(), current_thread_info()->hartid, ret.error);
}

static int rand_online_cpu(prng_state *ps)
{
	int cpu, me = smp_processor_id();

	for (;;) {
		cpu = prng32(ps) % nr_cpus;
		cpu = cpumask_next(cpu - 1, &cpu_present_mask);
		if (cpu != nr_cpus && cpu != me && cpu_present(cpu))
			break;
	}

	return cpu;
}

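/*
 * SBI calls taking a 64-bit physical address receive it as a lo/hi register
 * pair; on RV64 the high word is always zero.
 */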
static void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo)
{
	*lo = (unsigned long)paddr;
	*hi = 0;
	if (__riscv_xlen == 32)
		*hi = (unsigned long)(paddr >> 32);
}

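/* Check that [start, start + size) lies within a single unused memory region. */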
static bool check_addr(phys_addr_t start, phys_addr_t size)
{
	struct mem_region *r = memregions_find(start);

	return r && r->end - start >= size && r->flags == MR_F_UNUSED;
}

static phys_addr_t get_highest_addr(void)
{
	phys_addr_t highest_end = 0;
	struct mem_region *r;

	for (r = mem_regions; r->end; ++r) {
		if (r->end > highest_end)
			highest_end = r->end;
	}

	return highest_end - 1;
}

static bool env_enabled(const char *env)
{
	char *s = getenv(env);

	return s && (*s == '1' || *s == 'y' || *s == 'Y');
}

static bool env_or_skip(const char *env)
{
	if (!getenv(env)) {
		report_skip("missing %s environment variable", env);
		return false;
	}

	return true;
}

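/*
 * Choose an invalid physical address for negative tests: one past the highest
 * RAM address when INVALID_ADDR_AUTO=y, -1 when a default is allowed, or the
 * value of the INVALID_ADDR environment variable.
 */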
static bool get_invalid_addr(phys_addr_t *paddr, bool allow_default)
{
	if (env_enabled("INVALID_ADDR_AUTO")) {
		*paddr = get_highest_addr() + 1;
		return true;
	} else if (allow_default && !getenv("INVALID_ADDR")) {
		*paddr = -1ul;
		return true;
	} else if (env_or_skip("INVALID_ADDR")) {
		*paddr = strtoull(getenv("INVALID_ADDR"), NULL, 0);
		return true;
	}

	return false;
}

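/* Check both sbiret fields against their expected values and report. */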
static void gen_report(struct sbiret *ret,
		       long expected_error, long expected_value)
{
	bool check_error = ret->error == expected_error;
	bool check_value = ret->value == expected_value;

	if (!check_error || !check_value)
		report_info("expected (error: %ld, value: %ld), received: (error: %ld, value: %ld)",
			    expected_error, expected_value, ret->error, ret->value);

	report(check_error, "expected sbi.error");
	report(check_value, "expected sbi.value");
}

static void check_base(void)
{
	struct sbiret ret;
	long expected;

	report_prefix_push("base");

	ret = sbi_base(SBI_EXT_BASE_GET_SPEC_VERSION, 0);

	report_prefix_push("spec_version");
	if (env_or_skip("SBI_SPEC_VERSION")) {
		expected = (long)strtoul(getenv("SBI_SPEC_VERSION"), NULL, 0);
		assert_msg(!(expected & BIT(31)), "SBI spec version bit 31 must be zero");
		assert_msg(__riscv_xlen == 32 || !(expected >> 32), "SBI spec version bits greater than 31 must be zero");
		gen_report(&ret, 0, expected);
	}
	report_prefix_pop();

	ret.value &= 0x7ffffffful;

	if (ret.error || ret.value < 2) {
		report_skip("SBI spec version 0.2 or higher required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("impl_id");
	if (env_or_skip("SBI_IMPL_ID")) {
		expected = (long)strtoul(getenv("SBI_IMPL_ID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_ID, 0);
		gen_report(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("impl_version");
	if (env_or_skip("SBI_IMPL_VERSION")) {
		expected = (long)strtoul(getenv("SBI_IMPL_VERSION"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_VERSION, 0);
		gen_report(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("probe_ext");
	expected = getenv("SBI_PROBE_EXT") ? (long)strtoul(getenv("SBI_PROBE_EXT"), NULL, 0) : 1;
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, SBI_EXT_BASE);
	gen_report(&ret, 0, expected);
	report_prefix_push("unavailable");
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, 0xb000000);
	gen_report(&ret, 0, 0);
	report_prefix_popn(2);

	report_prefix_push("mvendorid");
	if (env_or_skip("MVENDORID")) {
		expected = (long)strtoul(getenv("MVENDORID"), NULL, 0);
		assert(__riscv_xlen == 32 || !(expected >> 32));
		ret = sbi_base(SBI_EXT_BASE_GET_MVENDORID, 0);
		gen_report(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("marchid");
	if (env_or_skip("MARCHID")) {
		expected = (long)strtoul(getenv("MARCHID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MARCHID, 0);
		gen_report(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("mimpid");
	if (env_or_skip("MIMPID")) {
		expected = (long)strtoul(getenv("MIMPID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MIMPID, 0);
		gen_report(&ret, 0, expected);
	}
	report_prefix_popn(2);
}

struct timer_info {
	bool timer_works;
	bool mask_timer_irq;
	bool timer_irq_set;
	bool timer_irq_cleared;
	unsigned long timer_irq_count;
};

static struct timer_info timer_info;

static bool timer_irq_pending(void)
{
	return csr_read(CSR_SIP) & IP_TIP;
}

static void timer_irq_handler(struct pt_regs *regs)
{
	timer_info.timer_works = true;

	if (timer_info.timer_irq_count < ULONG_MAX)
		++timer_info.timer_irq_count;

	if (timer_irq_pending())
		timer_info.timer_irq_set = true;

	if (timer_info.mask_timer_irq)
		timer_irq_disable();
	else
		sbi_set_timer(ULONG_MAX);

	if (!timer_irq_pending())
		timer_info.timer_irq_cleared = true;
}

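/*
 * Program the timer 'SBI_TIMER_DELAY' usecs into the future and check that
 * the interrupt arrives within 'SBI_TIMER_MARGIN' usecs of the deadline.
 * Both default to 200000 usecs when the environment variables are not set.
 */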
static void timer_check_set_timer(bool mask_timer_irq)
{
	struct sbiret ret;
	unsigned long begin, end, duration;
	const char *mask_test_str = mask_timer_irq ? " for mask irq test" : "";
	unsigned long d = getenv("SBI_TIMER_DELAY") ? strtol(getenv("SBI_TIMER_DELAY"), NULL, 0) : 200000;
	unsigned long margin = getenv("SBI_TIMER_MARGIN") ? strtol(getenv("SBI_TIMER_MARGIN"), NULL, 0) : 200000;

	d = usec_to_cycles(d);
	margin = usec_to_cycles(margin);

	timer_info = (struct timer_info){ .mask_timer_irq = mask_timer_irq };
	begin = timer_get_cycles();
	ret = sbi_set_timer(begin + d);

	report(!ret.error, "set timer%s", mask_test_str);
	if (ret.error)
		report_info("set timer%s failed with %ld", mask_test_str, ret.error);

	while ((end = timer_get_cycles()) <= (begin + d + margin) && !timer_info.timer_works)
		cpu_relax();

	report(timer_info.timer_works, "timer interrupt received%s", mask_test_str);
	report(timer_info.timer_irq_set, "pending timer interrupt bit set in irq handler%s", mask_test_str);

	if (!mask_timer_irq) {
		report(timer_info.timer_irq_set && timer_info.timer_irq_cleared,
		       "pending timer interrupt bit cleared by setting timer to -1");
	}

	if (timer_info.timer_works) {
		duration = end - begin;
		report(duration >= d && duration <= (d + margin), "timer delay honored%s", mask_test_str);
	}

	report(timer_info.timer_irq_count == 1, "timer interrupt received exactly once%s", mask_test_str);
}

static void check_time(void)
{
	bool pending;

	report_prefix_push("time");

	if (!sbi_probe(SBI_EXT_TIME)) {
		report_skip("time extension not available");
		report_prefix_pop();
		return;
	}

	report_prefix_push("set_timer");

	install_irq_handler(IRQ_S_TIMER, timer_irq_handler);
	local_irq_enable();
	timer_irq_enable();

	timer_check_set_timer(false);

	if (csr_read(CSR_SIE) & IE_TIE)
		timer_check_set_timer(true);
	else
		report_skip("timer irq enable bit is not writable, skipping mask irq test");

	timer_irq_disable();
	sbi_set_timer(0);
	pending = timer_irq_pending();
	report(pending, "timer immediately pending by setting timer to 0");
	sbi_set_timer(ULONG_MAX);
	if (pending)
		report(!timer_irq_pending(), "pending timer cleared while masked");
	else
		report_skip("timer is not pending, skipping timer cleared while masked test");

	local_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);

	report_prefix_popn(2);
}

static bool ipi_received[NR_CPUS];
static bool ipi_timeout[NR_CPUS];
static cpumask_t ipi_done;

static void ipi_timeout_handler(struct pt_regs *regs)
{
	timer_stop();
	ipi_timeout[smp_processor_id()] = true;
}

static void ipi_irq_handler(struct pt_regs *regs)
{
	ipi_ack();
	ipi_received[smp_processor_id()] = true;
}

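/*
 * Runs on each IPI target hart: spin until the IPI arrives or the timeout
 * timer fires, then report in via the ipi_done cpumask.
 */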
static void ipi_hart_wait(void *data)
{
	unsigned long timeout = (unsigned long)data;
	int me = smp_processor_id();

	install_irq_handler(IRQ_S_SOFT, ipi_irq_handler);
	install_irq_handler(IRQ_S_TIMER, ipi_timeout_handler);
	local_ipi_enable();
	timer_irq_enable();
	local_irq_enable();

	timer_start(timeout);
	while (!READ_ONCE(ipi_received[me]) && !READ_ONCE(ipi_timeout[me]))
		cpu_relax();
	local_irq_disable();
	timer_stop();
	local_ipi_disable();
	timer_irq_disable();

	cpumask_set_cpu(me, &ipi_done);
}

static void ipi_hart_check(cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (ipi_timeout[cpu]) {
			const char *rec = ipi_received[cpu] ? "but was still received"
							    : "and has still not been received";
			report_fail("ipi timed out on cpu%d %s", cpu, rec);
		}

		ipi_timeout[cpu] = false;
		ipi_received[cpu] = false;
	}
}

static void check_ipi(void)
{
	unsigned long d = getenv("SBI_IPI_TIMEOUT") ? strtol(getenv("SBI_IPI_TIMEOUT"), NULL, 0) : 200000;
	int nr_cpus_present = cpumask_weight(&cpu_present_mask);
	int me = smp_processor_id();
	unsigned long max_hartid = 0;
	unsigned long hartid1, hartid2;
	cpumask_t ipi_receivers;
	static prng_state ps;
	struct sbiret ret;
	int cpu, cpu2;

	ps = prng_init(0xDEADBEEF);

	report_prefix_push("ipi");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("ipi extension not available");
		report_prefix_pop();
		return;
	}

	if (nr_cpus_present < 2) {
		report_skip("At least 2 cpus required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("random hart");
	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpu = rand_online_cpu(&ps);
	cpumask_set_cpu(cpu, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_cpu(cpu);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

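	/*
	 * Bit i of hart_mask selects hartid hart_mask_base + i, so a single
	 * mask can only address harts whose ids are within BITS_PER_LONG of
	 * each other.
	 */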
	report_prefix_push("two in hart_mask");

	if (nr_cpus_present < 3) {
		report_skip("3 cpus required");
		goto end_two;
	}

	cpu = rand_online_cpu(&ps);
	hartid1 = cpus[cpu].hartid;
	hartid2 = 0;
	for_each_present_cpu(cpu2) {
		if (cpu2 == cpu || cpu2 == me)
			continue;
		hartid2 = cpus[cpu2].hartid;
		if (__labs(hartid2 - hartid1) < BITS_PER_LONG)
			break;
	}
	if (cpu2 == nr_cpus) {
		report_skip("hartids are too sparse");
		goto end_two;
	}

	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpumask_set_cpu(cpu, &ipi_receivers);
	cpumask_set_cpu(cpu2, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	on_cpu_async(cpu2, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi((1UL << __labs(hartid2 - hartid1)) | 1UL, hartid1 < hartid2 ? hartid1 : hartid2);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
end_two:
	report_prefix_pop();

	report_prefix_push("broadcast");
	cpumask_clear(&ipi_done);
	cpumask_copy(&ipi_receivers, &cpu_present_mask);
	cpumask_clear_cpu(me, &ipi_receivers);
	on_cpumask_async(&ipi_receivers, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_broadcast();
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("invalid parameters");

	for_each_present_cpu(cpu) {
		if (cpus[cpu].hartid > max_hartid)
			max_hartid = cpus[cpu].hartid;
	}

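	/*
	 * report_kfail(true, ...) treats a failed check as a known failure,
	 * presumably because implementations commonly accept out-of-range
	 * hartids instead of returning SBI_ERR_INVALID_PARAM.
	 */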
	/* Try the next higher hartid than the max */
	ret = sbi_send_ipi(2, max_hartid);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask got expected error (%ld)", ret.error);
	ret = sbi_send_ipi(1, max_hartid + 1);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask_base got expected error (%ld)", ret.error);

	report_prefix_pop();

	report_prefix_pop();
}

unsigned char sbi_hsm_stop_hart[NR_CPUS];
unsigned char sbi_hsm_hart_start_checks[NR_CPUS];
unsigned char sbi_hsm_non_retentive_hart_suspend_checks[NR_CPUS];

static const char * const hart_state_str[] = {
	[SBI_EXT_HSM_STARTED] = "started",
	[SBI_EXT_HSM_STOPPED] = "stopped",
	[SBI_EXT_HSM_SUSPENDED] = "suspended",
};

struct hart_state_transition_info {
	enum sbi_ext_hsm_sid initial_state;
	enum sbi_ext_hsm_sid intermediate_state;
	enum sbi_ext_hsm_sid final_state;
};

static cpumask_t sbi_hsm_started_hart_checks;
static bool sbi_hsm_invalid_hartid_check;
static bool sbi_hsm_timer_fired;

extern void sbi_hsm_check_hart_start(void);
extern void sbi_hsm_check_non_retentive_suspend(void);

static void hsm_timer_irq_handler(struct pt_regs *regs)
{
	timer_stop();
	sbi_hsm_timer_fired = true;
}

static void hsm_timer_setup(void)
{
	install_irq_handler(IRQ_S_TIMER, hsm_timer_irq_handler);
	timer_irq_enable();
}

static void hsm_timer_teardown(void)
{
	timer_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);
}

static void hart_check_already_started(void *data)
{
	struct sbiret ret;
	unsigned long hartid = current_thread_info()->hartid;
	int me = smp_processor_id();

	ret = sbi_hart_start(hartid, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_ALREADY_AVAILABLE)
		cpumask_set_cpu(me, &sbi_hsm_started_hart_checks);
}

static void hart_start_invalid_hartid(void *data)
{
	struct sbiret ret;

	ret = sbi_hart_start(-1UL, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_INVALID_PARAM)
		sbi_hsm_invalid_hartid_check = true;
}

static void hart_retentive_suspend(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	struct sbiret ret = sbi_hart_suspend(SBI_EXT_HSM_HART_SUSPEND_RETENTIVE, 0, 0);

	if (ret.error)
		report_fail("failed to retentive suspend cpu%d (hartid = %lx) (error=%ld)",
			    smp_processor_id(), hartid, ret.error);
}

static void hart_non_retentive_suspend(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = hartid,
	};
	struct sbiret ret = sbi_hart_suspend(SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE,
					     virt_to_phys(&sbi_hsm_check_non_retentive_suspend),
					     virt_to_phys(params));

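	/*
	 * A successful non-retentive suspend does not return; the hart resumes
	 * at sbi_hsm_check_non_retentive_suspend instead. Reaching this report
	 * means the suspend call failed.
	 */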
	report_fail("failed to non-retentive suspend cpu%d (hartid = %lx) (error=%ld)",
		    smp_processor_id(), hartid, ret.error);
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_retentive_suspend_with_msb_set(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	struct sbiret ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, 0, 0, 0, 0, 0);

	if (ret.error)
		report_fail("failed to retentive suspend cpu%d (hartid = %lx) with MSB set (error=%ld)",
			    smp_processor_id(), hartid, ret.error);
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_non_retentive_suspend_with_msb_set(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = hartid,
	};

	struct sbiret ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type,
				      virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				      0, 0, 0);

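	/*
	 * As above: a successful non-retentive suspend never returns, so
	 * reaching this report means the suspend call failed.
	 */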
	report_fail("failed to non-retentive suspend cpu%d (hartid = %lx) with MSB set (error=%ld)",
		    smp_processor_id(), hartid, ret.error);
}

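/*
 * Wait for the hart to leave the given state or for the timeout timer to
 * fire. Returns true if the hart left the state without an error or timeout.
 */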
static bool hart_wait_on_status(unsigned long hartid, enum sbi_ext_hsm_sid status, unsigned long duration)
{
	struct sbiret ret;

	sbi_hsm_timer_fired = false;
	timer_start(duration);

	ret = sbi_hart_get_status(hartid);

	while (!ret.error && ret.value == status && !sbi_hsm_timer_fired) {
		cpu_relax();
		ret = sbi_hart_get_status(hartid);
	}

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("timer fired while waiting on status %u for hartid %lx", status, hartid);
	else if (ret.error)
		report_fail("got %ld while waiting on status %u for hartid %lx", ret.error, status, hartid);

	return !sbi_hsm_timer_fired && !ret.error;
}

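/*
 * For each hart in 'mask', wait out the initial and intermediate states and
 * then check that the hart reached the final state. Returns the number of
 * harts that completed the transition.
 */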
static int hart_wait_state_transition(cpumask_t *mask, unsigned long duration,
				      struct hart_state_transition_info *states)
{
	struct sbiret ret;
	unsigned long hartid;
	int cpu, count = 0;

	for_each_cpu(cpu, mask) {
		hartid = cpus[cpu].hartid;
		if (!hart_wait_on_status(hartid, states->initial_state, duration))
			continue;
		if (!hart_wait_on_status(hartid, states->intermediate_state, duration))
			continue;

		ret = sbi_hart_get_status(hartid);
		if (ret.error)
			report_info("hartid %lx get status failed (error=%ld)", hartid, ret.error);
		else if (ret.value != states->final_state)
			report_info("hartid %lx status is not '%s' (ret.value=%ld)", hartid,
				    hart_state_str[states->final_state], ret.value);
		else
			count++;
	}

	return count;
}

static void hart_wait_until_idle(cpumask_t *mask, unsigned long duration)
{
	sbi_hsm_timer_fired = false;
	timer_start(duration);

	while (!cpumask_subset(mask, &cpu_idle_mask) && !sbi_hsm_timer_fired)
		cpu_relax();

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("hsm timer fired before all cpus became idle");
}

static void check_hsm(void)
{
	struct sbiret ret;
	unsigned long hartid;
	cpumask_t secondary_cpus_mask, mask;
	struct hart_state_transition_info transition_states;
	bool ipi_unavailable = false;
	int cpu, me = smp_processor_id();
	int max_cpus = getenv("SBI_MAX_CPUS") ? strtol(getenv("SBI_MAX_CPUS"), NULL, 0) : nr_cpus;
	unsigned long hsm_timer_duration = getenv("SBI_HSM_TIMER_DURATION")
					 ? strtol(getenv("SBI_HSM_TIMER_DURATION"), NULL, 0) : 200000;
	unsigned long sbi_hsm_hart_start_params[NR_CPUS * SBI_HSM_NUM_OF_PARAMS];
	int count, check;

	max_cpus = MIN(MIN(max_cpus, nr_cpus), cpumask_weight(&cpu_present_mask));

	report_prefix_push("hsm");

	if (!sbi_probe(SBI_EXT_HSM)) {
		report_skip("hsm extension not available");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_get_status");

	hartid = current_thread_info()->hartid;
	ret = sbi_hart_get_status(hartid);

	if (ret.error) {
		report_fail("failed to get status of current hart (error=%ld)", ret.error);
		report_prefix_popn(2);
		return;
	} else if (ret.value != SBI_EXT_HSM_STARTED) {
		report_fail("current hart is not started (ret.value=%ld)", ret.value);
		report_prefix_popn(2);
		return;
	}

	report_pass("status of current hart is started");

	report_prefix_pop();

	if (max_cpus < 2) {
		report_skip("no other cpus to run the remaining hsm tests on");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_stop");

	cpumask_copy(&secondary_cpus_mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &secondary_cpus_mask);
	hsm_timer_setup();
	local_irq_enable();

	/* Previous tests may not have cleaned up after themselves, so stop the secondary harts */
	on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	report_prefix_push("hart_start");

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_HARTID_IDX] = hartid;

		ret = sbi_hart_start(hartid, virt_to_phys(&sbi_hsm_check_hart_start),
				     virt_to_phys(&sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS]));
		if (ret.error) {
			report_fail("failed to start test on cpu%d (hartid = %lx) (error=%ld)", cpu, hartid, ret.error);
			continue;
		}
	}

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_hart_start_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with start checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts started");
	report(check == max_cpus - 1, "all secondary harts have expected register values after hart start");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	/* Reset the stop flags so that we can reuse them after the suspension tests */
	memset(sbi_hsm_stop_hart, 0, sizeof(sbi_hsm_stop_hart));

	report_prefix_pop();

	report_prefix_push("hart_start");

	/* Select just one secondary cpu to run the invalid hartid test */
	on_cpu(cpumask_next(-1, &secondary_cpus_mask), hart_start_invalid_hartid, NULL);

	report(sbi_hsm_invalid_hartid_check, "secondary hart refuses to start with an invalid hartid");

	on_cpumask_async(&secondary_cpus_mask, hart_check_already_started, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts started");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	report(cpumask_weight(&sbi_hsm_started_hart_checks) == max_cpus - 1,
	       "all secondary harts are already started");

	report_prefix_pop();

	report_prefix_push("hart_suspend");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("skipping suspension tests since ipi extension is unavailable");
		report_prefix_pop();
		ipi_unavailable = true;
		goto sbi_hsm_hart_stop_tests;
	}

	on_cpumask_async(&secondary_cpus_mask, hart_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts retentive suspended");

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&secondary_cpus_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts retentive resumed");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	on_cpumask_async(&secondary_cpus_mask, hart_non_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts non-retentive suspended");

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&secondary_cpus_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts non-retentive resumed");
	report(check == max_cpus - 1, "all secondary harts have expected register values after non-retentive resume");

	report_prefix_pop();

sbi_hsm_hart_stop_tests:
	report_prefix_push("hart_stop");

	if (ipi_unavailable)
		on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
	else
		memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	if (__riscv_xlen == 32 || ipi_unavailable) {
		local_irq_disable();
		hsm_timer_teardown();
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_suspend");

	/* Select just one secondary cpu to run the suspension tests with the MSB of suspend_type set */
	cpu = cpumask_next(-1, &secondary_cpus_mask);
	hartid = cpus[cpu].hartid;
	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);

	/* Boot up the secondary cpu and let it proceed to the idle loop */
	on_cpu(cpu, start_cpu, NULL);

	on_cpu_async(cpu, hart_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive suspended with MSB set");

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive resumed with MSB set");

	/* Reset these flags so that we can reuse them for the non-retentive suspension test */
	sbi_hsm_stop_hart[cpu] = 0;
	sbi_hsm_non_retentive_hart_suspend_checks[cpu] = 0;

	on_cpu_async(cpu, hart_non_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart non-retentive suspended with MSB set");

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
	check = 0;

	if (count) {
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
		} else {
			if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
				report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
				report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
				report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
				report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
			else
				check = 1;
		}
	}

	report(count, "secondary hart non-retentive resumed with MSB set");
	report(check, "secondary hart has expected register values after non-retentive resume with MSB set");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	sbi_hsm_stop_hart[cpu] = 1;

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart stopped after suspension tests with MSB set");

	local_irq_disable();
	hsm_timer_teardown();
	report_prefix_popn(2);
}

#define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
#define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')

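/*
 * A DBCN write may complete partially, with sbiret.value holding the number
 * of bytes actually written, so keep calling until the whole string is out
 * or an error is returned.
 */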
static void dbcn_write_test(const char *s, unsigned long num_bytes, bool xfail)
{
	unsigned long base_addr_lo, base_addr_hi;
	phys_addr_t paddr = virt_to_phys((void *)s);
	int num_calls = 0;
	struct sbiret ret;

	split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);

	do {
		ret = sbi_dbcn_write(num_bytes, base_addr_lo, base_addr_hi);
		num_bytes -= ret.value;
		paddr += ret.value;
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		num_calls++;
	} while (num_bytes != 0 && ret.error == SBI_SUCCESS);

	report_xfail(xfail, ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report_info("%d sbi calls made", num_calls);
}

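/*
 * Map the physical page(s) at or straddling the 4G boundary and write the
 * test string through the new virtual mapping. The write is expected to
 * fail when the platform has no memory above 4G (highmem_supported == false).
 */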
static void dbcn_high_write_test(const char *s, unsigned long num_bytes,
				 phys_addr_t page_addr, size_t page_offset,
				 bool highmem_supported)
{
	int nr_pages = page_offset ? 2 : 1;
	void *vaddr;

	if (page_addr != PAGE_ALIGN(page_addr) || page_addr + PAGE_SIZE < HIGH_ADDR_BOUNDARY ||
	    !check_addr(page_addr, nr_pages * PAGE_SIZE)) {
		report_skip("Memory above 4G required");
		return;
	}

	vaddr = alloc_vpages(nr_pages);

	for (int i = 0; i < nr_pages; ++i)
		install_page(current_pgtable(), page_addr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE);
	memcpy(vaddr + page_offset, DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(vaddr + page_offset, num_bytes, !highmem_supported);
}

/*
 * Only the write functionality is tested here. There's no easy way to
 * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
 */
static void check_dbcn(void)
{
	unsigned long num_bytes = strlen(DBCN_WRITE_TEST_STRING);
	unsigned long base_addr_lo, base_addr_hi;
	bool highmem_supported = true;
	phys_addr_t paddr;
	struct sbiret ret;
	char *buf;

	report_prefix_push("dbcn");

	if (!sbi_probe(SBI_EXT_DBCN)) {
		report_skip("DBCN extension unavailable");
		report_prefix_pop();
		return;
	}

	report_prefix_push("write");

	dbcn_write_test(DBCN_WRITE_TEST_STRING, num_bytes, false);

	assert(num_bytes < PAGE_SIZE);

	report_prefix_push("page boundary");
	buf = alloc_pages(1);
	memcpy(&buf[PAGE_SIZE - num_bytes / 2], DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(&buf[PAGE_SIZE - num_bytes / 2], num_bytes, false);
	report_prefix_pop();

	if (env_enabled("SBI_HIGHMEM_NOT_SUPPORTED"))
		highmem_supported = false;

	report_prefix_push("high boundary");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_BOUNDARY"))
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes,
				     HIGH_ADDR_BOUNDARY - PAGE_SIZE, PAGE_SIZE - num_bytes / 2,
				     highmem_supported);
	else
		report_skip("user disabled");
	report_prefix_pop();

	report_prefix_push("high page");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_PAGE")) {
		paddr = getenv("HIGH_PAGE") ? strtoull(getenv("HIGH_PAGE"), NULL, 0) : HIGH_ADDR_BOUNDARY;
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes, paddr, 0, highmem_supported);
	} else {
		report_skip("user disabled");
	}
	report_prefix_pop();

	/* Bytes are read from memory and written to the console */
	report_prefix_push("invalid parameter");
	if (get_invalid_addr(&paddr, false)) {
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		ret = sbi_dbcn_write(1, base_addr_lo, base_addr_hi);
		report(ret.error == SBI_ERR_INVALID_PARAM, "address (error=%ld)", ret.error);
	}
	report_prefix_popn(2);

	report_prefix_push("write_byte");

	puts("DBCN_WRITE_BYTE TEST BYTE: ");
	ret = sbi_dbcn_write_byte(DBCN_WRITE_BYTE_TEST_BYTE);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	puts("DBCN_WRITE_BYTE TEST WORD: "); /* still expect 'a' in the output */
	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, 0x64636261, 0, 0, 0, 0, 0);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	report_prefix_popn(2);
}

void sbi_susp_resume(unsigned long hartid, unsigned long opaque);
jmp_buf sbi_susp_jmp;

struct susp_params {
	unsigned long sleep_type;
	unsigned long resume_addr;
	unsigned long opaque;
	bool returns;
	struct sbiret ret;
};

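/*
 * System suspend requires all harts other than the calling hart to be
 * stopped, so stop them and wait for them to report the STOPPED state.
 */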
static bool susp_basic_prep(unsigned long ctx[], struct susp_params *params)
{
	int cpu, me = smp_processor_id();
	struct sbiret ret;
	cpumask_t mask;

	memset(params, 0, sizeof(*params));
	params->sleep_type = 0; /* suspend-to-ram */
	params->resume_addr = virt_to_phys(sbi_susp_resume);
	params->opaque = virt_to_phys(ctx);
	params->returns = false;

	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &mask);
	on_cpumask_async(&mask, stop_cpu, NULL);

	/* Wait up to 1s for all harts to stop */
	for (int i = 0; i < 100; i++) {
		int count = 1;

		udelay(10000);

		for_each_present_cpu(cpu) {
			if (cpu == me)
				continue;
			ret = sbi_hart_get_status(cpus[cpu].hartid);
			if (!ret.error && ret.value == SBI_EXT_HSM_STOPPED)
				++count;
		}
		if (count == cpumask_weight(&cpu_present_mask))
			break;
	}

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		if (cpu == me) {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STARTED,
				   "cpu%d is not started", cpu);
		} else {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STOPPED,
				   "cpu%d is not stopped", cpu);
		}
	}

	return true;
}

static void susp_basic_check(unsigned long ctx[], struct susp_params *params)
{
	if (ctx[SBI_SUSP_RESULTS_IDX] == SBI_SUSP_TEST_MASK) {
		report_pass("suspend and resume");
	} else {
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SATP))
			report_fail("SATP set to zero on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SIE))
			report_fail("sstatus.SIE clear on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_HARTID))
			report_fail("a0 is hartid on resume");
	}
}

static bool susp_type_prep(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = 1;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_PARAM;

	return true;
}

static bool susp_badaddr_prep(unsigned long ctx[], struct susp_params *params)
{
	phys_addr_t badaddr;
	bool r;

	if (!get_invalid_addr(&badaddr, false))
		return false;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->resume_addr = badaddr;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_ADDRESS;

	return true;
}

static bool susp_one_prep(unsigned long ctx[], struct susp_params *params)
{
	int started = 0, cpu, me = smp_processor_id();
	struct sbiret ret;
	bool r;

	if (cpumask_weight(&cpu_present_mask) < 2) {
		report_skip("At least 2 cpus required");
		return false;
	}

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->returns = true;
	params->ret.error = SBI_ERR_DENIED;

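	/* Pick the first present cpu that isn't this one and start it. */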
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		break;
	}

	on_cpu(cpu, start_cpu, NULL);

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		assert_msg(!ret.error, "HSM get status failed for cpu%d", cpu);
		if (ret.value == SBI_EXT_HSM_STARTED)
			started++;
	}

	assert(started == 2);

	return true;
}

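/*
 * The suspend call's opaque argument points at ctx. On resume, the resume
 * handler sbi_susp_resume() checks the magic, records per-test results in
 * ctx, and longjmps back through sbi_susp_jmp with the test number.
 */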
static void check_susp(void)
{
	unsigned long csrs[] = {
		[SBI_CSR_SSTATUS_IDX] = csr_read(CSR_SSTATUS),
		[SBI_CSR_SIE_IDX] = csr_read(CSR_SIE),
		[SBI_CSR_STVEC_IDX] = csr_read(CSR_STVEC),
		[SBI_CSR_SSCRATCH_IDX] = csr_read(CSR_SSCRATCH),
		[SBI_CSR_SATP_IDX] = csr_read(CSR_SATP),
	};
	unsigned long ctx[] = {
		[SBI_SUSP_MAGIC_IDX] = SBI_SUSP_MAGIC,
		[SBI_SUSP_CSRS_IDX] = (unsigned long)csrs,
		[SBI_SUSP_HARTID_IDX] = current_thread_info()->hartid,
		[SBI_SUSP_TESTNUM_IDX] = 0,
		[SBI_SUSP_RESULTS_IDX] = 0,
	};
	enum {
#define SUSP_FIRST_TESTNUM 1
		SUSP_BASIC = SUSP_FIRST_TESTNUM,
		SUSP_TYPE,
		SUSP_BAD_ADDR,
		SUSP_ONE_ONLINE,
		NR_SUSP_TESTS,
	};
	struct susp_test {
		const char *name;
		bool (*prep)(unsigned long ctx[], struct susp_params *params);
		void (*check)(unsigned long ctx[], struct susp_params *params);
	} susp_tests[] = {
		[SUSP_BASIC]		= { "basic",		susp_basic_prep,	susp_basic_check,	},
		[SUSP_TYPE]		= { "sleep_type",	susp_type_prep,					},
		[SUSP_BAD_ADDR]		= { "bad addr",		susp_badaddr_prep,				},
		[SUSP_ONE_ONLINE]	= { "one cpu online",	susp_one_prep,					},
	};
	struct susp_params params;
	struct sbiret ret;
	int testnum, i;

	local_irq_disable();
	timer_stop();

	report_prefix_push("susp");

	ret = sbi_ecall(SBI_EXT_SUSP, 1, 0, 0, 0, 0, 0, 0);
	report(ret.error == SBI_ERR_NOT_SUPPORTED, "funcid != 0 not supported");

	for (i = SUSP_FIRST_TESTNUM; i < NR_SUSP_TESTS; i++) {
		report_prefix_push(susp_tests[i].name);

		ctx[SBI_SUSP_TESTNUM_IDX] = i;
		ctx[SBI_SUSP_RESULTS_IDX] = 0;

		assert(susp_tests[i].prep);
		if (!susp_tests[i].prep(ctx, &params)) {
			report_prefix_pop();
			continue;
		}

		if ((testnum = setjmp(sbi_susp_jmp)) == 0) {
			ret = sbi_system_suspend(params.sleep_type, params.resume_addr, params.opaque);

			if (!params.returns && ret.error == SBI_ERR_NOT_SUPPORTED) {
				report_skip("SUSP not supported?");
				report_prefix_popn(2);
				return;
			} else if (!params.returns) {
				report_fail("unexpected return with error: %ld, value: %ld", ret.error, ret.value);
			} else {
				report(ret.error == params.ret.error, "expected sbi.error");
				if (ret.error != params.ret.error)
					report_info("expected error %ld, received %ld", params.ret.error, ret.error);
			}

			report_prefix_pop();
			continue;
		}
		assert(testnum == i);

		if (susp_tests[i].check)
			susp_tests[i].check(ctx, &params);

		report_prefix_pop();
	}

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	if (argc > 1 && !strcmp(argv[1], "-h")) {
		help();
		exit(0);
	}

	report_prefix_push("sbi");
	check_base();
	check_time();
	check_ipi();
	check_hsm();
	check_dbcn();
	check_susp();

	return report_summary();
}
1457