xref: /kvm-unit-tests/riscv/sbi.c (revision b9423a4fc5a9332150c48b8c8d28c81e44935660)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI verification
 *
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <cpumask.h>
#include <limits.h>
#include <memregions.h>
#include <on-cpus.h>
#include <rand.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <vmalloc.h>

#include <asm/barrier.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define	HIGH_ADDR_BOUNDARY	((phys_addr_t)1 << 32)

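/* check_fwft() is defined in a separate source file; it's only declared here so main() can call it. */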
void check_fwft(void);

static long __labs(long a)
{
	return __builtin_labs(a);
}

static void help(void)
{
	puts("Test SBI\n");
	puts("An environ must be provided where expected values are given.\n");
}

static struct sbiret sbi_base(int fid, unsigned long arg0)
{
	return sbi_ecall(SBI_EXT_BASE, fid, arg0, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write(unsigned long num_bytes, unsigned long base_addr_lo,
				    unsigned long base_addr_hi)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			 num_bytes, base_addr_lo, base_addr_hi, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write_byte(uint8_t byte)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_hart_suspend_raw(unsigned long suspend_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, resume_addr, opaque, 0, 0, 0);
}

static struct sbiret sbi_system_suspend_raw(unsigned long sleep_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
}

void sbi_bad_fid(int ext)
{
	struct sbiret ret = sbi_ecall(ext, 0xbad, 0, 0, 0, 0, 0, 0);
	sbiret_report_error(&ret, SBI_ERR_NOT_SUPPORTED, "Bad FID");
}

static void start_cpu(void *data)
{
	/* nothing to do */
}

static void stop_cpu(void *data)
{
	struct sbiret ret = sbi_hart_stop();
	assert_msg(0, "cpu%d (hartid = %lx) failed to stop with sbiret.error %ld",
		   smp_processor_id(), current_thread_info()->hartid, ret.error);
}

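/* Pick a random present cpu other than the calling one, retrying the random draw until a suitable candidate is found. */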
static int rand_online_cpu(prng_state *ps)
{
	int cpu, me = smp_processor_id();

	for (;;) {
		cpu = prng32(ps) % nr_cpus;
		cpu = cpumask_next(cpu - 1, &cpu_present_mask);
		if (cpu != nr_cpus && cpu != me && cpu_present(cpu))
			break;
	}

	return cpu;
}

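/* SBI expects 64-bit physical addresses split across two registers; the high word is only ever nonzero on RV32. */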
static void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo)
{
	*lo = (unsigned long)paddr;
	*hi = 0;
	if (__riscv_xlen == 32)
		*hi = (unsigned long)(paddr >> 32);
}

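/* Check that [start, start + size) lies entirely within a single unused memory region. */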
static bool check_addr(phys_addr_t start, phys_addr_t size)
{
	struct mem_region *r = memregions_find(start);
	return r && r->end - start >= size && r->flags == MR_F_UNUSED;
}

static phys_addr_t get_highest_addr(void)
{
	phys_addr_t highest_end = 0;
	struct mem_region *r;

	for (r = mem_regions; r->end; ++r) {
		if (r->end > highest_end)
			highest_end = r->end;
	}

	return highest_end - 1;
}

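/*
 * Pick an invalid physical address: one past the highest RAM address when
 * INVALID_ADDR_AUTO is set, the environ-provided INVALID_ADDR, or -1 as the
 * default when the caller allows it.
 */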
static bool get_invalid_addr(phys_addr_t *paddr, bool allow_default)
{
	if (env_enabled("INVALID_ADDR_AUTO")) {
		*paddr = get_highest_addr() + 1;
		return true;
	} else if (allow_default && !getenv("INVALID_ADDR")) {
		*paddr = -1ul;
		return true;
	} else if (env_or_skip("INVALID_ADDR")) {
		*paddr = strtoull(getenv("INVALID_ADDR"), NULL, 0);
		return true;
	}

	return false;
}

static void timer_setup(void (*handler)(struct pt_regs *))
{
	install_irq_handler(IRQ_S_TIMER, handler);
	timer_irq_enable();
}

static void timer_teardown(void)
{
	timer_irq_disable();
	timer_stop();
	install_irq_handler(IRQ_S_TIMER, NULL);
}

static void check_base(void)
{
	struct sbiret ret;
	long expected;

	report_prefix_push("base");

	sbi_bad_fid(SBI_EXT_BASE);

	ret = sbi_base(SBI_EXT_BASE_GET_SPEC_VERSION, 0);

	report_prefix_push("spec_version");
	if (env_or_skip("SBI_SPEC_VERSION")) {
		expected = (long)strtoul(getenv("SBI_SPEC_VERSION"), NULL, 0);
		assert_msg(!(expected & BIT(31)), "SBI spec version bit 31 must be zero");
		assert_msg(__riscv_xlen == 32 || !(expected >> 32), "SBI spec version bits greater than 31 must be zero");
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	ret.value &= 0x7ffffffful;

	if (ret.error || ret.value < 2) {
		report_skip("SBI spec version 0.2 or higher required");
		return;
	}

	report_prefix_push("impl_id");
	if (env_or_skip("SBI_IMPL_ID")) {
		expected = (long)strtoul(getenv("SBI_IMPL_ID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_ID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("impl_version");
	if (env_or_skip("SBI_IMPL_VERSION")) {
		expected = (long)strtoul(getenv("SBI_IMPL_VERSION"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_VERSION, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("probe_ext");
	expected = getenv("SBI_PROBE_EXT") ? (long)strtoul(getenv("SBI_PROBE_EXT"), NULL, 0) : 1;
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, SBI_EXT_BASE);
	sbiret_check(&ret, 0, expected);
	report_prefix_push("unavailable");
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, 0xb000000);
	sbiret_check(&ret, 0, 0);
	report_prefix_popn(2);

	report_prefix_push("mvendorid");
	if (env_or_skip("MVENDORID")) {
		expected = (long)strtoul(getenv("MVENDORID"), NULL, 0);
		assert(__riscv_xlen == 32 || !(expected >> 32));
		ret = sbi_base(SBI_EXT_BASE_GET_MVENDORID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("marchid");
	if (env_or_skip("MARCHID")) {
		expected = (long)strtoul(getenv("MARCHID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MARCHID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("mimpid");
	if (env_or_skip("MIMPID")) {
		expected = (long)strtoul(getenv("MIMPID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MIMPID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_popn(2);
}

struct timer_info {
	bool timer_works;
	bool mask_timer_irq;
	bool timer_irq_set;
	bool timer_irq_cleared;
	unsigned long timer_irq_count;
};

static struct timer_info timer_info;

static bool timer_irq_pending(void)
{
	return csr_read(CSR_SIP) & IP_TIP;
}

static void timer_irq_handler(struct pt_regs *regs)
{
	timer_info.timer_works = true;

	if (timer_info.timer_irq_count < ULONG_MAX)
		++timer_info.timer_irq_count;

	if (timer_irq_pending())
		timer_info.timer_irq_set = true;

	if (timer_info.mask_timer_irq)
		timer_irq_disable();
	else
		sbi_set_timer(ULONG_MAX);

	if (!timer_irq_pending())
		timer_info.timer_irq_cleared = true;
}

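/*
 * Program a timer 'd' cycles in the future and check that the interrupt
 * arrives within the allowed margin, that the pending bit is seen and
 * cleared as expected, and that exactly one interrupt is delivered.
 */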
static void timer_check_set_timer(bool mask_timer_irq)
{
	struct sbiret ret;
	unsigned long begin, end, duration;
	const char *mask_test_str = mask_timer_irq ? " for mask irq test" : "";
	unsigned long d = getenv("SBI_TIMER_DELAY") ? strtol(getenv("SBI_TIMER_DELAY"), NULL, 0) : 200000;
	unsigned long margin = getenv("SBI_TIMER_MARGIN") ? strtol(getenv("SBI_TIMER_MARGIN"), NULL, 0) : 200000;

	d = usec_to_cycles(d);
	margin = usec_to_cycles(margin);

	timer_info = (struct timer_info){ .mask_timer_irq = mask_timer_irq };
	begin = timer_get_cycles();
	ret = sbi_set_timer(begin + d);

	report(!ret.error, "set timer%s", mask_test_str);
	if (ret.error)
		report_info("set timer%s failed with %ld", mask_test_str, ret.error);

	while ((end = timer_get_cycles()) <= (begin + d + margin) && !timer_info.timer_works)
		cpu_relax();

	report(timer_info.timer_works, "timer interrupt received%s", mask_test_str);
	report(timer_info.timer_irq_set, "pending timer interrupt bit set in irq handler%s", mask_test_str);

	if (!mask_timer_irq) {
		report(timer_info.timer_irq_set && timer_info.timer_irq_cleared,
		       "pending timer interrupt bit cleared by setting timer to -1");
	}

	if (timer_info.timer_works) {
		duration = end - begin;
		report(duration >= d && duration <= (d + margin), "timer delay honored%s", mask_test_str);
	}

	report(timer_info.timer_irq_count == 1, "timer interrupt received exactly once%s", mask_test_str);
}

static void check_time(void)
{
	bool pending;

	report_prefix_push("time");

	if (!sbi_probe(SBI_EXT_TIME)) {
		report_skip("time extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_TIME);

	report_prefix_push("set_timer");

	install_irq_handler(IRQ_S_TIMER, timer_irq_handler);
	local_irq_enable();
	timer_irq_enable();

	timer_check_set_timer(false);

	if (csr_read(CSR_SIE) & IE_TIE)
		timer_check_set_timer(true);
	else
		report_skip("timer irq enable bit is not writable, skipping mask irq test");

	timer_irq_disable();
	sbi_set_timer(0);
	pending = timer_irq_pending();
	report(pending, "timer immediately pending by setting timer to 0");
	sbi_set_timer(ULONG_MAX);
	if (pending)
		report(!timer_irq_pending(), "pending timer cleared while masked");
	else
		report_skip("timer is not pending, skipping timer cleared while masked test");

	local_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);

	report_prefix_popn(2);
}

static bool ipi_received[NR_CPUS];
static bool ipi_timeout[NR_CPUS];
static cpumask_t ipi_done;

static void ipi_timeout_handler(struct pt_regs *regs)
{
	timer_stop();
	ipi_timeout[smp_processor_id()] = true;
}

static void ipi_irq_handler(struct pt_regs *regs)
{
	ipi_ack();
	ipi_received[smp_processor_id()] = true;
}

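/* Runs on each IPI target: enable software interrupts, spin until the IPI (or the timeout timer) fires, then mark this cpu in ipi_done. */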
static void ipi_hart_wait(void *data)
{
	unsigned long timeout = (unsigned long)data;
	int me = smp_processor_id();

	install_irq_handler(IRQ_S_SOFT, ipi_irq_handler);
	install_irq_handler(IRQ_S_TIMER, ipi_timeout_handler);
	local_ipi_enable();
	timer_irq_enable();
	local_irq_enable();

	timer_start(timeout);
	while (!READ_ONCE(ipi_received[me]) && !READ_ONCE(ipi_timeout[me]))
		cpu_relax();
	local_irq_disable();
	timer_stop();
	local_ipi_disable();
	timer_irq_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
	install_irq_handler(IRQ_S_TIMER, NULL);

	cpumask_set_cpu(me, &ipi_done);
}

static void ipi_hart_check(cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (ipi_timeout[cpu]) {
			const char *rec = ipi_received[cpu] ? "but was still received"
							    : "and has still not been received";
			report_fail("ipi timed out on cpu%d %s", cpu, rec);
		}

		ipi_timeout[cpu] = false;
		ipi_received[cpu] = false;
	}
}

static void check_ipi(void)
{
	unsigned long d = getenv("SBI_IPI_TIMEOUT") ? strtol(getenv("SBI_IPI_TIMEOUT"), NULL, 0) : 200000;
	int nr_cpus_present = cpumask_weight(&cpu_present_mask);
	int me = smp_processor_id();
	unsigned long max_hartid = 0;
	unsigned long hartid1, hartid2;
	cpumask_t ipi_receivers;
	static prng_state ps;
	struct sbiret ret;
	int cpu, cpu2;

	ps = prng_init(0xDEADBEEF);

	report_prefix_push("ipi");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("ipi extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_IPI);

	if (nr_cpus_present < 2) {
		report_skip("At least 2 cpus required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("random hart");
	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpu = rand_online_cpu(&ps);
	cpumask_set_cpu(cpu, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_cpu(cpu);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("two in hart_mask");

	if (nr_cpus_present < 3) {
		report_skip("3 cpus required");
		goto end_two;
	}

	cpu = rand_online_cpu(&ps);
	hartid1 = cpus[cpu].hartid;
	hartid2 = 0;
	for_each_present_cpu(cpu2) {
		if (cpu2 == cpu || cpu2 == me)
			continue;
		hartid2 = cpus[cpu2].hartid;
		if (__labs(hartid2 - hartid1) < BITS_PER_LONG)
			break;
	}
	if (cpu2 == nr_cpus) {
		report_skip("hartids are too sparse");
		goto end_two;
	}

	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpumask_set_cpu(cpu, &ipi_receivers);
	cpumask_set_cpu(cpu2, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	on_cpu_async(cpu2, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi((1UL << __labs(hartid2 - hartid1)) | 1UL, hartid1 < hartid2 ? hartid1 : hartid2);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
end_two:
	report_prefix_pop();

	report_prefix_push("broadcast");
	cpumask_clear(&ipi_done);
	cpumask_copy(&ipi_receivers, &cpu_present_mask);
	cpumask_clear_cpu(me, &ipi_receivers);
	on_cpumask_async(&ipi_receivers, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_broadcast();
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("invalid parameters");

	for_each_present_cpu(cpu) {
		if (cpus[cpu].hartid > max_hartid)
			max_hartid = cpus[cpu].hartid;
	}

	/* Test no targets */
	ret = sbi_send_ipi(0, 0);
	sbiret_report_error(&ret, SBI_SUCCESS, "no targets, hart_mask_base is 0");
	ret = sbi_send_ipi(0, 1);
	sbiret_report_error(&ret, SBI_SUCCESS, "no targets, hart_mask_base is 1");

	/* Try the next higher hartid than the max */
	ret = sbi_send_ipi(2, max_hartid);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask got expected error (%ld)", ret.error);
	ret = sbi_send_ipi(1, max_hartid + 1);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask_base got expected error (%ld)", ret.error);

	report_prefix_pop();

	report_prefix_pop();
}

unsigned char sbi_hsm_stop_hart[NR_CPUS];
unsigned char sbi_hsm_hart_start_checks[NR_CPUS];
unsigned char sbi_hsm_non_retentive_hart_suspend_checks[NR_CPUS];

static const char * const hart_state_str[] = {
	[SBI_EXT_HSM_STARTED] = "started",
	[SBI_EXT_HSM_STOPPED] = "stopped",
	[SBI_EXT_HSM_SUSPENDED] = "suspended",
};
struct hart_state_transition_info {
	enum sbi_ext_hsm_sid initial_state;
	enum sbi_ext_hsm_sid intermediate_state;
	enum sbi_ext_hsm_sid final_state;
};
static cpumask_t sbi_hsm_started_hart_checks;
static bool sbi_hsm_invalid_hartid_check;
static bool sbi_hsm_timer_fired;
extern void sbi_hsm_check_hart_start(void);
extern void sbi_hsm_check_non_retentive_suspend(void);

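/* Shared one-shot timeout for the HSM polling loops below; it sets sbi_hsm_timer_fired so the pollers know to give up. */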
static void hsm_timer_irq_handler(struct pt_regs *regs)
{
	timer_stop();
	sbi_hsm_timer_fired = true;
}

static void hart_check_already_started(void *data)
{
	struct sbiret ret;
	unsigned long hartid = current_thread_info()->hartid;
	int me = smp_processor_id();

	ret = sbi_hart_start(hartid, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_ALREADY_AVAILABLE)
		cpumask_set_cpu(me, &sbi_hsm_started_hart_checks);
}

static void hart_start_invalid_hartid(void *data)
{
	struct sbiret ret;

	ret = sbi_hart_start(-1UL, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_INVALID_PARAM)
		sbi_hsm_invalid_hartid_check = true;
}

static cpumask_t hsm_suspend_not_supported;

static void ipi_nop(struct pt_regs *regs)
{
	ipi_ack();
}

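/*
 * Suspend the calling hart with the given suspend_type and wait to be woken
 * by an IPI. 'returns' states whether the suspend call itself is expected to
 * return (retentive) or not (non-retentive).
 */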
static void hart_suspend_and_wait_ipi(unsigned long suspend_type, unsigned long resume_addr,
				      unsigned long opaque, bool returns, const char *typestr)
{
	unsigned long hartid = current_thread_info()->hartid;
	struct sbiret ret;

	install_irq_handler(IRQ_S_SOFT, ipi_nop);
	local_ipi_enable();
	local_irq_enable();

	ret = sbi_hart_suspend_raw(suspend_type, resume_addr, opaque);
	if (ret.error == SBI_ERR_NOT_SUPPORTED)
		cpumask_set_cpu(smp_processor_id(), &hsm_suspend_not_supported);
	else if (ret.error)
		report_fail("failed to %s cpu%d (hartid = %lx) (error=%ld)",
			    typestr, smp_processor_id(), hartid, ret.error);
	else if (!returns)
		report_fail("failed to %s cpu%d (hartid = %lx) (call should not return)",
			    typestr, smp_processor_id(), hartid);

	local_irq_disable();
	local_ipi_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
}

static void hart_retentive_suspend(void *data)
{
	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_RETENTIVE, 0, 0, true, "retentive suspend");
}

static void hart_non_retentive_suspend(void *data)
{
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend");
}

/* This test function is only run on RV64 to verify that upper bits of suspend_type are ignored */
static void hart_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));

	hart_suspend_and_wait_ipi(suspend_type, 0, 0, true, "retentive suspend with MSB set");
}

/* This test function is only run on RV64 to verify that upper bits of suspend_type are ignored */
static void hart_non_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(suspend_type,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend with MSB set");
}

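/* Poll until the hart leaves 'status', an error occurs, or the timer fires. Returns true only if the status changed in time. */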
static bool hart_wait_on_status(unsigned long hartid, enum sbi_ext_hsm_sid status, unsigned long duration)
{
	struct sbiret ret;

	sbi_hsm_timer_fired = false;
	timer_start(duration);

	ret = sbi_hart_get_status(hartid);

	while (!ret.error && ret.value == status && !sbi_hsm_timer_fired) {
		cpu_relax();
		ret = sbi_hart_get_status(hartid);
	}

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("timer fired while waiting on status %u for hartid %lx", status, hartid);
	else if (ret.error)
		report_fail("got %ld while waiting on status %u for hartid %lx", ret.error, status, hartid);

	return !sbi_hsm_timer_fired && !ret.error;
}

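/*
 * For each hart in 'mask', wait out the initial and intermediate states and
 * then verify the final state was reached. Returns the number of harts that
 * completed the transition.
 */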
static int hart_wait_state_transition(cpumask_t *mask, unsigned long duration,
				      struct hart_state_transition_info *states)
{
	struct sbiret ret;
	unsigned long hartid;
	int cpu, count = 0;

	for_each_cpu(cpu, mask) {
		hartid = cpus[cpu].hartid;
		if (!hart_wait_on_status(hartid, states->initial_state, duration))
			continue;
		if (!hart_wait_on_status(hartid, states->intermediate_state, duration))
			continue;

		ret = sbi_hart_get_status(hartid);
		if (ret.error)
			report_info("hartid %lx get status failed (error=%ld)", hartid, ret.error);
		else if (ret.value != states->final_state)
			report_info("hartid %lx status is not '%s' (ret.value=%ld)", hartid,
				    hart_state_str[states->final_state], ret.value);
		else
			count++;
	}

	return count;
}

static void hart_wait_until_idle(cpumask_t *mask, unsigned long duration)
{
	sbi_hsm_timer_fired = false;
	timer_start(duration);

	while (!cpumask_subset(mask, &cpu_idle_mask) && !sbi_hsm_timer_fired)
		cpu_relax();

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("hsm timer fired before all cpus became idle");
}

static void check_hsm(void)
{
	struct sbiret ret;
	unsigned long hartid;
	cpumask_t secondary_cpus_mask, mask, resume_mask;
	struct hart_state_transition_info transition_states;
	bool ipi_unavailable = false;
	int cpu, me = smp_processor_id();
	int max_cpus = getenv("SBI_MAX_CPUS") ? strtol(getenv("SBI_MAX_CPUS"), NULL, 0) : nr_cpus;
	unsigned long hsm_timer_duration = getenv("SBI_HSM_TIMER_DURATION")
					 ? strtol(getenv("SBI_HSM_TIMER_DURATION"), NULL, 0) : 200000;
	unsigned long sbi_hsm_hart_start_params[NR_CPUS * SBI_HSM_NUM_OF_PARAMS];
	int count, check, expected_count, resume_count;

	max_cpus = MIN(MIN(max_cpus, nr_cpus), cpumask_weight(&cpu_present_mask));

	report_prefix_push("hsm");

	if (!sbi_probe(SBI_EXT_HSM)) {
		report_skip("hsm extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_HSM);

	report_prefix_push("hart_get_status");

	hartid = current_thread_info()->hartid;
	ret = sbi_hart_get_status(hartid);

	if (ret.error) {
		report_fail("failed to get status of current hart (error=%ld)", ret.error);
		report_prefix_popn(2);
		return;
	} else if (ret.value != SBI_EXT_HSM_STARTED) {
		report_fail("current hart is not started (ret.value=%ld)", ret.value);
		report_prefix_popn(2);
		return;
	}

	report_pass("status of current hart is started");

	report_prefix_pop();

	if (max_cpus < 2) {
		report_skip("no other cpus to run the remaining hsm tests on");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_stop");

	cpumask_copy(&secondary_cpus_mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &secondary_cpus_mask);
	timer_setup(hsm_timer_irq_handler);
	local_irq_enable();

	/* Assume that previous tests have not cleaned up and stopped the secondary harts */
	on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	report_prefix_push("hart_start");

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_HARTID_IDX] = hartid;

		ret = sbi_hart_start(hartid, virt_to_phys(&sbi_hsm_check_hart_start),
				     virt_to_phys(&sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS]));
		if (ret.error) {
			report_fail("failed to start test on cpu%d (hartid = %lx) (error=%ld)", cpu, hartid, ret.error);
			continue;
		}
	}

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;	/* refresh hartid for the report messages below */
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_hart_start_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with start checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts started");
	report(check == max_cpus - 1, "all secondary harts have expected register values after hart start");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	/* Reset the stop flags so that we can reuse them after suspension tests */
	memset(sbi_hsm_stop_hart, 0, sizeof(sbi_hsm_stop_hart));

	report_prefix_pop();

	report_prefix_push("hart_start");

	/* Select just one secondary cpu to run the invalid hartid test */
	on_cpu(cpumask_next(-1, &secondary_cpus_mask), hart_start_invalid_hartid, NULL);

	report(sbi_hsm_invalid_hartid_check, "secondary hart refuses to start with invalid hartid");

	on_cpumask_async(&secondary_cpus_mask, hart_check_already_started, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts started");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	report(cpumask_weight(&sbi_hsm_started_hart_checks) == max_cpus - 1,
	       "all secondary harts are already started");

	report_prefix_pop();

	report_prefix_push("hart_suspend");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("skipping suspension tests since ipi extension is unavailable");
		report_prefix_pop();
		ipi_unavailable = true;
		goto sbi_hsm_hart_stop_tests;
	}

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support retentive suspend");
		report(count == expected_count, "supporting secondary harts retentive suspended");
	} else {
		report_skip("retentive suspend not supported by any harts");
		goto nonret_suspend_tests;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);

	report(count == resume_count, "supporting secondary harts retentive resumed");

nonret_suspend_tests:
	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_non_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support non-retentive suspend");
		report(count == expected_count, "supporting secondary harts non-retentive suspended");
	} else {
		report_skip("non-retentive suspend not supported by any harts");
		goto hsm_suspend_tests_done;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &resume_mask) {
		hartid = cpus[cpu].hartid;	/* refresh hartid for the report messages below */
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == resume_count, "supporting secondary harts non-retentive resumed");
	report(check == resume_count, "supporting secondary harts have expected register values after non-retentive resume");

hsm_suspend_tests_done:
	report_prefix_pop();

sbi_hsm_hart_stop_tests:
	report_prefix_push("hart_stop");

	if (ipi_unavailable || expected_count == 0)
		on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
	else
		memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	if (__riscv_xlen == 32 || ipi_unavailable) {
		local_irq_disable();
		timer_teardown();
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_suspend");

	/* Select just one secondary cpu to run suspension tests with MSB of suspend type being set */
	cpu = cpumask_next(-1, &secondary_cpus_mask);
	hartid = cpus[cpu].hartid;
	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);

	/* Boot up the secondary cpu and let it proceed to the idle loop */
	on_cpu(cpu, start_cpu, NULL);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "retentive suspend with MSB set");
	} else {
		report_skip("retentive suspend not supported by cpu%d", cpu);
		goto nonret_suspend_with_msb;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive resumed with MSB set");

nonret_suspend_with_msb:
	/* Reset these flags so that we can reuse them for the non-retentive suspension test */
	sbi_hsm_stop_hart[cpu] = 0;
	sbi_hsm_non_retentive_hart_suspend_checks[cpu] = 0;

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_non_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "non-retentive suspend with MSB set");
	} else {
		report_skip("non-retentive suspend not supported by cpu%d", cpu);
		goto hsm_hart_stop_test;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
	check = 0;

	if (count) {
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
		} else {
			if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
				report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
				report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
				report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
				report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
			else
				check = 1;
		}
	}

	report(count, "secondary hart non-retentive resumed with MSB set");
	report(check, "secondary hart has expected register values after non-retentive resume with MSB set");

hsm_hart_stop_test:
	report_prefix_pop();

	report_prefix_push("hart_stop");

	if (expected_count == 0)
		on_cpu_async(cpu, stop_cpu, NULL);
	else
		sbi_hsm_stop_hart[cpu] = 1;

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart stopped after suspension tests with MSB set");

	local_irq_disable();
	timer_teardown();
	report_prefix_popn(2);
}

#define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
#define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')

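/* A single DBCN write may consume fewer bytes than requested, so keep calling with the remaining buffer until everything is written or an error is returned. */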
static void dbcn_write_test(const char *s, unsigned long num_bytes, bool xfail)
{
	unsigned long base_addr_lo, base_addr_hi;
	phys_addr_t paddr = virt_to_phys((void *)s);
	int num_calls = 0;
	struct sbiret ret;

	split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);

	do {
		ret = sbi_dbcn_write(num_bytes, base_addr_lo, base_addr_hi);
		num_bytes -= ret.value;
		paddr += ret.value;
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		num_calls++;
	} while (num_bytes != 0 && ret.error == SBI_SUCCESS);

	report_xfail(xfail, ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report_info("%d sbi calls made", num_calls);
}

static void dbcn_high_write_test(const char *s, unsigned long num_bytes,
				 phys_addr_t page_addr, size_t page_offset,
				 bool highmem_supported)
{
	int nr_pages = page_offset ? 2 : 1;
	void *vaddr;

	if (page_addr != PAGE_ALIGN(page_addr) || page_addr + PAGE_SIZE < HIGH_ADDR_BOUNDARY ||
	    !check_addr(page_addr, nr_pages * PAGE_SIZE)) {
		report_skip("Memory above 4G required");
		return;
	}

	vaddr = alloc_vpages(nr_pages);

	for (int i = 0; i < nr_pages; ++i)
		install_page(current_pgtable(), page_addr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE);
	memcpy(vaddr + page_offset, DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(vaddr + page_offset, num_bytes, !highmem_supported);
}

/*
 * Only the write functionality is tested here. There's no easy way to
 * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
 */
static void check_dbcn(void)
{
	unsigned long num_bytes = strlen(DBCN_WRITE_TEST_STRING);
	unsigned long base_addr_lo, base_addr_hi;
	bool highmem_supported = true;
	phys_addr_t paddr;
	struct sbiret ret;
	char *buf;

	report_prefix_push("dbcn");

	if (!sbi_probe(SBI_EXT_DBCN)) {
		report_skip("DBCN extension unavailable");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_DBCN);

	report_prefix_push("write");

	dbcn_write_test(DBCN_WRITE_TEST_STRING, num_bytes, false);

	assert(num_bytes < PAGE_SIZE);

	report_prefix_push("page boundary");
	buf = alloc_pages(1);
	memcpy(&buf[PAGE_SIZE - num_bytes / 2], DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(&buf[PAGE_SIZE - num_bytes / 2], num_bytes, false);
	report_prefix_pop();

	if (env_enabled("SBI_HIGHMEM_NOT_SUPPORTED"))
		highmem_supported = false;

	report_prefix_push("high boundary");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_BOUNDARY"))
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes,
				     HIGH_ADDR_BOUNDARY - PAGE_SIZE, PAGE_SIZE - num_bytes / 2,
				     highmem_supported);
	else
		report_skip("user disabled");
	report_prefix_pop();

	report_prefix_push("high page");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_PAGE")) {
		paddr = getenv("HIGH_PAGE") ? strtoull(getenv("HIGH_PAGE"), NULL, 0) : HIGH_ADDR_BOUNDARY;
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes, paddr, 0, highmem_supported);
	} else {
		report_skip("user disabled");
	}
	report_prefix_pop();

	/* Bytes are read from memory and written to the console */
	report_prefix_push("invalid parameter");
	if (get_invalid_addr(&paddr, false)) {
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		ret = sbi_dbcn_write(1, base_addr_lo, base_addr_hi);
		report(ret.error == SBI_ERR_INVALID_PARAM, "address (error=%ld)", ret.error);
	}
	report_prefix_popn(2);
	report_prefix_push("write_byte");

	puts("DBCN_WRITE_BYTE TEST BYTE: ");
	ret = sbi_dbcn_write_byte(DBCN_WRITE_BYTE_TEST_BYTE);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	puts("DBCN_WRITE_BYTE TEST WORD: "); /* still expect 'a' in the output */
	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, 0x64636261, 0, 0, 0, 0, 0);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	report_prefix_popn(2);
}

void sbi_susp_resume(unsigned long hartid, unsigned long opaque);
jmp_buf sbi_susp_jmp;

#define SBI_SUSP_TIMER_DURATION_US 500000
static void susp_timer(struct pt_regs *regs)
{
	timer_start(SBI_SUSP_TIMER_DURATION_US);
}

struct susp_params {
	unsigned long sleep_type;
	unsigned long resume_addr;
	unsigned long opaque;
	bool returns;
	struct sbiret ret;
};

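/*
 * Common preparation for the system suspend tests: snapshot the CSRs the
 * resume path will check, point resume at sbi_susp_resume(), and stop all
 * secondary harts, since SUSP requires every other hart to be stopped.
 */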
static bool susp_basic_prep(unsigned long ctx[], struct susp_params *params)
{
	int cpu, me = smp_processor_id();
	unsigned long *csrs;
	struct sbiret ret;
	cpumask_t mask;

	csrs = (unsigned long *)ctx[SBI_SUSP_CSRS_IDX];
	csrs[SBI_CSR_SSTATUS_IDX] = csr_read(CSR_SSTATUS);
	csrs[SBI_CSR_SIE_IDX] = csr_read(CSR_SIE);
	csrs[SBI_CSR_STVEC_IDX] = csr_read(CSR_STVEC);
	csrs[SBI_CSR_SSCRATCH_IDX] = csr_read(CSR_SSCRATCH);
	csrs[SBI_CSR_SATP_IDX] = csr_read(CSR_SATP);

	memset(params, 0, sizeof(*params));
	params->sleep_type = 0; /* suspend-to-ram */
	params->resume_addr = virt_to_phys(sbi_susp_resume);
	params->opaque = virt_to_phys(ctx);
	params->returns = false;

	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &mask);
	on_cpumask_async(&mask, stop_cpu, NULL);

	/* Wait up to 1s for all harts to stop */
	for (int i = 0; i < 100; i++) {
		int count = 1;

		udelay(10000);

		for_each_present_cpu(cpu) {
			if (cpu == me)
				continue;
			ret = sbi_hart_get_status(cpus[cpu].hartid);
			if (!ret.error && ret.value == SBI_EXT_HSM_STOPPED)
				++count;
		}
		if (count == cpumask_weight(&cpu_present_mask))
			break;
	}

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		if (cpu == me) {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STARTED,
				   "cpu%d is not started", cpu);
		} else {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STOPPED,
				   "cpu%d is not stopped", cpu);
		}
	}

	return true;
}

static void susp_basic_check(unsigned long ctx[], struct susp_params *params)
{
	if (ctx[SBI_SUSP_RESULTS_IDX] == SBI_SUSP_TEST_MASK) {
		report_pass("suspend and resume");
	} else {
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SATP))
			report_fail("SATP set to zero on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SIE))
			report_fail("sstatus.SIE clear on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_HARTID))
			report_fail("a0 is hartid on resume");
	}
}

static bool susp_type_prep(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = 1;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_PARAM;

	return true;
}

#if __riscv_xlen != 32
static bool susp_type_prep2(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = BIT(32);

	return true;
}
#endif

static bool susp_badaddr_prep(unsigned long ctx[], struct susp_params *params)
{
	phys_addr_t badaddr;
	bool r;

	if (!get_invalid_addr(&badaddr, false))
		return false;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->resume_addr = badaddr;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_ADDRESS;

	return true;
}

static bool susp_one_prep(unsigned long ctx[], struct susp_params *params)
{
	int started = 0, cpu, me = smp_processor_id();
	struct sbiret ret;
	bool r;

	if (cpumask_weight(&cpu_present_mask) < 2) {
		report_skip("At least 2 cpus required");
		return false;
	}

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->returns = true;
	params->ret.error = SBI_ERR_DENIED;

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		break;
	}

	on_cpu(cpu, start_cpu, NULL);

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		assert_msg(!ret.error, "HSM get status failed for cpu%d", cpu);
		if (ret.value == SBI_EXT_HSM_STARTED)
			started++;
	}

	assert(started == 2);

	return true;
}

static void check_susp(void)
{
	unsigned long csrs[SBI_CSR_NR_IDX];
	unsigned long ctx[SBI_SUSP_NR_IDX] = {
		[SBI_SUSP_MAGIC_IDX] = SBI_SUSP_MAGIC,
		[SBI_SUSP_CSRS_IDX] = (unsigned long)csrs,
		[SBI_SUSP_HARTID_IDX] = current_thread_info()->hartid,
	};
	enum {
#define SUSP_FIRST_TESTNUM 1
		SUSP_BASIC = SUSP_FIRST_TESTNUM,
		SUSP_TYPE,
		SUSP_TYPE2,
		SUSP_BAD_ADDR,
		SUSP_ONE_ONLINE,
		NR_SUSP_TESTS,
	};
	struct susp_test {
		const char *name;
		bool (*prep)(unsigned long ctx[], struct susp_params *params);
		void (*check)(unsigned long ctx[], struct susp_params *params);
	} susp_tests[] = {
		[SUSP_BASIC]		= { "basic",			susp_basic_prep,	susp_basic_check,	},
		[SUSP_TYPE]		= { "sleep_type",		susp_type_prep,					},
#if __riscv_xlen != 32
		[SUSP_TYPE2]		= { "sleep_type upper bits",	susp_type_prep2,	susp_basic_check	},
#endif
		[SUSP_BAD_ADDR]		= { "bad addr",			susp_badaddr_prep,				},
		[SUSP_ONE_ONLINE]	= { "one cpu online",		susp_one_prep,					},
	};
	struct susp_params params;
	struct sbiret ret;
	int testnum, i;

	report_prefix_push("susp");

	if (!sbi_probe(SBI_EXT_SUSP)) {
		report_skip("SUSP extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_SUSP);

	timer_setup(susp_timer);
	local_irq_enable();
	timer_start(SBI_SUSP_TIMER_DURATION_US);

	ret = sbi_ecall(SBI_EXT_SUSP, 1, 0, 0, 0, 0, 0, 0);
	report(ret.error == SBI_ERR_NOT_SUPPORTED, "funcid != 0 not supported");

	for (i = SUSP_FIRST_TESTNUM; i < NR_SUSP_TESTS; i++) {
		if (!susp_tests[i].name)
			continue;

		report_prefix_push(susp_tests[i].name);

		ctx[SBI_SUSP_TESTNUM_IDX] = i;
		ctx[SBI_SUSP_RESULTS_IDX] = 0;

		local_irq_disable();

		assert(susp_tests[i].prep);
		if (!susp_tests[i].prep(ctx, &params)) {
			report_prefix_pop();
			continue;
		}

		if ((testnum = setjmp(sbi_susp_jmp)) == 0) {
			ret = sbi_system_suspend_raw(params.sleep_type, params.resume_addr, params.opaque);

			local_irq_enable();

			if (!params.returns && ret.error == SBI_ERR_NOT_SUPPORTED) {
				report_fail("probing claims support, but it's not?");
				report_prefix_pop();
				goto out;
			} else if (!params.returns) {
				report_fail("unexpected return with error: %ld, value: %ld", ret.error, ret.value);
			} else {
				if (!report(ret.error == params.ret.error, "got expected sbi.error (%ld)", params.ret.error))
					report_info("expected sbi.error %ld, received %ld", params.ret.error, ret.error);
			}

			report_prefix_pop();
			continue;
		}
		assert(testnum == i);

		local_irq_enable();

		if (susp_tests[i].check)
			susp_tests[i].check(ctx, &params);

		report_prefix_pop();
	}

out:
	local_irq_disable();
	timer_teardown();

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	if (argc > 1 && !strcmp(argv[1], "-h")) {
		help();
		exit(0);
	}

	report_prefix_push("sbi");
	check_base();
	check_time();
	check_ipi();
	check_hsm();
	check_dbcn();
	check_susp();
	check_fwft();

	return report_summary();
}