// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI verification
 *
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <cpumask.h>
#include <limits.h>
#include <memregions.h>
#include <on-cpus.h>
#include <rand.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <vmalloc.h>

#include <asm/barrier.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define	HIGH_ADDR_BOUNDARY	((phys_addr_t)1 << 32)

void check_fwft(void);

static long __labs(long a)
{
	return __builtin_labs(a);
}

static void help(void)
{
	puts("Test SBI\n");
	puts("An environ providing the expected values must be supplied.\n");
}

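/*
 * Wrapper for base extension (EID 0x10) calls; every base extension
 * function takes at most one argument.
 */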
static struct sbiret sbi_base(int fid, unsigned long arg0)
{
	return sbi_ecall(SBI_EXT_BASE, fid, arg0, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write(unsigned long num_bytes, unsigned long base_addr_lo,
				    unsigned long base_addr_hi)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			 num_bytes, base_addr_lo, base_addr_hi, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write_byte(uint8_t byte)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_hart_suspend_raw(unsigned long suspend_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, resume_addr, opaque, 0, 0, 0);
}

static struct sbiret sbi_system_suspend_raw(unsigned long sleep_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
}

void sbi_bad_fid(int ext)
{
	struct sbiret ret = sbi_ecall(ext, 0xbad, 0, 0, 0, 0, 0, 0);
	sbiret_report_error(&ret, SBI_ERR_NOT_SUPPORTED, "Bad FID");
}

static void start_cpu(void *data)
{
	/* nothing to do */
}

static void stop_cpu(void *data)
{
	struct sbiret ret = sbi_hart_stop();
	assert_msg(0, "cpu%d (hartid = %lx) failed to stop with sbiret.error %ld",
		   smp_processor_id(), current_thread_info()->hartid, ret.error);
}

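/* Pick a random present cpu, excluding the calling cpu. */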
static int rand_online_cpu(prng_state *ps)
{
	int cpu, me = smp_processor_id();

	for (;;) {
		cpu = prng32(ps) % nr_cpus;
		cpu = cpumask_next(cpu - 1, &cpu_present_mask);
		if (cpu != nr_cpus && cpu != me && cpu_present(cpu))
			break;
	}

	return cpu;
}

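/*
 * Split a physical address into the (lo, hi) argument pair used by SBI
 * calls. 'hi' is only ever nonzero on RV32, where physical addresses can
 * be wider than XLEN.
 */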
static void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo)
{
	*lo = (unsigned long)paddr;
	*hi = 0;
	if (__riscv_xlen == 32)
		*hi = (unsigned long)(paddr >> 32);
}

static bool check_addr(phys_addr_t start, phys_addr_t size)
{
	struct mem_region *r = memregions_find(start);

	return r && r->end - start >= size && r->flags == MR_F_UNUSED;
}

static phys_addr_t get_highest_addr(void)
{
	phys_addr_t highest_end = 0;
	struct mem_region *r;

	for (r = mem_regions; r->end; ++r) {
		if (r->end > highest_end)
			highest_end = r->end;
	}

	return highest_end - 1;
}

static bool env_enabled(const char *env)
{
	char *s = getenv(env);

	return s && (*s == '1' || *s == 'y' || *s == 'Y');
}

static bool env_or_skip(const char *env)
{
	if (!getenv(env)) {
		report_skip("missing %s environment variable", env);
		return false;
	}

	return true;
}

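/*
 * Select an invalid physical address to test with: one past the highest
 * memory region when INVALID_ADDR_AUTO=1, an explicit INVALID_ADDR from
 * the environ, or, when the caller allows it, the default of -1.
 */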
static bool get_invalid_addr(phys_addr_t *paddr, bool allow_default)
{
	if (env_enabled("INVALID_ADDR_AUTO")) {
		*paddr = get_highest_addr() + 1;
		return true;
	} else if (allow_default && !getenv("INVALID_ADDR")) {
		*paddr = -1ul;
		return true;
	} else if (env_or_skip("INVALID_ADDR")) {
		*paddr = strtoull(getenv("INVALID_ADDR"), NULL, 0);
		return true;
	}

	return false;
}

static void timer_setup(void (*handler)(struct pt_regs *))
{
	install_irq_handler(IRQ_S_TIMER, handler);
	timer_irq_enable();
}

static void timer_teardown(void)
{
	timer_irq_disable();
	timer_stop();
	install_irq_handler(IRQ_S_TIMER, NULL);
}

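/*
 * Check the base extension by comparing the spec version, implementation
 * id and version, and the machine CSR values against expectations from
 * the environ, and by probing one available and one unavailable extension.
 */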
static void check_base(void)
{
	struct sbiret ret;
	long expected;

	report_prefix_push("base");

	sbi_bad_fid(SBI_EXT_BASE);

	ret = sbi_base(SBI_EXT_BASE_GET_SPEC_VERSION, 0);

	report_prefix_push("spec_version");
	if (env_or_skip("SBI_SPEC_VERSION")) {
		expected = (long)strtoul(getenv("SBI_SPEC_VERSION"), NULL, 0);
		assert_msg(!(expected & BIT(31)), "SBI spec version bit 31 must be zero");
		assert_msg(__riscv_xlen == 32 || !(expected >> 32), "SBI spec version bits greater than 31 must be zero");
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	ret.value &= 0x7ffffffful;

	if (ret.error || ret.value < 2) {
		report_skip("SBI spec version 0.2 or higher required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("impl_id");
	if (env_or_skip("SBI_IMPL_ID")) {
		expected = (long)strtoul(getenv("SBI_IMPL_ID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_ID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("impl_version");
	if (env_or_skip("SBI_IMPL_VERSION")) {
		expected = (long)strtoul(getenv("SBI_IMPL_VERSION"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_VERSION, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("probe_ext");
	expected = getenv("SBI_PROBE_EXT") ? (long)strtoul(getenv("SBI_PROBE_EXT"), NULL, 0) : 1;
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, SBI_EXT_BASE);
	sbiret_check(&ret, 0, expected);
	report_prefix_push("unavailable");
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, 0xb000000);
	sbiret_check(&ret, 0, 0);
	report_prefix_popn(2);

	report_prefix_push("mvendorid");
	if (env_or_skip("MVENDORID")) {
		expected = (long)strtoul(getenv("MVENDORID"), NULL, 0);
		assert(__riscv_xlen == 32 || !(expected >> 32));
		ret = sbi_base(SBI_EXT_BASE_GET_MVENDORID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("marchid");
	if (env_or_skip("MARCHID")) {
		expected = (long)strtoul(getenv("MARCHID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MARCHID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("mimpid");
	if (env_or_skip("MIMPID")) {
		expected = (long)strtoul(getenv("MIMPID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MIMPID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_popn(2);
}

struct timer_info {
	bool timer_works;
	bool mask_timer_irq;
	bool timer_irq_set;
	bool timer_irq_cleared;
	unsigned long timer_irq_count;
};

static struct timer_info timer_info;

static bool timer_irq_pending(void)
{
	return csr_read(CSR_SIP) & IP_TIP;
}

static void timer_irq_handler(struct pt_regs *regs)
{
	timer_info.timer_works = true;

	if (timer_info.timer_irq_count < ULONG_MAX)
		++timer_info.timer_irq_count;

	if (timer_irq_pending())
		timer_info.timer_irq_set = true;

	if (timer_info.mask_timer_irq)
		timer_irq_disable();
	else
		sbi_set_timer(ULONG_MAX);

	if (!timer_irq_pending())
		timer_info.timer_irq_cleared = true;
}

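/*
 * Program the timer SBI_TIMER_DELAY microseconds into the future, then
 * poll for up to SBI_TIMER_MARGIN microseconds more (both default to
 * 200000), checking that exactly one interrupt arrives, that sip.STIP is
 * set on entry to the handler and cleared again, and that the programmed
 * delay is honored.
 */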
static void timer_check_set_timer(bool mask_timer_irq)
{
	struct sbiret ret;
	unsigned long begin, end, duration;
	const char *mask_test_str = mask_timer_irq ? " for mask irq test" : "";
	unsigned long d = getenv("SBI_TIMER_DELAY") ? strtol(getenv("SBI_TIMER_DELAY"), NULL, 0) : 200000;
	unsigned long margin = getenv("SBI_TIMER_MARGIN") ? strtol(getenv("SBI_TIMER_MARGIN"), NULL, 0) : 200000;

	d = usec_to_cycles(d);
	margin = usec_to_cycles(margin);

	timer_info = (struct timer_info){ .mask_timer_irq = mask_timer_irq };
	begin = timer_get_cycles();
	ret = sbi_set_timer(begin + d);

	report(!ret.error, "set timer%s", mask_test_str);
	if (ret.error)
		report_info("set timer%s failed with %ld", mask_test_str, ret.error);

	while ((end = timer_get_cycles()) <= (begin + d + margin) && !timer_info.timer_works)
		cpu_relax();

	report(timer_info.timer_works, "timer interrupt received%s", mask_test_str);
	report(timer_info.timer_irq_set, "pending timer interrupt bit set in irq handler%s", mask_test_str);

	if (!mask_timer_irq) {
		report(timer_info.timer_irq_set && timer_info.timer_irq_cleared,
		       "pending timer interrupt bit cleared by setting timer to -1");
	}

	if (timer_info.timer_works) {
		duration = end - begin;
		report(duration >= d && duration <= (d + margin), "timer delay honored%s", mask_test_str);
	}

	report(timer_info.timer_irq_count == 1, "timer interrupt received exactly once%s", mask_test_str);
}

static void check_time(void)
{
	bool pending;

	report_prefix_push("time");

	if (!sbi_probe(SBI_EXT_TIME)) {
		report_skip("time extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_TIME);

	report_prefix_push("set_timer");

	install_irq_handler(IRQ_S_TIMER, timer_irq_handler);
	local_irq_enable();
	timer_irq_enable();

	timer_check_set_timer(false);

	if (csr_read(CSR_SIE) & IE_TIE)
		timer_check_set_timer(true);
	else
		report_skip("timer irq enable bit is not writable, skipping mask irq test");

	timer_irq_disable();
	sbi_set_timer(0);
	pending = timer_irq_pending();
	report(pending, "timer immediately pending by setting timer to 0");
	sbi_set_timer(ULONG_MAX);
	if (pending)
		report(!timer_irq_pending(), "pending timer cleared while masked");
	else
		report_skip("timer is not pending, skipping timer cleared while masked test");

	local_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);

	report_prefix_popn(2);
}

static bool ipi_received[NR_CPUS];
static bool ipi_timeout[NR_CPUS];
static cpumask_t ipi_done;

static void ipi_timeout_handler(struct pt_regs *regs)
{
	timer_stop();
	ipi_timeout[smp_processor_id()] = true;
}

static void ipi_irq_handler(struct pt_regs *regs)
{
	ipi_ack();
	ipi_received[smp_processor_id()] = true;
}

static void ipi_hart_wait(void *data)
{
	unsigned long timeout = (unsigned long)data;
	int me = smp_processor_id();

	install_irq_handler(IRQ_S_SOFT, ipi_irq_handler);
	install_irq_handler(IRQ_S_TIMER, ipi_timeout_handler);
	local_ipi_enable();
	timer_irq_enable();
	local_irq_enable();

	timer_start(timeout);
	while (!READ_ONCE(ipi_received[me]) && !READ_ONCE(ipi_timeout[me]))
		cpu_relax();
	local_irq_disable();
	timer_stop();
	local_ipi_disable();
	timer_irq_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
	install_irq_handler(IRQ_S_TIMER, NULL);

	cpumask_set_cpu(me, &ipi_done);
}

static void ipi_hart_check(cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (ipi_timeout[cpu]) {
			const char *rec = ipi_received[cpu] ? "but was still received"
							    : "and has still not been received";
			report_fail("ipi timed out on cpu%d %s", cpu, rec);
		}

		ipi_timeout[cpu] = false;
		ipi_received[cpu] = false;
	}
}

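/*
 * IPI tests: send to a single random hart, to two harts addressed by one
 * hart_mask/hart_mask_base pair, and to all other harts by broadcast,
 * then check the error reporting for hartids beyond the highest present
 * hartid.
 */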
static void check_ipi(void)
{
	unsigned long d = getenv("SBI_IPI_TIMEOUT") ? strtol(getenv("SBI_IPI_TIMEOUT"), NULL, 0) : 200000;
	int nr_cpus_present = cpumask_weight(&cpu_present_mask);
	int me = smp_processor_id();
	unsigned long max_hartid = 0;
	unsigned long hartid1, hartid2;
	cpumask_t ipi_receivers;
	static prng_state ps;
	struct sbiret ret;
	int cpu, cpu2;

	ps = prng_init(0xDEADBEEF);

	report_prefix_push("ipi");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("ipi extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_IPI);

	if (nr_cpus_present < 2) {
		report_skip("At least 2 cpus required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("random hart");
	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpu = rand_online_cpu(&ps);
	cpumask_set_cpu(cpu, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_cpu(cpu);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("two in hart_mask");

	if (nr_cpus_present < 3) {
		report_skip("At least 3 cpus required");
		goto end_two;
	}

	cpu = rand_online_cpu(&ps);
	hartid1 = cpus[cpu].hartid;
	hartid2 = 0;
	for_each_present_cpu(cpu2) {
		if (cpu2 == cpu || cpu2 == me)
			continue;
		hartid2 = cpus[cpu2].hartid;
		if (__labs(hartid2 - hartid1) < BITS_PER_LONG)
			break;
	}
	if (cpu2 == nr_cpus) {
		report_skip("hartids are too sparse");
		goto end_two;
	}

	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpumask_set_cpu(cpu, &ipi_receivers);
	cpumask_set_cpu(cpu2, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	on_cpu_async(cpu2, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi((1UL << __labs(hartid2 - hartid1)) | 1UL, hartid1 < hartid2 ? hartid1 : hartid2);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
end_two:
	report_prefix_pop();

	report_prefix_push("broadcast");
	cpumask_clear(&ipi_done);
	cpumask_copy(&ipi_receivers, &cpu_present_mask);
	cpumask_clear_cpu(me, &ipi_receivers);
	on_cpumask_async(&ipi_receivers, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_broadcast();
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("invalid parameters");

	for_each_present_cpu(cpu) {
		if (cpus[cpu].hartid > max_hartid)
			max_hartid = cpus[cpu].hartid;
	}

	/* Try the next higher hartid than the max */
	ret = sbi_send_ipi(2, max_hartid);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask got expected error (%ld)", ret.error);
	ret = sbi_send_ipi(1, max_hartid + 1);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask_base got expected error (%ld)", ret.error);

	report_prefix_pop();

	report_prefix_pop();
}

unsigned char sbi_hsm_stop_hart[NR_CPUS];
unsigned char sbi_hsm_hart_start_checks[NR_CPUS];
unsigned char sbi_hsm_non_retentive_hart_suspend_checks[NR_CPUS];

static const char * const hart_state_str[] = {
	[SBI_EXT_HSM_STARTED] = "started",
	[SBI_EXT_HSM_STOPPED] = "stopped",
	[SBI_EXT_HSM_SUSPENDED] = "suspended",
};

struct hart_state_transition_info {
	enum sbi_ext_hsm_sid initial_state;
	enum sbi_ext_hsm_sid intermediate_state;
	enum sbi_ext_hsm_sid final_state;
};

static cpumask_t sbi_hsm_started_hart_checks;
static bool sbi_hsm_invalid_hartid_check;
static bool sbi_hsm_timer_fired;

extern void sbi_hsm_check_hart_start(void);
extern void sbi_hsm_check_non_retentive_suspend(void);

static void hsm_timer_irq_handler(struct pt_regs *regs)
{
	timer_stop();
	sbi_hsm_timer_fired = true;
}

static void hart_check_already_started(void *data)
{
	struct sbiret ret;
	unsigned long hartid = current_thread_info()->hartid;
	int me = smp_processor_id();

	ret = sbi_hart_start(hartid, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_ALREADY_AVAILABLE)
		cpumask_set_cpu(me, &sbi_hsm_started_hart_checks);
}

static void hart_start_invalid_hartid(void *data)
{
	struct sbiret ret;

	ret = sbi_hart_start(-1UL, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_INVALID_PARAM)
		sbi_hsm_invalid_hartid_check = true;
}

static cpumask_t hsm_suspend_not_supported;

static void ipi_nop(struct pt_regs *regs)
{
	ipi_ack();
}

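/*
 * Suspend the calling hart with the given suspend type, expecting an IPI
 * to wake it. 'returns' says whether the suspend call itself is expected
 * to return (retentive suspend) or to resume at resume_addr instead
 * (non-retentive suspend).
 */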
static void hart_suspend_and_wait_ipi(unsigned long suspend_type, unsigned long resume_addr,
				      unsigned long opaque, bool returns, const char *typestr)
{
	unsigned long hartid = current_thread_info()->hartid;
	struct sbiret ret;

	install_irq_handler(IRQ_S_SOFT, ipi_nop);
	local_ipi_enable();
	local_irq_enable();

	ret = sbi_hart_suspend_raw(suspend_type, resume_addr, opaque);
	if (ret.error == SBI_ERR_NOT_SUPPORTED)
		cpumask_set_cpu(smp_processor_id(), &hsm_suspend_not_supported);
	else if (ret.error)
		report_fail("failed to %s cpu%d (hartid = %lx) (error=%ld)",
			    typestr, smp_processor_id(), hartid, ret.error);
	else if (!returns)
		report_fail("failed to %s cpu%d (hartid = %lx) (call should not return)",
			    typestr, smp_processor_id(), hartid);

	local_irq_disable();
	local_ipi_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
}

static void hart_retentive_suspend(void *data)
{
	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_RETENTIVE, 0, 0, true, "retentive suspend");
}

static void hart_non_retentive_suspend(void *data)
{
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend");
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));

	hart_suspend_and_wait_ipi(suspend_type, 0, 0, true, "retentive suspend with MSB set");
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_non_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(suspend_type,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend with MSB set");
}

static bool hart_wait_on_status(unsigned long hartid, enum sbi_ext_hsm_sid status, unsigned long duration)
{
	struct sbiret ret;

	sbi_hsm_timer_fired = false;
	timer_start(duration);

	ret = sbi_hart_get_status(hartid);

	while (!ret.error && ret.value == status && !sbi_hsm_timer_fired) {
		cpu_relax();
		ret = sbi_hart_get_status(hartid);
	}

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("timer fired while waiting on status %u for hartid %lx", status, hartid);
	else if (ret.error)
		report_fail("got %ld while waiting on status %u for hartid %lx", ret.error, status, hartid);

	return !sbi_hsm_timer_fired && !ret.error;
}

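/*
 * Wait for each hart in @mask to leave the initial and then the
 * intermediate state, and verify it then reports the final state.
 * Returns the number of harts that completed the transition before the
 * timer fired.
 */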
static int hart_wait_state_transition(cpumask_t *mask, unsigned long duration,
				      struct hart_state_transition_info *states)
{
	struct sbiret ret;
	unsigned long hartid;
	int cpu, count = 0;

	for_each_cpu(cpu, mask) {
		hartid = cpus[cpu].hartid;
		if (!hart_wait_on_status(hartid, states->initial_state, duration))
			continue;
		if (!hart_wait_on_status(hartid, states->intermediate_state, duration))
			continue;

		ret = sbi_hart_get_status(hartid);
		if (ret.error)
			report_info("hartid %lx get status failed (error=%ld)", hartid, ret.error);
		else if (ret.value != states->final_state)
			report_info("hartid %lx status is not '%s' (ret.value=%ld)", hartid,
				    hart_state_str[states->final_state], ret.value);
		else
			count++;
	}

	return count;
}

static void hart_wait_until_idle(cpumask_t *mask, unsigned long duration)
{
	sbi_hsm_timer_fired = false;
	timer_start(duration);

	while (!cpumask_subset(mask, &cpu_idle_mask) && !sbi_hsm_timer_fired)
		cpu_relax();

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("hsm timer fired before all cpus became idle");
}

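/*
 * HSM tests: stop and restart all secondary harts while checking state
 * transitions and the register state at entry, exercise hart_start error
 * cases, and run retentive and non-retentive suspend/resume, including,
 * on RV64, suspend types with the MSB set, whose upper bits must be
 * ignored.
 */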
static void check_hsm(void)
{
	struct sbiret ret;
	unsigned long hartid;
	cpumask_t secondary_cpus_mask, mask, resume_mask;
	struct hart_state_transition_info transition_states;
	bool ipi_unavailable = false;
	int cpu, me = smp_processor_id();
	int max_cpus = getenv("SBI_MAX_CPUS") ? strtol(getenv("SBI_MAX_CPUS"), NULL, 0) : nr_cpus;
	unsigned long hsm_timer_duration = getenv("SBI_HSM_TIMER_DURATION")
					 ? strtol(getenv("SBI_HSM_TIMER_DURATION"), NULL, 0) : 200000;
	unsigned long sbi_hsm_hart_start_params[NR_CPUS * SBI_HSM_NUM_OF_PARAMS];
	int count, check, expected_count, resume_count;

	max_cpus = MIN(MIN(max_cpus, nr_cpus), cpumask_weight(&cpu_present_mask));

	report_prefix_push("hsm");

	if (!sbi_probe(SBI_EXT_HSM)) {
		report_skip("hsm extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_HSM);

	report_prefix_push("hart_get_status");

	hartid = current_thread_info()->hartid;
	ret = sbi_hart_get_status(hartid);

	if (ret.error) {
		report_fail("failed to get status of current hart (error=%ld)", ret.error);
		report_prefix_popn(2);
		return;
	} else if (ret.value != SBI_EXT_HSM_STARTED) {
		report_fail("current hart is not started (ret.value=%ld)", ret.value);
		report_prefix_popn(2);
		return;
	}

	report_pass("status of current hart is started");

	report_prefix_pop();

	if (max_cpus < 2) {
		report_skip("no other cpus to run the remaining hsm tests on");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_stop");

	cpumask_copy(&secondary_cpus_mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &secondary_cpus_mask);
	timer_setup(hsm_timer_irq_handler);
	local_irq_enable();

	/* Assume that previous tests have not cleaned up and stopped the secondary harts */
	on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	report_prefix_push("hart_start");

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_HARTID_IDX] = hartid;

		ret = sbi_hart_start(hartid, virt_to_phys(&sbi_hsm_check_hart_start),
				     virt_to_phys(&sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS]));
		if (ret.error) {
			report_fail("failed to start test on cpu%d (hartid = %lx) (error=%ld)", cpu, hartid, ret.error);
			continue;
		}
	}

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_hart_start_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with start checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts started");
	report(check == max_cpus - 1, "all secondary harts have expected register values after hart start");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	/* Reset the stop flags so that we can reuse them after suspension tests */
	memset(sbi_hsm_stop_hart, 0, sizeof(sbi_hsm_stop_hart));

	report_prefix_pop();

	report_prefix_push("hart_start");

	/* Select just one secondary cpu to run the invalid hartid test */
	on_cpu(cpumask_next(-1, &secondary_cpus_mask), hart_start_invalid_hartid, NULL);

	report(sbi_hsm_invalid_hartid_check, "secondary hart refuses to start with invalid hartid");

	on_cpumask_async(&secondary_cpus_mask, hart_check_already_started, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts started");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	report(cpumask_weight(&sbi_hsm_started_hart_checks) == max_cpus - 1,
	       "all secondary harts are already started");

	report_prefix_pop();

	report_prefix_push("hart_suspend");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("skipping suspension tests since ipi extension is unavailable");
		report_prefix_pop();
		ipi_unavailable = true;
		goto sbi_hsm_hart_stop_tests;
	}

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support retentive suspend");
		report(count == expected_count, "supporting secondary harts retentive suspended");
	} else {
		report_skip("retentive suspend not supported by any harts");
		goto nonret_suspend_tests;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);

	report(count == resume_count, "supporting secondary harts retentive resumed");

nonret_suspend_tests:
	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_non_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support non-retentive suspend");
		report(count == expected_count, "supporting secondary harts non-retentive suspended");
	} else {
		report_skip("non-retentive suspend not supported by any harts");
		goto hsm_suspend_tests_done;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &resume_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == resume_count, "supporting secondary harts non-retentive resumed");
	report(check == resume_count, "supporting secondary harts have expected register values after non-retentive resume");

hsm_suspend_tests_done:
	report_prefix_pop();

sbi_hsm_hart_stop_tests:
	report_prefix_push("hart_stop");

	if (ipi_unavailable || expected_count == 0)
		on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
	else
		memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	if (__riscv_xlen == 32 || ipi_unavailable) {
		local_irq_disable();
		timer_teardown();
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_suspend");

	/* Select just one secondary cpu to run the suspension tests with the MSB of suspend_type set */
	cpu = cpumask_next(-1, &secondary_cpus_mask);
	hartid = cpus[cpu].hartid;
	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);

	/* Boot up the secondary cpu and let it proceed to the idle loop */
	on_cpu(cpu, start_cpu, NULL);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "retentive suspend with MSB set");
	} else {
		report_skip("retentive suspend not supported by cpu%d", cpu);
		goto nonret_suspend_with_msb;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive resumed with MSB set");

nonret_suspend_with_msb:
	/* Reset these flags so that we can reuse them for the non-retentive suspension test */
	sbi_hsm_stop_hart[cpu] = 0;
	sbi_hsm_non_retentive_hart_suspend_checks[cpu] = 0;

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_non_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "non-retentive suspend with MSB set");
	} else {
		report_skip("non-retentive suspend not supported by cpu%d", cpu);
		goto hsm_hart_stop_test;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
	check = 0;

	if (count) {
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
		} else {
			if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
				report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
				report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
				report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
				report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
			else
				check = 1;
		}
	}

	report(count, "secondary hart non-retentive resumed with MSB set");
	report(check, "secondary hart has expected register values after non-retentive resume with MSB set");

hsm_hart_stop_test:
	report_prefix_pop();

	report_prefix_push("hart_stop");

	if (expected_count == 0)
		on_cpu_async(cpu, stop_cpu, NULL);
	else
		sbi_hsm_stop_hart[cpu] = 1;

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart stopped after suspension tests with MSB set");

	local_irq_disable();
	timer_teardown();
	report_prefix_popn(2);
}

#define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
#define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')

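/*
 * Write a buffer with DBCN_CONSOLE_WRITE, advancing the address by the
 * number of bytes reported written and retrying until the whole buffer
 * is out or the SBI implementation returns an error.
 */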
static void dbcn_write_test(const char *s, unsigned long num_bytes, bool xfail)
{
	unsigned long base_addr_lo, base_addr_hi;
	phys_addr_t paddr = virt_to_phys((void *)s);
	int num_calls = 0;
	struct sbiret ret;

	split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);

	do {
		ret = sbi_dbcn_write(num_bytes, base_addr_lo, base_addr_hi);
		num_bytes -= ret.value;
		paddr += ret.value;
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		num_calls++;
	} while (num_bytes != 0 && ret.error == SBI_SUCCESS);

	report_xfail(xfail, ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report_info("%d sbi calls made", num_calls);
}

static void dbcn_high_write_test(const char *s, unsigned long num_bytes,
				 phys_addr_t page_addr, size_t page_offset,
				 bool highmem_supported)
{
	int nr_pages = page_offset ? 2 : 1;
	void *vaddr;

	if (page_addr != PAGE_ALIGN(page_addr) || page_addr + PAGE_SIZE < HIGH_ADDR_BOUNDARY ||
	    !check_addr(page_addr, nr_pages * PAGE_SIZE)) {
		report_skip("Memory above 4G required");
		return;
	}

	vaddr = alloc_vpages(nr_pages);

	for (int i = 0; i < nr_pages; ++i)
		install_page(current_pgtable(), page_addr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE);
	memcpy(vaddr + page_offset, DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(vaddr + page_offset, num_bytes, !highmem_supported);
}

/*
 * Only the write functionality is tested here. There's no easy way to
 * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
 */
static void check_dbcn(void)
{
	unsigned long num_bytes = strlen(DBCN_WRITE_TEST_STRING);
	unsigned long base_addr_lo, base_addr_hi;
	bool highmem_supported = true;
	phys_addr_t paddr;
	struct sbiret ret;
	char *buf;

	report_prefix_push("dbcn");

	if (!sbi_probe(SBI_EXT_DBCN)) {
		report_skip("DBCN extension unavailable");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_DBCN);

	report_prefix_push("write");

	dbcn_write_test(DBCN_WRITE_TEST_STRING, num_bytes, false);

	assert(num_bytes < PAGE_SIZE);

	report_prefix_push("page boundary");
	buf = alloc_pages(1);
	memcpy(&buf[PAGE_SIZE - num_bytes / 2], DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(&buf[PAGE_SIZE - num_bytes / 2], num_bytes, false);
	report_prefix_pop();

	if (env_enabled("SBI_HIGHMEM_NOT_SUPPORTED"))
		highmem_supported = false;

	report_prefix_push("high boundary");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_BOUNDARY"))
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes,
				     HIGH_ADDR_BOUNDARY - PAGE_SIZE, PAGE_SIZE - num_bytes / 2,
				     highmem_supported);
	else
		report_skip("user disabled");
	report_prefix_pop();

	report_prefix_push("high page");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_PAGE")) {
		paddr = getenv("HIGH_PAGE") ? strtoull(getenv("HIGH_PAGE"), NULL, 0) : HIGH_ADDR_BOUNDARY;
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes, paddr, 0, highmem_supported);
	} else {
		report_skip("user disabled");
	}
	report_prefix_pop();

	/* Bytes are read from memory and written to the console */
	report_prefix_push("invalid parameter");
	if (get_invalid_addr(&paddr, false)) {
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		ret = sbi_dbcn_write(1, base_addr_lo, base_addr_hi);
		report(ret.error == SBI_ERR_INVALID_PARAM, "address (error=%ld)", ret.error);
	}
	report_prefix_popn(2);

	report_prefix_push("write_byte");

	puts("DBCN_WRITE_BYTE TEST BYTE: ");
	ret = sbi_dbcn_write_byte(DBCN_WRITE_BYTE_TEST_BYTE);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	puts("DBCN_WRITE_BYTE TEST WORD: "); /* still expect 'a' in the output */
	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, 0x64636261, 0, 0, 0, 0, 0);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	report_prefix_popn(2);
}

void sbi_susp_resume(unsigned long hartid, unsigned long opaque);
jmp_buf sbi_susp_jmp;

#define SBI_SUSP_TIMER_DURATION_US 500000
static void susp_timer(struct pt_regs *regs)
{
	timer_start(SBI_SUSP_TIMER_DURATION_US);
}

struct susp_params {
	unsigned long sleep_type;
	unsigned long resume_addr;
	unsigned long opaque;
	bool returns;
	struct sbiret ret;
};

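/*
 * Prepare a basic system suspend: snapshot the CSRs the resume path will
 * verify, then stop all other harts, since system suspend may only be
 * requested when the calling hart is the only one started (susp_one_prep
 * below checks that SBI_ERR_DENIED is returned otherwise).
 */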
static bool susp_basic_prep(unsigned long ctx[], struct susp_params *params)
{
	int cpu, me = smp_processor_id();
	unsigned long *csrs;
	struct sbiret ret;
	cpumask_t mask;

	csrs = (unsigned long *)ctx[SBI_SUSP_CSRS_IDX];
	csrs[SBI_CSR_SSTATUS_IDX] = csr_read(CSR_SSTATUS);
	csrs[SBI_CSR_SIE_IDX] = csr_read(CSR_SIE);
	csrs[SBI_CSR_STVEC_IDX] = csr_read(CSR_STVEC);
	csrs[SBI_CSR_SSCRATCH_IDX] = csr_read(CSR_SSCRATCH);
	csrs[SBI_CSR_SATP_IDX] = csr_read(CSR_SATP);

	memset(params, 0, sizeof(*params));
	params->sleep_type = 0; /* suspend-to-ram */
	params->resume_addr = virt_to_phys(sbi_susp_resume);
	params->opaque = virt_to_phys(ctx);
	params->returns = false;

	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &mask);
	on_cpumask_async(&mask, stop_cpu, NULL);

	/* Wait up to 1s for all harts to stop */
	for (int i = 0; i < 100; i++) {
		int count = 1;

		udelay(10000);

		for_each_present_cpu(cpu) {
			if (cpu == me)
				continue;
			ret = sbi_hart_get_status(cpus[cpu].hartid);
			if (!ret.error && ret.value == SBI_EXT_HSM_STOPPED)
				++count;
		}
		if (count == cpumask_weight(&cpu_present_mask))
			break;
	}

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		if (cpu == me) {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STARTED,
				   "cpu%d is not started", cpu);
		} else {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STOPPED,
				   "cpu%d is not stopped", cpu);
		}
	}

	return true;
}

static void susp_basic_check(unsigned long ctx[], struct susp_params *params)
{
	if (ctx[SBI_SUSP_RESULTS_IDX] == SBI_SUSP_TEST_MASK) {
		report_pass("suspend and resume");
	} else {
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SATP))
			report_fail("SATP set to zero on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SIE))
			report_fail("sstatus.SIE clear on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_HARTID))
			report_fail("a0 is hartid on resume");
	}
}

static bool susp_type_prep(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = 1;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_PARAM;

	return true;
}

#if __riscv_xlen != 32
static bool susp_type_prep2(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = BIT(32);

	return true;
}
#endif

static bool susp_badaddr_prep(unsigned long ctx[], struct susp_params *params)
{
	phys_addr_t badaddr;
	bool r;

	if (!get_invalid_addr(&badaddr, false))
		return false;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->resume_addr = badaddr;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_ADDRESS;

	return true;
}

static bool susp_one_prep(unsigned long ctx[], struct susp_params *params)
{
	int started = 0, cpu, me = smp_processor_id();
	struct sbiret ret;
	bool r;

	if (cpumask_weight(&cpu_present_mask) < 2) {
		report_skip("At least 2 cpus required");
		return false;
	}

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->returns = true;
	params->ret.error = SBI_ERR_DENIED;

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		break;
	}

	on_cpu(cpu, start_cpu, NULL);

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		assert_msg(!ret.error, "HSM get status failed for cpu%d", cpu);
		if (ret.value == SBI_EXT_HSM_STARTED)
			started++;
	}

	assert(started == 2);

	return true;
}

static void check_susp(void)
{
	unsigned long csrs[SBI_CSR_NR_IDX];
	unsigned long ctx[SBI_SUSP_NR_IDX] = {
		[SBI_SUSP_MAGIC_IDX] = SBI_SUSP_MAGIC,
		[SBI_SUSP_CSRS_IDX] = (unsigned long)csrs,
		[SBI_SUSP_HARTID_IDX] = current_thread_info()->hartid,
	};
	enum {
#define SUSP_FIRST_TESTNUM 1
		SUSP_BASIC = SUSP_FIRST_TESTNUM,
		SUSP_TYPE,
		SUSP_TYPE2,
		SUSP_BAD_ADDR,
		SUSP_ONE_ONLINE,
		NR_SUSP_TESTS,
	};
	struct susp_test {
		const char *name;
		bool (*prep)(unsigned long ctx[], struct susp_params *params);
		void (*check)(unsigned long ctx[], struct susp_params *params);
	} susp_tests[] = {
		[SUSP_BASIC]		= { "basic",			susp_basic_prep,	susp_basic_check,	},
		[SUSP_TYPE]		= { "sleep_type",		susp_type_prep,					},
#if __riscv_xlen != 32
		[SUSP_TYPE2]		= { "sleep_type upper bits",	susp_type_prep2,	susp_basic_check	},
#endif
		[SUSP_BAD_ADDR]		= { "bad addr",			susp_badaddr_prep,				},
		[SUSP_ONE_ONLINE]	= { "one cpu online",		susp_one_prep,					},
	};
	struct susp_params params;
	struct sbiret ret;
	int testnum, i;

	report_prefix_push("susp");

	if (!sbi_probe(SBI_EXT_SUSP)) {
		report_skip("SUSP extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_SUSP);

	timer_setup(susp_timer);
	local_irq_enable();
	timer_start(SBI_SUSP_TIMER_DURATION_US);

	ret = sbi_ecall(SBI_EXT_SUSP, 1, 0, 0, 0, 0, 0, 0);
	report(ret.error == SBI_ERR_NOT_SUPPORTED, "funcid != 0 not supported");

	for (i = SUSP_FIRST_TESTNUM; i < NR_SUSP_TESTS; i++) {
		if (!susp_tests[i].name)
			continue;

		report_prefix_push(susp_tests[i].name);

		ctx[SBI_SUSP_TESTNUM_IDX] = i;
		ctx[SBI_SUSP_RESULTS_IDX] = 0;

		local_irq_disable();

		assert(susp_tests[i].prep);
		if (!susp_tests[i].prep(ctx, &params)) {
			report_prefix_pop();
			continue;
		}

		if ((testnum = setjmp(sbi_susp_jmp)) == 0) {
			ret = sbi_system_suspend_raw(params.sleep_type, params.resume_addr, params.opaque);

			local_irq_enable();

			if (!params.returns && ret.error == SBI_ERR_NOT_SUPPORTED) {
				report_fail("probing claims support, but the call returned not supported");
				report_prefix_pop();
				goto out;
			} else if (!params.returns) {
				report_fail("unexpected return with error: %ld, value: %ld", ret.error, ret.value);
			} else {
				if (!report(ret.error == params.ret.error, "got expected sbi.error (%ld)", params.ret.error))
					report_info("expected sbi.error %ld, received %ld", params.ret.error, ret.error);
			}

			report_prefix_pop();
			continue;
		}
		assert(testnum == i);

		local_irq_enable();

		if (susp_tests[i].check)
			susp_tests[i].check(ctx, &params);

		report_prefix_pop();
	}

out:
	local_irq_disable();
	timer_teardown();

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	if (argc > 1 && !strcmp(argv[1], "-h")) {
		help();
		exit(0);
	}

	report_prefix_push("sbi");
	check_base();
	check_time();
	check_ipi();
	check_hsm();
	check_dbcn();
	check_susp();
	check_fwft();

	return report_summary();
}