// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI verification
 *
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <cpumask.h>
#include <limits.h>
#include <memregions.h>
#include <on-cpus.h>
#include <rand.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <vmalloc.h>

#include <asm/barrier.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define	HIGH_ADDR_BOUNDARY	((phys_addr_t)1 << 32)

void check_sse(void);
void check_fwft(void);

static long __labs(long a)
{
	return __builtin_labs(a);
}

static void help(void)
{
	puts("Test SBI\n");
	puts("An environ must be provided where expected values are given.\n");
}

static struct sbiret sbi_base(int fid, unsigned long arg0)
{
	return sbi_ecall(SBI_EXT_BASE, fid, arg0, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write(unsigned long num_bytes, unsigned long base_addr_lo,
				    unsigned long base_addr_hi)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			 num_bytes, base_addr_lo, base_addr_hi, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write_byte(uint8_t byte)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_hart_suspend_raw(unsigned long suspend_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, resume_addr, opaque, 0, 0, 0);
}

static struct sbiret sbi_system_suspend_raw(unsigned long sleep_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
}

void sbi_bad_fid(int ext)
{
	struct sbiret ret = sbi_ecall(ext, 0xbad, 0, 0, 0, 0, 0, 0);
	sbiret_report_error(&ret, SBI_ERR_NOT_SUPPORTED, "Bad FID");
}

static void start_cpu(void *data)
{
	/* nothing to do */
}

static void stop_cpu(void *data)
{
	struct sbiret ret = sbi_hart_stop();
	assert_msg(0, "cpu%d (hartid = %lx) failed to stop with sbiret.error %ld",
		   smp_processor_id(), current_thread_info()->hartid, ret.error);
}

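/*
 * Pick a random cpu from the present mask, excluding the calling cpu.
 * cpumask_next(cpu - 1, ...) rounds the random index up to the nearest
 * present cpu.
 */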
static int rand_online_cpu(prng_state *ps)
{
	int cpu, me = smp_processor_id();

	for (;;) {
		cpu = prng32(ps) % nr_cpus;
		cpu = cpumask_next(cpu - 1, &cpu_present_mask);
		if (cpu != nr_cpus && cpu != me && cpu_present(cpu))
			break;
	}

	return cpu;
}

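/* Split a physical address into the (lo, hi) register pair the SBI calls take. */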
static void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo)
{
	*lo = (unsigned long)paddr;
	*hi = 0;
	if (__riscv_xlen == 32)
		*hi = (unsigned long)(paddr >> 32);
}

static bool check_addr(phys_addr_t start, phys_addr_t size)
{
	struct mem_region *r = memregions_find(start);
	return r && r->end - start >= size && r->flags == MR_F_UNUSED;
}

static phys_addr_t get_highest_addr(void)
{
	phys_addr_t highest_end = 0;
	struct mem_region *r;

	for (r = mem_regions; r->end; ++r) {
		if (r->end > highest_end)
			highest_end = r->end;
	}

	return highest_end - 1;
}

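/*
 * Choose an address the SBI implementation should treat as invalid:
 * one past the highest memory region when INVALID_ADDR_AUTO is set,
 * -1 by default, or a user-provided INVALID_ADDR.
 */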
static bool get_invalid_addr(phys_addr_t *paddr, bool allow_default)
{
	if (env_enabled("INVALID_ADDR_AUTO")) {
		*paddr = get_highest_addr() + 1;
		return true;
	} else if (allow_default && !getenv("INVALID_ADDR")) {
		*paddr = -1ul;
		return true;
	} else if (env_or_skip("INVALID_ADDR")) {
		*paddr = strtoull(getenv("INVALID_ADDR"), NULL, 0);
		return true;
	}

	return false;
}

static void timer_setup(void (*handler)(struct pt_regs *))
{
	install_irq_handler(IRQ_S_TIMER, handler);
	timer_irq_enable();
}

static void timer_teardown(void)
{
	timer_irq_disable();
	timer_stop();
	install_irq_handler(IRQ_S_TIMER, NULL);
}

static void check_base(void)
{
	struct sbiret ret;
	long expected;

	report_prefix_push("base");

	sbi_bad_fid(SBI_EXT_BASE);

	ret = sbi_base(SBI_EXT_BASE_GET_SPEC_VERSION, 0);

	report_prefix_push("spec_version");
	if (env_or_skip("SBI_SPEC_VERSION")) {
		expected = (long)strtoul(getenv("SBI_SPEC_VERSION"), NULL, 0);
		assert_msg(!(expected & BIT(31)), "SBI spec version bit 31 must be zero");
		assert_msg(__riscv_xlen == 32 || !(expected >> 32), "SBI spec version bits greater than 31 must be zero");
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	ret.value &= 0x7ffffffful;

	if (ret.error || ret.value < 2) {
		report_skip("SBI spec version 0.2 or higher required");
		return;
	}

	report_prefix_push("impl_id");
	if (env_or_skip("SBI_IMPL_ID")) {
		expected = (long)strtoul(getenv("SBI_IMPL_ID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_ID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("impl_version");
	if (env_or_skip("SBI_IMPL_VERSION")) {
		expected = (long)strtoul(getenv("SBI_IMPL_VERSION"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_VERSION, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("probe_ext");
	expected = getenv("SBI_PROBE_EXT") ? (long)strtoul(getenv("SBI_PROBE_EXT"), NULL, 0) : 1;
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, SBI_EXT_BASE);
	sbiret_check(&ret, 0, expected);
	report_prefix_push("unavailable");
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, 0xb000000);
	sbiret_check(&ret, 0, 0);
	report_prefix_popn(2);

	report_prefix_push("mvendorid");
	if (env_or_skip("MVENDORID")) {
		expected = (long)strtoul(getenv("MVENDORID"), NULL, 0);
		assert(__riscv_xlen == 32 || !(expected >> 32));
		ret = sbi_base(SBI_EXT_BASE_GET_MVENDORID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("marchid");
	if (env_or_skip("MARCHID")) {
		expected = (long)strtoul(getenv("MARCHID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MARCHID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("mimpid");
	if (env_or_skip("MIMPID")) {
		expected = (long)strtoul(getenv("MIMPID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MIMPID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_popn(2);
}

struct timer_info {
	bool timer_works;
	bool mask_timer_irq;
	bool timer_irq_set;
	bool timer_irq_cleared;
	unsigned long timer_irq_count;
};

static struct timer_info timer_info;

static bool timer_irq_pending(void)
{
	return csr_read(CSR_SIP) & IP_TIP;
}

static void timer_irq_handler(struct pt_regs *regs)
{
	timer_info.timer_works = true;

	if (timer_info.timer_irq_count < ULONG_MAX)
		++timer_info.timer_irq_count;

	if (timer_irq_pending())
		timer_info.timer_irq_set = true;

	if (timer_info.mask_timer_irq)
		timer_irq_disable();
	else
		sbi_set_timer(ULONG_MAX);

	if (!timer_irq_pending())
		timer_info.timer_irq_cleared = true;
}

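/*
 * Program a timer 'd' microseconds out and verify the interrupt arrives
 * within [d, d + margin], that it is taken exactly once, and that the
 * pending bit is set in the handler and cleared again, either by
 * masking the irq or by setting the timer to -1.
 */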
static void timer_check_set_timer(bool mask_timer_irq)
{
	struct sbiret ret;
	unsigned long begin, end, duration;
	const char *mask_test_str = mask_timer_irq ? " for mask irq test" : "";
	unsigned long d = getenv("SBI_TIMER_DELAY") ? strtol(getenv("SBI_TIMER_DELAY"), NULL, 0) : 200000;
	unsigned long margin = getenv("SBI_TIMER_MARGIN") ? strtol(getenv("SBI_TIMER_MARGIN"), NULL, 0) : 200000;

	d = usec_to_cycles(d);
	margin = usec_to_cycles(margin);

	timer_info = (struct timer_info){ .mask_timer_irq = mask_timer_irq };
	begin = timer_get_cycles();
	ret = sbi_set_timer(begin + d);

	report(!ret.error, "set timer%s", mask_test_str);
	if (ret.error)
		report_info("set timer%s failed with %ld\n", mask_test_str, ret.error);

	while ((end = timer_get_cycles()) <= (begin + d + margin) && !timer_info.timer_works)
		cpu_relax();

	report(timer_info.timer_works, "timer interrupt received%s", mask_test_str);
	report(timer_info.timer_irq_set, "pending timer interrupt bit set in irq handler%s", mask_test_str);

	if (!mask_timer_irq) {
		report(timer_info.timer_irq_set && timer_info.timer_irq_cleared,
		       "pending timer interrupt bit cleared by setting timer to -1");
	}

	if (timer_info.timer_works) {
		duration = end - begin;
		report(duration >= d && duration <= (d + margin), "timer delay honored%s", mask_test_str);
	}

	report(timer_info.timer_irq_count == 1, "timer interrupt received exactly once%s", mask_test_str);
}

static void check_time(void)
{
	bool pending;

	report_prefix_push("time");

	if (!sbi_probe(SBI_EXT_TIME)) {
		report_skip("time extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_TIME);

	report_prefix_push("set_timer");

	install_irq_handler(IRQ_S_TIMER, timer_irq_handler);
	local_irq_enable();
	timer_irq_enable();

	timer_check_set_timer(false);

	if (csr_read(CSR_SIE) & IE_TIE)
		timer_check_set_timer(true);
	else
		report_skip("timer irq enable bit is not writable, skipping mask irq test");

	timer_irq_disable();
	sbi_set_timer(0);
	pending = timer_irq_pending();
	report(pending, "timer immediately pending by setting timer to 0");
	sbi_set_timer(ULONG_MAX);
	if (pending)
		report(!timer_irq_pending(), "pending timer cleared while masked");
	else
		report_skip("timer is not pending, skipping timer cleared while masked test");

	local_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);

	report_prefix_popn(2);
}

static bool ipi_received[NR_CPUS];
static bool ipi_timeout[NR_CPUS];
static cpumask_t ipi_done;

static void ipi_timeout_handler(struct pt_regs *regs)
{
	timer_stop();
	ipi_timeout[smp_processor_id()] = true;
}

static void ipi_irq_handler(struct pt_regs *regs)
{
	ipi_ack();
	ipi_received[smp_processor_id()] = true;
}

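/*
 * Enable the software and timer interrupts, then spin until either an
 * IPI arrives or the timeout expires.  The caller polls ipi_done to see
 * when this hart has finished waiting.
 */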
static void ipi_hart_wait(void *data)
{
	unsigned long timeout = (unsigned long)data;
	int me = smp_processor_id();

	install_irq_handler(IRQ_S_SOFT, ipi_irq_handler);
	install_irq_handler(IRQ_S_TIMER, ipi_timeout_handler);
	local_ipi_enable();
	timer_irq_enable();
	local_irq_enable();

	timer_start(timeout);
	while (!READ_ONCE(ipi_received[me]) && !READ_ONCE(ipi_timeout[me]))
		cpu_relax();
	local_irq_disable();
	timer_stop();
	local_ipi_disable();
	timer_irq_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
	install_irq_handler(IRQ_S_TIMER, NULL);

	cpumask_set_cpu(me, &ipi_done);
}

static void ipi_hart_check(cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (ipi_timeout[cpu]) {
			const char *rec = ipi_received[cpu] ? "but was still received"
							    : "and has still not been received";
			report_fail("ipi timed out on cpu%d %s", cpu, rec);
		}

		ipi_timeout[cpu] = false;
		ipi_received[cpu] = false;
	}
}

static void check_ipi(void)
{
	unsigned long d = getenv("SBI_IPI_TIMEOUT") ? strtol(getenv("SBI_IPI_TIMEOUT"), NULL, 0) : 200000;
	int nr_cpus_present = cpumask_weight(&cpu_present_mask);
	int me = smp_processor_id();
	unsigned long max_hartid = 0;
	unsigned long hartid1, hartid2;
	cpumask_t ipi_receivers;
	static prng_state ps;
	struct sbiret ret;
	int cpu, cpu2;

	ps = prng_init(0xDEADBEEF);

	report_prefix_push("ipi");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("ipi extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_IPI);

	if (nr_cpus_present < 2) {
		report_skip("At least 2 cpus required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("random hart");
	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpu = rand_online_cpu(&ps);
	cpumask_set_cpu(cpu, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_cpu(cpu);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("two in hart_mask");

	if (nr_cpus_present < 3) {
		report_skip("3 cpus required");
		goto end_two;
	}

	cpu = rand_online_cpu(&ps);
	hartid1 = cpus[cpu].hartid;
	hartid2 = 0;
	for_each_present_cpu(cpu2) {
		if (cpu2 == cpu || cpu2 == me)
			continue;
		hartid2 = cpus[cpu2].hartid;
		if (__labs(hartid2 - hartid1) < BITS_PER_LONG)
			break;
	}
	if (cpu2 == nr_cpus) {
		report_skip("hartids are too sparse");
		goto end_two;
	}

	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpumask_set_cpu(cpu, &ipi_receivers);
	cpumask_set_cpu(cpu2, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	on_cpu_async(cpu2, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi((1UL << __labs(hartid2 - hartid1)) | 1UL, hartid1 < hartid2 ? hartid1 : hartid2);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
end_two:
	report_prefix_pop();

	report_prefix_push("broadcast");
	cpumask_clear(&ipi_done);
	cpumask_copy(&ipi_receivers, &cpu_present_mask);
	cpumask_clear_cpu(me, &ipi_receivers);
	on_cpumask_async(&ipi_receivers, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_broadcast();
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("invalid parameters");

	for_each_present_cpu(cpu) {
		if (cpus[cpu].hartid > max_hartid)
			max_hartid = cpus[cpu].hartid;
	}

	/* Test no targets */
	ret = sbi_send_ipi(0, 0);
	sbiret_report_error(&ret, SBI_SUCCESS, "no targets, hart_mask_base is 0");
	ret = sbi_send_ipi(0, 1);
	sbiret_report_error(&ret, SBI_SUCCESS, "no targets, hart_mask_base is 1");

	/* Try the next higher hartid than the max */
	ret = sbi_send_ipi(2, max_hartid);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask got expected error (%ld)", ret.error);
	ret = sbi_send_ipi(1, max_hartid + 1);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask_base got expected error (%ld)", ret.error);

	report_prefix_pop();

	report_prefix_pop();
}

unsigned char sbi_hsm_stop_hart[NR_CPUS];
unsigned char sbi_hsm_hart_start_checks[NR_CPUS];
unsigned char sbi_hsm_non_retentive_hart_suspend_checks[NR_CPUS];

static const char * const hart_state_str[] = {
	[SBI_EXT_HSM_STARTED] = "started",
	[SBI_EXT_HSM_STOPPED] = "stopped",
	[SBI_EXT_HSM_SUSPENDED] = "suspended",
};
struct hart_state_transition_info {
	enum sbi_ext_hsm_sid initial_state;
	enum sbi_ext_hsm_sid intermediate_state;
	enum sbi_ext_hsm_sid final_state;
};
static cpumask_t sbi_hsm_started_hart_checks;
static bool sbi_hsm_invalid_hartid_check;
static bool sbi_hsm_timer_fired;
extern void sbi_hsm_check_hart_start(void);
extern void sbi_hsm_check_non_retentive_suspend(void);

static void hsm_timer_irq_handler(struct pt_regs *regs)
{
	timer_stop();
	sbi_hsm_timer_fired = true;
}

static void hart_check_already_started(void *data)
{
	struct sbiret ret;
	unsigned long hartid = current_thread_info()->hartid;
	int me = smp_processor_id();

	ret = sbi_hart_start(hartid, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_ALREADY_AVAILABLE)
		cpumask_set_cpu(me, &sbi_hsm_started_hart_checks);
}

static void hart_start_invalid_hartid(void *data)
{
	struct sbiret ret;

	ret = sbi_hart_start(-1UL, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_INVALID_PARAM)
		sbi_hsm_invalid_hartid_check = true;
}

static cpumask_t hsm_suspend_not_supported;

static void ipi_nop(struct pt_regs *regs)
{
	ipi_ack();
}

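/*
 * Suspend the calling hart with the given suspend_type and wait to be
 * woken by an IPI.  A non-retentive suspend resumes at resume_addr
 * rather than returning here, so returning at all is a failure when
 * 'returns' is false.
 */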
static void hart_suspend_and_wait_ipi(unsigned long suspend_type, unsigned long resume_addr,
				      unsigned long opaque, bool returns, const char *typestr)
{
	unsigned long hartid = current_thread_info()->hartid;
	struct sbiret ret;

	install_irq_handler(IRQ_S_SOFT, ipi_nop);
	local_ipi_enable();
	local_irq_enable();

	ret = sbi_hart_suspend_raw(suspend_type, resume_addr, opaque);
	if (ret.error == SBI_ERR_NOT_SUPPORTED)
		cpumask_set_cpu(smp_processor_id(), &hsm_suspend_not_supported);
	else if (ret.error)
		report_fail("failed to %s cpu%d (hartid = %lx) (error=%ld)",
			    typestr, smp_processor_id(), hartid, ret.error);
	else if (!returns)
		report_fail("failed to %s cpu%d (hartid = %lx) (call should not return)",
			    typestr, smp_processor_id(), hartid);

	local_irq_disable();
	local_ipi_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
}

static void hart_retentive_suspend(void *data)
{
	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_RETENTIVE, 0, 0, true, "retentive suspend");
}

static void hart_non_retentive_suspend(void *data)
{
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend");
}

/* This test function is only being run on RV64 to verify that upper bits of suspend_type are ignored */
static void hart_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));

	hart_suspend_and_wait_ipi(suspend_type, 0, 0, true, "retentive suspend with MSB set");
}

/* This test function is only being run on RV64 to verify that upper bits of suspend_type are ignored */
static void hart_non_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(suspend_type,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend with MSB set");
}

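/*
 * Poll the HSM status of hartid while it still reads 'status', giving
 * up when the timer fires.  Returns true if the hart left the state
 * before the timeout.
 */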
static bool hart_wait_on_status(unsigned long hartid, enum sbi_ext_hsm_sid status, unsigned long duration)
{
	struct sbiret ret;

	sbi_hsm_timer_fired = false;
	timer_start(duration);

	ret = sbi_hart_get_status(hartid);

	while (!ret.error && ret.value == status && !sbi_hsm_timer_fired) {
		cpu_relax();
		ret = sbi_hart_get_status(hartid);
	}

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("timer fired while waiting on status %u for hartid %lx", status, hartid);
	else if (ret.error)
		report_fail("got %ld while waiting on status %u for hartid %lx", ret.error, status, hartid);

	return !sbi_hsm_timer_fired && !ret.error;
}

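/*
 * Walk each hart in the mask through initial -> intermediate -> final
 * state and return the number of harts that reached the expected final
 * state in time.
 */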
static int hart_wait_state_transition(cpumask_t *mask, unsigned long duration,
				      struct hart_state_transition_info *states)
{
	struct sbiret ret;
	unsigned long hartid;
	int cpu, count = 0;

	for_each_cpu(cpu, mask) {
		hartid = cpus[cpu].hartid;
		if (!hart_wait_on_status(hartid, states->initial_state, duration))
			continue;
		if (!hart_wait_on_status(hartid, states->intermediate_state, duration))
			continue;

		ret = sbi_hart_get_status(hartid);
		if (ret.error)
			report_info("hartid %lx get status failed (error=%ld)", hartid, ret.error);
		else if (ret.value != states->final_state)
			report_info("hartid %lx status is not '%s' (ret.value=%ld)", hartid,
				    hart_state_str[states->final_state], ret.value);
		else
			count++;
	}

	return count;
}

static void hart_wait_until_idle(cpumask_t *mask, unsigned long duration)
{
	sbi_hsm_timer_fired = false;
	timer_start(duration);

	while (!cpumask_subset(mask, &cpu_idle_mask) && !sbi_hsm_timer_fired)
		cpu_relax();

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("hsm timer fired before all cpus became idle");
}

static void check_hsm(void)
{
	struct sbiret ret;
	unsigned long hartid;
	cpumask_t secondary_cpus_mask, mask, resume_mask;
	struct hart_state_transition_info transition_states;
	bool ipi_unavailable = false;
	int cpu, me = smp_processor_id();
	int max_cpus = getenv("SBI_MAX_CPUS") ? strtol(getenv("SBI_MAX_CPUS"), NULL, 0) : nr_cpus;
	unsigned long hsm_timer_duration = getenv("SBI_HSM_TIMER_DURATION")
					 ? strtol(getenv("SBI_HSM_TIMER_DURATION"), NULL, 0) : 200000;
	unsigned long sbi_hsm_hart_start_params[NR_CPUS * SBI_HSM_NUM_OF_PARAMS];
	int count, check, expected_count, resume_count;

	max_cpus = MIN(MIN(max_cpus, nr_cpus), cpumask_weight(&cpu_present_mask));

	report_prefix_push("hsm");

	if (!sbi_probe(SBI_EXT_HSM)) {
		report_skip("hsm extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_HSM);

	report_prefix_push("hart_get_status");

	hartid = current_thread_info()->hartid;
	ret = sbi_hart_get_status(hartid);

	if (ret.error) {
		report_fail("failed to get status of current hart (error=%ld)", ret.error);
		report_prefix_popn(2);
		return;
	} else if (ret.value != SBI_EXT_HSM_STARTED) {
		report_fail("current hart is not started (ret.value=%ld)", ret.value);
		report_prefix_popn(2);
		return;
	}

	report_pass("status of current hart is started");

	report_prefix_pop();

	if (max_cpus < 2) {
		report_skip("no other cpus to run the remaining hsm tests on");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_stop");

	cpumask_copy(&secondary_cpus_mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &secondary_cpus_mask);
	timer_setup(hsm_timer_irq_handler);
	local_irq_enable();

	/* Assume that previous tests have not cleaned up and stopped the secondary harts */
	on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	report_prefix_push("hart_start");

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_HARTID_IDX] = hartid;

		ret = sbi_hart_start(hartid, virt_to_phys(&sbi_hsm_check_hart_start),
				     virt_to_phys(&sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS]));
		if (ret.error) {
			report_fail("failed to start test on cpu%d (hartid = %lx) (error=%ld)", cpu, hartid, ret.error);
			continue;
		}
	}

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_hart_start_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with start checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts started");
	report(check == max_cpus - 1, "all secondary harts have expected register values after hart start");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	/* Reset the stop flags so that we can reuse them after suspension tests */
	memset(sbi_hsm_stop_hart, 0, sizeof(sbi_hsm_stop_hart));

	report_prefix_pop();

	report_prefix_push("hart_start");

	/* Select just one secondary cpu to run the invalid hartid test */
	on_cpu(cpumask_next(-1, &secondary_cpus_mask), hart_start_invalid_hartid, NULL);

	report(sbi_hsm_invalid_hartid_check, "secondary hart refuses to start with invalid hartid");

	on_cpumask_async(&secondary_cpus_mask, hart_check_already_started, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts started");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	report(cpumask_weight(&sbi_hsm_started_hart_checks) == max_cpus - 1,
	       "all secondary harts are already started");

	report_prefix_pop();

	report_prefix_push("hart_suspend");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("skipping suspension tests since ipi extension is unavailable");
		report_prefix_pop();
		ipi_unavailable = true;
		goto sbi_hsm_hart_stop_tests;
	}

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support retentive suspend");
		report(count == expected_count, "supporting secondary harts retentive suspended");
	} else {
		report_skip("retentive suspend not supported by any harts");
		goto nonret_suspend_tests;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);

	report(count == resume_count, "supporting secondary harts retentive resumed");

nonret_suspend_tests:
	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_non_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support non-retentive suspend");
		report(count == expected_count, "supporting secondary harts non-retentive suspended");
	} else {
		report_skip("non-retentive suspend not supported by any harts");
		goto hsm_suspend_tests_done;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &resume_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == resume_count, "supporting secondary harts non-retentive resumed");
	report(check == resume_count, "supporting secondary harts have expected register values after non-retentive resume");

hsm_suspend_tests_done:
	report_prefix_pop();

sbi_hsm_hart_stop_tests:
	report_prefix_push("hart_stop");

	if (ipi_unavailable || expected_count == 0)
		on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
	else
		memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	if (__riscv_xlen == 32 || ipi_unavailable) {
		local_irq_disable();
		timer_teardown();
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_suspend");

	/* Select just one secondary cpu to run suspension tests with MSB of suspend type being set */
	cpu = cpumask_next(-1, &secondary_cpus_mask);
	hartid = cpus[cpu].hartid;
	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);

	/* Boot up the secondary cpu and let it proceed to the idle loop */
	on_cpu(cpu, start_cpu, NULL);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "retentive suspend with MSB set");
	} else {
		report_skip("retentive suspend not supported by cpu%d", cpu);
		goto nonret_suspend_with_msb;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive resumed with MSB set");

nonret_suspend_with_msb:
	/* Reset these flags so that we can reuse them for the non-retentive suspension test */
	sbi_hsm_stop_hart[cpu] = 0;
	sbi_hsm_non_retentive_hart_suspend_checks[cpu] = 0;

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_non_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "non-retentive suspend with MSB set");
	} else {
		report_skip("non-retentive suspend not supported by cpu%d", cpu);
		goto hsm_hart_stop_test;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
	check = 0;

	if (count) {
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
		} else {
			if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
				report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
				report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
				report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
				report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
			else
				check = 1;
		}
	}

	report(count, "secondary hart non-retentive resumed with MSB set");
	report(check, "secondary hart has expected register values after non-retentive resume with MSB set");

hsm_hart_stop_test:
	report_prefix_pop();

	report_prefix_push("hart_stop");

	if (expected_count == 0)
		on_cpu_async(cpu, stop_cpu, NULL);
	else
		sbi_hsm_stop_hart[cpu] = 1;

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart stopped after suspension tests with MSB set");

	local_irq_disable();
	timer_teardown();
	report_prefix_popn(2);
}

#define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
#define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')

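/*
 * Write the string via SBI_EXT_DBCN_CONSOLE_WRITE, retrying with the
 * remaining bytes after partial writes; ret.value holds the number of
 * bytes each call consumed.
 */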
static void dbcn_write_test(const char *s, unsigned long num_bytes, bool xfail)
{
	unsigned long base_addr_lo, base_addr_hi;
	phys_addr_t paddr = virt_to_phys((void *)s);
	int num_calls = 0;
	struct sbiret ret;

	split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);

	do {
		ret = sbi_dbcn_write(num_bytes, base_addr_lo, base_addr_hi);
		num_bytes -= ret.value;
		paddr += ret.value;
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		num_calls++;
	} while (num_bytes != 0 && ret.error == SBI_SUCCESS);

	report_xfail(xfail, ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report_info("%d sbi calls made", num_calls);
}

static void dbcn_high_write_test(const char *s, unsigned long num_bytes,
				 phys_addr_t page_addr, size_t page_offset,
				 bool highmem_supported)
{
	int nr_pages = page_offset ? 2 : 1;
	void *vaddr;

	if (page_addr != PAGE_ALIGN(page_addr) || page_addr + PAGE_SIZE < HIGH_ADDR_BOUNDARY ||
	    !check_addr(page_addr, nr_pages * PAGE_SIZE)) {
		report_skip("Memory above 4G required");
		return;
	}

	vaddr = alloc_vpages(nr_pages);

	for (int i = 0; i < nr_pages; ++i)
		install_page(current_pgtable(), page_addr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE);
	memcpy(vaddr + page_offset, DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(vaddr + page_offset, num_bytes, !highmem_supported);
}

/*
 * Only the write functionality is tested here. There's no easy way to
 * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
 */
static void check_dbcn(void)
{
	unsigned long num_bytes = strlen(DBCN_WRITE_TEST_STRING);
	unsigned long base_addr_lo, base_addr_hi;
	bool highmem_supported = true;
	phys_addr_t paddr;
	struct sbiret ret;
	char *buf;

	report_prefix_push("dbcn");

	if (!sbi_probe(SBI_EXT_DBCN)) {
		report_skip("DBCN extension unavailable");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_DBCN);

	report_prefix_push("write");

	dbcn_write_test(DBCN_WRITE_TEST_STRING, num_bytes, false);

	assert(num_bytes < PAGE_SIZE);

	report_prefix_push("page boundary");
	buf = alloc_pages(1);
	memcpy(&buf[PAGE_SIZE - num_bytes / 2], DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(&buf[PAGE_SIZE - num_bytes / 2], num_bytes, false);
	report_prefix_pop();

	if (env_enabled("SBI_HIGHMEM_NOT_SUPPORTED"))
		highmem_supported = false;

	report_prefix_push("high boundary");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_BOUNDARY"))
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes,
				     HIGH_ADDR_BOUNDARY - PAGE_SIZE, PAGE_SIZE - num_bytes / 2,
				     highmem_supported);
	else
		report_skip("user disabled");
	report_prefix_pop();

	report_prefix_push("high page");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_PAGE")) {
		paddr = getenv("HIGH_PAGE") ? strtoull(getenv("HIGH_PAGE"), NULL, 0) : HIGH_ADDR_BOUNDARY;
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes, paddr, 0, highmem_supported);
	} else {
		report_skip("user disabled");
	}
	report_prefix_pop();

	/* Bytes are read from memory and written to the console */
	report_prefix_push("invalid parameter");
	if (get_invalid_addr(&paddr, false)) {
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		ret = sbi_dbcn_write(1, base_addr_lo, base_addr_hi);
		report(ret.error == SBI_ERR_INVALID_PARAM, "address (error=%ld)", ret.error);
	}
	report_prefix_popn(2);
	report_prefix_push("write_byte");

	puts("DBCN_WRITE_BYTE TEST BYTE: ");
	ret = sbi_dbcn_write_byte(DBCN_WRITE_BYTE_TEST_BYTE);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	puts("DBCN_WRITE_BYTE TEST WORD: "); /* still expect 'a' in the output */
	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, 0x64636261, 0, 0, 0, 0, 0);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	report_prefix_popn(2);
}

void sbi_susp_resume(unsigned long hartid, unsigned long opaque);
jmp_buf sbi_susp_jmp;

#define SBI_SUSP_TIMER_DURATION_US 500000
static void susp_timer(struct pt_regs *regs)
{
	timer_start(SBI_SUSP_TIMER_DURATION_US);
}

struct susp_params {
	unsigned long sleep_type;
	unsigned long resume_addr;
	unsigned long opaque;
	bool returns;
	struct sbiret ret;
};

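/*
 * Save the CSRs the resume path checks, point the resume address at
 * sbi_susp_resume, and stop all secondary harts, since system suspend
 * is only allowed when the calling hart is the only one running.
 */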
static bool susp_basic_prep(unsigned long ctx[], struct susp_params *params)
{
	int cpu, me = smp_processor_id();
	unsigned long *csrs;
	struct sbiret ret;
	cpumask_t mask;

	csrs = (unsigned long *)ctx[SBI_SUSP_CSRS_IDX];
	csrs[SBI_CSR_SSTATUS_IDX] = csr_read(CSR_SSTATUS);
	csrs[SBI_CSR_SIE_IDX] = csr_read(CSR_SIE);
	csrs[SBI_CSR_STVEC_IDX] = csr_read(CSR_STVEC);
	csrs[SBI_CSR_SSCRATCH_IDX] = csr_read(CSR_SSCRATCH);
	csrs[SBI_CSR_SATP_IDX] = csr_read(CSR_SATP);

	memset(params, 0, sizeof(*params));
	params->sleep_type = 0; /* suspend-to-ram */
	params->resume_addr = virt_to_phys(sbi_susp_resume);
	params->opaque = virt_to_phys(ctx);
	params->returns = false;

	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &mask);
	on_cpumask_async(&mask, stop_cpu, NULL);

	/* Wait up to 1s for all harts to stop */
	for (int i = 0; i < 100; i++) {
		int count = 1;

		udelay(10000);

		for_each_present_cpu(cpu) {
			if (cpu == me)
				continue;
			ret = sbi_hart_get_status(cpus[cpu].hartid);
			if (!ret.error && ret.value == SBI_EXT_HSM_STOPPED)
				++count;
		}
		if (count == cpumask_weight(&cpu_present_mask))
			break;
	}

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		if (cpu == me) {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STARTED,
				   "cpu%d is not started", cpu);
		} else {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STOPPED,
				   "cpu%d is not stopped", cpu);
		}
	}

	return true;
}

static void susp_basic_check(unsigned long ctx[], struct susp_params *params)
{
	if (ctx[SBI_SUSP_RESULTS_IDX] == SBI_SUSP_TEST_MASK) {
		report_pass("suspend and resume");
	} else {
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SATP))
			report_fail("SATP set to zero on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SIE))
			report_fail("sstatus.SIE clear on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_HARTID))
			report_fail("a0 is hartid on resume");
	}
}

static bool susp_type_prep(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = 1;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_PARAM;

	return true;
}

#if __riscv_xlen != 32
static bool susp_type_prep2(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = BIT(32);

	return true;
}
#endif

static bool susp_badaddr_prep(unsigned long ctx[], struct susp_params *params)
{
	phys_addr_t badaddr;
	bool r;

	if (!get_invalid_addr(&badaddr, false))
		return false;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->resume_addr = badaddr;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_ADDRESS;

	return true;
}

static bool susp_one_prep(unsigned long ctx[], struct susp_params *params)
{
	int started = 0, cpu, me = smp_processor_id();
	struct sbiret ret;
	bool r;

	if (cpumask_weight(&cpu_present_mask) < 2) {
		report_skip("At least 2 cpus required");
		return false;
	}

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->returns = true;
	params->ret.error = SBI_ERR_DENIED;

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		break;
	}

	on_cpu(cpu, start_cpu, NULL);

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		assert_msg(!ret.error, "HSM get status failed for cpu%d", cpu);
		if (ret.value == SBI_EXT_HSM_STARTED)
			started++;
	}

	assert(started == 2);

	return true;
}

static void check_susp(void)
{
	unsigned long csrs[SBI_CSR_NR_IDX];
	unsigned long ctx[SBI_SUSP_NR_IDX] = {
		[SBI_SUSP_MAGIC_IDX] = SBI_SUSP_MAGIC,
		[SBI_SUSP_CSRS_IDX] = (unsigned long)csrs,
		[SBI_SUSP_HARTID_IDX] = current_thread_info()->hartid,
	};
	enum {
#define SUSP_FIRST_TESTNUM 1
		SUSP_BASIC = SUSP_FIRST_TESTNUM,
		SUSP_TYPE,
		SUSP_TYPE2,
		SUSP_BAD_ADDR,
		SUSP_ONE_ONLINE,
		NR_SUSP_TESTS,
	};
	struct susp_test {
		const char *name;
		bool (*prep)(unsigned long ctx[], struct susp_params *params);
		void (*check)(unsigned long ctx[], struct susp_params *params);
	} susp_tests[] = {
		[SUSP_BASIC]		= { "basic",			susp_basic_prep,	susp_basic_check,	},
		[SUSP_TYPE]		= { "sleep_type",		susp_type_prep,					},
#if __riscv_xlen != 32
		[SUSP_TYPE2]		= { "sleep_type upper bits",	susp_type_prep2,	susp_basic_check	},
#endif
		[SUSP_BAD_ADDR]		= { "bad addr",			susp_badaddr_prep,				},
		[SUSP_ONE_ONLINE]	= { "one cpu online",		susp_one_prep,					},
	};
	struct susp_params params;
	struct sbiret ret;
	int testnum, i;

	report_prefix_push("susp");

	if (!sbi_probe(SBI_EXT_SUSP)) {
		report_skip("SUSP extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_SUSP);

	timer_setup(susp_timer);
	local_irq_enable();
	timer_start(SBI_SUSP_TIMER_DURATION_US);

	ret = sbi_ecall(SBI_EXT_SUSP, 1, 0, 0, 0, 0, 0, 0);
	report(ret.error == SBI_ERR_NOT_SUPPORTED, "funcid != 0 not supported");

	for (i = SUSP_FIRST_TESTNUM; i < NR_SUSP_TESTS; i++) {
		if (!susp_tests[i].name)
			continue;

		report_prefix_push(susp_tests[i].name);

		ctx[SBI_SUSP_TESTNUM_IDX] = i;
		ctx[SBI_SUSP_RESULTS_IDX] = 0;

		local_irq_disable();

		assert(susp_tests[i].prep);
		if (!susp_tests[i].prep(ctx, &params)) {
			report_prefix_pop();
			continue;
		}

		if ((testnum = setjmp(sbi_susp_jmp)) == 0) {
			ret = sbi_system_suspend_raw(params.sleep_type, params.resume_addr, params.opaque);

			local_irq_enable();

			if (!params.returns && ret.error == SBI_ERR_NOT_SUPPORTED) {
				report_fail("probing claims support, but it's not?");
				report_prefix_pop();
				goto out;
			} else if (!params.returns) {
				report_fail("unexpected return with error: %ld, value: %ld", ret.error, ret.value);
			} else {
				if (!report(ret.error == params.ret.error, "got expected sbi.error (%ld)", params.ret.error))
					report_info("expected sbi.error %ld, received %ld", params.ret.error, ret.error);
			}

			report_prefix_pop();
			continue;
		}
		assert(testnum == i);

		local_irq_enable();

		if (susp_tests[i].check)
			susp_tests[i].check(ctx, &params);

		report_prefix_pop();
	}

out:
	local_irq_disable();
	timer_teardown();

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	if (argc > 1 && !strcmp(argv[1], "-h")) {
		help();
		exit(0);
	}

	report_prefix_push("sbi");
	check_base();
	check_time();
	check_ipi();
	check_hsm();
	check_dbcn();
	check_susp();
	check_sse();
	check_fwft();

	return report_summary();
}