// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI verification
 *
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <cpumask.h>
#include <limits.h>
#include <memregions.h>
#include <on-cpus.h>
#include <rand.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <vmalloc.h>

#include <asm/barrier.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define	HIGH_ADDR_BOUNDARY	((phys_addr_t)1 << 32)

void check_fwft(void);

static long __labs(long a)
{
	return __builtin_labs(a);
}

static void help(void)
{
	puts("Test SBI\n");
	puts("An environ providing the expected values must be supplied.\n");
}

static struct sbiret sbi_base(int fid, unsigned long arg0)
{
	return sbi_ecall(SBI_EXT_BASE, fid, arg0, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write(unsigned long num_bytes, unsigned long base_addr_lo,
				    unsigned long base_addr_hi)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			 num_bytes, base_addr_lo, base_addr_hi, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write_byte(uint8_t byte)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_hart_suspend(uint32_t suspend_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, resume_addr, opaque, 0, 0, 0);
}

static struct sbiret sbi_system_suspend(uint32_t sleep_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
}

static void start_cpu(void *data)
{
	/* nothing to do */
}

static void stop_cpu(void *data)
{
	struct sbiret ret = sbi_hart_stop();
	assert_msg(0, "cpu%d (hartid = %lx) failed to stop with sbiret.error %ld",
		   smp_processor_id(), current_thread_info()->hartid, ret.error);
}

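/*
 * Pick a random present cpu other than the calling one. The PRNG value
 * seeds the search and cpumask_next() rounds it up to the next present
 * cpu.
 */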
static int rand_online_cpu(prng_state *ps)
{
	int cpu, me = smp_processor_id();

	for (;;) {
		cpu = prng32(ps) % nr_cpus;
		cpu = cpumask_next(cpu - 1, &cpu_present_mask);
		if (cpu != nr_cpus && cpu != me && cpu_present(cpu))
			break;
	}

	return cpu;
}

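/*
 * Split a 64-bit physical address into the (lo, hi) register pair the
 * SBI calling convention expects; hi is only nonzero on 32-bit.
 */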
static void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo)
{
	*lo = (unsigned long)paddr;
	*hi = 0;
	if (__riscv_xlen == 32)
		*hi = (unsigned long)(paddr >> 32);
}

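/* Check that [start, start + size) lies within a single unused memory region. */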
static bool check_addr(phys_addr_t start, phys_addr_t size)
{
	struct mem_region *r = memregions_find(start);
	return r && r->end - start >= size && r->flags == MR_F_UNUSED;
}

static phys_addr_t get_highest_addr(void)
{
	phys_addr_t highest_end = 0;
	struct mem_region *r;

	for (r = mem_regions; r->end; ++r) {
		if (r->end > highest_end)
			highest_end = r->end;
	}

	return highest_end - 1;
}

static bool env_enabled(const char *env)
{
	char *s = getenv(env);

	return s && (*s == '1' || *s == 'y' || *s == 'Y');
}

static bool env_or_skip(const char *env)
{
	if (!getenv(env)) {
		report_skip("missing %s environment variable", env);
		return false;
	}

	return true;
}

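/*
 * Select an invalid physical address to hand to the SBI implementation:
 * one byte past the highest memory region when INVALID_ADDR_AUTO is set,
 * the default of -1, or a user-provided INVALID_ADDR.
 */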
static bool get_invalid_addr(phys_addr_t *paddr, bool allow_default)
{
	if (env_enabled("INVALID_ADDR_AUTO")) {
		*paddr = get_highest_addr() + 1;
		return true;
	} else if (allow_default && !getenv("INVALID_ADDR")) {
		*paddr = -1ul;
		return true;
	} else if (env_or_skip("INVALID_ADDR")) {
		*paddr = strtoull(getenv("INVALID_ADDR"), NULL, 0);
		return true;
	}

	return false;
}

static void check_base(void)
{
	struct sbiret ret;
	long expected;

	report_prefix_push("base");

	ret = sbi_base(SBI_EXT_BASE_GET_SPEC_VERSION, 0);

	report_prefix_push("spec_version");
	if (env_or_skip("SBI_SPEC_VERSION")) {
		expected = (long)strtoul(getenv("SBI_SPEC_VERSION"), NULL, 0);
		assert_msg(!(expected & BIT(31)), "SBI spec version bit 31 must be zero");
		assert_msg(__riscv_xlen == 32 || !(expected >> 32), "SBI spec version bits greater than 31 must be zero");
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	ret.value &= 0x7ffffffful;

	if (ret.error || ret.value < 2) {
		report_skip("SBI spec version 0.2 or higher required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("impl_id");
	if (env_or_skip("SBI_IMPL_ID")) {
		expected = (long)strtoul(getenv("SBI_IMPL_ID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_ID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("impl_version");
	if (env_or_skip("SBI_IMPL_VERSION")) {
		expected = (long)strtoul(getenv("SBI_IMPL_VERSION"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_VERSION, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("probe_ext");
	expected = getenv("SBI_PROBE_EXT") ? (long)strtoul(getenv("SBI_PROBE_EXT"), NULL, 0) : 1;
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, SBI_EXT_BASE);
	sbiret_check(&ret, 0, expected);
	report_prefix_push("unavailable");
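	/* 0xb000000 is assumed to be an unimplemented extension id */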
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, 0xb000000);
	sbiret_check(&ret, 0, 0);
	report_prefix_popn(2);

	report_prefix_push("mvendorid");
	if (env_or_skip("MVENDORID")) {
		expected = (long)strtoul(getenv("MVENDORID"), NULL, 0);
		assert(__riscv_xlen == 32 || !(expected >> 32));
		ret = sbi_base(SBI_EXT_BASE_GET_MVENDORID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("marchid");
	if (env_or_skip("MARCHID")) {
		expected = (long)strtoul(getenv("MARCHID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MARCHID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("mimpid");
	if (env_or_skip("MIMPID")) {
		expected = (long)strtoul(getenv("MIMPID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MIMPID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_popn(2);
}

struct timer_info {
	bool timer_works;
	bool mask_timer_irq;
	bool timer_irq_set;
	bool timer_irq_cleared;
	unsigned long timer_irq_count;
};

static struct timer_info timer_info;

static bool timer_irq_pending(void)
{
	return csr_read(CSR_SIP) & IP_TIP;
}

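/*
 * Record that the timer interrupt arrived and then clear it, either by
 * masking the irq (mask test) or by pushing the timer out to the far
 * future with sbi_set_timer(ULONG_MAX).
 */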
static void timer_irq_handler(struct pt_regs *regs)
{
	timer_info.timer_works = true;

	if (timer_info.timer_irq_count < ULONG_MAX)
		++timer_info.timer_irq_count;

	if (timer_irq_pending())
		timer_info.timer_irq_set = true;

	if (timer_info.mask_timer_irq)
		timer_irq_disable();
	else
		sbi_set_timer(ULONG_MAX);

	if (!timer_irq_pending())
		timer_info.timer_irq_cleared = true;
}

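/*
 * Program a timer SBI_TIMER_DELAY microseconds into the future and check
 * that the interrupt arrives within SBI_TIMER_MARGIN microseconds of the
 * requested expiry (both default to 200ms).
 */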
static void timer_check_set_timer(bool mask_timer_irq)
{
	struct sbiret ret;
	unsigned long begin, end, duration;
	const char *mask_test_str = mask_timer_irq ? " for mask irq test" : "";
	unsigned long d = getenv("SBI_TIMER_DELAY") ? strtol(getenv("SBI_TIMER_DELAY"), NULL, 0) : 200000;
	unsigned long margin = getenv("SBI_TIMER_MARGIN") ? strtol(getenv("SBI_TIMER_MARGIN"), NULL, 0) : 200000;

	d = usec_to_cycles(d);
	margin = usec_to_cycles(margin);

	timer_info = (struct timer_info){ .mask_timer_irq = mask_timer_irq };
	begin = timer_get_cycles();
	ret = sbi_set_timer(begin + d);

	report(!ret.error, "set timer%s", mask_test_str);
	if (ret.error)
		report_info("set timer%s failed with %ld", mask_test_str, ret.error);

	while ((end = timer_get_cycles()) <= (begin + d + margin) && !timer_info.timer_works)
		cpu_relax();

	report(timer_info.timer_works, "timer interrupt received%s", mask_test_str);
	report(timer_info.timer_irq_set, "pending timer interrupt bit set in irq handler%s", mask_test_str);

	if (!mask_timer_irq) {
		report(timer_info.timer_irq_set && timer_info.timer_irq_cleared,
		       "pending timer interrupt bit cleared by setting timer to -1");
	}

	if (timer_info.timer_works) {
		duration = end - begin;
		report(duration >= d && duration <= (d + margin), "timer delay honored%s", mask_test_str);
	}

	report(timer_info.timer_irq_count == 1, "timer interrupt received exactly once%s", mask_test_str);
}

static void check_time(void)
{
	bool pending;

	report_prefix_push("time");

	if (!sbi_probe(SBI_EXT_TIME)) {
		report_skip("time extension not available");
		report_prefix_pop();
		return;
	}

	report_prefix_push("set_timer");

	install_irq_handler(IRQ_S_TIMER, timer_irq_handler);
	local_irq_enable();
	timer_irq_enable();

	timer_check_set_timer(false);

	if (csr_read(CSR_SIE) & IE_TIE)
		timer_check_set_timer(true);
	else
		report_skip("timer irq enable bit is not writable, skipping mask irq test");

	timer_irq_disable();
	sbi_set_timer(0);
	pending = timer_irq_pending();
	report(pending, "timer immediately pending by setting timer to 0");
	sbi_set_timer(ULONG_MAX);
	if (pending)
		report(!timer_irq_pending(), "pending timer cleared while masked");
	else
		report_skip("timer is not pending, skipping timer cleared while masked test");

	local_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);

	report_prefix_popn(2);
}

static bool ipi_received[NR_CPUS];
static bool ipi_timeout[NR_CPUS];
static cpumask_t ipi_done;

static void ipi_timeout_handler(struct pt_regs *regs)
{
	timer_stop();
	ipi_timeout[smp_processor_id()] = true;
}

static void ipi_irq_handler(struct pt_regs *regs)
{
	ipi_ack();
	ipi_received[smp_processor_id()] = true;
}

static void ipi_hart_wait(void *data)
{
	unsigned long timeout = (unsigned long)data;
	int me = smp_processor_id();

	install_irq_handler(IRQ_S_SOFT, ipi_irq_handler);
	install_irq_handler(IRQ_S_TIMER, ipi_timeout_handler);
	local_ipi_enable();
	timer_irq_enable();
	local_irq_enable();

	timer_start(timeout);
	while (!READ_ONCE(ipi_received[me]) && !READ_ONCE(ipi_timeout[me]))
		cpu_relax();
	local_irq_disable();
	timer_stop();
	local_ipi_disable();
	timer_irq_disable();

	cpumask_set_cpu(me, &ipi_done);
}

static void ipi_hart_check(cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (ipi_timeout[cpu]) {
			const char *rec = ipi_received[cpu] ? "but was still received"
							    : "and has still not been received";
			report_fail("ipi timed out on cpu%d %s", cpu, rec);
		}

		ipi_timeout[cpu] = false;
		ipi_received[cpu] = false;
	}
}

static void check_ipi(void)
{
	unsigned long d = getenv("SBI_IPI_TIMEOUT") ? strtol(getenv("SBI_IPI_TIMEOUT"), NULL, 0) : 200000;
	int nr_cpus_present = cpumask_weight(&cpu_present_mask);
	int me = smp_processor_id();
	unsigned long max_hartid = 0;
	unsigned long hartid1, hartid2;
	cpumask_t ipi_receivers;
	static prng_state ps;
	struct sbiret ret;
	int cpu, cpu2;

	ps = prng_init(0xDEADBEEF);

	report_prefix_push("ipi");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("ipi extension not available");
		report_prefix_pop();
		return;
	}

	if (nr_cpus_present < 2) {
		report_skip("At least 2 cpus required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("random hart");
	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpu = rand_online_cpu(&ps);
	cpumask_set_cpu(cpu, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_cpu(cpu);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("two in hart_mask");

	if (nr_cpus_present < 3) {
		report_skip("3 cpus required");
		goto end_two;
	}

	cpu = rand_online_cpu(&ps);
	hartid1 = cpus[cpu].hartid;
	hartid2 = 0;
	for_each_present_cpu(cpu2) {
		if (cpu2 == cpu || cpu2 == me)
			continue;
		hartid2 = cpus[cpu2].hartid;
		if (__labs(hartid2 - hartid1) < BITS_PER_LONG)
			break;
	}
	if (cpu2 == nr_cpus) {
		report_skip("hartids are too sparse");
		goto end_two;
	}

	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpumask_set_cpu(cpu, &ipi_receivers);
	cpumask_set_cpu(cpu2, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	on_cpu_async(cpu2, ipi_hart_wait, (void *)d);
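	/*
	 * Send to both harts with a single call: hart_mask_base is the
	 * smaller hartid (bit 0 of the mask) and the other hart sits at
	 * bit |hartid2 - hartid1|.
	 */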
	ret = sbi_send_ipi((1UL << __labs(hartid2 - hartid1)) | 1UL, hartid1 < hartid2 ? hartid1 : hartid2);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
end_two:
	report_prefix_pop();

	report_prefix_push("broadcast");
	cpumask_clear(&ipi_done);
	cpumask_copy(&ipi_receivers, &cpu_present_mask);
	cpumask_clear_cpu(me, &ipi_receivers);
	on_cpumask_async(&ipi_receivers, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_broadcast();
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("invalid parameters");

	for_each_present_cpu(cpu) {
		if (cpus[cpu].hartid > max_hartid)
			max_hartid = cpus[cpu].hartid;
	}

	/* Try the next higher hartid than the max */
	ret = sbi_send_ipi(2, max_hartid);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask got expected error (%ld)", ret.error);
	ret = sbi_send_ipi(1, max_hartid + 1);
	report_kfail(true, ret.error == SBI_ERR_INVALID_PARAM, "hart_mask_base got expected error (%ld)", ret.error);

	report_prefix_pop();

	report_prefix_pop();
}

unsigned char sbi_hsm_stop_hart[NR_CPUS];
unsigned char sbi_hsm_hart_start_checks[NR_CPUS];
unsigned char sbi_hsm_non_retentive_hart_suspend_checks[NR_CPUS];

static const char * const hart_state_str[] = {
	[SBI_EXT_HSM_STARTED] = "started",
	[SBI_EXT_HSM_STOPPED] = "stopped",
	[SBI_EXT_HSM_SUSPENDED] = "suspended",
};
struct hart_state_transition_info {
	enum sbi_ext_hsm_sid initial_state;
	enum sbi_ext_hsm_sid intermediate_state;
	enum sbi_ext_hsm_sid final_state;
};
static cpumask_t sbi_hsm_started_hart_checks;
static bool sbi_hsm_invalid_hartid_check;
static bool sbi_hsm_timer_fired;
extern void sbi_hsm_check_hart_start(void);
extern void sbi_hsm_check_non_retentive_suspend(void);

static void hsm_timer_irq_handler(struct pt_regs *regs)
{
	timer_stop();
	sbi_hsm_timer_fired = true;
}

static void hsm_timer_setup(void)
{
	install_irq_handler(IRQ_S_TIMER, hsm_timer_irq_handler);
	timer_irq_enable();
}

static void hsm_timer_teardown(void)
{
	timer_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);
}

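/* Starting an already started hart should fail with SBI_ERR_ALREADY_AVAILABLE. */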
static void hart_check_already_started(void *data)
{
	struct sbiret ret;
	unsigned long hartid = current_thread_info()->hartid;
	int me = smp_processor_id();

	ret = sbi_hart_start(hartid, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_ALREADY_AVAILABLE)
		cpumask_set_cpu(me, &sbi_hsm_started_hart_checks);
}

static void hart_start_invalid_hartid(void *data)
{
	struct sbiret ret;

	ret = sbi_hart_start(-1UL, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_INVALID_PARAM)
		sbi_hsm_invalid_hartid_check = true;
}

static void hart_retentive_suspend(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	struct sbiret ret = sbi_hart_suspend(SBI_EXT_HSM_HART_SUSPEND_RETENTIVE, 0, 0);

	if (ret.error)
		report_fail("failed to retentive suspend cpu%d (hartid = %lx) (error=%ld)",
			    smp_processor_id(), hartid, ret.error);
}

static void hart_non_retentive_suspend(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = hartid,
	};
	struct sbiret ret = sbi_hart_suspend(SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE,
					     virt_to_phys(&sbi_hsm_check_non_retentive_suspend),
					     virt_to_phys(params));

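	/* A successful non-retentive suspend does not return here, so reaching this point is a failure. */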
	report_fail("failed to non-retentive suspend cpu%d (hartid = %lx) (error=%ld)",
		    smp_processor_id(), hartid, ret.error);
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_retentive_suspend_with_msb_set(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	struct sbiret ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, 0, 0, 0, 0, 0);

	if (ret.error)
		report_fail("failed to retentive suspend cpu%d (hartid = %lx) with MSB set (error=%ld)",
			    smp_processor_id(), hartid, ret.error);
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_non_retentive_suspend_with_msb_set(void *data)
{
	unsigned long hartid = current_thread_info()->hartid;
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = hartid,
	};

	struct sbiret ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type,
				      virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				      0, 0, 0);

	report_fail("failed to non-retentive suspend cpu%d (hartid = %lx) with MSB set (error=%ld)",
		    smp_processor_id(), hartid, ret.error);
}

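/*
 * Wait for the hart to leave the given state: loop while the hart still
 * reports 'status', bailing out if the hsm timer fires first. Returns
 * true if the hart moved on before the timeout.
 */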
static bool hart_wait_on_status(unsigned long hartid, enum sbi_ext_hsm_sid status, unsigned long duration)
{
	struct sbiret ret;

	sbi_hsm_timer_fired = false;
	timer_start(duration);

	ret = sbi_hart_get_status(hartid);

	while (!ret.error && ret.value == status && !sbi_hsm_timer_fired) {
		cpu_relax();
		ret = sbi_hart_get_status(hartid);
	}

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("timer fired while waiting on status %u for hartid %lx", status, hartid);
	else if (ret.error)
		report_fail("got %ld while waiting on status %u for hartid %lx", ret.error, status, hartid);

	return !sbi_hsm_timer_fired && !ret.error;
}

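/*
 * For each hart in the mask, wait for it to leave the initial and then
 * the intermediate state, and verify it landed in the final state.
 * Returns the number of harts that completed the transition.
 */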
static int hart_wait_state_transition(cpumask_t *mask, unsigned long duration,
				      struct hart_state_transition_info *states)
{
	struct sbiret ret;
	unsigned long hartid;
	int cpu, count = 0;

	for_each_cpu(cpu, mask) {
		hartid = cpus[cpu].hartid;
		if (!hart_wait_on_status(hartid, states->initial_state, duration))
			continue;
		if (!hart_wait_on_status(hartid, states->intermediate_state, duration))
			continue;

		ret = sbi_hart_get_status(hartid);
		if (ret.error)
			report_info("hartid %lx get status failed (error=%ld)", hartid, ret.error);
		else if (ret.value != states->final_state)
			report_info("hartid %lx status is not '%s' (ret.value=%ld)", hartid,
				    hart_state_str[states->final_state], ret.value);
		else
			count++;
	}

	return count;
}

static void hart_wait_until_idle(cpumask_t *mask, unsigned long duration)
{
	sbi_hsm_timer_fired = false;
	timer_start(duration);

	while (!cpumask_subset(mask, &cpu_idle_mask) && !sbi_hsm_timer_fired)
		cpu_relax();

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("hsm timer fired before all cpus became idle");
}

static void check_hsm(void)
{
	struct sbiret ret;
	unsigned long hartid;
	cpumask_t secondary_cpus_mask, mask;
	struct hart_state_transition_info transition_states;
	bool ipi_unavailable = false;
	int cpu, me = smp_processor_id();
	int max_cpus = getenv("SBI_MAX_CPUS") ? strtol(getenv("SBI_MAX_CPUS"), NULL, 0) : nr_cpus;
	unsigned long hsm_timer_duration = getenv("SBI_HSM_TIMER_DURATION")
					 ? strtol(getenv("SBI_HSM_TIMER_DURATION"), NULL, 0) : 200000;
	unsigned long sbi_hsm_hart_start_params[NR_CPUS * SBI_HSM_NUM_OF_PARAMS];
	int count, check;

	max_cpus = MIN(MIN(max_cpus, nr_cpus), cpumask_weight(&cpu_present_mask));

	report_prefix_push("hsm");

	if (!sbi_probe(SBI_EXT_HSM)) {
		report_skip("hsm extension not available");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_get_status");

	hartid = current_thread_info()->hartid;
	ret = sbi_hart_get_status(hartid);

	if (ret.error) {
		report_fail("failed to get status of current hart (error=%ld)", ret.error);
		report_prefix_popn(2);
		return;
	} else if (ret.value != SBI_EXT_HSM_STARTED) {
		report_fail("current hart is not started (ret.value=%ld)", ret.value);
		report_prefix_popn(2);
		return;
	}

	report_pass("status of current hart is started");

	report_prefix_pop();

	if (max_cpus < 2) {
		report_skip("no other cpus to run the remaining hsm tests on");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_stop");

	cpumask_copy(&secondary_cpus_mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &secondary_cpus_mask);
	hsm_timer_setup();
	local_irq_enable();

	/* Assume the secondary harts were left running by previous tests and stop them now */
	on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	report_prefix_push("hart_start");

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_HARTID_IDX] = hartid;

		ret = sbi_hart_start(hartid, virt_to_phys(&sbi_hsm_check_hart_start),
				     virt_to_phys(&sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS]));
		if (ret.error) {
			report_fail("failed to start test on cpu%d (hartid = %lx) (error=%ld)", cpu, hartid, ret.error);
			continue;
		}
	}

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_hart_start_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with start checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts started");
	report(check == max_cpus - 1, "all secondary harts have expected register values after hart start");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	/* Reset the stop flags so that we can reuse them after suspension tests */
	memset(sbi_hsm_stop_hart, 0, sizeof(sbi_hsm_stop_hart));

	report_prefix_pop();

	report_prefix_push("hart_start");

	/* Select just one secondary cpu to run the invalid hartid test */
	on_cpu(cpumask_next(-1, &secondary_cpus_mask), hart_start_invalid_hartid, NULL);

	report(sbi_hsm_invalid_hartid_check, "secondary hart refuses to start with invalid hartid");

	on_cpumask_async(&secondary_cpus_mask, hart_check_already_started, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts started");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	report(cpumask_weight(&sbi_hsm_started_hart_checks) == max_cpus - 1,
	       "all secondary harts are already started");

	report_prefix_pop();

	report_prefix_push("hart_suspend");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("skipping suspension tests since ipi extension is unavailable");
		report_prefix_pop();
		ipi_unavailable = true;
		goto sbi_hsm_hart_stop_tests;
	}

	on_cpumask_async(&secondary_cpus_mask, hart_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts retentive suspended");

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&secondary_cpus_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts retentive resumed");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	on_cpumask_async(&secondary_cpus_mask, hart_non_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts non-retentive suspended");

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&secondary_cpus_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts non-retentive resumed");
	report(check == max_cpus - 1, "all secondary harts have expected register values after non-retentive resume");

	report_prefix_pop();

sbi_hsm_hart_stop_tests:
	report_prefix_push("hart_stop");

	if (ipi_unavailable)
		on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
	else
		memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	if (__riscv_xlen == 32 || ipi_unavailable) {
		local_irq_disable();
		hsm_timer_teardown();
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_suspend");

	/* Select just one secondary cpu to run suspension tests with MSB of suspend type being set */
	cpu = cpumask_next(-1, &secondary_cpus_mask);
	hartid = cpus[cpu].hartid;
	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);

	/* Boot up the secondary cpu and let it proceed to the idle loop */
	on_cpu(cpu, start_cpu, NULL);

	on_cpu_async(cpu, hart_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive suspended with MSB set");

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive resumed with MSB set");

	/* Reset these flags so that we can reuse them for the non-retentive suspension test */
	sbi_hsm_stop_hart[cpu] = 0;
	sbi_hsm_non_retentive_hart_suspend_checks[cpu] = 0;

	on_cpu_async(cpu, hart_non_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart non-retentive suspended with MSB set");

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
	check = 0;

	if (count) {
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
		} else {
			if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
				report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
				report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
				report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
				report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
			else
				check = 1;
		}
	}

	report(count, "secondary hart non-retentive resumed with MSB set");
	report(check, "secondary hart has expected register values after non-retentive resume with MSB set");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	sbi_hsm_stop_hart[cpu] = 1;

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart stopped after suspension tests with MSB set");

	local_irq_disable();
	hsm_timer_teardown();
	report_prefix_popn(2);
}

#define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
#define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')

static void dbcn_write_test(const char *s, unsigned long num_bytes, bool xfail)
{
	unsigned long base_addr_lo, base_addr_hi;
	phys_addr_t paddr = virt_to_phys((void *)s);
	int num_calls = 0;
	struct sbiret ret;

	split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);

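	/* The SBI may write fewer bytes than requested, so loop until all are consumed. */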
	do {
		ret = sbi_dbcn_write(num_bytes, base_addr_lo, base_addr_hi);
		num_bytes -= ret.value;
		paddr += ret.value;
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		num_calls++;
	} while (num_bytes != 0 && ret.error == SBI_SUCCESS);

	report_xfail(xfail, ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report_info("%d sbi calls made", num_calls);
}

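/*
 * Map the given high physical page(s) and run the write test there; with
 * a nonzero page_offset the buffer straddles two pages, letting the
 * write cross the 4G boundary.
 */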
static void dbcn_high_write_test(const char *s, unsigned long num_bytes,
				 phys_addr_t page_addr, size_t page_offset,
				 bool highmem_supported)
{
	int nr_pages = page_offset ? 2 : 1;
	void *vaddr;

	if (page_addr != PAGE_ALIGN(page_addr) || page_addr + PAGE_SIZE < HIGH_ADDR_BOUNDARY ||
	    !check_addr(page_addr, nr_pages * PAGE_SIZE)) {
		report_skip("Memory above 4G required");
		return;
	}

	vaddr = alloc_vpages(nr_pages);

	for (int i = 0; i < nr_pages; ++i)
		install_page(current_pgtable(), page_addr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE);
	memcpy(vaddr + page_offset, DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(vaddr + page_offset, num_bytes, !highmem_supported);
}

/*
 * Only the write functionality is tested here. There's no easy way to
 * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
 */
static void check_dbcn(void)
{
	unsigned long num_bytes = strlen(DBCN_WRITE_TEST_STRING);
	unsigned long base_addr_lo, base_addr_hi;
	bool highmem_supported = true;
	phys_addr_t paddr;
	struct sbiret ret;
	char *buf;

	report_prefix_push("dbcn");

	if (!sbi_probe(SBI_EXT_DBCN)) {
		report_skip("DBCN extension unavailable");
		report_prefix_pop();
		return;
	}

	report_prefix_push("write");

	dbcn_write_test(DBCN_WRITE_TEST_STRING, num_bytes, false);

	assert(num_bytes < PAGE_SIZE);

	report_prefix_push("page boundary");
	buf = alloc_pages(1);
	memcpy(&buf[PAGE_SIZE - num_bytes / 2], DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(&buf[PAGE_SIZE - num_bytes / 2], num_bytes, false);
	report_prefix_pop();

	if (env_enabled("SBI_HIGHMEM_NOT_SUPPORTED"))
		highmem_supported = false;

	report_prefix_push("high boundary");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_BOUNDARY"))
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes,
				     HIGH_ADDR_BOUNDARY - PAGE_SIZE, PAGE_SIZE - num_bytes / 2,
				     highmem_supported);
	else
		report_skip("user disabled");
	report_prefix_pop();

	report_prefix_push("high page");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_PAGE")) {
		paddr = getenv("HIGH_PAGE") ? strtoull(getenv("HIGH_PAGE"), NULL, 0) : HIGH_ADDR_BOUNDARY;
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes, paddr, 0, highmem_supported);
	} else {
		report_skip("user disabled");
	}
	report_prefix_pop();

	/* Bytes are read from memory and written to the console */
	report_prefix_push("invalid parameter");
	if (get_invalid_addr(&paddr, false)) {
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		ret = sbi_dbcn_write(1, base_addr_lo, base_addr_hi);
		report(ret.error == SBI_ERR_INVALID_PARAM, "address (error=%ld)", ret.error);
	}
	report_prefix_popn(2);
	report_prefix_push("write_byte");

	puts("DBCN_WRITE_BYTE TEST BYTE: ");
	ret = sbi_dbcn_write_byte(DBCN_WRITE_BYTE_TEST_BYTE);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	puts("DBCN_WRITE_BYTE TEST WORD: "); /* still expect 'a' in the output */
	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, 0x64636261, 0, 0, 0, 0, 0);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	report_prefix_popn(2);
}

void sbi_susp_resume(unsigned long hartid, unsigned long opaque);
jmp_buf sbi_susp_jmp;

struct susp_params {
	unsigned long sleep_type;
	unsigned long resume_addr;
	unsigned long opaque;
	bool returns;
	struct sbiret ret;
};

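/*
 * System suspend requires all harts other than the calling one to be
 * stopped, so stop the secondaries and wait for them to report the
 * stopped state.
 */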
static bool susp_basic_prep(unsigned long ctx[], struct susp_params *params)
{
	int cpu, me = smp_processor_id();
	struct sbiret ret;
	cpumask_t mask;

	memset(params, 0, sizeof(*params));
	params->sleep_type = 0; /* suspend-to-ram */
	params->resume_addr = virt_to_phys(sbi_susp_resume);
	params->opaque = virt_to_phys(ctx);
	params->returns = false;

	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &mask);
	on_cpumask_async(&mask, stop_cpu, NULL);

	/* Wait up to 1s for all harts to stop */
	for (int i = 0; i < 100; i++) {
		int count = 1;

		udelay(10000);

		for_each_present_cpu(cpu) {
			if (cpu == me)
				continue;
			ret = sbi_hart_get_status(cpus[cpu].hartid);
			if (!ret.error && ret.value == SBI_EXT_HSM_STOPPED)
				++count;
		}
		if (count == cpumask_weight(&cpu_present_mask))
			break;
	}

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		if (cpu == me) {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STARTED,
				   "cpu%d is not started", cpu);
		} else {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STOPPED,
				   "cpu%d is not stopped", cpu);
		}
	}

	return true;
}

static void susp_basic_check(unsigned long ctx[], struct susp_params *params)
{
	if (ctx[SBI_SUSP_RESULTS_IDX] == SBI_SUSP_TEST_MASK) {
		report_pass("suspend and resume");
	} else {
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SATP))
			report_fail("SATP set to zero on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SIE))
			report_fail("sstatus.SIE clear on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_HARTID))
			report_fail("a0 is hartid on resume");
	}
}

static bool susp_type_prep(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = 1;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_PARAM;

	return true;
}

static bool susp_badaddr_prep(unsigned long ctx[], struct susp_params *params)
{
	phys_addr_t badaddr;
	bool r;

	if (!get_invalid_addr(&badaddr, false))
		return false;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->resume_addr = badaddr;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_ADDRESS;

	return true;
}

static bool susp_one_prep(unsigned long ctx[], struct susp_params *params)
{
	int started = 0, cpu, me = smp_processor_id();
	struct sbiret ret;
	bool r;

	if (cpumask_weight(&cpu_present_mask) < 2) {
		report_skip("At least 2 cpus required");
		return false;
	}

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->returns = true;
	params->ret.error = SBI_ERR_DENIED;

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		break;
	}

	on_cpu(cpu, start_cpu, NULL);

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		assert_msg(!ret.error, "HSM get status failed for cpu%d", cpu);
		if (ret.value == SBI_EXT_HSM_STARTED)
			started++;
	}

	assert(started == 2);

	return true;
}

static void check_susp(void)
{
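	/*
	 * ctx is handed to the resume code (sbi_susp_resume) through the
	 * opaque argument; the CSR values saved below let it check state
	 * on resume and report results back through ctx.
	 */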
	unsigned long csrs[] = {
		[SBI_CSR_SSTATUS_IDX] = csr_read(CSR_SSTATUS),
		[SBI_CSR_SIE_IDX] = csr_read(CSR_SIE),
		[SBI_CSR_STVEC_IDX] = csr_read(CSR_STVEC),
		[SBI_CSR_SSCRATCH_IDX] = csr_read(CSR_SSCRATCH),
		[SBI_CSR_SATP_IDX] = csr_read(CSR_SATP),
	};
	unsigned long ctx[] = {
		[SBI_SUSP_MAGIC_IDX] = SBI_SUSP_MAGIC,
		[SBI_SUSP_CSRS_IDX] = (unsigned long)csrs,
		[SBI_SUSP_HARTID_IDX] = current_thread_info()->hartid,
		[SBI_SUSP_TESTNUM_IDX] = 0,
		[SBI_SUSP_RESULTS_IDX] = 0,
	};
	enum {
#define SUSP_FIRST_TESTNUM 1
		SUSP_BASIC = SUSP_FIRST_TESTNUM,
		SUSP_TYPE,
		SUSP_BAD_ADDR,
		SUSP_ONE_ONLINE,
		NR_SUSP_TESTS,
	};
	struct susp_test {
		const char *name;
		bool (*prep)(unsigned long ctx[], struct susp_params *params);
		void (*check)(unsigned long ctx[], struct susp_params *params);
	} susp_tests[] = {
		[SUSP_BASIC]		= { "basic",		susp_basic_prep,	susp_basic_check,	},
		[SUSP_TYPE]		= { "sleep_type",	susp_type_prep,					},
		[SUSP_BAD_ADDR]		= { "bad addr",		susp_badaddr_prep,				},
		[SUSP_ONE_ONLINE]	= { "one cpu online",	susp_one_prep,					},
	};
	struct susp_params params;
	struct sbiret ret;
	int testnum, i;

	local_irq_disable();
	timer_stop();

	report_prefix_push("susp");

	ret = sbi_ecall(SBI_EXT_SUSP, 1, 0, 0, 0, 0, 0, 0);
	report(ret.error == SBI_ERR_NOT_SUPPORTED, "funcid != 0 not supported");

	for (i = SUSP_FIRST_TESTNUM; i < NR_SUSP_TESTS; i++) {
		report_prefix_push(susp_tests[i].name);

		ctx[SBI_SUSP_TESTNUM_IDX] = i;
		ctx[SBI_SUSP_RESULTS_IDX] = 0;

		assert(susp_tests[i].prep);
		if (!susp_tests[i].prep(ctx, &params)) {
			report_prefix_pop();
			continue;
		}

		if ((testnum = setjmp(sbi_susp_jmp)) == 0) {
			ret = sbi_system_suspend(params.sleep_type, params.resume_addr, params.opaque);

			if (!params.returns && ret.error == SBI_ERR_NOT_SUPPORTED) {
				report_skip("SUSP not supported?");
				report_prefix_popn(2);
				return;
			} else if (!params.returns) {
				report_fail("unexpected return with error: %ld, value: %ld", ret.error, ret.value);
			} else {
				report(ret.error == params.ret.error, "expected sbi.error");
				if (ret.error != params.ret.error)
					report_info("expected error %ld, received %ld", params.ret.error, ret.error);
			}

			report_prefix_pop();
			continue;
		}
		assert(testnum == i);

		if (susp_tests[i].check)
			susp_tests[i].check(ctx, &params);

		report_prefix_pop();
	}

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	if (argc > 1 && !strcmp(argv[1], "-h")) {
		help();
		exit(0);
	}

	report_prefix_push("sbi");
	check_base();
	check_time();
	check_ipi();
	check_hsm();
	check_dbcn();
	check_susp();
	check_fwft();

	return report_summary();
}