// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI verification
 *
 * Copyright (C) 2023, Ventana Micro Systems Inc., Andrew Jones <ajones@ventanamicro.com>
 */
#include <libcflat.h>
#include <alloc_page.h>
#include <cpumask.h>
#include <limits.h>
#include <memregions.h>
#include <on-cpus.h>
#include <rand.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include <vmalloc.h>

#include <asm/barrier.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define	HIGH_ADDR_BOUNDARY	((phys_addr_t)1 << 32)

void check_sse(void);
void check_fwft(void);

static long __labs(long a)
{
	return __builtin_labs(a);
}

static void help(void)
{
	puts("Test SBI\n");
	puts("An environ must be provided that supplies the expected values.\n");
}

static struct sbiret sbi_base(int fid, unsigned long arg0)
{
	return sbi_ecall(SBI_EXT_BASE, fid, arg0, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write(unsigned long num_bytes, unsigned long base_addr_lo,
				    unsigned long base_addr_hi)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			 num_bytes, base_addr_lo, base_addr_hi, 0, 0, 0);
}

static struct sbiret sbi_dbcn_write_byte(uint8_t byte)
{
	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
}

static struct sbiret sbi_hart_suspend_raw(unsigned long suspend_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, resume_addr, opaque, 0, 0, 0);
}

static struct sbiret sbi_system_suspend_raw(unsigned long sleep_type, unsigned long resume_addr, unsigned long opaque)
{
	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
}

void sbi_bad_fid(int ext)
{
	struct sbiret ret = sbi_ecall(ext, 0xbad, 0, 0, 0, 0, 0, 0);
	sbiret_report_error(&ret, SBI_ERR_NOT_SUPPORTED, "Bad FID");
}

static void start_cpu(void *data)
{
	/* nothing to do */
}

static void stop_cpu(void *data)
{
	struct sbiret ret = sbi_hart_stop();
	assert_msg(0, "cpu%d (hartid = %lx) failed to stop with sbiret.error %ld",
		   smp_processor_id(), current_thread_info()->hartid, ret.error);
}

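/*
 * Pick a random present cpu other than the calling one. prng32() supplies a
 * random index and cpumask_next(cpu - 1, ...) rounds it up to the next
 * present cpu at or above that index; if the index lands past the last
 * present cpu (cpumask_next() returns nr_cpus), just try again.
 */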
static int rand_online_cpu(prng_state *ps)
{
	int cpu, me = smp_processor_id();

	for (;;) {
		cpu = prng32(ps) % nr_cpus;
		cpu = cpumask_next(cpu - 1, &cpu_present_mask);
		if (cpu != nr_cpus && cpu != me && cpu_present(cpu))
			break;
	}

	return cpu;
}

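/*
 * Split a physical address into the (lo, hi) register pair used by SBI
 * calls that take 64-bit addresses. On RV64 the whole address fits in
 * 'lo' and 'hi' stays zero; on RV32 'hi' carries bits 63:32.
 */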
static void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo)
{
	*lo = (unsigned long)paddr;
	*hi = 0;
	if (__riscv_xlen == 32)
		*hi = (unsigned long)(paddr >> 32);
}

static bool check_addr(phys_addr_t start, phys_addr_t size)
{
	struct mem_region *r = memregions_find(start);
	return r && r->end - start >= size && r->flags == MR_F_UNUSED;
}

static phys_addr_t get_highest_addr(void)
{
	phys_addr_t highest_end = 0;
	struct mem_region *r;

	for (r = mem_regions; r->end; ++r) {
		if (r->end > highest_end)
			highest_end = r->end;
	}

	return highest_end - 1;
}

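/*
 * Produce an address that is known (or declared via the environ) to be
 * invalid: INVALID_ADDR_AUTO derives one past the highest RAM address,
 * INVALID_ADDR supplies one explicitly, and, when allowed, -1 serves as
 * a default guess.
 */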
static bool get_invalid_addr(phys_addr_t *paddr, bool allow_default)
{
	if (env_enabled("INVALID_ADDR_AUTO")) {
		*paddr = get_highest_addr() + 1;
		return true;
	} else if (allow_default && !getenv("INVALID_ADDR")) {
		*paddr = -1ul;
		return true;
	} else if (env_or_skip("INVALID_ADDR")) {
		*paddr = strtoull(getenv("INVALID_ADDR"), NULL, 0);
		return true;
	}

	return false;
}

static void timer_setup(void (*handler)(struct pt_regs *))
{
	install_irq_handler(IRQ_S_TIMER, handler);
	timer_irq_enable();
}

static void timer_teardown(void)
{
	timer_irq_disable();
	timer_stop();
	install_irq_handler(IRQ_S_TIMER, NULL);
}

static void check_base(void)
{
	struct sbiret ret;
	long expected;

	report_prefix_push("base");

	sbi_bad_fid(SBI_EXT_BASE);

	ret = sbi_base(SBI_EXT_BASE_GET_SPEC_VERSION, 0);

	report_prefix_push("spec_version");
	if (env_or_skip("SBI_SPEC_VERSION")) {
		expected = (long)strtoul(getenv("SBI_SPEC_VERSION"), NULL, 0);
		assert_msg(!(expected & BIT(31)), "SBI spec version bit 31 must be zero");
		assert_msg(__riscv_xlen == 32 || !(expected >> 32), "SBI spec version bits greater than 31 must be zero");
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	/* Spec version layout: bit 31 reserved (zero), bits 30:24 major, bits 23:0 minor */
	ret.value &= 0x7ffffffful;

	if (ret.error || ret.value < 2) {
		report_skip("SBI spec version 0.2 or higher required");
		return;
	}

	report_prefix_push("impl_id");
	if (env_or_skip("SBI_IMPL_ID")) {
		expected = (long)strtoul(getenv("SBI_IMPL_ID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_ID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("impl_version");
	if (env_or_skip("SBI_IMPL_VERSION")) {
		expected = (long)strtoul(getenv("SBI_IMPL_VERSION"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_IMP_VERSION, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("probe_ext");
	expected = getenv("SBI_PROBE_EXT") ? (long)strtoul(getenv("SBI_PROBE_EXT"), NULL, 0) : 1;
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, SBI_EXT_BASE);
	sbiret_check(&ret, 0, expected);
	report_prefix_push("unavailable");
	ret = sbi_base(SBI_EXT_BASE_PROBE_EXT, 0xb000000);
	sbiret_check(&ret, 0, 0);
	report_prefix_popn(2);

	report_prefix_push("mvendorid");
	if (env_or_skip("MVENDORID")) {
		expected = (long)strtoul(getenv("MVENDORID"), NULL, 0);
		assert(__riscv_xlen == 32 || !(expected >> 32));
		ret = sbi_base(SBI_EXT_BASE_GET_MVENDORID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("marchid");
	if (env_or_skip("MARCHID")) {
		expected = (long)strtoul(getenv("MARCHID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MARCHID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_pop();

	report_prefix_push("mimpid");
	if (env_or_skip("MIMPID")) {
		expected = (long)strtoul(getenv("MIMPID"), NULL, 0);
		ret = sbi_base(SBI_EXT_BASE_GET_MIMPID, 0);
		sbiret_check(&ret, 0, expected);
	}
	report_prefix_popn(2);
}

struct timer_info {
	bool timer_works;
	bool mask_timer_irq;
	bool timer_irq_set;
	bool timer_irq_cleared;
	unsigned long timer_irq_count;
};

static struct timer_info timer_info;

static bool timer_irq_pending(void)
{
	return csr_read(CSR_SIP) & IP_TIP;
}

static void timer_irq_handler(struct pt_regs *regs)
{
	timer_info.timer_works = true;

	if (timer_info.timer_irq_count < ULONG_MAX)
		++timer_info.timer_irq_count;

	if (timer_irq_pending())
		timer_info.timer_irq_set = true;

	if (timer_info.mask_timer_irq)
		timer_irq_disable();
	else
		sbi_set_timer(ULONG_MAX);

	if (!timer_irq_pending())
		timer_info.timer_irq_cleared = true;
}

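/*
 * Program a timer SBI_TIMER_DELAY microseconds (default 200ms) into the
 * future and check that the interrupt arrives neither early nor more than
 * SBI_TIMER_MARGIN microseconds (default 200ms) late, and that it arrives
 * exactly once. With mask_timer_irq the handler masks the irq instead of
 * pushing the timer out to ULONG_MAX, exercising both ways of quiescing it.
 */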
static void timer_check_set_timer(bool mask_timer_irq)
{
	struct sbiret ret;
	unsigned long begin, end, duration;
	const char *mask_test_str = mask_timer_irq ? " for mask irq test" : "";
	unsigned long d = getenv("SBI_TIMER_DELAY") ? strtol(getenv("SBI_TIMER_DELAY"), NULL, 0) : 200000;
	unsigned long margin = getenv("SBI_TIMER_MARGIN") ? strtol(getenv("SBI_TIMER_MARGIN"), NULL, 0) : 200000;

	d = usec_to_cycles(d);
	margin = usec_to_cycles(margin);

	timer_info = (struct timer_info){ .mask_timer_irq = mask_timer_irq };
	begin = timer_get_cycles();
	ret = sbi_set_timer(begin + d);

	report(!ret.error, "set timer%s", mask_test_str);
	if (ret.error)
		report_info("set timer%s failed with %ld", mask_test_str, ret.error);

	while ((end = timer_get_cycles()) <= (begin + d + margin) && !timer_info.timer_works)
		cpu_relax();

	report(timer_info.timer_works, "timer interrupt received%s", mask_test_str);
	report(timer_info.timer_irq_set, "pending timer interrupt bit set in irq handler%s", mask_test_str);

	if (!mask_timer_irq) {
		report(timer_info.timer_irq_set && timer_info.timer_irq_cleared,
		       "pending timer interrupt bit cleared by setting timer to -1");
	}

	if (timer_info.timer_works) {
		duration = end - begin;
		report(duration >= d && duration <= (d + margin), "timer delay honored%s", mask_test_str);
	}

	report(timer_info.timer_irq_count == 1, "timer interrupt received exactly once%s", mask_test_str);
}

static void check_time(void)
{
	bool pending;

	report_prefix_push("time");

	if (!sbi_probe(SBI_EXT_TIME)) {
		report_skip("time extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_TIME);

	report_prefix_push("set_timer");

	install_irq_handler(IRQ_S_TIMER, timer_irq_handler);
	local_irq_enable();
	timer_irq_enable();

	timer_check_set_timer(false);

	if (csr_read(CSR_SIE) & IE_TIE)
		timer_check_set_timer(true);
	else
		report_skip("timer irq enable bit is not writable, skipping mask irq test");

	timer_irq_disable();
	sbi_set_timer(0);
	pending = timer_irq_pending();
	report(pending, "timer immediately pending by setting timer to 0");
	sbi_set_timer(ULONG_MAX);
	if (pending)
		report(!timer_irq_pending(), "pending timer cleared while masked");
	else
		report_skip("timer is not pending, skipping timer cleared while masked test");

	local_irq_disable();
	install_irq_handler(IRQ_S_TIMER, NULL);

	report_prefix_popn(2);
}

static bool ipi_received[NR_CPUS];
static bool ipi_timeout[NR_CPUS];
static cpumask_t ipi_done;

static void ipi_timeout_handler(struct pt_regs *regs)
{
	timer_stop();
	ipi_timeout[smp_processor_id()] = true;
}

static void ipi_irq_handler(struct pt_regs *regs)
{
	ipi_ack();
	ipi_received[smp_processor_id()] = true;
}

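/*
 * Runs on the target hart: enable the software and timer interrupts, spin
 * until either the expected IPI or the timeout timer fires, then mark this
 * cpu in ipi_done so the sender can stop waiting.
 */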
static void ipi_hart_wait(void *data)
{
	unsigned long timeout = (unsigned long)data;
	int me = smp_processor_id();

	install_irq_handler(IRQ_S_SOFT, ipi_irq_handler);
	install_irq_handler(IRQ_S_TIMER, ipi_timeout_handler);
	local_ipi_enable();
	timer_irq_enable();
	local_irq_enable();

	timer_start(timeout);
	while (!READ_ONCE(ipi_received[me]) && !READ_ONCE(ipi_timeout[me]))
		cpu_relax();
	local_irq_disable();
	timer_stop();
	local_ipi_disable();
	timer_irq_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
	install_irq_handler(IRQ_S_TIMER, NULL);

	cpumask_set_cpu(me, &ipi_done);
}

static void ipi_hart_check(cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		if (ipi_timeout[cpu]) {
			const char *rec = ipi_received[cpu] ? "but was still received"
							    : "and has still not been received";
			report_fail("ipi timed out on cpu%d %s", cpu, rec);
		}

		ipi_timeout[cpu] = false;
		ipi_received[cpu] = false;
	}
}

static void check_ipi(void)
{
	unsigned long d = getenv("SBI_IPI_TIMEOUT") ? strtol(getenv("SBI_IPI_TIMEOUT"), NULL, 0) : 200000;
	int nr_cpus_present = cpumask_weight(&cpu_present_mask);
	int me = smp_processor_id();
	unsigned long max_hartid = 0;
	unsigned long hartid1, hartid2;
	cpumask_t ipi_receivers;
	static prng_state ps;
	struct sbiret ret;
	int cpu, cpu2;

	ps = prng_init(0xDEADBEEF);

	report_prefix_push("ipi");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("ipi extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_IPI);

	if (nr_cpus_present < 2) {
		report_skip("At least 2 cpus required");
		report_prefix_pop();
		return;
	}

	report_prefix_push("random hart");
	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpu = rand_online_cpu(&ps);
	cpumask_set_cpu(cpu, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_cpu(cpu);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("two in hart_mask");

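	/*
	 * sbi_send_ipi() encodes targets as a bitmask relative to
	 * hart_mask_base, so two harts can only share one call when their
	 * hartids are less than BITS_PER_LONG apart. Find such a pair, use
	 * the smaller hartid as the base, and set bit 0 plus the bit for
	 * the distance between them.
	 */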
	if (nr_cpus_present < 3) {
		report_skip("3 cpus required");
		goto end_two;
	}

	cpu = rand_online_cpu(&ps);
	hartid1 = cpus[cpu].hartid;
	hartid2 = 0;
	for_each_present_cpu(cpu2) {
		if (cpu2 == cpu || cpu2 == me)
			continue;
		hartid2 = cpus[cpu2].hartid;
		if (__labs(hartid2 - hartid1) < BITS_PER_LONG)
			break;
	}
	if (cpu2 == nr_cpus) {
		report_skip("hartids are too sparse");
		goto end_two;
	}

	cpumask_clear(&ipi_done);
	cpumask_clear(&ipi_receivers);
	cpumask_set_cpu(cpu, &ipi_receivers);
	cpumask_set_cpu(cpu2, &ipi_receivers);
	on_cpu_async(cpu, ipi_hart_wait, (void *)d);
	on_cpu_async(cpu2, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi((1UL << __labs(hartid2 - hartid1)) | 1UL, hartid1 < hartid2 ? hartid1 : hartid2);
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
end_two:
	report_prefix_pop();

	report_prefix_push("broadcast");
	cpumask_clear(&ipi_done);
	cpumask_copy(&ipi_receivers, &cpu_present_mask);
	cpumask_clear_cpu(me, &ipi_receivers);
	on_cpumask_async(&ipi_receivers, ipi_hart_wait, (void *)d);
	ret = sbi_send_ipi_broadcast();
	report(ret.error == SBI_SUCCESS, "ipi returned success");
	while (!cpumask_equal(&ipi_done, &ipi_receivers))
		cpu_relax();
	ipi_hart_check(&ipi_receivers);
	report_prefix_pop();

	report_prefix_push("invalid parameters");

	for_each_present_cpu(cpu) {
		if (cpus[cpu].hartid > max_hartid)
			max_hartid = cpus[cpu].hartid;
	}

	/* Test no targets */
	ret = sbi_send_ipi(0, 0);
	sbiret_report_error(&ret, SBI_SUCCESS, "no targets, hart_mask_base is 0");
	ret = sbi_send_ipi(0, 1);
	sbiret_report_error(&ret, SBI_SUCCESS, "no targets, hart_mask_base is 1");

	/* Try the next higher hartid than the max */
	bool kfail = __sbi_get_imp_id() == SBI_IMPL_OPENSBI &&
		     __sbi_get_imp_version() < sbi_impl_opensbi_mk_version(1, 7);
	ret = sbi_send_ipi(2, max_hartid);
	sbiret_kfail_error(kfail, &ret, SBI_ERR_INVALID_PARAM, "hart_mask");
	ret = sbi_send_ipi(1, max_hartid + 1);
	sbiret_kfail_error(kfail, &ret, SBI_ERR_INVALID_PARAM, "hart_mask_base");

	report_prefix_pop();

	report_prefix_pop();
}

unsigned char sbi_hsm_stop_hart[NR_CPUS];
unsigned char sbi_hsm_hart_start_checks[NR_CPUS];
unsigned char sbi_hsm_non_retentive_hart_suspend_checks[NR_CPUS];

static const char * const hart_state_str[] = {
	[SBI_EXT_HSM_STARTED] = "started",
	[SBI_EXT_HSM_STOPPED] = "stopped",
	[SBI_EXT_HSM_SUSPENDED] = "suspended",
};
struct hart_state_transition_info {
	enum sbi_ext_hsm_sid initial_state;
	enum sbi_ext_hsm_sid intermediate_state;
	enum sbi_ext_hsm_sid final_state;
};
static cpumask_t sbi_hsm_started_hart_checks;
static bool sbi_hsm_invalid_hartid_check;
static bool sbi_hsm_timer_fired;
/* Entry points, implemented outside this file, used as hart start and non-retentive resume targets */
extern void sbi_hsm_check_hart_start(void);
extern void sbi_hsm_check_non_retentive_suspend(void);

static void hsm_timer_irq_handler(struct pt_regs *regs)
{
	timer_stop();
	sbi_hsm_timer_fired = true;
}

static void hart_check_already_started(void *data)
{
	struct sbiret ret;
	unsigned long hartid = current_thread_info()->hartid;
	int me = smp_processor_id();

	ret = sbi_hart_start(hartid, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_ALREADY_AVAILABLE)
		cpumask_set_cpu(me, &sbi_hsm_started_hart_checks);
}

static void hart_start_invalid_hartid(void *data)
{
	struct sbiret ret;

	ret = sbi_hart_start(-1UL, virt_to_phys(&start_cpu), 0);

	if (ret.error == SBI_ERR_INVALID_PARAM)
		sbi_hsm_invalid_hartid_check = true;
}

static cpumask_t hsm_suspend_not_supported;

static void ipi_nop(struct pt_regs *regs)
{
	ipi_ack();
}

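/*
 * Runs on the target hart: suspend with the given type and wait to be
 * woken by an IPI. A retentive suspend returns here directly; a
 * non-retentive suspend resumes at resume_addr instead, so returning
 * from the ecall without an error is reported as a failure when
 * 'returns' is false.
 */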
static void hart_suspend_and_wait_ipi(unsigned long suspend_type, unsigned long resume_addr,
				      unsigned long opaque, bool returns, const char *typestr)
{
	unsigned long hartid = current_thread_info()->hartid;
	struct sbiret ret;

	install_irq_handler(IRQ_S_SOFT, ipi_nop);
	local_ipi_enable();
	local_irq_enable();

	ret = sbi_hart_suspend_raw(suspend_type, resume_addr, opaque);
	if (ret.error == SBI_ERR_NOT_SUPPORTED)
		cpumask_set_cpu(smp_processor_id(), &hsm_suspend_not_supported);
	else if (ret.error)
		report_fail("failed to %s cpu%d (hartid = %lx) (error=%ld)",
			    typestr, smp_processor_id(), hartid, ret.error);
	else if (!returns)
		report_fail("failed to %s cpu%d (hartid = %lx) (call should not return)",
			    typestr, smp_processor_id(), hartid);

	local_irq_disable();
	local_ipi_disable();
	install_irq_handler(IRQ_S_SOFT, NULL);
}

static void hart_retentive_suspend(void *data)
{
	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_RETENTIVE, 0, 0, true, "retentive suspend");
}

static void hart_non_retentive_suspend(void *data)
{
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend");
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));

	hart_suspend_and_wait_ipi(suspend_type, 0, 0, true, "retentive suspend with MSB set");
}

/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
static void hart_non_retentive_suspend_with_msb_set(void *data)
{
	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
	unsigned long params[] = {
		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
		[SBI_HSM_HARTID_IDX] = current_thread_info()->hartid,
	};

	hart_suspend_and_wait_ipi(suspend_type,
				  virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
				  false, "non-retentive suspend with MSB set");
}

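/*
 * Wait, bounded by 'duration', for the hart to leave the given state.
 * Returns true if the state changed before the timer fired and no error
 * was seen while polling.
 */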
static bool hart_wait_on_status(unsigned long hartid, enum sbi_ext_hsm_sid status, unsigned long duration)
{
	struct sbiret ret;

	sbi_hsm_timer_fired = false;
	timer_start(duration);

	ret = sbi_hart_get_status(hartid);

	while (!ret.error && ret.value == status && !sbi_hsm_timer_fired) {
		cpu_relax();
		ret = sbi_hart_get_status(hartid);
	}

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("timer fired while waiting on status %u for hartid %lx", status, hartid);
	else if (ret.error)
		report_fail("got %ld while waiting on status %u for hartid %lx", ret.error, status, hartid);

	return !sbi_hsm_timer_fired && !ret.error;
}

static int hart_wait_state_transition(cpumask_t *mask, unsigned long duration,
				      struct hart_state_transition_info *states)
{
	struct sbiret ret;
	unsigned long hartid;
	int cpu, count = 0;

	for_each_cpu(cpu, mask) {
		hartid = cpus[cpu].hartid;
		if (!hart_wait_on_status(hartid, states->initial_state, duration))
			continue;
		if (!hart_wait_on_status(hartid, states->intermediate_state, duration))
			continue;

		ret = sbi_hart_get_status(hartid);
		if (ret.error)
			report_info("hartid %lx get status failed (error=%ld)", hartid, ret.error);
		else if (ret.value != states->final_state)
			report_info("hartid %lx status is not '%s' (ret.value=%ld)", hartid,
				    hart_state_str[states->final_state], ret.value);
		else
			count++;
	}

	return count;
}

static void hart_wait_until_idle(cpumask_t *mask, unsigned long duration)
{
	sbi_hsm_timer_fired = false;
	timer_start(duration);

	while (!cpumask_subset(mask, &cpu_idle_mask) && !sbi_hsm_timer_fired)
		cpu_relax();

	timer_stop();

	if (sbi_hsm_timer_fired)
		report_info("hsm timer fired before all cpus became idle");
}

static void check_hsm(void)
{
	struct sbiret ret;
	unsigned long hartid;
	cpumask_t secondary_cpus_mask, mask, resume_mask;
	struct hart_state_transition_info transition_states;
	bool ipi_unavailable = false;
	int cpu, me = smp_processor_id();
	int max_cpus = getenv("SBI_MAX_CPUS") ? strtol(getenv("SBI_MAX_CPUS"), NULL, 0) : nr_cpus;
	unsigned long hsm_timer_duration = getenv("SBI_HSM_TIMER_DURATION")
					 ? strtol(getenv("SBI_HSM_TIMER_DURATION"), NULL, 0) : 200000;
	unsigned long sbi_hsm_hart_start_params[NR_CPUS * SBI_HSM_NUM_OF_PARAMS];
	int count, check, expected_count, resume_count;

	max_cpus = MIN(MIN(max_cpus, nr_cpus), cpumask_weight(&cpu_present_mask));

	report_prefix_push("hsm");

	if (!sbi_probe(SBI_EXT_HSM)) {
		report_skip("hsm extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_HSM);

	report_prefix_push("hart_get_status");

	hartid = current_thread_info()->hartid;
	ret = sbi_hart_get_status(hartid);

	if (ret.error) {
		report_fail("failed to get status of current hart (error=%ld)", ret.error);
		report_prefix_popn(2);
		return;
	} else if (ret.value != SBI_EXT_HSM_STARTED) {
		report_fail("current hart is not started (ret.value=%ld)", ret.value);
		report_prefix_popn(2);
		return;
	}

	report_pass("status of current hart is started");

	report_prefix_pop();

	if (max_cpus < 2) {
		report_skip("no other cpus to run the remaining hsm tests on");
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_stop");

	cpumask_copy(&secondary_cpus_mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &secondary_cpus_mask);
	timer_setup(hsm_timer_irq_handler);
	local_irq_enable();

	/* Assume that previous tests have not cleaned up and stopped the secondary harts */
	on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	report_prefix_push("hart_start");

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC;
		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_HARTID_IDX] = hartid;

		ret = sbi_hart_start(hartid, virt_to_phys(&sbi_hsm_check_hart_start),
				     virt_to_phys(&sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS]));
		if (ret.error) {
			report_fail("failed to start test on cpu%d (hartid = %lx) (error=%ld)", cpu, hartid, ret.error);
			continue;
		}
	}

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &secondary_cpus_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_hart_start_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with start checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == max_cpus - 1, "all secondary harts started");
	report(check == max_cpus - 1, "all secondary harts have expected register values after hart start");

	report_prefix_pop();

	report_prefix_push("hart_stop");

	memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	/* Reset the stop flags so that we can reuse them after the suspension tests */
	memset(sbi_hsm_stop_hart, 0, sizeof(sbi_hsm_stop_hart));

	report_prefix_pop();

	report_prefix_push("hart_start");

	/* Select just one secondary cpu to run the invalid hartid test */
	on_cpu(cpumask_next(-1, &secondary_cpus_mask), hart_start_invalid_hartid, NULL);

	report(sbi_hsm_invalid_hartid_check, "secondary hart refuses to start with an invalid hartid");

	on_cpumask_async(&secondary_cpus_mask, hart_check_already_started, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STOPPED,
		.intermediate_state = SBI_EXT_HSM_START_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts started");

	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	report(cpumask_weight(&sbi_hsm_started_hart_checks) == max_cpus - 1,
	       "all secondary harts are already started");

	report_prefix_pop();

	report_prefix_push("hart_suspend");

	if (!sbi_probe(SBI_EXT_IPI)) {
		report_skip("skipping suspension tests since the ipi extension is unavailable");
		report_prefix_pop();
		ipi_unavailable = true;
		goto sbi_hsm_hart_stop_tests;
	}

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support retentive suspend");
		report(count == expected_count, "supporting secondary harts retentive suspended");
	} else {
		report_skip("retentive suspend not supported by any harts");
		goto nonret_suspend_tests;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);

	report(count == resume_count, "supporting secondary harts retentive resumed");

nonret_suspend_tests:
	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpumask_async(&secondary_cpus_mask, hart_non_retentive_suspend, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	expected_count = max_cpus - 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count != 0) {
		if (expected_count != max_cpus - 1)
			report_info("not all harts support non-retentive suspend");
		report(count == expected_count, "supporting secondary harts non-retentive suspended");
	} else {
		report_skip("non-retentive suspend not supported by any harts");
		goto hsm_suspend_tests_done;
	}

	cpumask_andnot(&resume_mask, &secondary_cpus_mask, &hsm_suspend_not_supported);
	resume_count = cpumask_weight(&resume_mask);

	/* Ignore the return value since we check the status of each hart anyway */
	sbi_send_ipi_cpumask(&resume_mask);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&resume_mask, hsm_timer_duration, &transition_states);
	check = 0;

	for_each_cpu(cpu, &resume_mask) {
		hartid = cpus[cpu].hartid;
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
			continue;
		}

		if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
		else
			check++;
	}

	report(count == resume_count, "supporting secondary harts non-retentive resumed");
	report(check == resume_count, "supporting secondary harts have expected register values after non-retentive resume");

hsm_suspend_tests_done:
	report_prefix_pop();

sbi_hsm_hart_stop_tests:
	report_prefix_push("hart_stop");

	if (ipi_unavailable || expected_count == 0)
		on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
	else
		memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);

	report(count == max_cpus - 1, "all secondary harts stopped");

	report_prefix_pop();

	if (__riscv_xlen == 32 || ipi_unavailable) {
		local_irq_disable();
		timer_teardown();
		report_prefix_pop();
		return;
	}

	report_prefix_push("hart_suspend");

	/* Select just one secondary cpu to run the suspension tests with the MSB of suspend_type set */
	cpu = cpumask_next(-1, &secondary_cpus_mask);
	hartid = cpus[cpu].hartid;
	cpumask_clear(&mask);
	cpumask_set_cpu(cpu, &mask);

	/* Boot up the secondary cpu and let it proceed to the idle loop */
	on_cpu(cpu, start_cpu, NULL);

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "retentive suspend with MSB set");
	} else {
		report_skip("retentive suspend not supported by cpu%d", cpu);
		goto nonret_suspend_with_msb;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart retentive resumed with MSB set");

nonret_suspend_with_msb:
	/* Reset these flags so that we can reuse them for the non-retentive suspension test */
	sbi_hsm_stop_hart[cpu] = 0;
	sbi_hsm_non_retentive_hart_suspend_checks[cpu] = 0;

	cpumask_clear(&hsm_suspend_not_supported);
	on_cpu_async(cpu, hart_non_retentive_suspend_with_msb_set, NULL);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
		.final_state = SBI_EXT_HSM_SUSPENDED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	expected_count = 1 - cpumask_weight(&hsm_suspend_not_supported);

	if (expected_count) {
		report(count == expected_count, "non-retentive suspend with MSB set");
	} else {
		report_skip("non-retentive suspend not supported by cpu%d", cpu);
		goto hsm_hart_stop_test;
	}

	/* Ignore the return value since we manually validate the status of the hart anyway */
	sbi_send_ipi_cpu(cpu);

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_SUSPENDED,
		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
		.final_state = SBI_EXT_HSM_STARTED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
	check = 0;

	if (count) {
		sbi_hsm_timer_fired = false;
		timer_start(hsm_timer_duration);

		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
			cpu_relax();

		timer_stop();

		if (sbi_hsm_timer_fired) {
			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
		} else {
			if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
				report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
				report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
				report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
				report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
			else
				check = 1;
		}
	}

	report(count, "secondary hart non-retentive resumed with MSB set");
	report(check, "secondary hart has expected register values after non-retentive resume with MSB set");

hsm_hart_stop_test:
	report_prefix_pop();

	report_prefix_push("hart_stop");

	if (expected_count == 0)
		on_cpu_async(cpu, stop_cpu, NULL);
	else
		sbi_hsm_stop_hart[cpu] = 1;

	transition_states = (struct hart_state_transition_info) {
		.initial_state = SBI_EXT_HSM_STARTED,
		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
		.final_state = SBI_EXT_HSM_STOPPED,
	};
	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);

	report(count, "secondary hart stopped after suspension tests with MSB set");

	local_irq_disable();
	timer_teardown();
	report_prefix_popn(2);
}

#define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
#define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')

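/*
 * CONSOLE_WRITE may write fewer bytes than requested, returning the number
 * written in sbiret.value, so keep resubmitting the remainder until
 * everything is out or an error comes back.
 */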
static void dbcn_write_test(const char *s, unsigned long num_bytes, bool xfail)
{
	unsigned long base_addr_lo, base_addr_hi;
	phys_addr_t paddr = virt_to_phys((void *)s);
	int num_calls = 0;
	struct sbiret ret;

	split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);

	do {
		ret = sbi_dbcn_write(num_bytes, base_addr_lo, base_addr_hi);
		num_bytes -= ret.value;
		paddr += ret.value;
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		num_calls++;
	} while (num_bytes != 0 && ret.error == SBI_SUCCESS);

	report_xfail(xfail, ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report_info("%d sbi calls made", num_calls);
}

static void dbcn_high_write_test(const char *s, unsigned long num_bytes,
				 phys_addr_t page_addr, size_t page_offset,
				 bool highmem_supported)
{
	int nr_pages = page_offset ? 2 : 1;
	void *vaddr;

	if (page_addr != PAGE_ALIGN(page_addr) || page_addr + PAGE_SIZE < HIGH_ADDR_BOUNDARY ||
	    !check_addr(page_addr, nr_pages * PAGE_SIZE)) {
		report_skip("Memory above 4G required");
		return;
	}

	vaddr = alloc_vpages(nr_pages);

	for (int i = 0; i < nr_pages; ++i)
		install_page(current_pgtable(), page_addr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE);
	memcpy(vaddr + page_offset, DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(vaddr + page_offset, num_bytes, !highmem_supported);
}

/*
 * Only the write functionality is tested here. There's no easy way to
 * non-interactively test SBI_EXT_DBCN_CONSOLE_READ.
 */
static void check_dbcn(void)
{
	unsigned long num_bytes = strlen(DBCN_WRITE_TEST_STRING);
	unsigned long base_addr_lo, base_addr_hi;
	bool highmem_supported = true;
	phys_addr_t paddr;
	struct sbiret ret;
	char *buf;

	report_prefix_push("dbcn");

	if (!sbi_probe(SBI_EXT_DBCN)) {
		report_skip("DBCN extension unavailable");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_DBCN);

	report_prefix_push("write");

	dbcn_write_test(DBCN_WRITE_TEST_STRING, num_bytes, false);

	assert(num_bytes < PAGE_SIZE);

	report_prefix_push("page boundary");
	buf = alloc_pages(1);
	memcpy(&buf[PAGE_SIZE - num_bytes / 2], DBCN_WRITE_TEST_STRING, num_bytes);
	dbcn_write_test(&buf[PAGE_SIZE - num_bytes / 2], num_bytes, false);
	report_prefix_pop();

	if (env_enabled("SBI_HIGHMEM_NOT_SUPPORTED"))
		highmem_supported = false;

	report_prefix_push("high boundary");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_BOUNDARY"))
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes,
				     HIGH_ADDR_BOUNDARY - PAGE_SIZE, PAGE_SIZE - num_bytes / 2,
				     highmem_supported);
	else
		report_skip("user disabled");
	report_prefix_pop();

	report_prefix_push("high page");
	if (!env_enabled("SBI_DBCN_SKIP_HIGH_PAGE")) {
		paddr = getenv("HIGH_PAGE") ? strtoull(getenv("HIGH_PAGE"), NULL, 0) : HIGH_ADDR_BOUNDARY;
		dbcn_high_write_test(DBCN_WRITE_TEST_STRING, num_bytes, paddr, 0, highmem_supported);
	} else {
		report_skip("user disabled");
	}
	report_prefix_pop();

	/* Bytes are read from memory and written to the console */
	report_prefix_push("invalid parameter");
	if (get_invalid_addr(&paddr, false)) {
		split_phys_addr(paddr, &base_addr_hi, &base_addr_lo);
		ret = sbi_dbcn_write(1, base_addr_lo, base_addr_hi);
		report(ret.error == SBI_ERR_INVALID_PARAM, "address (error=%ld)", ret.error);
	}
	report_prefix_popn(2);

	report_prefix_push("write_byte");

	puts("DBCN_WRITE_BYTE TEST BYTE: ");
	ret = sbi_dbcn_write_byte(DBCN_WRITE_BYTE_TEST_BYTE);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	puts("DBCN_WRITE_BYTE TEST WORD: "); /* still expect 'a' in the output */
	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, 0x64636261, 0, 0, 0, 0, 0);
	puts("\n");
	report(ret.error == SBI_SUCCESS, "write success (error=%ld)", ret.error);
	report(ret.value == 0, "expected ret.value (%ld)", ret.value);

	report_prefix_popn(2);
}

void sbi_susp_resume(unsigned long hartid, unsigned long opaque);
jmp_buf sbi_susp_jmp;

#define SBI_SUSP_TIMER_DURATION_US 500000
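/*
 * Re-arm the timer on every tick so a timer interrupt is always on its way;
 * presumably this provides the wakeup event that brings the suspended
 * system back to the resume address.
 */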
static void susp_timer(struct pt_regs *regs)
{
	timer_start(SBI_SUSP_TIMER_DURATION_US);
}

struct susp_params {
	unsigned long sleep_type;
	unsigned long resume_addr;
	unsigned long opaque;
	bool returns;
	struct sbiret ret;
};

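/*
 * Common prep for the system suspend tests: snapshot the CSRs the resume
 * stub needs, point resume_addr and opaque at sbi_susp_resume and the test
 * context, and stop all secondary harts, since SYSTEM_SUSPEND requires all
 * other harts to be stopped first.
 */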
static bool susp_basic_prep(unsigned long ctx[], struct susp_params *params)
{
	int cpu, me = smp_processor_id();
	unsigned long *csrs;
	struct sbiret ret;
	cpumask_t mask;

	csrs = (unsigned long *)ctx[SBI_SUSP_CSRS_IDX];
	csrs[SBI_CSR_SSTATUS_IDX] = csr_read(CSR_SSTATUS);
	csrs[SBI_CSR_SIE_IDX] = csr_read(CSR_SIE);
	csrs[SBI_CSR_STVEC_IDX] = csr_read(CSR_STVEC);
	csrs[SBI_CSR_SSCRATCH_IDX] = csr_read(CSR_SSCRATCH);
	csrs[SBI_CSR_SATP_IDX] = csr_read(CSR_SATP);

	memset(params, 0, sizeof(*params));
	params->sleep_type = 0; /* suspend-to-ram */
	params->resume_addr = virt_to_phys(sbi_susp_resume);
	params->opaque = virt_to_phys(ctx);
	params->returns = false;

	cpumask_copy(&mask, &cpu_present_mask);
	cpumask_clear_cpu(me, &mask);
	on_cpumask_async(&mask, stop_cpu, NULL);

	/* Wait up to 1s for all harts to stop */
	for (int i = 0; i < 100; i++) {
		int count = 1;

		udelay(10000);

		for_each_present_cpu(cpu) {
			if (cpu == me)
				continue;
			ret = sbi_hart_get_status(cpus[cpu].hartid);
			if (!ret.error && ret.value == SBI_EXT_HSM_STOPPED)
				++count;
		}
		if (count == cpumask_weight(&cpu_present_mask))
			break;
	}

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		if (cpu == me) {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STARTED,
				   "cpu%d is not started", cpu);
		} else {
			assert_msg(!ret.error && ret.value == SBI_EXT_HSM_STOPPED,
				   "cpu%d is not stopped", cpu);
		}
	}

	return true;
}

static void susp_basic_check(unsigned long ctx[], struct susp_params *params)
{
	if (ctx[SBI_SUSP_RESULTS_IDX] == SBI_SUSP_TEST_MASK) {
		report_pass("suspend and resume");
	} else {
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SATP))
			report_fail("SATP set to zero on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_SIE))
			report_fail("sstatus.SIE clear on resume");
		if (!(ctx[SBI_SUSP_RESULTS_IDX] & SBI_SUSP_TEST_HARTID))
			report_fail("a0 is hartid on resume");
	}
}

static bool susp_type_prep(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = 1;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_PARAM;

	return true;
}

#if __riscv_xlen != 32
static bool susp_type_prep2(unsigned long ctx[], struct susp_params *params)
{
	bool r;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->sleep_type = BIT(32);

	return true;
}
#endif

static bool susp_badaddr_prep(unsigned long ctx[], struct susp_params *params)
{
	phys_addr_t badaddr;
	bool r;

	if (!get_invalid_addr(&badaddr, false))
		return false;

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->resume_addr = badaddr;
	params->returns = true;
	params->ret.error = SBI_ERR_INVALID_ADDRESS;

	return true;
}

static bool susp_one_prep(unsigned long ctx[], struct susp_params *params)
{
	int started = 0, cpu, me = smp_processor_id();
	struct sbiret ret;
	bool r;

	if (cpumask_weight(&cpu_present_mask) < 2) {
		report_skip("At least 2 cpus required");
		return false;
	}

	r = susp_basic_prep(ctx, params);
	assert(r);
	params->returns = true;
	params->ret.error = SBI_ERR_DENIED;

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		break;
	}

	on_cpu(cpu, start_cpu, NULL);

	for_each_present_cpu(cpu) {
		ret = sbi_hart_get_status(cpus[cpu].hartid);
		assert_msg(!ret.error, "HSM get status failed for cpu%d", cpu);
		if (ret.value == SBI_EXT_HSM_STARTED)
			started++;
	}

	assert(started == 2);

	return true;
}

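/*
 * setjmp()/longjmp() drive the control flow here: the resume stub
 * (sbi_susp_resume) is expected to longjmp() back through sbi_susp_jmp with
 * the test number, so a nonzero setjmp() return means the system really
 * suspended and resumed, while a plain return from the ecall is only
 * correct for tests that expect an error.
 */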
static void check_susp(void)
{
	unsigned long csrs[SBI_CSR_NR_IDX];
	unsigned long ctx[SBI_SUSP_NR_IDX] = {
		[SBI_SUSP_MAGIC_IDX] = SBI_SUSP_MAGIC,
		[SBI_SUSP_CSRS_IDX] = (unsigned long)csrs,
		[SBI_SUSP_HARTID_IDX] = current_thread_info()->hartid,
	};
	enum {
#define SUSP_FIRST_TESTNUM 1
		SUSP_BASIC = SUSP_FIRST_TESTNUM,
		SUSP_TYPE,
		SUSP_TYPE2,
		SUSP_BAD_ADDR,
		SUSP_ONE_ONLINE,
		NR_SUSP_TESTS,
	};
	struct susp_test {
		const char *name;
		bool (*prep)(unsigned long ctx[], struct susp_params *params);
		void (*check)(unsigned long ctx[], struct susp_params *params);
	} susp_tests[] = {
		[SUSP_BASIC]		= { "basic",			susp_basic_prep,	susp_basic_check,	},
		[SUSP_TYPE]		= { "sleep_type",		susp_type_prep,					},
#if __riscv_xlen != 32
		[SUSP_TYPE2]		= { "sleep_type upper bits",	susp_type_prep2,	susp_basic_check	},
#endif
		[SUSP_BAD_ADDR]		= { "bad addr",			susp_badaddr_prep,				},
		[SUSP_ONE_ONLINE]	= { "one cpu online",		susp_one_prep,					},
	};
	struct susp_params params;
	struct sbiret ret;
	int testnum, i;

	report_prefix_push("susp");

	if (!sbi_probe(SBI_EXT_SUSP)) {
		report_skip("SUSP extension not available");
		report_prefix_pop();
		return;
	}

	sbi_bad_fid(SBI_EXT_SUSP);

	timer_setup(susp_timer);
	local_irq_enable();
	timer_start(SBI_SUSP_TIMER_DURATION_US);

	ret = sbi_ecall(SBI_EXT_SUSP, 1, 0, 0, 0, 0, 0, 0);
	report(ret.error == SBI_ERR_NOT_SUPPORTED, "funcid != 0 not supported");

	for (i = SUSP_FIRST_TESTNUM; i < NR_SUSP_TESTS; i++) {
		if (!susp_tests[i].name)
			continue;

		report_prefix_push(susp_tests[i].name);

		ctx[SBI_SUSP_TESTNUM_IDX] = i;
		ctx[SBI_SUSP_RESULTS_IDX] = 0;

		local_irq_disable();

		assert(susp_tests[i].prep);
		if (!susp_tests[i].prep(ctx, &params)) {
			report_prefix_pop();
			continue;
		}

		if ((testnum = setjmp(sbi_susp_jmp)) == 0) {
			ret = sbi_system_suspend_raw(params.sleep_type, params.resume_addr, params.opaque);

			local_irq_enable();

			if (!params.returns && ret.error == SBI_ERR_NOT_SUPPORTED) {
				report_fail("probing claims support, but the call returned not supported");
				report_prefix_pop();
				goto out;
			} else if (!params.returns) {
				report_fail("unexpected return with error: %ld, value: %ld", ret.error, ret.value);
			} else {
				if (!report(ret.error == params.ret.error, "got expected sbi.error (%ld)", params.ret.error))
					report_info("expected sbi.error %ld, received %ld", params.ret.error, ret.error);
			}

			report_prefix_pop();
			continue;
		}
		assert(testnum == i);

		local_irq_enable();

		if (susp_tests[i].check)
			susp_tests[i].check(ctx, &params);

		report_prefix_pop();
	}

out:
	local_irq_disable();
	timer_teardown();

	report_prefix_pop();
}

int main(int argc, char **argv)
{
	if (argc > 1 && !strcmp(argv[1], "-h")) {
		help();
		exit(0);
	}

	report_prefix_push("sbi");
	check_base();
	check_time();
	check_ipi();
	check_hsm();
	check_dbcn();
	check_susp();
	check_sse();
	check_fwft();

	return report_summary();
}