// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI SSE testsuite
 *
 * Copyright (C) 2025, Rivos Inc., Clément Léger <cleger@rivosinc.com>
 */
#include <alloc.h>
#include <alloc_page.h>
#include <bitops.h>
#include <cpumask.h>
#include <libcflat.h>
#include <on-cpus.h>
#include <stdlib.h>

#include <asm/barrier.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define SSE_STACK_SIZE	PAGE_SIZE

struct sse_event_info {
	uint32_t event_id;
	const char *name;
	bool can_inject;
};

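/*
 * All events defined by the SSE extension. can_inject is not static
 * information: it is probed at runtime from the inject bit of each event's
 * STATUS attribute (see sse_check_event_availability()).
 */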
static struct sse_event_info sse_event_infos[] = {
	{
		.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS,
		.name = "local_high_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP,
		.name = "double_trap",
	},
	{
		.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS,
		.name = "global_high_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW,
		.name = "local_pmu_overflow",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS,
		.name = "local_low_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS,
		.name = "global_low_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE,
		.name = "local_software",
	},
	{
		.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE,
		.name = "global_software",
	},
};

static const char *const attr_names[] = {
	[SBI_SSE_ATTR_STATUS] =			"status",
	[SBI_SSE_ATTR_PRIORITY] =		"priority",
	[SBI_SSE_ATTR_CONFIG] =			"config",
	[SBI_SSE_ATTR_PREFERRED_HART] =		"preferred_hart",
	[SBI_SSE_ATTR_ENTRY_PC] =		"entry_pc",
	[SBI_SSE_ATTR_ENTRY_ARG] =		"entry_arg",
	[SBI_SSE_ATTR_INTERRUPTED_SEPC] =	"interrupted_sepc",
	[SBI_SSE_ATTR_INTERRUPTED_FLAGS] =	"interrupted_flags",
	[SBI_SSE_ATTR_INTERRUPTED_A6] =		"interrupted_a6",
	[SBI_SSE_ATTR_INTERRUPTED_A7] =		"interrupted_a7",
};

static const unsigned long ro_attrs[] = {
	SBI_SSE_ATTR_STATUS,
	SBI_SSE_ATTR_ENTRY_PC,
	SBI_SSE_ATTR_ENTRY_ARG,
};

static const unsigned long interrupted_attrs[] = {
	SBI_SSE_ATTR_INTERRUPTED_SEPC,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS,
	SBI_SSE_ATTR_INTERRUPTED_A6,
	SBI_SSE_ATTR_INTERRUPTED_A7,
};

static const unsigned long interrupted_flags[] = {
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP,
};

static struct sse_event_info *sse_event_get_info(uint32_t event_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sse_event_infos); i++) {
		if (sse_event_infos[i].event_id == event_id)
			return &sse_event_infos[i];
	}

	assert_msg(false, "Invalid event id: %u", event_id);
}

static const char *sse_event_name(uint32_t event_id)
{
	return sse_event_get_info(event_id)->name;
}

static bool sse_event_can_inject(uint32_t event_id)
{
	return sse_event_get_info(event_id)->can_inject;
}

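/*
 * The STATUS attribute packs several fields (state, pending bit, inject
 * bit). Read the whole attribute and extract one field given its mask and
 * shift.
 */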
static struct sbiret sse_get_event_status_field(uint32_t event_id, unsigned long mask,
						unsigned long shift, unsigned long *value)
{
	struct sbiret ret;
	unsigned long status;

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_STATUS, 1, &status);
	if (ret.error) {
		sbiret_report_error(&ret, SBI_SUCCESS, "Get event status");
		return ret;
	}

	*value = (status & mask) >> shift;

	return ret;
}

static struct sbiret sse_event_get_state(uint32_t event_id, enum sbi_sse_state *state)
{
	unsigned long status = 0;
	struct sbiret ret;

	ret = sse_get_event_status_field(event_id, SBI_SSE_ATTR_STATUS_STATE_MASK,
					 SBI_SSE_ATTR_STATUS_STATE_OFFSET, &status);
	*state = status;

	return ret;
}

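/*
 * Global events are dispatched to the hart selected by the PREFERRED_HART
 * attribute. Point the event at the hart running this code so that a
 * subsequent injection lands here.
 */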
static unsigned long sse_global_event_set_current_hart(uint32_t event_id)
{
	struct sbiret ret;
	unsigned long current_hart = current_thread_info()->hartid;

	assert(sbi_sse_event_is_global(event_id));

	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &current_hart);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Set preferred hart"))
		return ret.error;

	return 0;
}

static bool sse_check_state(uint32_t event_id, unsigned long expected_state)
{
	struct sbiret ret;
	enum sbi_sse_state state;

	ret = sse_event_get_state(event_id, &state);
	if (ret.error)
		return false;

	return report(state == expected_state, "event status == %ld", expected_state);
}

static bool sse_event_pending(uint32_t event_id)
{
	unsigned long pending = 0;

	sse_get_event_status_field(event_id, BIT(SBI_SSE_ATTR_STATUS_PENDING_OFFSET),
				   SBI_SSE_ATTR_STATUS_PENDING_OFFSET, &pending);

	return pending;
}

static void *sse_alloc_stack(void)
{
	/*
	 * We assume that SSE_STACK_SIZE always fits in one page. The returned
	 * pointer is the top of the stack: sse-entry.S decrements it before
	 * storing anything on it.
	 */
	assert(SSE_STACK_SIZE <= PAGE_SIZE);

	return alloc_page() + SSE_STACK_SIZE;
}

static void sse_free_stack(void *stack)
{
	free_page(stack - SSE_STACK_SIZE);
}

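/*
 * Exercise both the read and the write path with the same (invalid)
 * parameters; both are expected to fail with the same error code.
 */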
static void sse_read_write_test(uint32_t event_id, unsigned long attr, unsigned long attr_count,
				unsigned long *value, long expected_error, const char *str)
{
	struct sbiret ret;

	ret = sbi_sse_read_attrs(event_id, attr, attr_count, value);
	sbiret_report_error(&ret, expected_error, "Read %s error", str);

	ret = sbi_sse_write_attrs(event_id, attr, attr_count, value);
	sbiret_report_error(&ret, expected_error, "Write %s error", str);
}

#define ALL_ATTRS_COUNT	(SBI_SSE_ATTR_INTERRUPTED_A7 + 1)

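/*
 * Attribute sanity checks: read-only attributes must reject writes, a bulk
 * read of all attributes must match per-attribute reads, reset values must
 * be zero (except the inject bit and the preferred hart, which are
 * implementation-defined), and invalid values, attribute ids, counts and
 * misaligned buffers must fail with the appropriate error code.
 */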
static void sse_test_attrs(uint32_t event_id)
{
	unsigned long value = 0;
	struct sbiret ret;
	void *ptr;
	unsigned long values[ALL_ATTRS_COUNT];
	unsigned int i;
	const char *invalid_hart_str;
	const char *attr_name;

	report_prefix_push("attrs");

	for (i = 0; i < ARRAY_SIZE(ro_attrs); i++) {
		ret = sbi_sse_write_attrs(event_id, ro_attrs[i], 1, &value);
		sbiret_report_error(&ret, SBI_ERR_DENIED, "RO attribute %s not writable",
				    attr_names[ro_attrs[i]]);
	}

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_STATUS, ALL_ATTRS_COUNT, values);
	sbiret_report_error(&ret, SBI_SUCCESS, "Read multiple attributes");

	for (i = SBI_SSE_ATTR_STATUS; i <= SBI_SSE_ATTR_INTERRUPTED_A7; i++) {
		ret = sbi_sse_read_attrs(event_id, i, 1, &value);
		attr_name = attr_names[i];

		sbiret_report_error(&ret, SBI_SUCCESS, "Read single attribute %s", attr_name);
		if (values[i] != value)
			report_fail("Attribute 0x%x single value read (0x%lx) differs from the one read with multiple attributes (0x%lx)",
				    i, value, values[i]);
		/*
		 * Preferred hart reset value is defined by SBI vendor
		 */
		if (i != SBI_SSE_ATTR_PREFERRED_HART) {
			/*
			 * Specification states that injectable bit is implementation dependent
			 * but other bits are zero-initialized.
			 */
			if (i == SBI_SSE_ATTR_STATUS)
				value &= ~BIT(SBI_SSE_ATTR_STATUS_INJECT_OFFSET);
			report(value == 0, "Attribute %s reset value is 0, found %lx", attr_name, value);
		}
	}

#if __riscv_xlen > 32
	value = BIT(32);
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Write invalid prio > 0xFFFFFFFF error");
#endif

	value = ~SBI_SSE_ATTR_CONFIG_ONESHOT;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_CONFIG, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Write invalid config value error");

	if (sbi_sse_event_is_global(event_id)) {
		invalid_hart_str = getenv("INVALID_HART_ID");
		if (!invalid_hart_str)
			value = 0xFFFFFFFFUL;
		else
			value = strtoul(invalid_hart_str, NULL, 0);

		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
		sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Set invalid hart id error");
	} else {
		/* Set hart on local event -> RO */
		value = current_thread_info()->hartid;
		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
		sbiret_report_error(&ret, SBI_ERR_DENIED,
				    "Set hart id on local event error");
	}

	/* Set/get flags, sepc, a6, a7 */
	for (i = 0; i < ARRAY_SIZE(interrupted_attrs); i++) {
		attr_name = attr_names[interrupted_attrs[i]];
		ret = sbi_sse_read_attrs(event_id, interrupted_attrs[i], 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get interrupted %s", attr_name);

		value = ARRAY_SIZE(interrupted_attrs) - i;
		ret = sbi_sse_write_attrs(event_id, interrupted_attrs[i], 1, &value);
		sbiret_report_error(&ret, SBI_ERR_INVALID_STATE,
				    "Set attribute %s invalid state error", attr_name);
	}

	sse_read_write_test(event_id, SBI_SSE_ATTR_STATUS, 0, &value, SBI_ERR_INVALID_PARAM,
			    "attribute attr_count == 0");
	sse_read_write_test(event_id, SBI_SSE_ATTR_INTERRUPTED_A7 + 1, 1, &value, SBI_ERR_BAD_RANGE,
			    "invalid attribute");

	/* Misaligned pointer address */
	ptr = (void *)&value;
	ptr += 1;
	sse_read_write_test(event_id, SBI_SSE_ATTR_STATUS, 1, ptr, SBI_ERR_INVALID_ADDRESS,
			    "attribute with invalid address");

	report_prefix_pop();
}

static void sse_test_register_error(uint32_t event_id)
{
	struct sbiret ret;

	report_prefix_push("register");

	ret = sbi_sse_unregister(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "unregister non-registered event");

	ret = sbi_sse_register_raw(event_id, 0x1, 0);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "register misaligned entry");

	ret = sbi_sse_register(event_id, NULL);
	sbiret_report_error(&ret, SBI_SUCCESS, "register");
	if (ret.error)
		goto done;

	ret = sbi_sse_register(event_id, NULL);
	sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "register used event failure");

	ret = sbi_sse_unregister(event_id);
	sbiret_report_error(&ret, SBI_SUCCESS, "unregister");

done:
	report_prefix_pop();
}

struct sse_simple_test_arg {
	bool done;
	unsigned long expected_a6;
	uint32_t event_id;
};

#if __riscv_xlen > 32

struct alias_test_params {
	unsigned long event_id;
	unsigned long attr_id;
	unsigned long attr_count;
	const char *str;
};

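/*
 * event_id, attr_id and attr_count are 32-bit values in the SSE
 * specification, so on XLEN > 32 the upper register bits should be ignored:
 * BIT(32) + x must behave exactly like x. Write through each aliased
 * combination and read back through all of them, checking that every
 * combination refers to the same attribute storage.
 */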
static void test_alias(uint32_t event_id)
{
	struct alias_test_params *write, *read;
	unsigned long write_value, read_value;
	struct sbiret ret;
	bool err = false;
	int r, w;
	struct alias_test_params params[] = {
		{event_id, SBI_SSE_ATTR_INTERRUPTED_A6, 1, "non aliased"},
		{BIT(32) + event_id, SBI_SSE_ATTR_INTERRUPTED_A6, 1, "aliased event_id"},
		{event_id, BIT(32) + SBI_SSE_ATTR_INTERRUPTED_A6, 1, "aliased attr_id"},
		{event_id, SBI_SSE_ATTR_INTERRUPTED_A6, BIT(32) + 1, "aliased attr_count"},
	};

	report_prefix_push("alias");
	for (w = 0; w < ARRAY_SIZE(params); w++) {
		write = &params[w];

		write_value = 0xDEADBEEF + w;
		ret = sbi_sse_write_attrs(write->event_id, write->attr_id, write->attr_count, &write_value);
		if (ret.error)
			sbiret_report_error(&ret, SBI_SUCCESS, "Write %s, event 0x%lx attr 0x%lx, attr count 0x%lx",
					    write->str, write->event_id, write->attr_id, write->attr_count);

		for (r = 0; r < ARRAY_SIZE(params); r++) {
			read = &params[r];
			read_value = 0;
			ret = sbi_sse_read_attrs(read->event_id, read->attr_id, read->attr_count, &read_value);
			if (ret.error)
				sbiret_report_error(&ret, SBI_SUCCESS,
						    "Read %s, event 0x%lx attr 0x%lx, attr count 0x%lx",
						    read->str, read->event_id, read->attr_id, read->attr_count);

			/* Do not spam output with a lot of reports */
			if (write_value != read_value) {
				err = true;
				report_fail("Write %s, event 0x%lx attr 0x%lx, attr count 0x%lx value %lx != "
					    "read %s, event 0x%lx attr 0x%lx, attr count 0x%lx value %lx",
					    write->str, write->event_id, write->attr_id,
					    write->attr_count, write_value, read->str,
					    read->event_id, read->attr_id, read->attr_count,
					    read_value);
			}
		}
	}

	report(!err, "BIT(32) aliasing tests");
	report_prefix_pop();
}
#endif

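/*
 * Handler executed in S-mode when a registered event fires.
 * interrupted_state[] mirrors the interrupted_attrs[] layout: index 0 is
 * SEPC, 1 is FLAGS, 2 is A6 and 3 is A7. A6/A7 hold the FID/EID of the SBI
 * call that was interrupted, which is what the expected_a6 check at the end
 * relies on.
 */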
static void sse_simple_handler(void *data, struct pt_regs *regs, unsigned int hartid)
{
	struct sse_simple_test_arg *arg = data;
	int i;
	struct sbiret ret;
	const char *attr_name;
	uint32_t event_id = READ_ONCE(arg->event_id), attr;
	unsigned long value, prev_value, flags;
	unsigned long interrupted_state[ARRAY_SIZE(interrupted_attrs)];
	unsigned long modified_state[ARRAY_SIZE(interrupted_attrs)] = {4, 3, 2, 1};
	unsigned long tmp_state[ARRAY_SIZE(interrupted_attrs)];

	report((regs->status & SR_SPP) == SR_SPP, "Interrupted S-mode");
	report(hartid == current_thread_info()->hartid, "Hartid correctly passed");
	sse_check_state(event_id, SBI_SSE_STATE_RUNNING);
	report(!sse_event_pending(event_id), "Event not pending");

	/* Read full interrupted state */
	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				 ARRAY_SIZE(interrupted_attrs), interrupted_state);
	sbiret_report_error(&ret, SBI_SUCCESS, "Save full interrupted state from handler");

	/* Write full modified state and read it */
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				  ARRAY_SIZE(modified_state), modified_state);
	sbiret_report_error(&ret, SBI_SUCCESS,
			    "Write full interrupted state from handler");

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				 ARRAY_SIZE(tmp_state), tmp_state);
	sbiret_report_error(&ret, SBI_SUCCESS, "Read full modified state from handler");

	report(memcmp(tmp_state, modified_state, sizeof(modified_state)) == 0,
	       "Full interrupted state successfully written");

#if __riscv_xlen > 32
	test_alias(event_id);
#endif

	/* Restore full saved state */
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				  ARRAY_SIZE(interrupted_attrs), interrupted_state);
	sbiret_report_error(&ret, SBI_SUCCESS, "Full interrupted state restore from handler");

	/* We test SBI_SSE_ATTR_INTERRUPTED_FLAGS below with specific flag values */
	for (i = 0; i < ARRAY_SIZE(interrupted_attrs); i++) {
		attr = interrupted_attrs[i];
		if (attr == SBI_SSE_ATTR_INTERRUPTED_FLAGS)
			continue;

		attr_name = attr_names[attr];

		ret = sbi_sse_read_attrs(event_id, attr, 1, &prev_value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get attr %s", attr_name);

		value = 0xDEADBEEF + i;
		ret = sbi_sse_write_attrs(event_id, attr, 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Set attr %s", attr_name);

		ret = sbi_sse_read_attrs(event_id, attr, 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get attr %s", attr_name);
		report(value == 0xDEADBEEF + i, "Get attr %s, value: 0x%lx", attr_name, value);

		ret = sbi_sse_write_attrs(event_id, attr, 1, &prev_value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Restore attr %s value", attr_name);
	}

	/* Test all flags allowed for SBI_SSE_ATTR_INTERRUPTED_FLAGS */
	attr = SBI_SSE_ATTR_INTERRUPTED_FLAGS;
	ret = sbi_sse_read_attrs(event_id, attr, 1, &prev_value);
	sbiret_report_error(&ret, SBI_SUCCESS, "Save interrupted flags");

	for (i = 0; i < ARRAY_SIZE(interrupted_flags); i++) {
		flags = interrupted_flags[i];
		ret = sbi_sse_write_attrs(event_id, attr, 1, &flags);
		sbiret_report_error(&ret, SBI_SUCCESS,
				    "Set interrupted flags bit 0x%lx value", flags);
		ret = sbi_sse_read_attrs(event_id, attr, 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get interrupted flags after set");
		report(value == flags, "interrupted flags modified value: 0x%lx", value);
	}

	/* Write invalid bit in flag register */
	flags = SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT << 1;
	ret = sbi_sse_write_attrs(event_id, attr, 1, &flags);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Set invalid flags bit 0x%lx value error",
			    flags);

#if __riscv_xlen > 32
	flags = BIT(32);
	ret = sbi_sse_write_attrs(event_id, attr, 1, &flags);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Set invalid flags bit 0x%lx value error",
			    flags);
#endif

	ret = sbi_sse_write_attrs(event_id, attr, 1, &prev_value);
	sbiret_report_error(&ret, SBI_SUCCESS, "Restore interrupted flags");

	/* Try to change HARTID/Priority while running */
	if (sbi_sse_event_is_global(event_id)) {
		value = current_thread_info()->hartid;
		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
		sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "Set hart id while running error");
	}

	value = 0;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "Set priority while running error");

	value = READ_ONCE(arg->expected_a6);
	report(interrupted_state[2] == value, "Interrupted state a6, expected 0x%lx, got 0x%lx",
	       value, interrupted_state[2]);

	report(interrupted_state[3] == SBI_EXT_SSE,
	       "Interrupted state a7, expected 0x%x, got 0x%lx", SBI_EXT_SSE,
	       interrupted_state[3]);

	WRITE_ONCE(arg->done, true);
}

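/*
 * Single-hart injection scenario:
 *
 *   register -> enable -> hart_mask -> inject (event stays pending)
 *   -> hart_unmask (event fires immediately, a6 == SBI_EXT_SSE_HART_UNMASK)
 *   -> disable -> set CONFIG.ONESHOT -> enable -> inject
 *   (event fires once, then auto-disables back to REGISTERED)
 *   -> clear ONESHOT -> unregister
 *
 * The local state variable tracks how far the sequence got so that the
 * cleanup path can unwind from any failure point.
 */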
static void sse_test_inject_simple(uint32_t event_id)
{
	unsigned long value, error;
	struct sbiret ret;
	enum sbi_sse_state state = SBI_SSE_STATE_UNUSED;
	struct sse_simple_test_arg test_arg = {.event_id = event_id};
	struct sbi_sse_handler_arg args = {
		.handler = sse_simple_handler,
		.handler_data = (void *)&test_arg,
		.stack = sse_alloc_stack(),
	};

	report_prefix_push("simple");

	if (!sse_check_state(event_id, SBI_SSE_STATE_UNUSED))
		goto cleanup;

	ret = sbi_sse_register(event_id, &args);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "register"))
		goto cleanup;

	state = SBI_SSE_STATE_REGISTERED;

	if (!sse_check_state(event_id, SBI_SSE_STATE_REGISTERED))
		goto cleanup;

	if (sbi_sse_event_is_global(event_id)) {
		/* Be sure global events are targeting the current hart */
		error = sse_global_event_set_current_hart(event_id);
		if (error)
			goto cleanup;
	}

	ret = sbi_sse_enable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "enable"))
		goto cleanup;

	state = SBI_SSE_STATE_ENABLED;
	if (!sse_check_state(event_id, SBI_SSE_STATE_ENABLED))
		goto cleanup;

	ret = sbi_sse_hart_mask();
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "hart mask"))
		goto cleanup;

	ret = sbi_sse_inject(event_id, current_thread_info()->hartid);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "injection masked")) {
		sbi_sse_hart_unmask();
		goto cleanup;
	}

	report(READ_ONCE(test_arg.done) == 0, "event masked not handled");

	/*
	 * When unmasking SSE events, the pending event is expected to be
	 * injected immediately, so a6 should be SBI_EXT_SSE_HART_UNMASK.
	 */
	WRITE_ONCE(test_arg.expected_a6, SBI_EXT_SSE_HART_UNMASK);
	ret = sbi_sse_hart_unmask();
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "hart unmask"))
		goto cleanup;

	report(READ_ONCE(test_arg.done) == 1, "event unmasked handled");
	WRITE_ONCE(test_arg.done, 0);
	WRITE_ONCE(test_arg.expected_a6, SBI_EXT_SSE_INJECT);

	/* Set as oneshot and verify it is disabled */
	ret = sbi_sse_disable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Disable event")) {
		/* Nothing we can really do here, the event cannot be disabled */
		goto cleanup;
	}
	state = SBI_SSE_STATE_REGISTERED;

	value = SBI_SSE_ATTR_CONFIG_ONESHOT;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_CONFIG, 1, &value);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Set event attribute as ONESHOT"))
		goto cleanup;

	ret = sbi_sse_enable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Enable event"))
		goto cleanup;
	state = SBI_SSE_STATE_ENABLED;

	ret = sbi_sse_inject(event_id, current_thread_info()->hartid);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "second injection"))
		goto cleanup;

	report(READ_ONCE(test_arg.done) == 1, "event handled");
	WRITE_ONCE(test_arg.done, 0);

	if (!sse_check_state(event_id, SBI_SSE_STATE_REGISTERED))
		goto cleanup;
	state = SBI_SSE_STATE_REGISTERED;

	/* Clear ONESHOT FLAG */
	value = 0;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_CONFIG, 1, &value);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Clear CONFIG.ONESHOT flag"))
		goto cleanup;

	ret = sbi_sse_unregister(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "unregister"))
		goto cleanup;
	state = SBI_SSE_STATE_UNUSED;

	sse_check_state(event_id, SBI_SSE_STATE_UNUSED);

cleanup:
	switch (state) {
	case SBI_SSE_STATE_ENABLED:
		ret = sbi_sse_disable(event_id);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "disable event 0x%x", event_id);
			break;
		}
		/* fallthrough */
	case SBI_SSE_STATE_REGISTERED:
		ret = sbi_sse_unregister(event_id);
		if (ret.error)
			sbiret_report_error(&ret, SBI_SUCCESS, "unregister event 0x%x", event_id);
	default:
		break;
	}

	sse_free_stack(args.stack);
	report_prefix_pop();
}

struct sse_foreign_cpu_test_arg {
	bool done;
	unsigned int expected_cpu;
	uint32_t event_id;
};

static void sse_foreign_cpu_handler(void *data, struct pt_regs *regs, unsigned int hartid)
{
	struct sse_foreign_cpu_test_arg *arg = data;
	unsigned int expected_cpu;

	/* For arg content to be visible */
	smp_rmb();
	expected_cpu = READ_ONCE(arg->expected_cpu);
	report(expected_cpu == current_thread_info()->cpu,
	       "Received event on CPU (%d), expected CPU (%d)", current_thread_info()->cpu,
	       expected_cpu);

	WRITE_ONCE(arg->done, true);
	/* For arg update to be visible for other CPUs */
	smp_wmb();
}

struct sse_local_per_cpu {
	struct sbi_sse_handler_arg args;
	struct sbiret ret;
	struct sse_foreign_cpu_test_arg handler_arg;
	enum sbi_sse_state state;
};

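/*
 * Register and enable the local event on the calling CPU; run on every CPU
 * through on_cpus(). Errors are recorded in the per-CPU slot so that the
 * test sequencer can report them once all CPUs are done.
 */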
static void sse_register_enable_local(void *data)
{
	struct sbiret ret;
	struct sse_local_per_cpu *cpu_args = data;
	struct sse_local_per_cpu *cpu_arg = &cpu_args[current_thread_info()->cpu];
	uint32_t event_id = cpu_arg->handler_arg.event_id;

	ret = sbi_sse_register(event_id, &cpu_arg->args);
	WRITE_ONCE(cpu_arg->ret, ret);
	if (ret.error)
		return;
	cpu_arg->state = SBI_SSE_STATE_REGISTERED;

	ret = sbi_sse_enable(event_id);
	WRITE_ONCE(cpu_arg->ret, ret);
	if (ret.error)
		return;
	cpu_arg->state = SBI_SSE_STATE_ENABLED;
}

static void sbi_sse_disable_unregister_local(void *data)
{
	struct sbiret ret;
	struct sse_local_per_cpu *cpu_args = data;
	struct sse_local_per_cpu *cpu_arg = &cpu_args[current_thread_info()->cpu];
	uint32_t event_id = cpu_arg->handler_arg.event_id;

	switch (cpu_arg->state) {
	case SBI_SSE_STATE_ENABLED:
		ret = sbi_sse_disable(event_id);
		WRITE_ONCE(cpu_arg->ret, ret);
		if (ret.error)
			return;
		/* fallthrough */
	case SBI_SSE_STATE_REGISTERED:
		ret = sbi_sse_unregister(event_id);
		WRITE_ONCE(cpu_arg->ret, ret);
	default:
		break;
	}
}

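/*
 * Deadline (as an absolute timer cycle count) for a handler to complete.
 * Defaults to 3000us and can be overridden through the
 * SSE_EVENT_COMPLETE_TIMEOUT environment variable (in microseconds).
 */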
static uint64_t sse_event_get_complete_timeout(void)
{
	char *event_complete_timeout_str;
	uint64_t timeout;

	event_complete_timeout_str = getenv("SSE_EVENT_COMPLETE_TIMEOUT");
	if (!event_complete_timeout_str)
		timeout = 3000;
	else
		timeout = strtoul(event_complete_timeout_str, NULL, 0);

	return timer_get_cycles() + usec_to_cycles(timeout);
}

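/*
 * Local events are per-hart: register and enable the event on every online
 * CPU, inject one event per hart, then wait (bounded by the timeout above)
 * for each handler to flag its per-CPU argument as done.
 */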
static void sse_test_inject_local(uint32_t event_id)
{
	int cpu;
	uint64_t timeout;
	struct sbiret ret;
	struct sse_local_per_cpu *cpu_args, *cpu_arg;
	struct sse_foreign_cpu_test_arg *handler_arg;

	cpu_args = calloc(NR_CPUS, sizeof(struct sse_local_per_cpu));

	report_prefix_push("local_dispatch");
	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		cpu_arg->handler_arg.event_id = event_id;
		cpu_arg->args.stack = sse_alloc_stack();
		cpu_arg->args.handler = sse_foreign_cpu_handler;
		cpu_arg->args.handler_data = (void *)&cpu_arg->handler_arg;
		cpu_arg->state = SBI_SSE_STATE_UNUSED;
	}

	on_cpus(sse_register_enable_local, cpu_args);
	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		ret = cpu_arg->ret;
		if (ret.error) {
			report_fail("CPU failed to register/enable event: %ld", ret.error);
			goto cleanup;
		}

		handler_arg = &cpu_arg->handler_arg;
		WRITE_ONCE(handler_arg->expected_cpu, cpu);
		/* For handler_arg content to be visible for other CPUs */
		smp_wmb();
		ret = sbi_sse_inject(event_id, cpus[cpu].hartid);
		if (ret.error) {
			report_fail("CPU failed to inject event: %ld", ret.error);
			goto cleanup;
		}
	}

	for_each_online_cpu(cpu) {
		handler_arg = &cpu_args[cpu].handler_arg;
		smp_rmb();

		timeout = sse_event_get_complete_timeout();
		while (!READ_ONCE(handler_arg->done) && timer_get_cycles() < timeout) {
			/* For handler_arg update to be visible */
			smp_rmb();
			cpu_relax();
		}
		report(READ_ONCE(handler_arg->done), "Event handled");
		WRITE_ONCE(handler_arg->done, false);
	}

cleanup:
	on_cpus(sbi_sse_disable_unregister_local, cpu_args);
	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		ret = READ_ONCE(cpu_arg->ret);
		if (ret.error)
			report_fail("CPU failed to disable/unregister event: %ld", ret.error);
	}

	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		sse_free_stack(cpu_arg->args.stack);
	}
	free(cpu_args);

	report_prefix_pop();
}

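/*
 * A global event is registered once and bounced across harts: for each CPU,
 * point PREFERRED_HART at it, enable, inject, wait for the handler to run,
 * then wait for the event to return to the ENABLED state before moving on.
 */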
static void sse_test_inject_global_cpu(uint32_t event_id, unsigned int cpu,
				       struct sse_foreign_cpu_test_arg *test_arg)
{
	unsigned long value;
	struct sbiret ret;
	uint64_t timeout;
	enum sbi_sse_state state;

	WRITE_ONCE(test_arg->expected_cpu, cpu);
	/* For test_arg content to be visible for other CPUs */
	smp_wmb();
	value = cpus[cpu].hartid;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Set preferred hart"))
		return;

	ret = sbi_sse_enable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Enable event"))
		return;

	ret = sbi_sse_inject(event_id, cpus[cpu].hartid);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Inject event"))
		goto disable;

	smp_rmb();
	timeout = sse_event_get_complete_timeout();
	while (!READ_ONCE(test_arg->done) && timer_get_cycles() < timeout) {
		/* For shared test_arg structure */
		smp_rmb();
		cpu_relax();
	}

	report(READ_ONCE(test_arg->done), "event handler called");
	WRITE_ONCE(test_arg->done, false);

	timeout = sse_event_get_complete_timeout();
	/* Wait for event to be back in ENABLED state */
	do {
		ret = sse_event_get_state(event_id, &state);
		if (ret.error)
			goto disable;
		cpu_relax();
	} while (state != SBI_SSE_STATE_ENABLED && timer_get_cycles() < timeout);

	report(state == SBI_SSE_STATE_ENABLED, "Event in enabled state");

disable:
	ret = sbi_sse_disable(event_id);
	sbiret_report_error(&ret, SBI_SUCCESS, "Disable event");
}

864 
sse_test_inject_global(uint32_t event_id)865 static void sse_test_inject_global(uint32_t event_id)
866 {
867 	struct sbiret ret;
868 	unsigned int cpu;
869 	struct sse_foreign_cpu_test_arg test_arg = {.event_id = event_id};
870 	struct sbi_sse_handler_arg args = {
871 		.handler = sse_foreign_cpu_handler,
872 		.handler_data = (void *)&test_arg,
873 		.stack = sse_alloc_stack(),
874 	};
875 
876 	report_prefix_push("global_dispatch");
877 
878 	ret = sbi_sse_register(event_id, &args);
879 	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Register event"))
880 		goto err;
881 
882 	for_each_online_cpu(cpu)
883 		sse_test_inject_global_cpu(event_id, cpu, &test_arg);
884 
885 	ret = sbi_sse_unregister(event_id);
886 	sbiret_report_error(&ret, SBI_SUCCESS, "Unregister event");
887 
888 err:
889 	sse_free_stack(args.stack);
890 	report_prefix_pop();
891 }
892 
893 struct priority_test_arg {
894 	uint32_t event_id;
895 	bool called;
896 	u32 prio;
897 	enum sbi_sse_state state; /* Used for error handling */
898 	struct priority_test_arg *next_event_arg;
899 	void (*check_func)(struct priority_test_arg *arg);
900 };
901 
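/*
 * Each handler injects the event of the next argument in the chain. In the
 * high-priority variant the next event takes precedence over the running
 * one, so it is expected to be dispatched (handled, not pending) before
 * this handler completes. In the low-priority variant the next event must
 * remain pending until the current handler returns.
 */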
static void sse_hi_priority_test_handler(void *arg, struct pt_regs *regs,
					 unsigned int hartid)
{
	struct priority_test_arg *targ = arg;
	struct priority_test_arg *next = targ->next_event_arg;

	targ->called = true;
	if (next) {
		sbi_sse_inject(next->event_id, current_thread_info()->hartid);

		report(!sse_event_pending(next->event_id), "Higher priority event is not pending");
		report(next->called, "Higher priority event was handled");
	}
}

static void sse_low_priority_test_handler(void *arg, struct pt_regs *regs,
					  unsigned int hartid)
{
	struct priority_test_arg *targ = arg;
	struct priority_test_arg *next = targ->next_event_arg;

	targ->called = true;

	if (next) {
		sbi_sse_inject(next->event_id, current_thread_info()->hartid);

		report(sse_event_pending(next->event_id), "Lower priority event is pending");
		report(!next->called, "Lower priority event %s was not handled before %s",
		       sse_event_name(next->event_id), sse_event_name(targ->event_id));
	}
}

static void sse_test_injection_priority_arg(struct priority_test_arg *in_args,
					    unsigned int in_args_size,
					    sbi_sse_handler_fn handler,
					    const char *test_name)
{
	unsigned int i;
	unsigned long value, uret;
	struct sbiret ret;
	uint32_t event_id;
	struct priority_test_arg *arg;
	unsigned int args_size = 0;
	struct sbi_sse_handler_arg event_args[in_args_size];
	struct priority_test_arg *args[in_args_size];
	void *stack;
	struct sbi_sse_handler_arg *event_arg;

	report_prefix_push(test_name);

	for (i = 0; i < in_args_size; i++) {
		arg = &in_args[i];
		arg->state = SBI_SSE_STATE_UNUSED;
		event_id = arg->event_id;
		if (!sse_event_can_inject(event_id))
			continue;

		args[args_size] = arg;
		args_size++;
		/* Mark the slot as stack-less so that cleanup skips it on early error */
		event_args[args_size - 1].stack = NULL;
	}

	if (!args_size) {
		report_skip("No injectable events");
		goto skip;
	}

	for (i = 0; i < args_size; i++) {
		arg = args[i];
		event_id = arg->event_id;
		stack = sse_alloc_stack();

		event_arg = &event_args[i];
		event_arg->handler = handler;
		event_arg->handler_data = (void *)arg;
		event_arg->stack = stack;

		if (i < (args_size - 1))
			arg->next_event_arg = args[i + 1];
		else
			arg->next_event_arg = NULL;

		/* Be sure global events are targeting the current hart */
		if (sbi_sse_event_is_global(event_id)) {
			uret = sse_global_event_set_current_hart(event_id);
			if (uret)
				goto err;
		}

		ret = sbi_sse_register(event_id, event_arg);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "register event %s",
					    sse_event_name(event_id));
			goto err;
		}
		arg->state = SBI_SSE_STATE_REGISTERED;

		value = arg->prio;
		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "set event %s priority",
					    sse_event_name(event_id));
			goto err;
		}
		ret = sbi_sse_enable(event_id);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "enable event %s",
					    sse_event_name(event_id));
			goto err;
		}
		arg->state = SBI_SSE_STATE_ENABLED;
	}

	/* Inject the first event */
	ret = sbi_sse_inject(args[0]->event_id, current_thread_info()->hartid);
	sbiret_report_error(&ret, SBI_SUCCESS, "injection");

	/* Check that all handlers have been called */
	for (i = 0; i < args_size; i++)
		report(args[i]->called, "Event %s handler called", sse_event_name(args[i]->event_id));

err:
	for (i = 0; i < args_size; i++) {
		arg = args[i];
		event_id = arg->event_id;

		switch (arg->state) {
		case SBI_SSE_STATE_ENABLED:
			ret = sbi_sse_disable(event_id);
			if (ret.error) {
				sbiret_report_error(&ret, SBI_SUCCESS, "disable event 0x%x",
						    event_id);
				break;
			}
			/* fallthrough */
		case SBI_SSE_STATE_REGISTERED:
			ret = sbi_sse_unregister(event_id);
			if (ret.error)
				sbiret_report_error(&ret, SBI_SUCCESS, "unregister event 0x%x",
						    event_id);
		default:
			break;
		}

		event_arg = &event_args[i];
		if (event_arg->stack)
			sse_free_stack(event_arg->stack);
	}

skip:
	report_prefix_pop();
}


static struct priority_test_arg hi_prio_args[] = {
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP},
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS},
};

static struct priority_test_arg low_prio_args[] = {
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE},
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE},
};

static struct priority_test_arg prio_args[] = {
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE,		.prio = 5},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE,		.prio = 10},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS,		.prio = 12},
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW,		.prio = 15},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS,	.prio = 20},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS,		.prio = 22},
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS,		.prio = 25},
};

static struct priority_test_arg same_prio_args[] = {
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW,		.prio = 0},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS,		.prio = 0},
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS,		.prio = 10},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE,		.prio = 10},
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE,		.prio = 10},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS,	.prio = 20},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS,		.prio = 20},
};

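/*
 * Four orderings are exercised with the tables above: default priorities
 * with events chained from lowest to highest precedence ("high") and from
 * highest to lowest ("low"), explicitly increasing priority values
 * ("changed"), and ties ("same_prio_args") where the dispatch order is
 * expected to fall back to the event id.
 */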
static void sse_test_injection_priority(void)
{
	report_prefix_push("prio");

	sse_test_injection_priority_arg(hi_prio_args, ARRAY_SIZE(hi_prio_args),
					sse_hi_priority_test_handler, "high");

	sse_test_injection_priority_arg(low_prio_args, ARRAY_SIZE(low_prio_args),
					sse_low_priority_test_handler, "low");

	sse_test_injection_priority_arg(prio_args, ARRAY_SIZE(prio_args),
					sse_low_priority_test_handler, "changed");

	sse_test_injection_priority_arg(same_prio_args, ARRAY_SIZE(same_prio_args),
					sse_low_priority_test_handler, "same_prio_args");

	report_prefix_pop();
}

static void test_invalid_event_id(unsigned long event_id)
{
	struct sbiret ret;
	unsigned long value = 0;

	ret = sbi_sse_register_raw(event_id, (unsigned long)sbi_sse_entry, 0);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "register event_id 0x%lx", event_id);

	ret = sbi_sse_unregister(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "unregister event_id 0x%lx", event_id);

	ret = sbi_sse_enable(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "enable event_id 0x%lx", event_id);

	ret = sbi_sse_disable(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "disable event_id 0x%lx", event_id);

	ret = sbi_sse_inject(event_id, 0);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "inject event_id 0x%lx", event_id);

	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "write attr event_id 0x%lx", event_id);

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "read attr event_id 0x%lx", event_id);
}

static void sse_test_invalid_event_id(void)
{
	report_prefix_push("event_id");

	test_invalid_event_id(SBI_SSE_EVENT_LOCAL_RESERVED_0_START);

	report_prefix_pop();
}

static void sse_check_event_availability(uint32_t event_id, bool *can_inject, bool *supported)
{
	unsigned long status;
	struct sbiret ret;

	*can_inject = false;
	*supported = false;

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_STATUS, 1, &status);
	if (ret.error != SBI_SUCCESS && ret.error != SBI_ERR_NOT_SUPPORTED) {
		report_fail("Get event status != SBI_SUCCESS && != SBI_ERR_NOT_SUPPORTED: %ld",
			    ret.error);
		return;
	}
	if (ret.error == SBI_ERR_NOT_SUPPORTED)
		return;

	*supported = true;
	*can_inject = (status >> SBI_SSE_ATTR_STATUS_INJECT_OFFSET) & 1;
}

static void sse_secondary_boot_and_unmask(void *data)
{
	sbi_sse_hart_unmask();
}

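/*
 * SSE events are masked on each hart at boot. Check that initial state,
 * then verify that unmask/mask succeed exactly once and that redundant
 * calls fail with ALREADY_STARTED/ALREADY_STOPPED respectively.
 */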
static void sse_check_mask(void)
{
	struct sbiret ret;

	/* Events are masked upon boot, check that */
	ret = sbi_sse_hart_mask();
	sbiret_report_error(&ret, SBI_ERR_ALREADY_STOPPED, "hart mask at boot time");

	ret = sbi_sse_hart_unmask();
	sbiret_report_error(&ret, SBI_SUCCESS, "hart unmask");
	ret = sbi_sse_hart_unmask();
	sbiret_report_error(&ret, SBI_ERR_ALREADY_STARTED, "hart unmask twice error");

	ret = sbi_sse_hart_mask();
	sbiret_report_error(&ret, SBI_SUCCESS, "hart mask");
	ret = sbi_sse_hart_mask();
	sbiret_report_error(&ret, SBI_ERR_ALREADY_STOPPED, "hart mask twice");
}


static void run_inject_test(struct sse_event_info *info)
{
	unsigned long event_id = info->event_id;

	if (!info->can_inject) {
		report_skip("Event does not support injection, skipping injection tests");
		return;
	}

	sse_test_inject_simple(event_id);

	if (sbi_sse_event_is_global(event_id))
		sse_test_inject_global(event_id);
	else
		sse_test_inject_local(event_id);
}

void check_sse(void)
{
	struct sse_event_info *info;
	unsigned long i, event_id;
	bool supported;

	report_prefix_push("sse");

	if (!sbi_probe(SBI_EXT_SSE)) {
		report_skip("extension not available");
		report_prefix_pop();
		return;
	}

	if (__sbi_get_imp_id() == SBI_IMPL_OPENSBI &&
	    __sbi_get_imp_version() < sbi_impl_opensbi_mk_version(1, 7)) {
		report_skip("OpenSBI < v1.7 detected, skipping tests");
		report_prefix_pop();
		return;
	}

	sse_check_mask();

	/*
	 * Dummy wakeup of all processors: some of them will be targeted by
	 * global events without going through the usual wakeup path, and this
	 * also unmasks SSE events on every hart.
	 */
	on_cpus(sse_secondary_boot_and_unmask, NULL);

	sse_test_invalid_event_id();

	for (i = 0; i < ARRAY_SIZE(sse_event_infos); i++) {
		info = &sse_event_infos[i];
		event_id = info->event_id;
		report_prefix_push(info->name);
		sse_check_event_availability(event_id, &info->can_inject, &supported);
		if (!supported) {
			report_skip("Event is not supported, skipping tests");
			report_prefix_pop();
			continue;
		}

		sse_test_attrs(event_id);
		sse_test_register_error(event_id);

		run_inject_test(info);

		report_prefix_pop();
	}

	sse_test_injection_priority();

	report_prefix_pop();
}