// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI SSE testsuite
 *
 * Copyright (C) 2025, Rivos Inc., Clément Léger <cleger@rivosinc.com>
 */
#include <alloc.h>
#include <alloc_page.h>
#include <bitops.h>
#include <cpumask.h>
#include <libcflat.h>
#include <on-cpus.h>
#include <stdlib.h>

#include <asm/barrier.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/setup.h>
#include <asm/timer.h>

#include "sbi-tests.h"

#define SSE_STACK_SIZE PAGE_SIZE

struct sse_event_info {
	uint32_t event_id;
	const char *name;
	bool can_inject;
};

static struct sse_event_info sse_event_infos[] = {
	{
		.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS,
		.name = "local_high_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP,
		.name = "double_trap",
	},
	{
		.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS,
		.name = "global_high_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW,
		.name = "local_pmu_overflow",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS,
		.name = "local_low_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS,
		.name = "global_low_prio_ras",
	},
	{
		.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE,
		.name = "local_software",
	},
	{
		.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE,
		.name = "global_software",
	},
};

static const char *const attr_names[] = {
	[SBI_SSE_ATTR_STATUS] = "status",
	[SBI_SSE_ATTR_PRIORITY] = "priority",
	[SBI_SSE_ATTR_CONFIG] = "config",
	[SBI_SSE_ATTR_PREFERRED_HART] = "preferred_hart",
	[SBI_SSE_ATTR_ENTRY_PC] = "entry_pc",
	[SBI_SSE_ATTR_ENTRY_ARG] = "entry_arg",
	[SBI_SSE_ATTR_INTERRUPTED_SEPC] = "interrupted_sepc",
	[SBI_SSE_ATTR_INTERRUPTED_FLAGS] = "interrupted_flags",
	[SBI_SSE_ATTR_INTERRUPTED_A6] = "interrupted_a6",
	[SBI_SSE_ATTR_INTERRUPTED_A7] = "interrupted_a7",
};

static const unsigned long ro_attrs[] = {
	SBI_SSE_ATTR_STATUS,
	SBI_SSE_ATTR_ENTRY_PC,
	SBI_SSE_ATTR_ENTRY_ARG,
};

static const unsigned long interrupted_attrs[] = {
	SBI_SSE_ATTR_INTERRUPTED_SEPC,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS,
	SBI_SSE_ATTR_INTERRUPTED_A6,
	SBI_SSE_ATTR_INTERRUPTED_A7,
};

static const unsigned long interrupted_flags[] = {
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV,
	SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP,
};

static struct sse_event_info *sse_event_get_info(uint32_t event_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sse_event_infos); i++) {
		if (sse_event_infos[i].event_id == event_id)
			return &sse_event_infos[i];
	}

	assert_msg(false, "Invalid event id: %d", event_id);
}

static const char *sse_event_name(uint32_t event_id)
{
	return sse_event_get_info(event_id)->name;
}

static bool sse_event_can_inject(uint32_t event_id)
{
	return sse_event_get_info(event_id)->can_inject;
}

static struct sbiret sse_get_event_status_field(uint32_t event_id, unsigned long mask,
						unsigned long shift, unsigned long *value)
{
	struct sbiret ret;
	unsigned long status;

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_STATUS, 1, &status);
	if (ret.error) {
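		/* Report the unexpected failure; callers check ret.error and bail out */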
		sbiret_report_error(&ret, SBI_SUCCESS, "Get event status");
		return ret;
	}

	*value = (status & mask) >> shift;

	return ret;
}

static struct sbiret sse_event_get_state(uint32_t event_id, enum sbi_sse_state *state)
{
	unsigned long status = 0;
	struct sbiret ret;

	ret = sse_get_event_status_field(event_id, SBI_SSE_ATTR_STATUS_STATE_MASK,
					 SBI_SSE_ATTR_STATUS_STATE_OFFSET, &status);
	*state = status;

	return ret;
}

static unsigned long sse_global_event_set_current_hart(uint32_t event_id)
{
	struct sbiret ret;
	unsigned long current_hart = current_thread_info()->hartid;

	assert(sbi_sse_event_is_global(event_id));

	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &current_hart);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Set preferred hart"))
		return ret.error;

	return 0;
}

static bool sse_check_state(uint32_t event_id, unsigned long expected_state)
{
	struct sbiret ret;
	enum sbi_sse_state state;

	ret = sse_event_get_state(event_id, &state);
	if (ret.error)
		return false;

	return report(state == expected_state, "event status == %ld", expected_state);
}

static bool sse_event_pending(uint32_t event_id)
{
	unsigned long pending = 0;

	sse_get_event_status_field(event_id, BIT(SBI_SSE_ATTR_STATUS_PENDING_OFFSET),
				   SBI_SSE_ATTR_STATUS_PENDING_OFFSET, &pending);

	return pending;
}

static void *sse_alloc_stack(void)
{
	/*
	 * We assume that SSE_STACK_SIZE always fits in one page. The stack
	 * pointer is always decremented before storing anything on it in
	 * sse-entry.S, so return a pointer to the end of the page.
	 */
	assert(SSE_STACK_SIZE <= PAGE_SIZE);

	return (alloc_page() + SSE_STACK_SIZE);
}

static void sse_free_stack(void *stack)
{
	free_page(stack - SSE_STACK_SIZE);
}

static void sse_read_write_test(uint32_t event_id, unsigned long attr, unsigned long attr_count,
				unsigned long *value, long expected_error, const char *str)
{
	struct sbiret ret;

	ret = sbi_sse_read_attrs(event_id, attr, attr_count, value);
	sbiret_report_error(&ret, expected_error, "Read %s error", str);

	ret = sbi_sse_write_attrs(event_id, attr, attr_count, value);
	sbiret_report_error(&ret, expected_error, "Write %s error", str);
}

#define ALL_ATTRS_COUNT (SBI_SSE_ATTR_INTERRUPTED_A7 + 1)

static void sse_test_attrs(uint32_t event_id)
{
	unsigned long value = 0;
	struct sbiret ret;
	void *ptr;
	unsigned long values[ALL_ATTRS_COUNT];
	unsigned int i;
	const char *invalid_hart_str;
	const char *attr_name;

	report_prefix_push("attrs");

	for (i = 0; i < ARRAY_SIZE(ro_attrs); i++) {
		ret = sbi_sse_write_attrs(event_id, ro_attrs[i], 1, &value);
		sbiret_report_error(&ret, SBI_ERR_DENIED, "RO attribute %s not writable",
				    attr_names[ro_attrs[i]]);
	}

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_STATUS, ALL_ATTRS_COUNT, values);
	sbiret_report_error(&ret, SBI_SUCCESS, "Read multiple attributes");

	for (i = SBI_SSE_ATTR_STATUS; i <= SBI_SSE_ATTR_INTERRUPTED_A7; i++) {
		ret = sbi_sse_read_attrs(event_id, i, 1, &value);
		attr_name = attr_names[i];

		sbiret_report_error(&ret, SBI_SUCCESS, "Read single attribute %s", attr_name);
		if (values[i] != value)
			report_fail("Attribute 0x%x single value read (0x%lx) differs from the one read with multiple attributes (0x%lx)",
				    i, value, values[i]);
		/*
		 * The preferred hart reset value is defined by the SBI vendor.
		 */
		if (i != SBI_SSE_ATTR_PREFERRED_HART) {
			/*
			 * The specification states that the injectable bit is
			 * implementation dependent, but the other bits are
			 * zero-initialized.
			 */
			if (i == SBI_SSE_ATTR_STATUS)
				value &= ~BIT(SBI_SSE_ATTR_STATUS_INJECT_OFFSET);
			report(value == 0, "Attribute %s reset value is 0, found %lx", attr_name, value);
		}
	}

#if __riscv_xlen > 32
	value = BIT(32);
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Write invalid prio > 0xFFFFFFFF error");
#endif

	value = ~SBI_SSE_ATTR_CONFIG_ONESHOT;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_CONFIG, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Write invalid config value error");

	if (sbi_sse_event_is_global(event_id)) {
		invalid_hart_str = getenv("INVALID_HART_ID");
		if (!invalid_hart_str)
			value = 0xFFFFFFFFUL;
		else
			value = strtoul(invalid_hart_str, NULL, 0);

		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
		sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Set invalid hart id error");
	} else {
		/* The preferred hart attribute is RO for local events */
		value = current_thread_info()->hartid;
		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
		sbiret_report_error(&ret, SBI_ERR_DENIED,
				    "Set hart id on local event error");
	}

	/* Set/get flags, sepc, a6, a7 */
	for (i = 0; i < ARRAY_SIZE(interrupted_attrs); i++) {
		attr_name = attr_names[interrupted_attrs[i]];
		ret = sbi_sse_read_attrs(event_id, interrupted_attrs[i], 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get interrupted %s", attr_name);

		value = ARRAY_SIZE(interrupted_attrs) - i;
		ret = sbi_sse_write_attrs(event_id, interrupted_attrs[i], 1, &value);
		sbiret_report_error(&ret, SBI_ERR_INVALID_STATE,
				    "Set attribute %s invalid state error", attr_name);
	}

	sse_read_write_test(event_id, SBI_SSE_ATTR_STATUS, 0, &value, SBI_ERR_INVALID_PARAM,
			    "attribute attr_count == 0");
	sse_read_write_test(event_id, SBI_SSE_ATTR_INTERRUPTED_A7 + 1, 1, &value, SBI_ERR_BAD_RANGE,
			    "invalid attribute");

	/* Misaligned pointer address */
	ptr = (void *)&value;
	ptr += 1;
	sse_read_write_test(event_id, SBI_SSE_ATTR_STATUS, 1, ptr, SBI_ERR_INVALID_ADDRESS,
			    "attribute with invalid address");

	report_prefix_pop();
}

static void sse_test_register_error(uint32_t event_id)
{
	struct sbiret ret;

	report_prefix_push("register");

	ret = sbi_sse_unregister(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "unregister non-registered event");

	ret = sbi_sse_register_raw(event_id, 0x1, 0);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "register misaligned entry");

	ret = sbi_sse_register(event_id, NULL);
	sbiret_report_error(&ret, SBI_SUCCESS, "register");
	if (ret.error)
		goto done;

	ret = sbi_sse_register(event_id, NULL);
	sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "register used event failure");

	ret = sbi_sse_unregister(event_id);
	sbiret_report_error(&ret, SBI_SUCCESS, "unregister");

done:
	report_prefix_pop();
}

struct sse_simple_test_arg {
	bool done;
	unsigned long expected_a6;
	uint32_t event_id;
};

#if __riscv_xlen > 32

struct alias_test_params {
	unsigned long event_id;
	unsigned long attr_id;
	unsigned long attr_count;
	const char *str;
};

static void test_alias(uint32_t event_id)
{
	struct alias_test_params *write, *read;
	unsigned long write_value, read_value;
	struct sbiret ret;
	bool err = false;
	int r, w;
	struct alias_test_params params[] = {
		{event_id, SBI_SSE_ATTR_INTERRUPTED_A6, 1, "non aliased"},
		{BIT(32) + event_id, SBI_SSE_ATTR_INTERRUPTED_A6, 1, "aliased event_id"},
		{event_id, BIT(32) + SBI_SSE_ATTR_INTERRUPTED_A6, 1, "aliased attr_id"},
		{event_id, SBI_SSE_ATTR_INTERRUPTED_A6, BIT(32) + 1, "aliased attr_count"},
	};

	report_prefix_push("alias");
	for (w = 0; w < ARRAY_SIZE(params); w++) {
		write = &params[w];

		write_value = 0xDEADBEEF + w;
		ret = sbi_sse_write_attrs(write->event_id, write->attr_id, write->attr_count,
					  &write_value);
		if (ret.error)
			sbiret_report_error(&ret, SBI_SUCCESS,
					    "Write %s, event 0x%lx attr 0x%lx, attr count 0x%lx",
					    write->str, write->event_id, write->attr_id,
					    write->attr_count);

		for (r = 0; r < ARRAY_SIZE(params); r++) {
			read = &params[r];
			read_value = 0;
			ret = sbi_sse_read_attrs(read->event_id, read->attr_id, read->attr_count,
						 &read_value);
			if (ret.error)
				sbiret_report_error(&ret, SBI_SUCCESS,
						    "Read %s, event 0x%lx attr 0x%lx, attr count 0x%lx",
						    read->str, read->event_id, read->attr_id,
						    read->attr_count);

			/* Do not spam the output with a lot of reports */
			if (write_value != read_value) {
				err = true;
				report_fail("Write %s, event 0x%lx attr 0x%lx, attr count 0x%lx value %lx == "
					    "Read %s, event 0x%lx attr 0x%lx, attr count 0x%lx value %lx",
					    write->str, write->event_id, write->attr_id,
					    write->attr_count, write_value, read->str,
					    read->event_id, read->attr_id, read->attr_count,
					    read_value);
			}
		}
	}

	report(!err, "BIT(32) aliasing tests");
	report_prefix_pop();
}
#endif

static void sse_simple_handler(void *data, struct pt_regs *regs, unsigned int hartid)
{
	struct sse_simple_test_arg *arg = data;
	int i;
	struct sbiret ret;
	const char *attr_name;
	uint32_t event_id = READ_ONCE(arg->event_id), attr;
	unsigned long value, prev_value, flags;
	unsigned long interrupted_state[ARRAY_SIZE(interrupted_attrs)];
	unsigned long modified_state[ARRAY_SIZE(interrupted_attrs)] = {4, 3, 2, 1};
	unsigned long tmp_state[ARRAY_SIZE(interrupted_attrs)];

	report((regs->status & SR_SPP) == SR_SPP, "Interrupted S-mode");
	report(hartid == current_thread_info()->hartid, "Hartid correctly passed");
	sse_check_state(event_id, SBI_SSE_STATE_RUNNING);
	report(!sse_event_pending(event_id), "Event not pending");

	/* Read the full interrupted state */
	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				 ARRAY_SIZE(interrupted_attrs), interrupted_state);
	sbiret_report_error(&ret, SBI_SUCCESS, "Save full interrupted state from handler");

	/* Write a fully modified state and read it back */
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				  ARRAY_SIZE(modified_state), modified_state);
	sbiret_report_error(&ret, SBI_SUCCESS,
			    "Write full interrupted state from handler");

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				 ARRAY_SIZE(tmp_state), tmp_state);
	sbiret_report_error(&ret, SBI_SUCCESS, "Read full modified state from handler");

	report(memcmp(tmp_state, modified_state, sizeof(modified_state)) == 0,
	       "Full interrupted state successfully written");

#if __riscv_xlen > 32
	test_alias(event_id);
#endif

	/* Restore the full saved state */
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_INTERRUPTED_SEPC,
				  ARRAY_SIZE(interrupted_attrs), interrupted_state);
	sbiret_report_error(&ret, SBI_SUCCESS, "Full interrupted state restore from handler");

	/* SBI_SSE_ATTR_INTERRUPTED_FLAGS is tested below with specific flag values */
	for (i = 0; i < ARRAY_SIZE(interrupted_attrs); i++) {
		attr = interrupted_attrs[i];
		if (attr == SBI_SSE_ATTR_INTERRUPTED_FLAGS)
			continue;

		attr_name = attr_names[attr];

		ret = sbi_sse_read_attrs(event_id, attr, 1, &prev_value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get attr %s", attr_name);

		value = 0xDEADBEEF + i;
		ret = sbi_sse_write_attrs(event_id, attr, 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Set attr %s", attr_name);

		ret = sbi_sse_read_attrs(event_id, attr, 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get attr %s", attr_name);
		report(value == 0xDEADBEEF + i, "Get attr %s, value: 0x%lx", attr_name, value);

		ret = sbi_sse_write_attrs(event_id, attr, 1, &prev_value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Restore attr %s value", attr_name);
	}

	/* Test all flags allowed for SBI_SSE_ATTR_INTERRUPTED_FLAGS */
	attr = SBI_SSE_ATTR_INTERRUPTED_FLAGS;
	ret = sbi_sse_read_attrs(event_id, attr, 1, &prev_value);
	sbiret_report_error(&ret, SBI_SUCCESS, "Save interrupted flags");

	for (i = 0; i < ARRAY_SIZE(interrupted_flags); i++) {
		flags = interrupted_flags[i];
		ret = sbi_sse_write_attrs(event_id, attr, 1, &flags);
		sbiret_report_error(&ret, SBI_SUCCESS,
				    "Set interrupted flags bit 0x%lx value", flags);
		ret = sbi_sse_read_attrs(event_id, attr, 1, &value);
		sbiret_report_error(&ret, SBI_SUCCESS, "Get interrupted flags after set");
		report(value == flags, "interrupted flags modified value: 0x%lx", value);
	}

	/* Write an invalid bit in the flag register */
	flags = SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT << 1;
	ret = sbi_sse_write_attrs(event_id, attr, 1, &flags);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Set invalid flags bit 0x%lx value error",
			    flags);

#if __riscv_xlen > 32
	flags = BIT(32);
	ret = sbi_sse_write_attrs(event_id, attr, 1, &flags);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM, "Set invalid flags bit 0x%lx value error",
			    flags);
#endif

	ret = sbi_sse_write_attrs(event_id, attr, 1, &prev_value);
	sbiret_report_error(&ret, SBI_SUCCESS, "Restore interrupted flags");

	/* Try to change the hart ID/priority while running */
	if (sbi_sse_event_is_global(event_id)) {
		value = current_thread_info()->hartid;
		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
		sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "Set hart id while running error");
	}

	value = 0;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_STATE, "Set priority while running error");

	value = READ_ONCE(arg->expected_a6);
	report(interrupted_state[2] == value, "Interrupted state a6, expected 0x%lx, got 0x%lx",
	       value, interrupted_state[2]);

	report(interrupted_state[3] == SBI_EXT_SSE,
	       "Interrupted state a7, expected 0x%x, got 0x%lx", SBI_EXT_SSE,
	       interrupted_state[3]);

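	/* Let the injecting test know the handler ran to completion */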
	WRITE_ONCE(arg->done, true);
}

static void sse_test_inject_simple(uint32_t event_id)
{
	unsigned long value, error;
	struct sbiret ret;
	enum sbi_sse_state state = SBI_SSE_STATE_UNUSED;
	struct sse_simple_test_arg test_arg = {.event_id = event_id};
	struct sbi_sse_handler_arg args = {
		.handler = sse_simple_handler,
		.handler_data = (void *)&test_arg,
		.stack = sse_alloc_stack(),
	};

	report_prefix_push("simple");

	if (!sse_check_state(event_id, SBI_SSE_STATE_UNUSED))
		goto cleanup;

	ret = sbi_sse_register(event_id, &args);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "register"))
		goto cleanup;

	state = SBI_SSE_STATE_REGISTERED;

	if (!sse_check_state(event_id, SBI_SSE_STATE_REGISTERED))
		goto cleanup;

	if (sbi_sse_event_is_global(event_id)) {
		/* Make sure global events target the current hart */
		error = sse_global_event_set_current_hart(event_id);
		if (error)
			goto cleanup;
	}

	ret = sbi_sse_enable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "enable"))
		goto cleanup;

	state = SBI_SSE_STATE_ENABLED;
	if (!sse_check_state(event_id, SBI_SSE_STATE_ENABLED))
		goto cleanup;

	ret = sbi_sse_hart_mask();
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "hart mask"))
		goto cleanup;

	ret = sbi_sse_inject(event_id, current_thread_info()->hartid);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "injection masked")) {
		sbi_sse_hart_unmask();
		goto cleanup;
	}

	report(READ_ONCE(test_arg.done) == 0, "event masked not handled");

	/*
	 * When unmasking SSE events, we expect the pending event to be
	 * injected immediately, so a6 should be SBI_EXT_SSE_HART_UNMASK.
	 */
	WRITE_ONCE(test_arg.expected_a6, SBI_EXT_SSE_HART_UNMASK);
	ret = sbi_sse_hart_unmask();
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "hart unmask"))
		goto cleanup;

	report(READ_ONCE(test_arg.done) == 1, "event unmasked handled");
	WRITE_ONCE(test_arg.done, 0);
	WRITE_ONCE(test_arg.expected_a6, SBI_EXT_SSE_INJECT);

	/* Set the event as ONESHOT and verify it is disabled after being handled */
	ret = sbi_sse_disable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Disable event")) {
		/* Nothing we can really do here, the event cannot be disabled */
		goto cleanup;
	}
	state = SBI_SSE_STATE_REGISTERED;

	value = SBI_SSE_ATTR_CONFIG_ONESHOT;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_CONFIG, 1, &value);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Set event attribute as ONESHOT"))
		goto cleanup;

	ret = sbi_sse_enable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Enable event"))
		goto cleanup;
	state = SBI_SSE_STATE_ENABLED;

	ret = sbi_sse_inject(event_id, current_thread_info()->hartid);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "second injection"))
		goto cleanup;

	report(READ_ONCE(test_arg.done) == 1, "event handled");
	WRITE_ONCE(test_arg.done, 0);

	if (!sse_check_state(event_id, SBI_SSE_STATE_REGISTERED))
		goto cleanup;
	state = SBI_SSE_STATE_REGISTERED;

	/* Clear the ONESHOT flag */
	value = 0;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_CONFIG, 1, &value);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Clear CONFIG.ONESHOT flag"))
		goto cleanup;

	ret = sbi_sse_unregister(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "unregister"))
		goto cleanup;
	state = SBI_SSE_STATE_UNUSED;

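	/* After a successful unregister, the event must be back in the UNUSED state */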
	sse_check_state(event_id, SBI_SSE_STATE_UNUSED);

cleanup:
	switch (state) {
	case SBI_SSE_STATE_ENABLED:
		ret = sbi_sse_disable(event_id);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "disable event 0x%x", event_id);
			break;
		}
		/* fallthrough */
	case SBI_SSE_STATE_REGISTERED:
		ret = sbi_sse_unregister(event_id);
		if (ret.error)
			sbiret_report_error(&ret, SBI_SUCCESS, "unregister event 0x%x", event_id);
		/* fallthrough */
	default:
		break;
	}

	sse_free_stack(args.stack);
	report_prefix_pop();
}

struct sse_foreign_cpu_test_arg {
	bool done;
	unsigned int expected_cpu;
	uint32_t event_id;
};

static void sse_foreign_cpu_handler(void *data, struct pt_regs *regs, unsigned int hartid)
{
	struct sse_foreign_cpu_test_arg *arg = data;
	unsigned int expected_cpu;

	/* Make the arg content written by the injecting CPU visible */
	smp_rmb();
	expected_cpu = READ_ONCE(arg->expected_cpu);
	report(expected_cpu == current_thread_info()->cpu,
	       "Received event on CPU (%d), expected CPU (%d)", current_thread_info()->cpu,
	       expected_cpu);

	WRITE_ONCE(arg->done, true);
	/* Make the arg update visible to the other CPUs */
	smp_wmb();
}

struct sse_local_per_cpu {
	struct sbi_sse_handler_arg args;
	struct sbiret ret;
	struct sse_foreign_cpu_test_arg handler_arg;
	enum sbi_sse_state state;
};

static void sse_register_enable_local(void *data)
{
	struct sbiret ret;
	struct sse_local_per_cpu *cpu_args = data;
	struct sse_local_per_cpu *cpu_arg = &cpu_args[current_thread_info()->cpu];
	uint32_t event_id = cpu_arg->handler_arg.event_id;

	ret = sbi_sse_register(event_id, &cpu_arg->args);
	WRITE_ONCE(cpu_arg->ret, ret);
	if (ret.error)
		return;
	cpu_arg->state = SBI_SSE_STATE_REGISTERED;

	ret = sbi_sse_enable(event_id);
	WRITE_ONCE(cpu_arg->ret, ret);
	if (ret.error)
		return;
	cpu_arg->state = SBI_SSE_STATE_ENABLED;
}

static void sbi_sse_disable_unregister_local(void *data)
{
	struct sbiret ret;
	struct sse_local_per_cpu *cpu_args = data;
	struct sse_local_per_cpu *cpu_arg = &cpu_args[current_thread_info()->cpu];
	uint32_t event_id = cpu_arg->handler_arg.event_id;

	switch (cpu_arg->state) {
	case SBI_SSE_STATE_ENABLED:
		ret = sbi_sse_disable(event_id);
		WRITE_ONCE(cpu_arg->ret, ret);
		if (ret.error)
			return;
		/* fallthrough */
	case SBI_SSE_STATE_REGISTERED:
		ret = sbi_sse_unregister(event_id);
		WRITE_ONCE(cpu_arg->ret, ret);
		/* fallthrough */
	default:
		break;
	}
}

static uint64_t sse_event_get_complete_timeout(void)
{
	char *event_complete_timeout_str;
	uint64_t timeout;

	event_complete_timeout_str = getenv("SSE_EVENT_COMPLETE_TIMEOUT");
	if (!event_complete_timeout_str)
		timeout = 3000;
	else
		timeout = strtoul(event_complete_timeout_str, NULL, 0);

	return timer_get_cycles() + usec_to_cycles(timeout);
}

static void sse_test_inject_local(uint32_t event_id)
{
	int cpu;
	uint64_t timeout;
	struct sbiret ret;
	struct sse_local_per_cpu *cpu_args, *cpu_arg;
	struct sse_foreign_cpu_test_arg *handler_arg;

	cpu_args = calloc(NR_CPUS, sizeof(struct sse_local_per_cpu));

	report_prefix_push("local_dispatch");
	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		cpu_arg->handler_arg.event_id = event_id;
		cpu_arg->args.stack = sse_alloc_stack();
		cpu_arg->args.handler = sse_foreign_cpu_handler;
		cpu_arg->args.handler_data = (void *)&cpu_arg->handler_arg;
		cpu_arg->state = SBI_SSE_STATE_UNUSED;
	}

	on_cpus(sse_register_enable_local, cpu_args);
	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		ret = cpu_arg->ret;
		if (ret.error) {
			report_fail("CPU failed to register/enable event: %ld", ret.error);
			goto cleanup;
		}

		handler_arg = &cpu_arg->handler_arg;
		WRITE_ONCE(handler_arg->expected_cpu, cpu);
		/* Make the handler_arg content visible to the other CPUs */
		smp_wmb();
		ret = sbi_sse_inject(event_id, cpus[cpu].hartid);
		if (ret.error) {
			report_fail("CPU failed to inject event: %ld", ret.error);
			goto cleanup;
		}
	}

	for_each_online_cpu(cpu) {
		handler_arg = &cpu_args[cpu].handler_arg;
		smp_rmb();

		timeout = sse_event_get_complete_timeout();
		while (!READ_ONCE(handler_arg->done) && timer_get_cycles() < timeout) {
			/* Wait for the handler_arg update to be visible */
			smp_rmb();
			cpu_relax();
		}
		report(READ_ONCE(handler_arg->done), "Event handled");
		WRITE_ONCE(handler_arg->done, false);
	}

cleanup:
	on_cpus(sbi_sse_disable_unregister_local, cpu_args);
	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		ret = READ_ONCE(cpu_arg->ret);
		if (ret.error)
			report_fail("CPU failed to disable/unregister event: %ld", ret.error);
	}

	for_each_online_cpu(cpu) {
		cpu_arg = &cpu_args[cpu];
		sse_free_stack(cpu_arg->args.stack);
	}

	report_prefix_pop();
}

static void sse_test_inject_global_cpu(uint32_t event_id, unsigned int cpu,
				       struct sse_foreign_cpu_test_arg *test_arg)
{
	unsigned long value;
	struct sbiret ret;
	uint64_t timeout;
	enum sbi_sse_state state;

	WRITE_ONCE(test_arg->expected_cpu, cpu);
	/* Make the test_arg content visible to the other CPUs */
	smp_wmb();
	value = cpus[cpu].hartid;
	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PREFERRED_HART, 1, &value);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Set preferred hart"))
		return;

	ret = sbi_sse_enable(event_id);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Enable event"))
		return;

	ret = sbi_sse_inject(event_id, cpus[cpu].hartid);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Inject event"))
		goto disable;

	smp_rmb();
	timeout = sse_event_get_complete_timeout();
	while (!READ_ONCE(test_arg->done) && timer_get_cycles() < timeout) {
		/* Wait for the shared test_arg structure update */
		smp_rmb();
		cpu_relax();
	}

	report(READ_ONCE(test_arg->done), "event handler called");
	WRITE_ONCE(test_arg->done, false);

	timeout = sse_event_get_complete_timeout();
	/* Wait for the event to be back in the ENABLED state */
	do {
		ret = sse_event_get_state(event_id, &state);
		if (ret.error)
			goto disable;
		cpu_relax();
	} while (state != SBI_SSE_STATE_ENABLED && timer_get_cycles() < timeout);

	report(state == SBI_SSE_STATE_ENABLED, "Event in enabled state");

disable:
	ret = sbi_sse_disable(event_id);
	sbiret_report_error(&ret, SBI_SUCCESS, "Disable event");
}

static void sse_test_inject_global(uint32_t event_id)
{
	struct sbiret ret;
	unsigned int cpu;
	struct sse_foreign_cpu_test_arg test_arg = {.event_id = event_id};
	struct sbi_sse_handler_arg args = {
		.handler = sse_foreign_cpu_handler,
		.handler_data = (void *)&test_arg,
		.stack = sse_alloc_stack(),
	};

	report_prefix_push("global_dispatch");

	ret = sbi_sse_register(event_id, &args);
	if (!sbiret_report_error(&ret, SBI_SUCCESS, "Register event"))
		goto err;

	for_each_online_cpu(cpu)
		sse_test_inject_global_cpu(event_id, cpu, &test_arg);

	ret = sbi_sse_unregister(event_id);
	sbiret_report_error(&ret, SBI_SUCCESS, "Unregister event");

err:
	sse_free_stack(args.stack);
	report_prefix_pop();
}

struct priority_test_arg {
	uint32_t event_id;
	bool called;
	u32 prio;
	enum sbi_sse_state state; /* Used for error handling */
	struct priority_test_arg *next_event_arg;
	void (*check_func)(struct priority_test_arg *arg);
};

static void sse_hi_priority_test_handler(void *arg, struct pt_regs *regs,
					 unsigned int hartid)
{
	struct priority_test_arg *targ = arg;
	struct priority_test_arg *next = targ->next_event_arg;

	targ->called = true;
	if (next) {
		sbi_sse_inject(next->event_id, current_thread_info()->hartid);

		report(!sse_event_pending(next->event_id), "Higher priority event is not pending");
		report(next->called, "Higher priority event was handled");
	}
}

static void sse_low_priority_test_handler(void *arg, struct pt_regs *regs,
					  unsigned int hartid)
{
	struct priority_test_arg *targ = arg;
	struct priority_test_arg *next = READ_ONCE(targ->next_event_arg);

	WRITE_ONCE(targ->called, true);

	if (next) {
		sbi_sse_inject(next->event_id, current_thread_info()->hartid);

		report(sse_event_pending(next->event_id), "Lower priority event is pending");
		report(!READ_ONCE(next->called),
		       "Lower priority event %s was not handled before %s",
		       sse_event_name(next->event_id), sse_event_name(targ->event_id));
	}
}

static void sse_test_injection_priority_arg(struct priority_test_arg *in_args,
					    unsigned int in_args_size,
					    sbi_sse_handler_fn handler,
					    const char *test_name)
{
	unsigned int i;
	unsigned long value, uret;
	struct sbiret ret;
	uint32_t event_id;
	struct priority_test_arg *arg;
	unsigned int args_size = 0;
	struct sbi_sse_handler_arg event_args[in_args_size];
	struct priority_test_arg *args[in_args_size];
	void *stack;
	struct sbi_sse_handler_arg *event_arg;

	report_prefix_push(test_name);

	for (i = 0; i < in_args_size; i++) {
		arg = &in_args[i];
		arg->state = SBI_SSE_STATE_UNUSED;
		event_id = arg->event_id;
		if (!sse_event_can_inject(event_id))
			continue;

		args[args_size] = arg;
		event_args[args_size].stack = 0;
		args_size++;
	}

	if (!args_size) {
		report_skip("No injectable events");
		goto skip;
	}

	for (i = 0; i < args_size; i++) {
		arg = args[i];
		event_id = arg->event_id;
		stack = sse_alloc_stack();

		event_arg = &event_args[i];
		event_arg->handler = handler;
		event_arg->handler_data = (void *)arg;
		event_arg->stack = stack;

		if (i < (args_size - 1))
			WRITE_ONCE(arg->next_event_arg, args[i + 1]);
		else
			WRITE_ONCE(arg->next_event_arg, NULL);

		/* Make sure global events target the current hart */
		if (sbi_sse_event_is_global(event_id)) {
			uret = sse_global_event_set_current_hart(event_id);
			if (uret)
				goto err;
		}

		ret = sbi_sse_register(event_id, event_arg);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "register event %s",
					    sse_event_name(event_id));
			goto err;
		}
		arg->state = SBI_SSE_STATE_REGISTERED;

		value = arg->prio;
		ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "set event %s priority",
					    sse_event_name(event_id));
			goto err;
		}
		ret = sbi_sse_enable(event_id);
		if (ret.error) {
			sbiret_report_error(&ret, SBI_SUCCESS, "enable event %s",
					    sse_event_name(event_id));
			goto err;
		}
		arg->state = SBI_SSE_STATE_ENABLED;
	}

	/* Inject the first event */
	ret = sbi_sse_inject(args[0]->event_id, current_thread_info()->hartid);
	sbiret_report_error(&ret, SBI_SUCCESS, "injection");

	/* Check that all handlers have been called */
	for (i = 0; i < args_size; i++) {
		arg = args[i];
		report(READ_ONCE(arg->called), "Event %s handler called",
		       sse_event_name(arg->event_id));
	}

err:
	for (i = 0; i < args_size; i++) {
		arg = args[i];
		event_id = arg->event_id;

		switch (arg->state) {
		case SBI_SSE_STATE_ENABLED:
			ret = sbi_sse_disable(event_id);
			if (ret.error) {
				sbiret_report_error(&ret, SBI_SUCCESS, "disable event 0x%x",
						    event_id);
				break;
			}
			/* fallthrough */
		case SBI_SSE_STATE_REGISTERED:
			ret = sbi_sse_unregister(event_id);
			if (ret.error)
				sbiret_report_error(&ret, SBI_SUCCESS, "unregister event 0x%x",
						    event_id);
			/* fallthrough */
		default:
			break;
		}

		event_arg = &event_args[i];
		if (event_arg->stack)
			sse_free_stack(event_arg->stack);
	}

skip:
	report_prefix_pop();
}

static struct priority_test_arg hi_prio_args[] = {
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP},
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS},
};

static struct priority_test_arg low_prio_args[] = {
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE},
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE},
};

static struct priority_test_arg prio_args[] = {
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE, .prio = 5},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE, .prio = 10},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS, .prio = 12},
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, .prio = 15},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, .prio = 20},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS, .prio = 22},
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, .prio = 25},
};

static struct priority_test_arg same_prio_args[] = {
	{.event_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, .prio = 0},
	{.event_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS, .prio = 0},
	{.event_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, .prio = 10},
	{.event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE, .prio = 10},
	{.event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE, .prio = 10},
	{.event_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, .prio = 20},
	{.event_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS, .prio = 20},
};

static void sse_test_injection_priority(void)
{
	report_prefix_push("prio");

	sse_test_injection_priority_arg(hi_prio_args, ARRAY_SIZE(hi_prio_args),
					sse_hi_priority_test_handler, "high");

	sse_test_injection_priority_arg(low_prio_args, ARRAY_SIZE(low_prio_args),
					sse_low_priority_test_handler, "low");

	sse_test_injection_priority_arg(prio_args, ARRAY_SIZE(prio_args),
					sse_low_priority_test_handler, "changed");

	sse_test_injection_priority_arg(same_prio_args, ARRAY_SIZE(same_prio_args),
					sse_low_priority_test_handler, "same_prio");

	report_prefix_pop();
}

static void test_invalid_event_id(unsigned long event_id)
{
	struct sbiret ret;
	unsigned long value = 0;

	ret = sbi_sse_register_raw(event_id, (unsigned long)sbi_sse_entry, 0);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "register event_id 0x%lx", event_id);

	ret = sbi_sse_unregister(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "unregister event_id 0x%lx", event_id);

	ret = sbi_sse_enable(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "enable event_id 0x%lx", event_id);

	ret = sbi_sse_disable(event_id);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "disable event_id 0x%lx", event_id);

	ret = sbi_sse_inject(event_id, 0);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "inject event_id 0x%lx", event_id);

	ret = sbi_sse_write_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "write attr event_id 0x%lx", event_id);

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_PRIORITY, 1, &value);
	sbiret_report_error(&ret, SBI_ERR_INVALID_PARAM,
			    "read attr event_id 0x%lx", event_id);
}

static void sse_test_invalid_event_id(void)
{
	report_prefix_push("event_id");

	test_invalid_event_id(SBI_SSE_EVENT_LOCAL_RESERVED_0_START);

	report_prefix_pop();
}

static void sse_check_event_availability(uint32_t event_id, bool *can_inject, bool *supported)
{
	unsigned long status;
	struct sbiret ret;

	*can_inject = false;
	*supported = false;

	ret = sbi_sse_read_attrs(event_id, SBI_SSE_ATTR_STATUS, 1, &status);
	if (ret.error != SBI_SUCCESS && ret.error != SBI_ERR_NOT_SUPPORTED) {
		report_fail("Get event status != SBI_SUCCESS && != SBI_ERR_NOT_SUPPORTED: %ld",
			    ret.error);
		return;
	}
	if (ret.error == SBI_ERR_NOT_SUPPORTED)
		return;

	*supported = true;
	*can_inject = (status >> SBI_SSE_ATTR_STATUS_INJECT_OFFSET) & 1;
}

static void sse_secondary_boot_and_unmask(void *data)
{
	sbi_sse_hart_unmask();
}

static void sse_check_mask(void)
{
	struct sbiret ret;

	/* Events are masked upon boot; check that */
	ret = sbi_sse_hart_mask();
	sbiret_report_error(&ret, SBI_ERR_ALREADY_STOPPED, "hart mask at boot time");

	ret = sbi_sse_hart_unmask();
	sbiret_report_error(&ret, SBI_SUCCESS, "hart unmask");
	ret = sbi_sse_hart_unmask();
	sbiret_report_error(&ret, SBI_ERR_ALREADY_STARTED, "hart unmask twice error");

	ret = sbi_sse_hart_mask();
	sbiret_report_error(&ret, SBI_SUCCESS, "hart mask");
	ret = sbi_sse_hart_mask();
	sbiret_report_error(&ret, SBI_ERR_ALREADY_STOPPED, "hart mask twice");
}

static void run_inject_test(struct sse_event_info *info)
{
	unsigned long event_id = info->event_id;

	if (!info->can_inject) {
		report_skip("Event does not support injection, skipping injection tests");
		return;
	}

	sse_test_inject_simple(event_id);

	if (sbi_sse_event_is_global(event_id))
		sse_test_inject_global(event_id);
	else
		sse_test_inject_local(event_id);
}

void check_sse(void)
{
	struct sse_event_info *info;
	unsigned long i, event_id;
	bool supported;

	report_prefix_push("sse");

	if (!sbi_probe(SBI_EXT_SSE)) {
		report_skip("extension not available");
		report_prefix_pop();
		return;
	}

	if (__sbi_get_imp_id() == SBI_IMPL_OPENSBI &&
	    __sbi_get_imp_version() < sbi_impl_opensbi_mk_version(1, 7)) {
		report_skip("OpenSBI < v1.7 detected, skipping tests");
		report_prefix_pop();
		return;
	}

	sse_check_mask();

	/*
	 * Wake up all processors: some of them will be targeted by global
	 * events without going through the boot wakeup path, and SSE events
	 * must be unmasked on every hart.
	 */
	on_cpus(sse_secondary_boot_and_unmask, NULL);

	sse_test_invalid_event_id();

	for (i = 0; i < ARRAY_SIZE(sse_event_infos); i++) {
		info = &sse_event_infos[i];
		event_id = info->event_id;
		report_prefix_push(info->name);
		sse_check_event_availability(event_id, &info->can_inject, &supported);
		if (!supported) {
			report_skip("Event is not supported, skipping tests");
			report_prefix_pop();
			continue;
		}

		sse_test_attrs(event_id);
		sse_test_register_error(event_id);

		run_inject_test(info);

		report_prefix_pop();
	}

	sse_test_injection_priority();

	report_prefix_pop();
}