// SPDX-License-Identifier: GPL-2.0-only
/*
 * sbi_pmu_test.c - Tests the riscv64 SBI PMU functionality.
 *
 * Copyright (c) 2024, Rivos Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
#include "sbi.h"
#include "arch_timer.h"
#include "ucall_common.h"

/* Maximum counters (firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64
union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS];

/* Snapshot shared memory data */
#define PMU_SNAPSHOT_GPA_BASE BIT(30)
static void *snapshot_gva;
static vm_paddr_t snapshot_gpa;

static int vcpu_shared_irq_count;
static int counter_in_use;

/* Cache the available counters in a bitmask */
static unsigned long counter_mask_available;

static bool illegal_handler_invoked;

#define SBI_PMU_TEST_BASIC	BIT(0)
#define SBI_PMU_TEST_EVENTS	BIT(1)
#define SBI_PMU_TEST_SNAPSHOT	BIT(2)
#define SBI_PMU_TEST_OVERFLOW	BIT(3)

#define SBI_PMU_OVERFLOW_IRQNUM_DEFAULT 5
struct test_args {
	int disabled_tests;
	int overflow_irqnum;
};

static struct test_args targs;

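/*
 * RISC-V CSR instructions encode the CSR number as an immediate, so
 * csr_read() only works with a compile-time constant. The macro ladder
 * below expands into switch cases covering the 32 counter CSRs starting
 * at CSR_CYCLE (and their upper halves starting at CSR_CYCLEH) so that a
 * CSR number chosen at runtime can still be read.
 */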
unsigned long pmu_csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	\
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	\
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	\
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	\
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	\
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

static inline void dummy_func_loop(uint64_t iter)
{
	int i = 0;

	while (i < iter) {
		asm volatile("nop");
		i++;
	}
}

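/*
 * SBI PMU start/stop calls address counters as a (base, mask) pair; a
 * mask of 1 with base 'counter' selects exactly one counter. With
 * SBI_PMU_START_FLAG_SET_INIT_VALUE, 'ival' seeds the counter before it
 * starts counting.
 */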
static void start_counter(unsigned long counter, unsigned long start_flags,
			  unsigned long ival)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter, 1, start_flags,
			ival, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to start counter %ld\n", counter);
}

/* This should be invoked only for the reset counter use case */
static void stop_reset_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1,
			stop_flags | SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
	__GUEST_ASSERT(ret.error == SBI_ERR_ALREADY_STOPPED,
		       "Unable to stop counter %ld\n", counter);
}

static void stop_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1, stop_flags,
			0, 0, 0);
	__GUEST_ASSERT(ret.error == 0 || ret.error == SBI_ERR_ALREADY_STOPPED,
		       "Unable to stop counter %ld error %ld\n", counter, ret.error);
}

static void guest_illegal_exception_handler(struct ex_regs *regs)
{
	__GUEST_ASSERT(regs->cause == EXC_INST_ILLEGAL,
		       "Unexpected exception handler %lx\n", regs->cause);

	illegal_handler_invoked = true;
	/* skip the trapping instruction */
	regs->epc += 4;
}

static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int irq_num = regs->cause & ~CAUSE_IRQ_FLAG;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	unsigned long overflown_mask;

	/* Validate that we are in the correct irq handler */
	GUEST_ASSERT_EQ(irq_num, IRQ_PMU_OVF);

	/* Stop all counters first to avoid further interrupts */
	stop_counter(counter_in_use, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

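	/* Ack the overflow interrupt by clearing the LCOFI pending bit in sip */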
	csr_clear(CSR_SIP, BIT(IRQ_PMU_OVF));

	overflown_mask = READ_ONCE(snapshot_data->ctr_overflow_mask);
	GUEST_ASSERT(overflown_mask & 0x01);

	WRITE_ONCE(vcpu_shared_irq_count, vcpu_shared_irq_count + 1);
}

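/*
 * SBI_EXT_PMU_COUNTER_CFG_MATCH picks a counter out of the (cbase, cmask)
 * set that can monitor 'event', configures it, and returns its logical
 * counter index.
 */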
static unsigned long get_counter_index(unsigned long cbase, unsigned long cmask,
				       unsigned long cflags,
				       unsigned long event)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, event, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "config matching failed %ld\n", ret.error);
	GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS);
	GUEST_ASSERT(BIT(ret.value) & counter_mask_available);

	return ret.value;
}

static unsigned long get_num_counters(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);

	__GUEST_ASSERT(ret.error == 0, "Unable to retrieve number of counters from SBI PMU");
	__GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS,
		       "Invalid number of counters %ld\n", ret.value);

	return ret.value;
}

static void update_counter_info(int num_counters)
{
	int i = 0;
	struct sbiret ret;

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo_arr[i].value = ret.value;
		counter_mask_available |= BIT(i);
	}

	GUEST_ASSERT(counter_mask_available > 0);
}

static unsigned long read_fw_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, idx, 0, 0, 0, 0, 0);
	GUEST_ASSERT(ret.error == 0);
	return ret.value;
}

static unsigned long read_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	unsigned long counter_val = 0;

	__GUEST_ASSERT(ctrinfo.type < 2, "Invalid counter type %d", ctrinfo.type);

	if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW)
		counter_val = pmu_csr_read_num(ctrinfo.csr);
	else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW)
		counter_val = read_fw_counter(idx, ctrinfo);

	return counter_val;
}

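/* The PMU snapshot shared memory interface was introduced in SBI v2.0, hence the version check */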
static inline void verify_sbi_requirement_assert(void)
{
	long out_val = 0;
	bool probe;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	if (get_host_sbi_spec_version() < sbi_mk_version(2, 0))
		__GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot");
}

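/*
 * The shared memory GPA is split into a lo/hi pair of unsigned longs so
 * the call works on both rv32 and rv64. Per the SBI spec, passing
 * lo == hi == -1 disables the snapshot shared memory.
 */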
static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
				      lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}

static void test_pmu_event(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, ULONG_MAX / 2);
	stop_counter(counter, 0);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	stop_counter(counter, 0);
	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

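/*
 * Same checks as test_pmu_event(), but through the snapshot interface:
 * with SBI_PMU_START_FLAG_INIT_SNAPSHOT the initial counter value is
 * taken from ctr_values[] in the shared memory, and with
 * SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT the final value is written back there.
 */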
static void test_pmu_event_snapshot(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	/* The counter value is updated w.r.t relative index of cbase */
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	WRITE_ONCE(snapshot_data->ctr_values[0], ULONG_MAX / 2);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_pre = READ_ONCE(snapshot_data->ctr_values[0]);

	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

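/*
 * Seed the counter close to ULONG_MAX so it overflows quickly and raises
 * a local counter-overflow interrupt (LCOFI), which guest_irq_handler()
 * fields and counts via vcpu_shared_irq_count.
 */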
static void test_pmu_event_overflow(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_post;
	unsigned long counter_init_value = ULONG_MAX - 10000;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_in_use = counter;

	/* The counter value is updated w.r.t relative index of cbase passed to start/stop */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	udelay(msecs_to_usecs(2000));
	/* irq handler should have stopped the counter */
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	/* The counter value after stopping should be less than the init value due to overflow */
	__GUEST_ASSERT(counter_value_post < counter_init_value,
		       "counter_value_post %lx counter_init_value %lx for counter\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

static void test_invalid_event(void)
{
	struct sbiret ret;
	unsigned long event = 0x1234; /* A random event */

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, 0,
			counter_mask_available, 0, event, 0, 0);
	GUEST_ASSERT_EQ(ret.error, SBI_ERR_NOT_SUPPORTED);
}

static void test_pmu_events(void)
{
	int num_counters = 0;

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Sanity testing for any random invalid event */
	test_invalid_event();

	/* Only these two events are guaranteed to be present */
	test_pmu_event(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

static void test_pmu_basic_sanity(void)
{
	long out_val = 0;
	bool probe;
	struct sbiret ret;
	int num_counters = 0, i;
	union sbi_pmu_ctr_info ctrinfo;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	num_counters = get_num_counters();

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i,
				0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo.value = ret.value;

		/*
		 * Accessibility check of hardware and read capability of firmware counters.
		 * The spec doesn't mandate any initial value. No need to check any value.
		 */
		if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW) {
			pmu_csr_read_num(ctrinfo.csr);
			GUEST_ASSERT(illegal_handler_invoked);
		} else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
			read_fw_counter(i, ctrinfo);
		}
	}

	GUEST_DONE();
}

static void test_pmu_events_snaphost(void)
{
	int num_counters = 0;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	int i;

	/* Verify presence of SBI PMU and the minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Validate shared memory access */
	GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_overflow_mask), 0);
	for (i = 0; i < num_counters; i++) {
		if (counter_mask_available & (BIT(i)))
			GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_values[i]), 0);
	}
	/* Only these two events are guaranteed to be present */
	test_pmu_event_snapshot(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event_snapshot(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

static void test_pmu_events_overflow(void)
{
	int num_counters = 0, i = 0;

	/* Verify presence of SBI PMU and the minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);
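	/* Enable the counter overflow interrupt in sie and unmask interrupts globally */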
	csr_set(CSR_IE, BIT(IRQ_PMU_OVF));
	local_irq_enable();

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/*
	 * QEMU supports overflow for cycle/instruction.
	 * This test may fail on any platform that does not support overflow for these two events.
	 */
	for (i = 0; i < targs.overflow_irqnum; i++)
		test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);

	vcpu_shared_irq_count = 0;

	for (i = 0; i < targs.overflow_irqnum; i++)
		test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);

	GUEST_DONE();
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
	case UCALL_SYNC:
		break;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
		break;
	}
}

void test_vm_destroy(struct kvm_vm *vm)
{
	memset(ctrinfo_arr, 0, sizeof(union sbi_pmu_ctr_info) * RISCV_MAX_PMU_COUNTERS);
	counter_mask_available = 0;
	kvm_vm_free(vm);
}

static void test_vm_basic_test(void *guest_code)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	vm_init_vector_tables(vm);
	/* Illegal instruction handler is required to verify read access without configuration */
	vm_install_exception_handler(vm, EXC_INST_ILLEGAL, guest_illegal_exception_handler);

	vcpu_init_vector_tables(vcpu);
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_events_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu = NULL;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	/* PMU Snapshot requires only a single page */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, PMU_SNAPSHOT_GPA_BASE, 1, 1, 0);
	/* PMU_SNAPSHOT_GPA_BASE is identity mapped */
	virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1);

	snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE);
	snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gpa);
}

static void test_vm_events_snapshot_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	test_vm_setup_snapshot_mem(vm, vcpu);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_events_overflow(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	__TEST_REQUIRE(__vcpu_has_isa_ext(vcpu, KVM_RISCV_ISA_EXT_SSCOFPMF),
		       "Sscofpmf is not available, skipping overflow test");

	test_vm_setup_snapshot_mem(vm, vcpu);
	vm_init_vector_tables(vm);
	vm_install_interrupt_handler(vm, guest_irq_handler);

	vcpu_init_vector_tables(vcpu);
	/* Initialize guest timer frequency. */
	timer_freq = vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency));

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, timer_freq);
	sync_global_to_guest(vm, vcpu_shared_irq_count);
	sync_global_to_guest(vm, targs);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-t <test name>] [-n <number of LCOFI interrupts for the overflow test>]\n",
		name);
	pr_info("\t-t: Test to run (default all). Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
	pr_info("\t-n: Number of LCOFI interrupts to trigger for each event in the overflow test (default: %d)\n",
		SBI_PMU_OVERFLOW_IRQNUM_DEFAULT);
	pr_info("\t-h: print this help screen\n");
}

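/*
 * Example invocation: "./sbi_pmu_test -t overflow -n 10" runs only the
 * overflow test and expects 10 LCOFI interrupts per event.
 */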
static bool parse_args(int argc, char *argv[])
{
	int opt;
	int temp_disabled_tests = SBI_PMU_TEST_BASIC | SBI_PMU_TEST_EVENTS | SBI_PMU_TEST_SNAPSHOT |
				  SBI_PMU_TEST_OVERFLOW;
	int overflow_interrupts = 0;

	while ((opt = getopt(argc, argv, "ht:n:")) != -1) {
		switch (opt) {
		case 't':
			if (!strncmp("basic", optarg, 5))
				temp_disabled_tests &= ~SBI_PMU_TEST_BASIC;
			else if (!strncmp("events", optarg, 6))
				temp_disabled_tests &= ~SBI_PMU_TEST_EVENTS;
			else if (!strncmp("snapshot", optarg, 8))
				temp_disabled_tests &= ~SBI_PMU_TEST_SNAPSHOT;
			else if (!strncmp("overflow", optarg, 8))
				temp_disabled_tests &= ~SBI_PMU_TEST_OVERFLOW;
			else
				goto done;
			targs.disabled_tests = temp_disabled_tests;
			break;
		case 'n':
			overflow_interrupts = atoi_positive("Number of LCOFI", optarg);
			break;
		case 'h':
		default:
			goto done;
		}
	}

	if (overflow_interrupts > 0) {
		if (targs.disabled_tests & SBI_PMU_TEST_OVERFLOW) {
			pr_info("-n option is only available for the overflow test\n");
			goto done;
		} else {
			targs.overflow_irqnum = overflow_interrupts;
		}
	}

	return true;
done:
	test_print_help(argv[0]);
	return false;
}

int main(int argc, char *argv[])
{
	targs.disabled_tests = 0;
	targs.overflow_irqnum = SBI_PMU_OVERFLOW_IRQNUM_DEFAULT;

	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	if (!(targs.disabled_tests & SBI_PMU_TEST_BASIC)) {
		test_vm_basic_test(test_pmu_basic_sanity);
		pr_info("SBI PMU basic test: PASS\n");
	}

	if (!(targs.disabled_tests & SBI_PMU_TEST_EVENTS)) {
		test_vm_events_test(test_pmu_events);
		pr_info("SBI PMU event verification test: PASS\n");
	}

	if (!(targs.disabled_tests & SBI_PMU_TEST_SNAPSHOT)) {
		test_vm_events_snapshot_test(test_pmu_events_snaphost);
		pr_info("SBI PMU event verification with snapshot test: PASS\n");
	}

	if (!(targs.disabled_tests & SBI_PMU_TEST_OVERFLOW)) {
		test_vm_events_overflow(test_pmu_events_overflow);
		pr_info("SBI PMU event verification with overflow test: PASS\n");
	}

	return 0;
}