Lines Matching +full:riscv +full:- +full:sbi
1 // SPDX-License-Identifier: GPL-2.0
9 #define pr_fmt(fmt) "riscv-kvm-pmu: " fmt
17 #include <asm/sbi.h>
20 #define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)
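These matches appear to come from the RISC-V KVM vCPU PMU implementation (the function names correspond to arch/riscv/kvm/vcpu_pmu.c). The macro above sizes the virtual PMU as the sum of hardware-backed and SBI firmware counters. A minimal sketch of how such a total is typically used for bounds checking; the struct fields are inferred from the matched lines, and validate_ctr_idx() is a hypothetical helper:

    /* Sketch only: field names inferred from the matched lines. */
    struct kvm_pmu_sketch {
        int num_hw_ctrs;    /* counters backed by host perf events */
        int num_fw_ctrs;    /* SBI firmware event counters */
    };

    #define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)

    /* Reject guest-supplied counter indices outside the virtual PMU. */
    static int validate_ctr_idx(const struct kvm_pmu_sketch *pmu, unsigned long cidx)
    {
        return cidx < kvm_pmu_num_counters(pmu) ? 0 : -EINVAL;
    }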
39 u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0); in kvm_pmu_get_sample_period()
42 if (!pmc->counter_val) in kvm_pmu_get_sample_period()
45 sample_period = (-pmc->counter_val) & counter_val_mask; in kvm_pmu_get_sample_period()
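The sample-period lines invert the guest-programmed counter value so the host perf event overflows exactly when the guest counter would wrap. A minimal sketch of the helper, assuming the zero-value branch falls back to the counter's full range (only the mask, the zero test, and the negation are confirmed by the matched lines):

    static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
    {
        /* cinfo.width holds the index of the counter's top bit. */
        u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0);
        u64 sample_period;

        if (!pmc->counter_val)
            sample_period = counter_val_mask + 1;    /* assumed: full period */
        else
            /* Events left until the guest counter wraps to zero. */
            sample_period = (-pmc->counter_val) & counter_val_mask;

        return sample_period;
    }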
80 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
81 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
82 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
83 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
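The release helper is shown almost in full: the event is disabled before perf_event_release_kernel() so counting stops before the event's resources are torn down, and the stale pointer is cleared last. Completed as a sketch (only the signature is assumed):

    static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
    {
        if (pmc->perf_event) {
            /* Quiesce first, then free, then drop the stale pointer. */
            perf_event_disable(pmc->perf_event);
            perf_event_release_kernel(pmc->perf_event);
            pmc->perf_event = NULL;
        }
    }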
148 return -EINVAL; in kvm_pmu_get_fixed_pmc_index()
155 return -EINVAL; in kvm_pmu_get_fixed_pmc_index()
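Only the two failure returns of kvm_pmu_get_fixed_pmc_index() are matched here. By SBI PMU convention the fixed hardware events map onto fixed counters: cycle to counter 0 and instret to counter 2, with index 1 reserved for the time CSR. A hedged sketch of that mapping (the helpers and the exact structure are assumptions):

    static int kvm_pmu_get_fixed_pmc_index(unsigned long eidx)
    {
        u32 etype = kvm_pmu_get_perf_event_type(eidx);    /* helper assumed */
        u32 ecode = get_event_code(eidx);

        if (etype != SBI_PMU_EVENT_TYPE_HW)
            return -EINVAL;

        if (ecode == SBI_PMU_HW_CPU_CYCLES)
            return 0;    /* CY: fixed counter 0 */
        if (ecode == SBI_PMU_HW_INSTRUCTIONS)
            return 2;    /* IR: fixed counter 2; index 1 is TIME */
        return -EINVAL;
    }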
161 int ctr_idx = -1; in kvm_pmu_get_programmable_pmc_index()
167 min = kvpmu->num_hw_ctrs; in kvm_pmu_get_programmable_pmc_index()
168 max = min + kvpmu->num_fw_ctrs; in kvm_pmu_get_programmable_pmc_index()
172 max = kvpmu->num_hw_ctrs; in kvm_pmu_get_programmable_pmc_index()
178 !test_bit(pmc_idx, kvpmu->pmc_in_use)) { in kvm_pmu_get_programmable_pmc_index()
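The index-selection lines partition the counter space: firmware events may only land on indices in [num_hw_ctrs, num_hw_ctrs + num_fw_ctrs), hardware events below num_hw_ctrs. A hedged sketch of the selection loop; the min/max assignments and the pmc_in_use test are confirmed above, while the lower bound for hardware events and the loop shape are assumptions:

    static int kvm_pmu_get_programmable_pmc_index(struct kvm_pmu *kvpmu, unsigned long eidx,
                                                  unsigned long cbase, unsigned long cmask)
    {
        int ctr_idx = -1;
        int i, pmc_idx, min, max;

        if (kvm_pmu_is_fw_event(eidx)) {    /* helper assumed from context */
            /* Firmware counters sit immediately after the hardware ones. */
            min = kvpmu->num_hw_ctrs;
            max = min + kvpmu->num_fw_ctrs;
        } else {
            min = 3;    /* assumed: cycle/time/instret are fixed counters */
            max = kvpmu->num_hw_ctrs;
        }

        /* Pick the first free counter inside the guest-supplied mask. */
        for_each_set_bit(i, &cmask, BITS_PER_LONG) {
            pmc_idx = i + cbase;
            if (pmc_idx >= min && pmc_idx < max &&
                !test_bit(pmc_idx, kvpmu->pmc_in_use)) {
                ctr_idx = pmc_idx;
                break;
            }
        }

        return ctr_idx;
    }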
209 return -EINVAL; in pmu_fw_ctr_read_hi()
214 return -EINVAL; in pmu_fw_ctr_read_hi()
217 pmc = &kvpmu->pmc[cidx]; in pmu_fw_ctr_read_hi()
219 if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW) in pmu_fw_ctr_read_hi()
220 return -EINVAL; in pmu_fw_ctr_read_hi()
222 fevent_code = get_event_code(pmc->event_idx); in pmu_fw_ctr_read_hi()
223 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in pmu_fw_ctr_read_hi()
225 *out_val = pmc->counter_val >> 32; in pmu_fw_ctr_read_hi()
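pmu_fw_ctr_read_hi() serves the SBI FW_READ_HI call, which exists so RV32 guests can fetch the upper 32 bits of a 64-bit firmware counter. A sketch under that assumption; the guards that produce the matched -EINVAL returns are inferred:

    static int pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
                                  unsigned long *out_val)
    {
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);    /* accessor assumed */
        struct kvm_pmc *pmc;
        int fevent_code;

        if (!IS_ENABLED(CONFIG_32BIT))
            return -EINVAL;    /* assumed guard: read_hi is RV32-only */

        pmc = &kvpmu->pmc[cidx];
        if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
            return -EINVAL;    /* only firmware counters have a split read */

        fevent_code = get_event_code(pmc->event_idx);
        pmc->counter_val = kvpmu->fw_event[fevent_code].value;
        *out_val = pmc->counter_val >> 32;    /* upper half for the RV32 guest */

        return 0;
    }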
240 return -EINVAL; in pmu_ctr_read()
243 pmc = &kvpmu->pmc[cidx]; in pmu_ctr_read()
245 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in pmu_ctr_read()
246 fevent_code = get_event_code(pmc->event_idx); in pmu_ctr_read()
247 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in pmu_ctr_read()
248 } else if (pmc->perf_event) { in pmu_ctr_read()
249 pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); in pmu_ctr_read()
251 return -EINVAL; in pmu_ctr_read()
253 *out_val = pmc->counter_val; in pmu_ctr_read()
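The generic read path dispatches on counter type: firmware counters are backed by a software table, hardware counters by a host perf event whose delta is accumulated into counter_val. Sketch (the bounds check is condensed; both branches and the final assignment are confirmed above):

    static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
                            unsigned long *out_val)
    {
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 enabled, running;
        int fevent_code;

        if (cidx >= kvm_pmu_num_counters(kvpmu))
            return -EINVAL;    /* bounds check assumed */

        pmc = &kvpmu->pmc[cidx];

        if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
            /* Firmware counters live in a software table. */
            fevent_code = get_event_code(pmc->event_idx);
            pmc->counter_val = kvpmu->fw_event[fevent_code].value;
        } else if (pmc->perf_event) {
            /* Accumulate the host perf event's delta since last read. */
            pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                      &enabled, &running);
        } else {
            return -EINVAL;    /* no backing event configured */
        }
        *out_val = pmc->counter_val;

        return 0;
    }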
263 return -EINVAL; in kvm_pmu_validate_counter_mask()
272 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_riscv_pmu_overflow()
273 struct kvm_vcpu *vcpu = pmc->vcpu; in kvm_riscv_pmu_overflow()
275 struct riscv_pmu *rpmu = to_riscv_pmu(perf_event->pmu); in kvm_riscv_pmu_overflow()
285 rpmu->pmu.stop(perf_event, PERF_EF_UPDATE); in kvm_riscv_pmu_overflow()
295 * TODO: Should we keep it for RISC-V? in kvm_riscv_pmu_overflow()
297 period = -(local64_read(&perf_event->count)); in kvm_riscv_pmu_overflow()
299 local64_set(&perf_event->hw.period_left, 0); in kvm_riscv_pmu_overflow()
300 perf_event->attr.sample_period = period; in kvm_riscv_pmu_overflow()
301 perf_event->hw.sample_period = period; in kvm_riscv_pmu_overflow()
303 set_bit(pmc->idx, kvpmu->pmc_overflown); in kvm_riscv_pmu_overflow()
306 rpmu->pmu.start(perf_event, PERF_EF_RELOAD); in kvm_riscv_pmu_overflow()
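The overflow handler's flow is: stop the event immediately (deferring to a workqueue would skew the count), re-arm the period with whatever remains of the current guest period, mark the counter overflown, let the guest observe the overflow interrupt, and restart the event. A condensed sketch; the interrupt-injection call is assumed from the sscofpmf overflow flow rather than shown in the matches:

    static void kvm_riscv_pmu_overflow(struct perf_event *perf_event,
                                       struct perf_sample_data *data,
                                       struct pt_regs *regs)
    {
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_vcpu *vcpu = pmc->vcpu;
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct riscv_pmu *rpmu = to_riscv_pmu(perf_event->pmu);
        u64 period;

        /* Stop counting right away; deferring would skew the counter. */
        rpmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /* Re-arm with the events remaining in the current guest period. */
        period = -(local64_read(&perf_event->count));
        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        /* Record the overflow and inject it into the guest (call assumed). */
        set_bit(pmc->idx, kvpmu->pmc_overflown);
        kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_PMU_OVF);

        rpmu->pmu.start(perf_event, PERF_EF_RELOAD);
    }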
316 attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata); in kvm_pmu_create_perf_event()
319 pmc->counter_val = 0; in kvm_pmu_create_perf_event()
326 attr->sample_period = kvm_pmu_get_sample_period(pmc); in kvm_pmu_create_perf_event()
328 event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc); in kvm_pmu_create_perf_event()
334 pmc->perf_event = event; in kvm_pmu_create_perf_event()
336 perf_event_enable(pmc->perf_event); in kvm_pmu_create_perf_event()
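Event creation translates the SBI event encoding into a perf config, sets a default sample period (the guest's value is applied at start time), and binds a kernel counter to the current task with the overflow callback above. A sketch; the flag tests guarding the confirmed lines follow the SBI PMU spec names and are assumptions:

    static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
                                          unsigned long flags, unsigned long eidx,
                                          unsigned long evtdata)
    {
        struct perf_event *event;

        kvm_pmu_release_perf_event(pmc);    /* drop any previous binding (assumed) */
        attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata);
        if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)
            pmc->counter_val = 0;

        /* Default period; the guest-specified value is applied at start. */
        attr->sample_period = kvm_pmu_get_sample_period(pmc);

        event = perf_event_create_kernel_counter(attr, -1, current,
                                                 kvm_riscv_pmu_overflow, pmc);
        if (IS_ERR(event))
            return PTR_ERR(event);

        pmc->perf_event = event;
        if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
            perf_event_enable(pmc->perf_event);

        return 0;
    }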
347 return -EINVAL; in kvm_riscv_vcpu_pmu_incr_fw()
349 fevent = &kvpmu->fw_event[fid]; in kvm_riscv_vcpu_pmu_incr_fw()
350 if (fevent->started) in kvm_riscv_vcpu_pmu_incr_fw()
351 fevent->value++; in kvm_riscv_vcpu_pmu_incr_fw()
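Firmware events (SBI calls, IPIs, fences, and the like) are counted purely in software: the hypervisor bumps the per-vCPU counter, and only while the guest has started it. Completed as a sketch (the bound check producing the matched -EINVAL is assumed):

    int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
    {
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct kvm_fw_event *fevent;

        if (!kvpmu || fid >= SBI_PMU_FW_MAX)
            return -EINVAL;    /* bound check assumed */

        fevent = &kvpmu->fw_event[fid];
        if (fevent->started)    /* count only while the guest runs the counter */
            fevent->value++;

        return 0;
    }

Call sites elsewhere in KVM would pass a firmware event id, e.g. kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT) when forwarding an IPI (the call site is an assumption).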
363 if (!kvpmu || !kvpmu->init_done) { in kvm_riscv_vcpu_pmu_read_hpm()
369 * be accessed through SBI PMU only. in kvm_riscv_vcpu_pmu_read_hpm()
383 cidx = csr_num - CSR_CYCLE; in kvm_riscv_vcpu_pmu_read_hpm()
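kvm_riscv_vcpu_pmu_read_hpm() emulates trapped reads of the cycle/instret/hpmcounter CSRs. When the virtual PMU is uninitialized, legacy cycle/instret reads return 0 while other hpmcounter accesses raise an illegal-instruction trap, since those counters must be accessed through SBI PMU. The core of the emulation is the sequential CSR-to-index mapping in the matched line; a sketch of that step (the KVM_INSN_* result plumbing is assumed):

    /* Trapped CSR number -> virtual counter index (CSRs are assigned
     * sequentially from CSR_CYCLE at init time). */
    cidx = csr_num - CSR_CYCLE;
    if (pmu_ctr_read(vcpu, cidx, val) < 0)
        return KVM_INSN_ILLEGAL_TRAP;    /* result code assumed */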
395 kfree(kvpmu->sdata); in kvm_pmu_clear_snapshot_area()
396 kvpmu->sdata = NULL; in kvm_pmu_clear_snapshot_area()
397 kvpmu->snapshot_addr = INVALID_GPA; in kvm_pmu_clear_snapshot_area()
437 kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC); in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
438 if (!kvpmu->sdata) in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
439 return -ENOMEM; in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
441 if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) { in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
442 kfree(kvpmu->sdata); in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
447 kvpmu->snapshot_addr = saddr; in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
450 retdata->err_val = sbiret; in kvm_riscv_vcpu_pmu_snapshot_set_shmem()
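The snapshot-shmem handler allocates a host-side mirror of the guest's snapshot area, zero-initializes the guest region by writing the freshly zeroed mirror back, and records the guest physical address. A sketch of that sequence; kvm_pmu_setup_snapshot() is a hypothetical helper name, and address validation plus the SBI_SHMEM_DISABLE path are omitted:

    static int kvm_pmu_setup_snapshot(struct kvm_vcpu *vcpu, gpa_t saddr)
    {
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        int size = sizeof(struct riscv_pmu_snapshot_data);

        /* Host-side mirror of the guest snapshot area. */
        kvpmu->sdata = kzalloc(size, GFP_ATOMIC);
        if (!kvpmu->sdata)
            return -ENOMEM;

        /* Zero-initialize the guest region by writing the mirror back. */
        if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, size)) {
            kfree(kvpmu->sdata);
            kvpmu->sdata = NULL;
            return -EFAULT;    /* mapped to an SBI error by the caller (assumed) */
        }

        kvpmu->snapshot_addr = saddr;
        return 0;
    }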
460 retdata->out_val = kvm_pmu_num_counters(kvpmu); in kvm_riscv_vcpu_pmu_num_ctrs()
471 retdata->err_val = SBI_ERR_INVALID_PARAM; in kvm_riscv_vcpu_pmu_ctr_info()
475 retdata->out_val = kvpmu->pmc[cidx].cinfo.value; in kvm_riscv_vcpu_pmu_ctr_info()
496 if (kvpmu->snapshot_addr == INVALID_GPA) { in kvm_riscv_vcpu_pmu_ctr_start()
500 if (kvm_vcpu_read_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata, in kvm_riscv_vcpu_pmu_ctr_start()
510 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) in kvm_riscv_vcpu_pmu_ctr_start()
513 clear_bit(pmc_index, kvpmu->pmc_overflown); in kvm_riscv_vcpu_pmu_ctr_start()
514 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_start()
516 pmc->counter_val = ival; in kvm_riscv_vcpu_pmu_ctr_start()
519 pmc->counter_val = kvpmu->sdata->ctr_values[i]; in kvm_riscv_vcpu_pmu_ctr_start()
522 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_start()
523 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_start()
530 if (kvpmu->fw_event[fevent_code].started) { in kvm_riscv_vcpu_pmu_ctr_start()
535 kvpmu->fw_event[fevent_code].started = true; in kvm_riscv_vcpu_pmu_ctr_start()
536 kvpmu->fw_event[fevent_code].value = pmc->counter_val; in kvm_riscv_vcpu_pmu_ctr_start()
537 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_start()
538 if (unlikely(pmc->started)) { in kvm_riscv_vcpu_pmu_ctr_start()
542 perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc)); in kvm_riscv_vcpu_pmu_ctr_start()
543 perf_event_enable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_start()
544 pmc->started = true; in kvm_riscv_vcpu_pmu_ctr_start()
551 retdata->err_val = sbiret; in kvm_riscv_vcpu_pmu_ctr_start()
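The start path rejects the call when the snapshot flag is set but no shared memory is registered; otherwise, for each counter in the guest's mask, it loads the initial value (explicit with the set-init-value flag, or from the per-counter snapshot slot), then either starts the firmware counter or re-arms and enables the perf event. Starting an already-running counter yields SBI_ERR_ALREADY_STARTED. A sketch of the per-counter loop; flag names follow the SBI PMU spec and error handling is condensed:

    for_each_set_bit(i, &ctr_mask, RISCV_KVM_MAX_COUNTERS) {
        pmc_index = i + ctr_base;
        if (!test_bit(pmc_index, kvpmu->pmc_in_use))
            continue;    /* not configured: skip */
        /* A fresh start clears any pending overflow state. */
        clear_bit(pmc_index, kvpmu->pmc_overflown);
        pmc = &kvpmu->pmc[pmc_index];
        if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
            pmc->counter_val = ival;
        else if (flags & SBI_PMU_START_FLAG_INIT_SNAPSHOT)
            /* Initial value comes from the guest snapshot slot. */
            pmc->counter_val = kvpmu->sdata->ctr_values[i];
        if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
            fevent_code = get_event_code(pmc->event_idx);
            if (kvpmu->fw_event[fevent_code].started)
                sbiret = SBI_ERR_ALREADY_STARTED;
            kvpmu->fw_event[fevent_code].started = true;
            kvpmu->fw_event[fevent_code].value = pmc->counter_val;
        } else if (pmc->perf_event) {
            if (unlikely(pmc->started)) {
                sbiret = SBI_ERR_ALREADY_STARTED;
                continue;
            }
            /* Re-arm so the host event overflows when the guest
             * counter wraps, then enable it. */
            perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc));
            perf_event_enable(pmc->perf_event);
            pmc->started = true;
        }
    }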
573 if (snap_flag_set && kvpmu->snapshot_addr == INVALID_GPA) { in kvm_riscv_vcpu_pmu_ctr_stop()
581 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) in kvm_riscv_vcpu_pmu_ctr_stop()
583 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_stop()
584 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_stop()
585 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_stop()
591 if (!kvpmu->fw_event[fevent_code].started) in kvm_riscv_vcpu_pmu_ctr_stop()
594 kvpmu->fw_event[fevent_code].started = false; in kvm_riscv_vcpu_pmu_ctr_stop()
595 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_stop()
596 if (pmc->started) { in kvm_riscv_vcpu_pmu_ctr_stop()
598 perf_event_disable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_stop()
599 pmc->started = false; in kvm_riscv_vcpu_pmu_ctr_stop()
612 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) in kvm_riscv_vcpu_pmu_ctr_stop()
613 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in kvm_riscv_vcpu_pmu_ctr_stop()
614 else if (pmc->perf_event) in kvm_riscv_vcpu_pmu_ctr_stop()
615 pmc->counter_val += perf_event_read_value(pmc->perf_event, in kvm_riscv_vcpu_pmu_ctr_stop()
622 if (test_bit(pmc_index, kvpmu->pmc_overflown)) in kvm_riscv_vcpu_pmu_ctr_stop()
623 kvpmu->sdata->ctr_overflow_mask |= BIT(i); in kvm_riscv_vcpu_pmu_ctr_stop()
624 kvpmu->sdata->ctr_values[i] = pmc->counter_val; in kvm_riscv_vcpu_pmu_ctr_stop()
629 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_ctr_stop()
630 clear_bit(pmc_index, kvpmu->pmc_in_use); in kvm_riscv_vcpu_pmu_ctr_stop()
631 clear_bit(pmc_index, kvpmu->pmc_overflown); in kvm_riscv_vcpu_pmu_ctr_stop()
637 kvpmu->sdata->ctr_overflow_mask &= ~BIT(i); in kvm_riscv_vcpu_pmu_ctr_stop()
644 kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata, in kvm_riscv_vcpu_pmu_ctr_stop()
648 retdata->err_val = sbiret; in kvm_riscv_vcpu_pmu_ctr_stop()
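The stop path mirrors start: firmware counters are marked stopped and perf-backed ones disabled, then two flags drive extra work. SBI_PMU_STOP_FLAG_RESET tears the counter binding down (event_idx invalidated, in_use and overflown bits cleared), while the snapshot flag publishes final counter values and overflow bits to the shared memory and writes it back to the guest. A condensed sketch of the tail of the per-counter loop (variable names and flag tests inferred from the matched lines):

    if (snap_flag_set) {
        if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW)
            pmc->counter_val = kvpmu->fw_event[fevent_code].value;
        else if (pmc->perf_event)
            pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                      &enabled, &running);
        /* Publish overflow status and the final value to the snapshot. */
        if (test_bit(pmc_index, kvpmu->pmc_overflown))
            kvpmu->sdata->ctr_overflow_mask |= BIT(i);
        kvpmu->sdata->ctr_values[i] = pmc->counter_val;
    }
    if (flags & SBI_PMU_STOP_FLAG_RESET) {
        /* Reset releases the counter entirely. */
        pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
        clear_bit(pmc_index, kvpmu->pmc_in_use);
        clear_bit(pmc_index, kvpmu->pmc_overflown);
    }

    /* After the loop: flush the snapshot back to guest memory. */
    if (snap_flag_set)
        kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr, kvpmu->sdata,
                             sizeof(struct riscv_pmu_snapshot_data));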
698 if (!test_bit(ctr_base + __ffs(ctr_mask), kvpmu->pmc_in_use)) { in kvm_riscv_vcpu_pmu_ctr_cfg_match()
711 pmc = &kvpmu->pmc[ctr_idx]; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
712 pmc->idx = ctr_idx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
716 kvpmu->fw_event[event_code].started = true; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
725 set_bit(ctr_idx, kvpmu->pmc_in_use); in kvm_riscv_vcpu_pmu_ctr_cfg_match()
726 pmc->event_idx = eidx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
727 retdata->out_val = ctr_idx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
729 retdata->err_val = sbiret; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
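Config-match validates the base/mask pair, checks that the requested counter is free, picks a fixed or programmable index, and then diverges by event type: firmware events need no host perf event and may start immediately, while hardware events get a perf event created for them. The chosen index is returned to the guest in out_val. A condensed sketch of the dispatch after a counter has been chosen (the error mapping and flag test are assumptions):

    pmc = &kvpmu->pmc[ctr_idx];
    pmc->idx = ctr_idx;

    if (is_fevent) {
        /* Firmware events are counted in software; no perf event needed. */
        if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
            kvpmu->fw_event[event_code].started = true;
    } else {
        ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);
        if (ret) {
            sbiret = SBI_ERR_NOT_SUPPORTED;    /* assumed mapping */
            goto out;
        }
    }

    set_bit(ctr_idx, kvpmu->pmc_in_use);
    pmc->event_idx = eidx;
    retdata->out_val = ctr_idx;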
739 ret = pmu_fw_ctr_read_hi(vcpu, cidx, &retdata->out_val); in kvm_riscv_vcpu_pmu_fw_ctr_read_hi()
740 if (ret == -EINVAL) in kvm_riscv_vcpu_pmu_fw_ctr_read_hi()
741 retdata->err_val = SBI_ERR_INVALID_PARAM; in kvm_riscv_vcpu_pmu_fw_ctr_read_hi()
751 ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val); in kvm_riscv_vcpu_pmu_fw_ctr_read()
752 if (ret == -EINVAL) in kvm_riscv_vcpu_pmu_fw_ctr_read()
753 retdata->err_val = SBI_ERR_INVALID_PARAM; in kvm_riscv_vcpu_pmu_fw_ctr_read()
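Both read wrappers translate the internal -EINVAL into the SBI error namespace, so the guest sees SBI_ERR_INVALID_PARAM rather than a Linux errno, and the SBI call itself still completes at the calling-convention level (return 0). Completed as a sketch from the matched lines:

    int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
                                       struct kvm_vcpu_sbi_return *retdata)
    {
        int ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val);

        /* Linux errno stays internal; the guest gets an SBI error code. */
        if (ret == -EINVAL)
            retdata->err_val = SBI_ERR_INVALID_PARAM;

        return 0;
    }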
779 kvpmu->num_hw_ctrs = num_hw_ctrs + 1; in kvm_riscv_vcpu_pmu_init()
780 kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX; in kvm_riscv_vcpu_pmu_init()
781 memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); in kvm_riscv_vcpu_pmu_init()
782 kvpmu->snapshot_addr = INVALID_GPA; in kvm_riscv_vcpu_pmu_init()
784 if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) { in kvm_riscv_vcpu_pmu_init()
786 kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS; in kvm_riscv_vcpu_pmu_init()
799 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_init()
800 pmc->idx = i; in kvm_riscv_vcpu_pmu_init()
801 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_init()
802 pmc->vcpu = vcpu; in kvm_riscv_vcpu_pmu_init()
803 if (i < kvpmu->num_hw_ctrs) { in kvm_riscv_vcpu_pmu_init()
804 pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW; in kvm_riscv_vcpu_pmu_init()
807 pmc->cinfo.width = 63; in kvm_riscv_vcpu_pmu_init()
809 pmc->cinfo.width = hpm_width; in kvm_riscv_vcpu_pmu_init()
816 pmc->cinfo.csr = CSR_CYCLE + i; in kvm_riscv_vcpu_pmu_init()
818 pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW; in kvm_riscv_vcpu_pmu_init()
819 pmc->cinfo.width = 63; in kvm_riscv_vcpu_pmu_init()
823 kvpmu->init_done = true; in kvm_riscv_vcpu_pmu_init()
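Initialization sizes the virtual PMU (one extra "hardware" slot offsets the time counter at index 1), caps it at the ISA maximum, then fills in per-counter info: hardware counters get sequential CSR numbers from CSR_CYCLE and either a 63-bit width (the fixed cycle/instret counters) or the probed hpmcounter width, while firmware counters are all 63 bits wide. A sketch of the loop body; the skip of index 1 (the TIME CSR, not backed by perf) is assumed:

    for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) {
        if (i == 1)
            continue;    /* TIME CSR is not a perf-backed counter (assumed) */
        pmc = &kvpmu->pmc[i];
        pmc->idx = i;
        pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
        pmc->vcpu = vcpu;
        if (i < kvpmu->num_hw_ctrs) {
            pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
            /* Fixed counters (cycle/instret) are full width. */
            pmc->cinfo.width = (i < 3) ? 63 : hpm_width;
            /* CSR numbers are simply sequential; no mapping to physical
             * hpmcounters is maintained, which also eases migration. */
            pmc->cinfo.csr = CSR_CYCLE + i;
        } else {
            pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
            pmc->cinfo.width = 63;
        }
    }
    kvpmu->init_done = true;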
835 for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS) { in kvm_riscv_vcpu_pmu_deinit()
836 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_deinit()
837 pmc->counter_val = 0; in kvm_riscv_vcpu_pmu_deinit()
839 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_deinit()
841 bitmap_zero(kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS); in kvm_riscv_vcpu_pmu_deinit()
842 bitmap_zero(kvpmu->pmc_overflown, RISCV_KVM_MAX_COUNTERS); in kvm_riscv_vcpu_pmu_deinit()
843 memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); in kvm_riscv_vcpu_pmu_deinit()
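Teardown walks only the in-use counters, releases their perf events, invalidates their event bindings, then clears the bookkeeping bitmaps and the firmware event table. Completed as a sketch; the NULL guard and the release call inside the loop are assumed, the rest is confirmed by the matched lines:

    void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
    {
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i;

        if (!kvpmu)
            return;    /* guard assumed */

        for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS) {
            pmc = &kvpmu->pmc[i];
            pmc->counter_val = 0;
            kvm_pmu_release_perf_event(pmc);    /* release assumed here */
            pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
        }
        bitmap_zero(kvpmu->pmc_in_use, RISCV_KVM_MAX_COUNTERS);
        bitmap_zero(kvpmu->pmc_overflown, RISCV_KVM_MAX_COUNTERS);
        memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
    }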