Lines Matching +full:riscv +full:- +full:sbi
1 // SPDX-License-Identifier: GPL-2.0
9 #define pr_fmt(fmt) "riscv-kvm-pmu: " fmt
19 #define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)
38 u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0); in kvm_pmu_get_sample_period()
41 if (!pmc->counter_val) in kvm_pmu_get_sample_period()
44 sample_period = (-pmc->counter_val) & counter_val_mask; in kvm_pmu_get_sample_period()
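
The three matches above are the core of kvm_pmu_get_sample_period(): the perf sample period is the distance from the current virtual counter value to overflow, computed as a two's-complement negation under the counter-width mask (per the SBI counter_info encoding, cinfo.width holds one less than the number of bits). A sketch of the full helper; the zero-value branch is inferred from the overflow arithmetic rather than shown in the matches:

static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
{
        /* cinfo.width holds (number of bits - 1), so this masks the full counter */
        u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0);
        u64 sample_period;

        if (!pmc->counter_val)
                /* A zero counter overflows only after a full wrap */
                sample_period = counter_val_mask + 1;
        else
                /* Two's-complement distance from counter_val to overflow */
                sample_period = (-pmc->counter_val) & counter_val_mask;

        return sample_period;
}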
79 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
80 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
81 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
82 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
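
These three matches are the entire teardown path for a counter's backing perf event: disable it, release it, and clear the pointer so later start/stop/read calls fall through cleanly. A plausible wrapper (the signature is an assumption):

static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}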
147 return -EINVAL; in kvm_pmu_get_fixed_pmc_index()
154 return -EINVAL; in kvm_pmu_get_fixed_pmc_index()
160 int ctr_idx = -1; in kvm_pmu_get_programmable_pmc_index()
166 min = kvpmu->num_hw_ctrs; in kvm_pmu_get_programmable_pmc_index()
167 max = min + kvpmu->num_fw_ctrs; in kvm_pmu_get_programmable_pmc_index()
171 max = kvpmu->num_hw_ctrs; in kvm_pmu_get_programmable_pmc_index()
177 !test_bit(pmc_idx, kvpmu->pmc_in_use)) { in kvm_pmu_get_programmable_pmc_index()
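
The matches above select a free programmable counter: firmware events may only land in [num_hw_ctrs, num_hw_ctrs + num_fw_ctrs), hardware events below num_hw_ctrs, and a candidate must not already be set in pmc_in_use. Its sibling kvm_pmu_get_fixed_pmc_index(), whose -EINVAL returns appear above, handles the fixed cycle/instret events. A sketch assuming a kvm_pmu_is_fw_event() predicate and a lower bound of 3 that skips the fixed counters (both assumptions, not visible in the matches):

static int kvm_pmu_get_programmable_pmc_index(struct kvm_pmu *kvpmu, unsigned long eidx,
                                              unsigned long cbase, unsigned long cmask)
{
        int ctr_idx = -1;
        int i, pmc_idx, min, max;

        if (kvm_pmu_is_fw_event(eidx)) {
                /* Firmware counters occupy the range right after the hardware ones */
                min = kvpmu->num_hw_ctrs;
                max = min + kvpmu->num_fw_ctrs;
        } else {
                /* The first three counters (cycle, time, instret) are fixed */
                min = 3;
                max = kvpmu->num_hw_ctrs;
        }

        for_each_set_bit(i, &cmask, BITS_PER_LONG) {
                pmc_idx = i + cbase;
                if (pmc_idx >= min && pmc_idx < max &&
                    !test_bit(pmc_idx, kvpmu->pmc_in_use)) {
                        ctr_idx = pmc_idx;
                        break;
                }
        }

        return ctr_idx;
}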
207 pmc = &kvpmu->pmc[cidx]; in pmu_ctr_read()
209 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in pmu_ctr_read()
210 fevent_code = get_event_code(pmc->event_idx); in pmu_ctr_read()
211 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in pmu_ctr_read()
212 } else if (pmc->perf_event) { in pmu_ctr_read()
213 pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); in pmu_ctr_read()
215 return -EINVAL; in pmu_ctr_read()
217 *out_val = pmc->counter_val; in pmu_ctr_read()
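
pmu_ctr_read() has two sources of truth: firmware counters are read from the kvpmu->fw_event array that KVM itself increments, while hardware counters accumulate the delta reported by perf_event_read_value(); a counter with neither is unconfigured and fails. A sketch around the matched lines (declarations assumed):

static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
                        unsigned long *out_val)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 enabled, running;
        int fevent_code;

        pmc = &kvpmu->pmc[cidx];

        if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
                /* Firmware events live entirely in KVM's software array */
                fevent_code = get_event_code(pmc->event_idx);
                pmc->counter_val = kvpmu->fw_event[fevent_code].value;
        } else if (pmc->perf_event) {
                /* Fold the perf delta into the guest-visible value */
                pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                          &enabled, &running);
        } else {
                return -EINVAL;
        }
        *out_val = pmc->counter_val;

        return 0;
}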
227 return -EINVAL; in kvm_pmu_validate_counter_mask()
238 attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata); in kvm_pmu_create_perf_event()
241 pmc->counter_val = 0; in kvm_pmu_create_perf_event()
248 attr->sample_period = kvm_pmu_get_sample_period(pmc); in kvm_pmu_create_perf_event()
250 event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc); in kvm_pmu_create_perf_event()
256 pmc->perf_event = event; in kvm_pmu_create_perf_event()
258 perf_event_enable(pmc->perf_event); in kvm_pmu_create_perf_event()
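
kvm_pmu_create_perf_event() recycles any previous event, translates the SBI event index and match data into a perf config, and creates a kernel counter bound to the current vCPU thread (cpu == -1, task == current, context == pmc). A sketch; the SBI_PMU_CFG_FLAG_CLEAR_VALUE and SBI_PMU_CFG_FLAG_AUTO_START flag names come from the SBI PMU headers, not from the matches:

static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
                                     unsigned long flags, unsigned long eidx,
                                     unsigned long evtdata)
{
        struct perf_event *event;

        /* Drop any event left over from a previous configuration */
        kvm_pmu_release_perf_event(pmc);
        attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata);
        if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)
                pmc->counter_val = 0;

        /* The guest-requested initial value is re-armed later, at start time */
        attr->sample_period = kvm_pmu_get_sample_period(pmc);

        /* cpu == -1, task == current: count this vCPU thread on any CPU */
        event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc);
        if (IS_ERR(event))
                return PTR_ERR(event);

        pmc->perf_event = event;
        if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
                perf_event_enable(pmc->perf_event);

        return 0;
}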
269 return -EINVAL; in kvm_riscv_vcpu_pmu_incr_fw()
271 fevent = &kvpmu->fw_event[fid]; in kvm_riscv_vcpu_pmu_incr_fw()
272 if (fevent->started) in kvm_riscv_vcpu_pmu_incr_fw()
273 fevent->value++; in kvm_riscv_vcpu_pmu_incr_fw()
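
Firmware events are counters KVM bumps in software, for example when it emulates an SBI call on the guest's behalf; the increment is dropped unless the guest has started the event. A sketch of the whole function around the matched lines (the bounds check is an assumption):

int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct kvm_fw_event *fevent;

        if (!kvpmu || fid >= SBI_PMU_FW_MAX)
                return -EINVAL;

        fevent = &kvpmu->fw_event[fid];
        /* Only count while the guest has the event started */
        if (fevent->started)
                fevent->value++;

        return 0;
}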
285 if (!kvpmu || !kvpmu->init_done) { in kvm_riscv_vcpu_pmu_read_hpm()
291  * be accessed through SBI PMU only. in kvm_riscv_vcpu_pmu_read_hpm()
305 cidx = csr_num - CSR_CYCLE; in kvm_riscv_vcpu_pmu_read_hpm()
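
kvm_riscv_vcpu_pmu_read_hpm() is the trap-and-emulate path for guest reads of the cycle/time/instret/hpmcounter CSRs; the counter index is simply the CSR's offset from CSR_CYCLE, because the virtual CSR numbers were assigned sequentially at init (see the match at line 601 below). A heavily hedged sketch: the matches only show the init_done check and the index computation, so the uninitialized-PMU policy and the KVM_INSN_* return-code plumbing here are assumptions:

int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
                                unsigned long *val, unsigned long new_val,
                                unsigned long wr_mask)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        int cidx, ret = KVM_INSN_CONTINUE_NEXT_SEPC;

        if (!kvpmu || !kvpmu->init_done)
                /* PMU not set up; the policy for raw hpmcounter access goes here */
                return KVM_INSN_ILLEGAL_TRAP;

        /* The counter CSRs are read-only from the guest's point of view */
        if (wr_mask)
                return KVM_INSN_ILLEGAL_TRAP;

        /* Counter CSRs are contiguous, so the index is the offset from CSR_CYCLE */
        cidx = csr_num - CSR_CYCLE;

        if (pmu_ctr_read(vcpu, cidx, val) < 0)
                return KVM_INSN_ILLEGAL_TRAP;

        return ret;
}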
318 retdata->out_val = kvm_pmu_num_counters(kvpmu); in kvm_riscv_vcpu_pmu_num_ctrs()
329 retdata->err_val = SBI_ERR_INVALID_PARAM; in kvm_riscv_vcpu_pmu_ctr_info()
333 retdata->out_val = kvpmu->pmc[cidx].cinfo.value; in kvm_riscv_vcpu_pmu_ctr_info()
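
GET_COUNTER_INFO simply hands back the packed cinfo word that init filled in; out-of-range indices (and plausibly index 1, the TIME CSR, which is not a PMU counter) earn SBI_ERR_INVALID_PARAM. A sketch; the exact bounds test is inferred, not shown in the matches:

int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
                                struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

        /* Index 1 is the TIME CSR, which is not a PMU counter */
        if (cidx > RISCV_MAX_COUNTERS || cidx == 1) {
                retdata->err_val = SBI_ERR_INVALID_PARAM;
                return 0;
        }

        retdata->out_val = kvpmu->pmc[cidx].cinfo.value;

        return 0;
}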
355 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) in kvm_riscv_vcpu_pmu_ctr_start()
357 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_start()
359 pmc->counter_val = ival; in kvm_riscv_vcpu_pmu_ctr_start()
360 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_start()
361 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_start()
368 if (kvpmu->fw_event[fevent_code].started) { in kvm_riscv_vcpu_pmu_ctr_start()
373 kvpmu->fw_event[fevent_code].started = true; in kvm_riscv_vcpu_pmu_ctr_start()
374 kvpmu->fw_event[fevent_code].value = pmc->counter_val; in kvm_riscv_vcpu_pmu_ctr_start()
375 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_start()
376 if (unlikely(pmc->started)) { in kvm_riscv_vcpu_pmu_ctr_start()
380 perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc)); in kvm_riscv_vcpu_pmu_ctr_start()
381 perf_event_enable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_start()
382 pmc->started = true; in kvm_riscv_vcpu_pmu_ctr_start()
389 retdata->err_val = sbiret; in kvm_riscv_vcpu_pmu_ctr_start()
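
The start path walks the guest's counter mask (guarded by the kvm_pmu_validate_counter_mask() check matched at line 227 above), skipping counters never configured via pmc_in_use. Firmware counters just flip started and seed value from counter_val; hardware counters get their sample period re-armed from the possibly updated counter_val before perf_event_enable(). Starting an already started counter records SBI_ERR_ALREADY_STARTED but the loop continues. A condensed sketch; the SBI_PMU_START_FLAG_SET_INIT_VALUE name is from the SBI headers, not the matches:

int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
                                 unsigned long ctr_mask, unsigned long flags, u64 ival,
                                 struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        int i, pmc_index, sbiret = 0;
        struct kvm_pmc *pmc;
        int fevent_code;

        if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
                sbiret = SBI_ERR_INVALID_PARAM;
                goto out;
        }

        for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
                pmc_index = i + ctr_base;
                if (!test_bit(pmc_index, kvpmu->pmc_in_use))
                        continue;
                pmc = &kvpmu->pmc[pmc_index];
                if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
                        pmc->counter_val = ival;
                if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
                        fevent_code = get_event_code(pmc->event_idx);
                        if (fevent_code >= SBI_PMU_FW_MAX) {
                                sbiret = SBI_ERR_INVALID_PARAM;
                                goto out;
                        }
                        if (kvpmu->fw_event[fevent_code].started) {
                                sbiret = SBI_ERR_ALREADY_STARTED;
                                continue;
                        }
                        kvpmu->fw_event[fevent_code].started = true;
                        kvpmu->fw_event[fevent_code].value = pmc->counter_val;
                } else if (pmc->perf_event) {
                        if (unlikely(pmc->started)) {
                                sbiret = SBI_ERR_ALREADY_STARTED;
                                continue;
                        }
                        /* Re-arm so the event overflows when the guest counter would */
                        perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc));
                        perf_event_enable(pmc->perf_event);
                        pmc->started = true;
                } else {
                        sbiret = SBI_ERR_INVALID_PARAM;
                }
        }

out:
        retdata->err_val = sbiret;

        return 0;
}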
412 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) in kvm_riscv_vcpu_pmu_ctr_stop()
414 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_stop()
415 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_stop()
416 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_stop()
422 if (!kvpmu->fw_event[fevent_code].started) in kvm_riscv_vcpu_pmu_ctr_stop()
425 kvpmu->fw_event[fevent_code].started = false; in kvm_riscv_vcpu_pmu_ctr_stop()
426 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_stop()
427 if (pmc->started) { in kvm_riscv_vcpu_pmu_ctr_stop()
429 perf_event_disable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_stop()
430 pmc->started = false; in kvm_riscv_vcpu_pmu_ctr_stop()
437 pmc->counter_val += perf_event_read_value(pmc->perf_event, in kvm_riscv_vcpu_pmu_ctr_stop()
445 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_ctr_stop()
446 clear_bit(pmc_index, kvpmu->pmc_in_use); in kvm_riscv_vcpu_pmu_ctr_stop()
451 retdata->err_val = sbiret; in kvm_riscv_vcpu_pmu_ctr_stop()
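
Stop is the mirror image: firmware counters clear started, hardware counters are perf_event_disable()d, and stopping a stopped counter reports SBI_ERR_ALREADY_STOPPED. With the reset flag, the final perf count is first folded into counter_val, the backing event is released, and the counter is unconfigured (event_idx invalidated, pmc_in_use bit cleared). A condensed sketch; SBI_PMU_STOP_FLAG_RESET is assumed from the SBI headers:

int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
                                unsigned long ctr_mask, unsigned long flags,
                                struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        int i, pmc_index, sbiret = 0;
        u64 enabled, running;
        struct kvm_pmc *pmc;
        int fevent_code;

        if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
                sbiret = SBI_ERR_INVALID_PARAM;
                goto out;
        }

        for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
                pmc_index = i + ctr_base;
                if (!test_bit(pmc_index, kvpmu->pmc_in_use))
                        continue;
                pmc = &kvpmu->pmc[pmc_index];
                if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
                        fevent_code = get_event_code(pmc->event_idx);
                        if (fevent_code >= SBI_PMU_FW_MAX) {
                                sbiret = SBI_ERR_INVALID_PARAM;
                                goto out;
                        }
                        if (!kvpmu->fw_event[fevent_code].started)
                                sbiret = SBI_ERR_ALREADY_STOPPED;
                        kvpmu->fw_event[fevent_code].started = false;
                } else if (pmc->perf_event) {
                        if (pmc->started) {
                                perf_event_disable(pmc->perf_event);
                                pmc->started = false;
                        } else {
                                sbiret = SBI_ERR_ALREADY_STOPPED;
                        }
                        if (flags & SBI_PMU_STOP_FLAG_RESET) {
                                /* Preserve the final count, then drop the perf event */
                                pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                                          &enabled, &running);
                                kvm_pmu_release_perf_event(pmc);
                        }
                } else {
                        sbiret = SBI_ERR_INVALID_PARAM;
                }
                if (flags & SBI_PMU_STOP_FLAG_RESET) {
                        /* Reset unconfigures the counter entirely */
                        pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
                        clear_bit(pmc_index, kvpmu->pmc_in_use);
                }
        }

out:
        retdata->err_val = sbiret;

        return 0;
}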
499 if (!test_bit(ctr_base + __ffs(ctr_mask), kvpmu->pmc_in_use)) { in kvm_riscv_vcpu_pmu_ctr_cfg_match()
512 pmc = &kvpmu->pmc[ctr_idx]; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
513 pmc->idx = ctr_idx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
517 kvpmu->fw_event[event_code].started = true; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
524 set_bit(ctr_idx, kvpmu->pmc_in_use); in kvm_riscv_vcpu_pmu_ctr_cfg_match()
525 pmc->event_idx = eidx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
526 retdata->out_val = ctr_idx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
528 retdata->err_val = sbiret; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
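
COUNTER_CFG_MATCH either trusts the caller's counter choice (SKIP_MATCH: take the lowest set bit of the mask and verify it is already marked in use) or searches for a free counter, then claims it: mark it in use, remember the SBI event index, and return the counter index in out_val. Firmware events need no perf backing, only an optional auto-start; hardware events get a kernel perf event. A condensed sketch; pmu_get_pmc_index() is assumed to dispatch between the fixed and programmable helpers matched above, and the perf_event_attr fields are illustrative:

int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
                                     unsigned long ctr_mask, unsigned long flags,
                                     unsigned long eidx, u64 evtdata,
                                     struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        unsigned long event_code = get_event_code(eidx);
        struct kvm_pmc *pmc;
        int ctr_idx, sbiret = 0;

        if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
                sbiret = SBI_ERR_INVALID_PARAM;
                goto out;
        }

        if (kvm_pmu_is_fw_event(eidx) && event_code >= SBI_PMU_FW_MAX) {
                sbiret = SBI_ERR_NOT_SUPPORTED;
                goto out;
        }

        if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) {
                /* Caller already chose a counter; just sanity-check it is in use */
                if (!test_bit(ctr_base + __ffs(ctr_mask), kvpmu->pmc_in_use)) {
                        sbiret = SBI_ERR_FAILURE;
                        goto out;
                }
                ctr_idx = ctr_base + __ffs(ctr_mask);
        } else {
                ctr_idx = pmu_get_pmc_index(kvpmu, eidx, ctr_base, ctr_mask);
                if (ctr_idx < 0) {
                        sbiret = SBI_ERR_NOT_SUPPORTED;
                        goto out;
                }
        }

        pmc = &kvpmu->pmc[ctr_idx];
        pmc->idx = ctr_idx;

        if (kvm_pmu_is_fw_event(eidx)) {
                if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
                        kvpmu->fw_event[event_code].started = true;
        } else {
                struct perf_event_attr attr = {
                        .type = PERF_TYPE_HARDWARE, /* illustrative; derived from eidx upstream */
                        .size = sizeof(attr),
                        .pinned = true,
                        .disabled = true,
                };
                int ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);

                if (ret)
                        return ret;
        }

        set_bit(ctr_idx, kvpmu->pmc_in_use);
        pmc->event_idx = eidx;
        retdata->out_val = ctr_idx;

out:
        retdata->err_val = sbiret;

        return 0;
}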
538 ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val); in kvm_riscv_vcpu_pmu_ctr_read()
539 if (ret == -EINVAL) in kvm_riscv_vcpu_pmu_ctr_read()
540 retdata->err_val = SBI_ERR_INVALID_PARAM; in kvm_riscv_vcpu_pmu_ctr_read()
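
The SBI-facing read wrapper translates pmu_ctr_read()'s -EINVAL into SBI_ERR_INVALID_PARAM in err_val while still returning 0, so the SBI call itself completes and the guest sees the failure in the SBI return value. The matches are nearly the whole function; a plausible wrapper:

int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
                                struct kvm_vcpu_sbi_return *retdata)
{
        int ret;

        ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val);
        if (ret == -EINVAL)
                retdata->err_val = SBI_ERR_INVALID_PARAM;

        return 0;
}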
566 kvpmu->num_hw_ctrs = num_hw_ctrs + 1; in kvm_riscv_vcpu_pmu_init()
567 kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX; in kvm_riscv_vcpu_pmu_init()
568 memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); in kvm_riscv_vcpu_pmu_init()
570 if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) { in kvm_riscv_vcpu_pmu_init()
572 kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS; in kvm_riscv_vcpu_pmu_init()
585 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_init()
586 pmc->idx = i; in kvm_riscv_vcpu_pmu_init()
587 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_init()
588 if (i < kvpmu->num_hw_ctrs) { in kvm_riscv_vcpu_pmu_init()
589 pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW; in kvm_riscv_vcpu_pmu_init()
592 pmc->cinfo.width = 63; in kvm_riscv_vcpu_pmu_init()
594 pmc->cinfo.width = hpm_width; in kvm_riscv_vcpu_pmu_init()
601 pmc->cinfo.csr = CSR_CYCLE + i; in kvm_riscv_vcpu_pmu_init()
603 pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW; in kvm_riscv_vcpu_pmu_init()
604 pmc->cinfo.width = BITS_PER_LONG - 1; in kvm_riscv_vcpu_pmu_init()
608 kvpmu->init_done = true; in kvm_riscv_vcpu_pmu_init()
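
Init probes the host PMU for counter count and width, reserves one extra hardware slot so the counter index space lines up with the CYCLE/TIME/INSTRET CSR layout (hence num_hw_ctrs + 1), clamps to RISCV_KVM_MAX_HW_CTRS, and then fills in cinfo for every counter: 64-bit fixed counters (width = 63), the probed width for hpmcounters, sequential CSR numbers with no relation to the physical hpmcounters, and BITS_PER_LONG - 1 for firmware counters. A condensed sketch; riscv_pmu_get_hpm_info() and the skipped TIME slot are assumptions consistent with the matches:

void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        u32 hpm_width = 0, num_hw_ctrs = 0;
        struct kvm_pmc *pmc;
        int i, ret;

        ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs);
        if (ret < 0 || !hpm_width || !num_hw_ctrs)
                return;

        /* +1 keeps a slot for the TIME CSR in the counter index space */
        kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
        kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;
        memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));

        if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
                pr_warn_once("Limiting the hardware counters to the ISA maximum\n");
                kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS;
        }

        for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) {
                /* Index 1 is TIME, which is not a PMU counter */
                if (i == 1)
                        continue;
                pmc = &kvpmu->pmc[i];
                pmc->idx = i;
                pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
                if (i < kvpmu->num_hw_ctrs) {
                        pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
                        /* cycle/instret are 64-bit; hpmcounters use the probed width */
                        pmc->cinfo.width = (i < 3) ? 63 : hpm_width;
                        /* CSR numbers are sequential; no map to real hpmcounters */
                        pmc->cinfo.csr = CSR_CYCLE + i;
                } else {
                        pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
                        pmc->cinfo.width = BITS_PER_LONG - 1;
                }
        }

        kvpmu->init_done = true;
}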
620 for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_MAX_COUNTERS) { in kvm_riscv_vcpu_pmu_deinit()
621 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_deinit()
622 pmc->counter_val = 0; in kvm_riscv_vcpu_pmu_deinit()
624 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_deinit()
626 bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS); in kvm_riscv_vcpu_pmu_deinit()
627 memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); in kvm_riscv_vcpu_pmu_deinit()
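
Deinit only needs to visit counters still marked in use: zero the value, release any backing perf event, and invalidate the event index, then wipe the in-use bitmap and the firmware-event array. A plausible wrapper around the matched lines:

void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        int i;

        if (!kvpmu)
                return;

        for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_MAX_COUNTERS) {
                pmc = &kvpmu->pmc[i];
                pmc->counter_val = 0;
                kvm_pmu_release_perf_event(pmc);
                pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
        }
        bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS);
        memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
}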