// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	unsigned long funcid = cp->a6;
	u64 temp;

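	/* Reject every PMU call until the per-vCPU PMU context has been initialized. */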
	if (!kvpmu->init_done) {
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		return 0;
	}

	switch (funcid) {
	case SBI_EXT_PMU_NUM_COUNTERS:
		ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_GET_INFO:
		ret = kvm_riscv_vcpu_pmu_ctr_info(vcpu, cp->a0, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_CFG_MATCH:
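		/*
		 * The 64-bit event data argument arrives as a register pair
		 * on RV32 (a4 = low word, a5 = high word) and in a4 alone on
		 * RV64, so reassemble it before passing it on.
		 */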
#if defined(CONFIG_32BIT)
		temp = ((uint64_t)cp->a5 << 32) | cp->a4;
#else
		temp = cp->a4;
#endif
		/*
		 * This can fail if the perf core framework fails to create an
		 * event. There is no need to forward the error to userspace
		 * and exit the guest: the operation can continue without
		 * profiling. Forward the appropriate SBI error to the guest
		 * instead.
		 */
		ret = kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, cp->a0, cp->a1,
						       cp->a2, cp->a3, temp, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_START:
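		/*
		 * As above, the 64-bit initial counter value is split across
		 * a3 (low word) and a4 (high word) on RV32.
		 */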
#if defined(CONFIG_32BIT)
		temp = ((uint64_t)cp->a4 << 32) | cp->a3;
#else
		temp = cp->a3;
#endif
		ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cp->a0, cp->a1, cp->a2,
						   temp, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_STOP:
		ret = kvm_riscv_vcpu_pmu_ctr_stop(vcpu, cp->a0, cp->a1, cp->a2, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_FW_READ:
		ret = kvm_riscv_vcpu_pmu_fw_ctr_read(vcpu, cp->a0, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_FW_READ_HI:
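		/*
		 * FW_READ_HI returns the upper 32 bits of a firmware counter
		 * on RV32; on RV64 the full value comes back from FW_READ, so
		 * report zero here.
		 */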
		if (IS_ENABLED(CONFIG_32BIT))
			ret = kvm_riscv_vcpu_pmu_fw_ctr_read_hi(vcpu, cp->a0, retdata);
		else
			retdata->out_val = 0;
		break;
	case SBI_EXT_PMU_SNAPSHOT_SET_SHMEM:
		ret = kvm_riscv_vcpu_pmu_snapshot_set_shmem(vcpu, cp->a0, cp->a1, cp->a2, retdata);
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
	}

	return ret;
}

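/*
 * The .probe() callback backs the guest's SBI base-extension probe of PMU:
 * report the extension as present only once the per-vCPU PMU context has
 * been initialized.
 */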
static unsigned long kvm_sbi_ext_pmu_probe(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

	return kvpmu->init_done;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = SBI_EXT_PMU,
	.extid_end = SBI_EXT_PMU,
	.handler = kvm_sbi_ext_pmu_handler,
	.probe = kvm_sbi_ext_pmu_probe,
};