// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
		.ext_ptr = &vcpu_sbi_ext_dbcn,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SUSP,
		.ext_ptr = &vcpu_sbi_ext_susp,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_STA,
		.ext_ptr = &vcpu_sbi_ext_sta,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_FWFT,
		.ext_ptr = &vcpu_sbi_ext_fwft,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

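/*
 * Look up the static sbi_ext[] entry for a given KVM_RISCV_SBI_EXT_* index,
 * or return NULL if the index is out of range or not registered.
 */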
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;

	if (idx >= KVM_RISCV_SBI_EXT_MAX)
		return NULL;

	for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == idx) {
			sext = &sbi_ext[i];
			break;
		}
	}

	return sext;
}

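/*
 * An extension is "supported" if it was probed successfully at vCPU init,
 * i.e. its status is anything other than UNAVAILABLE.
 */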
static bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

	return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

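/*
 * Forward an SBI call to userspace via KVM_EXIT_RISCV_SBI. The return
 * value defaults to SBI_ERR_NOT_SUPPORTED until userspace fills it in
 * and the vCPU is re-entered (see kvm_riscv_vcpu_sbi_return()).
 */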
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}

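/*
 * Stop all vCPUs and hand a KVM_EXIT_SYSTEM_EVENT of the given type and
 * reason to userspace, which decides whether to shut down or reboot the VM.
 */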
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

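/*
 * Record the entry point and opaque argument for a vCPU reset (e.g. from
 * SBI HSM hart_start) and queue a KVM_REQ_VCPU_RESET request.
 */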
void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
				      unsigned long pc, unsigned long a1)
{
	spin_lock(&vcpu->arch.reset_state.lock);
	vcpu->arch.reset_state.pc = pc;
	vcpu->arch.reset_state.a1 = a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
}

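/*
 * Apply the previously recorded reset state: the hart starts at the
 * requested pc with a0 = hartid, a1 = opaque argument, supervisor
 * interrupts disabled, and bare (vsatp = 0) address translation.
 */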
void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;

	cntx->a0 = vcpu->vcpu_id;

	spin_lock(&vcpu->arch.reset_state.lock);
	cntx->sepc = reset_state->pc;
	cntx->a1 = reset_state->a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	cntx->sstatus &= ~SR_SIE;
	csr->vsatp = 0;
}

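/*
 * Complete an SBI call that was forwarded to userspace: copy the return
 * values into a0/a1 and step sepc past the ECALL instruction.
 */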
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

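/*
 * Enable or disable a single SBI extension from the ONE_REG interface.
 * Only 0 (disabled) and 1 (enabled) are valid values, and extensions that
 * failed to probe (UNAVAILABLE) cannot be toggled at all.
 */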
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	scontext->ext_status[sext->ext_idx] = (reg_val) ?
			KVM_RISCV_SBI_EXT_STATUS_ENABLED :
			KVM_RISCV_SBI_EXT_STATUS_DISABLED;

	return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	*reg_val = scontext->ext_status[sext->ext_idx] ==
				KVM_RISCV_SBI_EXT_STATUS_ENABLED;

	return 0;
}

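/*
 * The MULTI registers pack one extension per bit, BITS_PER_LONG extensions
 * per register; bit i of register reg_num controls extension
 * (reg_num * BITS_PER_LONG + i). Failures on individual extensions are
 * deliberately ignored so one unavailable extension does not abort the
 * whole bitmap update.
 */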
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

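/*
 * Enumerate the ONE_REG indices of all supported single SBI extension
 * registers. With a NULL uindices this only counts them, which is how
 * KVM_GET_REG_LIST sizes its output buffer.
 */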
int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

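/*
 * ONE_REG write handler for the SBI extension enable/disable registers.
 * Writes are only accepted before the vCPU first runs. As an illustrative
 * userspace sketch (assuming a 64-bit host and an already-created vcpu_fd),
 * disabling SBI v0.1 emulation for a vCPU would look roughly like:
 *
 *	u64 val = 0;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *		      KVM_RISCV_SBI_EXT_V01,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 */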
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

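/*
 * ONE_REG read handler for the SBI extension registers; a MULTI_DIS read
 * returns the complement of the enabled-extension bitmap.
 */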
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

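/*
 * Enumerate the ONE_REG indices of the per-extension state registers
 * (KVM_REG_RISCV_SBI_STATE) exposed by enabled extensions. Extensions may
 * supply custom register ids via get_state_reg_id(); otherwise a sequential
 * id is synthesized from the extension's state_reg_subtype.
 */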
int kvm_riscv_vcpu_reg_indices_sbi(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	unsigned long state_reg_count;
	int i, j, rc, count = 0;
	u64 reg;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (!ext->get_state_reg_count ||
		    scontext->ext_status[entry->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED)
			continue;

		state_reg_count = ext->get_state_reg_count(vcpu);
		if (!uindices)
			goto skip_put_user;

		for (j = 0; j < state_reg_count; j++) {
			if (ext->get_state_reg_id) {
				rc = ext->get_state_reg_id(vcpu, j, &reg);
				if (rc)
					return rc;
			} else {
				reg = KVM_REG_RISCV |
				      (IS_ENABLED(CONFIG_32BIT) ?
				       KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) |
				      KVM_REG_RISCV_SBI_STATE |
				      ext->state_reg_subtype | j;
			}

			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

skip_put_user:
		count += state_reg_count;
	}

	return count;
}

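/*
 * Find the enabled SBI extension, if any, whose state registers live under
 * the given KVM_REG_RISCV_SBI_STATE subtype.
 */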
static const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext_withstate(struct kvm_vcpu *vcpu,
									    unsigned long subtype)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->get_state_reg_count &&
		    ext->state_reg_subtype == subtype &&
		    scontext->ext_status[entry->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_ENABLED)
			return ext;
	}

	return NULL;
}

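/*
 * ONE_REG write handler for SBI extension state registers: size-check the
 * access, stage the value in a correctly sized local, and delegate to the
 * owning extension's set_state_reg() callback.
 */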
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	const struct kvm_vcpu_sbi_extension *ext;
	unsigned long reg_subtype;
	void *reg_val;
	u64 data64;
	u32 data32;
	u16 data16;
	u8 data8;

	switch (KVM_REG_SIZE(reg->id)) {
	case 1:
		reg_val = &data8;
		break;
	case 2:
		reg_val = &data16;
		break;
	case 4:
		reg_val = &data32;
		break;
	case 8:
		reg_val = &data64;
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
	if (!ext || !ext->set_state_reg)
		return -EINVAL;

	return ext->set_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
}

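/*
 * ONE_REG read handler for SBI extension state registers, mirroring the
 * write path via the owning extension's get_state_reg() callback.
 */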
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	const struct kvm_vcpu_sbi_extension *ext;
	unsigned long reg_subtype;
	void *reg_val;
	u64 data64;
	u32 data32;
	u16 data16;
	u8 data8;
	int ret;

	switch (KVM_REG_SIZE(reg->id)) {
	case 1:
		reg_val = &data8;
		break;
	case 2:
		reg_val = &data16;
		break;
	case 4:
		reg_val = &data32;
		break;
	case 8:
		reg_val = &data64;
		break;
	default:
		return -EINVAL;
	}

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
	if (!ext || !ext->get_state_reg)
		return -EINVAL;

	ret = ext->get_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
	if (ret)
		return ret;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

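/*
 * Map an SBI extension id (guest a7) to its handler. Entries with
 * ext_idx >= KVM_RISCV_SBI_EXT_MAX (currently only the base extension)
 * have no enable/disable state and are always available; everything else
 * must be in the ENABLED state to be dispatched.
 */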
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_STATUS_ENABLED)
				return ext;

			return NULL;
		}
	}

	return NULL;
}

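/*
 * Top-level dispatcher for guest SBI ECALLs. The return value follows the
 * usual KVM exit-handler convention: 1 to continue running the guest, 0 to
 * exit to userspace, and a negative errno on error.
 */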
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the ioctl
	 * loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases i.e., trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}

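/*
 * Probe and initialize every registered SBI extension for this vCPU.
 * Extensions whose probe() fails are marked UNAVAILABLE and can never be
 * enabled; the rest start out ENABLED unless they ask to default off.
 */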
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (ext->probe && !ext->probe(vcpu)) {
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
			continue;
		}

		scontext->ext_status[idx] = ext->default_disabled ?
					KVM_RISCV_SBI_EXT_STATUS_DISABLED :
					KVM_RISCV_SBI_EXT_STATUS_ENABLED;

		if (ext->init && ext->init(vcpu) != 0)
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
	}
}

void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (scontext->ext_status[idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE ||
		    !ext->deinit)
			continue;

		ext->deinit(vcpu);
	}
}

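/*
 * Invoke the reset() callback of every enabled extension so per-vCPU SBI
 * state (e.g. steal-time shared memory) is cleared on vCPU reset.
 */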
void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (scontext->ext_status[idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED ||
		    !ext->reset)
			continue;

		ext->reset(vcpu);
	}
}