// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 SiFive
 *
 * Authors:
 *     Vincent Chen <vincent.chen@sifive.com>
 *     Greentime Hu <greentime.hu@sifive.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#ifdef CONFIG_RISCV_ISA_V
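/*
 * Reset the vCPU's vector state: record vlenb and, if the guest ISA has V,
 * mark the state Initial and zero the backing store; otherwise turn V off.
 */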
void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
	unsigned long *isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_VS;

	cntx->vector.vlenb = riscv_v_vsize / 32;

	if (riscv_isa_extension_available(isa, v)) {
		cntx->sstatus |= SR_VS_INITIAL;
		WARN_ON(!cntx->vector.datap);
		memset(cntx->vector.datap, 0, riscv_v_vsize);
	} else {
		cntx->sstatus |= SR_VS_OFF;
	}
}

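/* Mark the saved vector state Clean so it is not saved again until dirtied. */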
static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_VS;
	cntx->sstatus |= SR_VS_CLEAN;
}

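/*
 * Lazily save the guest's vector registers: only when the guest has dirtied
 * them (sstatus.VS == Dirty), then mark the state Clean.
 */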
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
				      unsigned long *isa)
{
	if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_save(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

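/* Restore the guest's vector registers unless vector state is Off. */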
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
					 unsigned long *isa)
{
	if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_restore(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

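/*
 * Host vector state is saved and restored unconditionally when the host
 * supports V, since its dirtiness is not tracked here.
 */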
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside of KVM */
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_save(cntx);
}

void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_restore(cntx);
}

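/*
 * Allocate backing storage for the guest and host vector register files;
 * riscv_v_vsize covers all 32 vector registers (32 * vlenb bytes).
 */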
int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
	if (!vcpu->arch.guest_context.vector.datap)
		return -ENOMEM;

	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
	if (!vcpu->arch.host_context.vector.datap)
		return -ENOMEM;

	return 0;
}

void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.guest_context.vector.datap);
	kfree(vcpu->arch.host_context.vector.datap);
}
#endif

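/*
 * Translate a vector ONE_REG id into the address of the matching field in
 * the guest context: vector CSRs come before KVM_REG_RISCV_VECTOR_REG(0),
 * then v0-v31 follow, each vlenb bytes long.
 */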
static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
				    unsigned long reg_num,
				    size_t reg_size,
				    void **reg_addr)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	size_t vlenb = riscv_v_vsize / 32;

	if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
		if (reg_size != sizeof(unsigned long))
			return -EINVAL;
		switch (reg_num) {
		case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
			*reg_addr = &cntx->vector.vstart;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
			*reg_addr = &cntx->vector.vl;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
			*reg_addr = &cntx->vector.vtype;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
			*reg_addr = &cntx->vector.vcsr;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
			*reg_addr = &cntx->vector.vlenb;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
		default:
			return -ENOENT;
		}
	} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
		if (reg_size != vlenb)
			return -EINVAL;
		*reg_addr = cntx->vector.datap +
			    (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
	} else {
		return -ENOENT;
	}

	return 0;
}

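/*
 * Read one vector CSR or vector register into userspace memory via
 * KVM_GET_ONE_REG. Illustrative userspace sketch (assumes a 64-bit host,
 * an open vCPU fd, and the uapi definitions from <linux/kvm.h> and
 * <asm/kvm.h>):
 *
 *	unsigned long vstart;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_VECTOR |
 *			KVM_REG_RISCV_VECTOR_CSR_REG(vstart),
 *		.addr = (unsigned long)&vstart,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */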
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_VECTOR);
	size_t reg_size = KVM_REG_SIZE(reg->id);
	void *reg_addr;
	int rc;

	if (!riscv_isa_extension_available(isa, v))
		return -ENOENT;

	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
	if (rc)
		return rc;

	if (copy_to_user(uaddr, reg_addr, reg_size))
		return -EFAULT;

	return 0;
}

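/* Write one vector CSR or vector register from userspace via KVM_SET_ONE_REG. */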
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_VECTOR);
	size_t reg_size = KVM_REG_SIZE(reg->id);
	void *reg_addr;
	int rc;

	if (!riscv_isa_extension_available(isa, v))
		return -ENOENT;

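	/*
	 * vlenb reflects the fixed host VLEN; a write is accepted only if it
	 * leaves the value unchanged.
	 */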
	if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) {
		struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
		unsigned long reg_val;

		if (copy_from_user(&reg_val, uaddr, reg_size))
			return -EFAULT;
		if (reg_val != cntx->vector.vlenb)
			return -EINVAL;

		return 0;
	}

	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
	if (rc)
		return rc;

	if (copy_from_user(reg_addr, uaddr, reg_size))
		return -EFAULT;

	return 0;
}