// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

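/* Map a base (single letter) ISA extension bit to its KVM ISA extension ID. */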
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

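/* Check whether an ISA extension may be enabled for a Guest VCPU. */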
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

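/* Check whether an ISA extension may be disabled for a Guest VCPU. */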
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	default:
		break;
	}

	return true;
}

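/*
 * Populate the VCPU ISA bitmap with every host extension that KVM
 * allows a Guest to use.
 */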
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

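/* ONE_REG handlers for the CONFIG (KVM_REG_RISCV_CONFIG) register group. */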
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

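/* ONE_REG handlers for the CORE register group (GPRs, PC and privilege mode). */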
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

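/* Accessors for the general Guest CSRs saved in the VCPU context. */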
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

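/* Accessors for the Smstateen CSRs saved in the VCPU context. */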
static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		       sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		       sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

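/* Get/set a single ISA extension register (KVM_REG_RISCV_ISA_SINGLE). */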
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

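/* Get/set a bitmap of ISA extensions (KVM_REG_RISCV_ISA_MULTI_* registers). */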
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

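/*
 * The helpers below enumerate the ONE_REG indices used to service
 * KVM_GET_REG_LIST; each copy_*_reg_indices() returns only a count
 * when called with a NULL destination.
 */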
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * was not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

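/* Vector state registers: vstart, vl, vtype, vcsr, vlenb and v0-v31. */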
static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
		      KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

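/* Top-level KVM_SET_ONE_REG / KVM_GET_ONE_REG dispatch by register type. */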
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}