1 /*
2 * RISC-V implementation of KVM hooks
3 *
4 * Copyright (c) 2020 Huawei Technologies Co., Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include <sys/ioctl.h>
21 #include <sys/prctl.h>
22
23 #include <linux/kvm.h>
24
25 #include "qemu/timer.h"
26 #include "qapi/error.h"
27 #include "qemu/error-report.h"
28 #include "qemu/main-loop.h"
29 #include "qapi/visitor.h"
30 #include "system/system.h"
31 #include "system/kvm.h"
32 #include "system/kvm_int.h"
33 #include "cpu.h"
34 #include "trace.h"
35 #include "accel/accel-cpu-target.h"
36 #include "hw/pci/pci.h"
37 #include "exec/memattrs.h"
38 #include "system/address-spaces.h"
39 #include "hw/boards.h"
40 #include "hw/irq.h"
41 #include "hw/intc/riscv_imsic.h"
42 #include "qemu/log.h"
43 #include "hw/loader.h"
44 #include "kvm_riscv.h"
45 #include "sbi_ecall_interface.h"
46 #include "chardev/char-fe.h"
47 #include "migration/misc.h"
48 #include "system/runstate.h"
49 #include "hw/riscv/numa.h"
50
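/*
 * prctl() interface for enabling vector state for the QEMU process.
 * The values mirror the Linux uapi; they are defined locally so the
 * build does not depend on a <sys/prctl.h> recent enough to have them.
 */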
51 #define PR_RISCV_V_SET_CONTROL 69
52 #define PR_RISCV_V_VSTATE_CTRL_ON 2
53
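/* qemu_irq handler: forward a wired interrupt line from the machine's APLIC into the in-kernel irqchip. */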
54 void riscv_kvm_aplic_request(void *opaque, int irq, int level)
55 {
56 kvm_set_irq(kvm_state, irq, !!level);
57 }
58
59 static bool cap_has_mp_state;
60
61 #define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
62 type | idx)
63
64 #define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
65 type | idx)
66
67 #if defined(TARGET_RISCV64)
68 #define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
69 #else
70 #define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
71 #endif
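/*
 * A KVM register ID is the bitwise OR of the arch (KVM_REG_RISCV), a
 * size field, a register subtype (core/CSR/config/...) and the
 * register's index. KVM_RISCV_REG_ID_ULONG picks the size matching the
 * target's XLEN, e.g. RISCV_CORE_REG(regs.pc) on riscv64 encodes
 * KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE |
 * KVM_REG_RISCV_CORE_REG(regs.pc).
 */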
72
73 static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
74 {
75 uint64_t size_ctz = __builtin_ctz(size_b);
76
77 return id | (size_ctz << KVM_REG_SIZE_SHIFT);
78 }
79
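/*
 * Vector registers don't have a fixed size: each V register is vlenb
 * bytes long, so the size field of the reg ID is computed at runtime
 * from cpu->cfg.vlenb via kvm_encode_reg_size_id().
 */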
80 static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
81 uint64_t idx)
82 {
83 uint64_t id;
84 size_t size_b;
85
86 g_assert(idx < 32);
87
88 id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx);
89 size_b = cpu->cfg.vlenb;
90
91 return kvm_encode_reg_size_id(id, size_b);
92 }
93
94 #define RISCV_CORE_REG(name) \
95 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \
96 KVM_REG_RISCV_CORE_REG(name))
97
98 #define RISCV_CSR_REG(name) \
99 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \
100 KVM_REG_RISCV_CSR_REG(name))
101
102 #define RISCV_CONFIG_REG(name) \
103 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \
104 KVM_REG_RISCV_CONFIG_REG(name))
105
106 #define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \
107 KVM_REG_RISCV_TIMER_REG(name))
108
109 #define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)
110
111 #define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)
112
113 #define RISCV_VECTOR_CSR_REG(name) \
114 KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \
115 KVM_REG_RISCV_VECTOR_CSR_REG(name))
116
117 #define KVM_RISCV_GET_TIMER(cs, name, reg) \
118 do { \
119 int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
120 if (ret) { \
121 abort(); \
122 } \
123 } while (0)
124
125 #define KVM_RISCV_SET_TIMER(cs, name, reg) \
126 do { \
127 int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
128 if (ret) { \
129 abort(); \
130 } \
131 } while (0)
132
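/*
 * Describes one KVM-tunable piece of CPU state: a MISA bit, a
 * multi-letter ISA extension, a CSR or a config register. 'offset' is
 * overloaded: it is the MISA bit mask for MISA extensions and a byte
 * offset into RISCVCPUConfig or CPURISCVState otherwise.
 */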
133 typedef struct KVMCPUConfig {
134 const char *name;
135 const char *description;
136 target_ulong offset;
137 uint64_t kvm_reg_id;
138 uint32_t prop_size;
139 bool user_set;
140 bool supported;
141 } KVMCPUConfig;
142
143 #define KVM_MISA_CFG(_bit, _reg_id) \
144 {.offset = _bit, .kvm_reg_id = _reg_id}
145
146 /* KVM ISA extensions */
147 static KVMCPUConfig kvm_misa_ext_cfgs[] = {
148 KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
149 KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
150 KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
151 KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
152 KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
153 KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
154 KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
155 KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
156 };
157
158 static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
159 const char *name,
160 void *opaque, Error **errp)
161 {
162 KVMCPUConfig *misa_ext_cfg = opaque;
163 target_ulong misa_bit = misa_ext_cfg->offset;
164 RISCVCPU *cpu = RISCV_CPU(obj);
165 CPURISCVState *env = &cpu->env;
166 bool value = env->misa_ext_mask & misa_bit;
167
168 visit_type_bool(v, name, &value, errp);
169 }
170
171 static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
172 const char *name,
173 void *opaque, Error **errp)
174 {
175 KVMCPUConfig *misa_ext_cfg = opaque;
176 target_ulong misa_bit = misa_ext_cfg->offset;
177 RISCVCPU *cpu = RISCV_CPU(obj);
178 CPURISCVState *env = &cpu->env;
179 bool value, host_bit;
180
181 if (!visit_type_bool(v, name, &value, errp)) {
182 return;
183 }
184
185 host_bit = env->misa_ext_mask & misa_bit;
186
187 if (value == host_bit) {
188 return;
189 }
190
191 if (!value) {
192 misa_ext_cfg->user_set = true;
193 return;
194 }
195
196 /*
197 * Forbid users to enable extensions that aren't
198 * available in the hart.
199 */
200 error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
201 "enabled in the host", misa_ext_cfg->name);
202 }
203
204 static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
205 {
206 CPURISCVState *env = &cpu->env;
207 uint64_t id, reg;
208 int i, ret;
209
210 for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
211 KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
212 target_ulong misa_bit = misa_cfg->offset;
213
214 if (!misa_cfg->user_set) {
215 continue;
216 }
217
218 /* If we're here we're going to disable the MISA bit */
219 reg = 0;
220 id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
221 misa_cfg->kvm_reg_id);
222 ret = kvm_set_one_reg(cs, id, &reg);
223 if (ret != 0) {
224 /*
225 * We're not checking for -EINVAL because if the bit is about
226 * to be disabled, it means that it was already enabled by
227 * KVM. We determined that by fetching the 'isa' register
228 * during init() time. Any error at this point is worth
229 * aborting.
230 */
231 error_report("Unable to set KVM reg %s, error %d",
232 misa_cfg->name, ret);
233 exit(EXIT_FAILURE);
234 }
235 env->misa_ext &= ~misa_bit;
236 }
237 }
238
239 #define KVM_CSR_CFG(_name, _env_prop, reg_id) \
240 {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
241 .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \
242 .kvm_reg_id = reg_id}
243
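/*
 * CSRs read from/written to KVM. Note that KVM exposes the S-mode view
 * (sstatus/sie/sip) while QEMU stores those values in its machine-level
 * fields (mstatus/mie/mip), hence the name/offset mismatch below.
 */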
244 static KVMCPUConfig kvm_csr_cfgs[] = {
245 KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
246 KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
247 KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
248 KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
249 KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
250 KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
251 KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
252 KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
253 KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
254 KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)),
255 KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)),
256 };
257
258 static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
259 {
260 return (void *)&cpu->env + csr_cfg->offset;
261 }
262
263 static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
264 {
265 uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
266 return *val32;
267 }
268
269 static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
270 {
271 uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
272 return *val64;
273 }
274
275 static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
276 uint32_t val)
277 {
278 uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
279 *val32 = val;
280 }
281
282 static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
283 uint64_t val)
284 {
285 uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
286 *val64 = val;
287 }
288
289 #define KVM_EXT_CFG(_name, _prop, _reg_id) \
290 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
291 .kvm_reg_id = _reg_id}
292
293 static KVMCPUConfig kvm_multi_ext_cfgs[] = {
294 KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
295 KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
296 KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
297 KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
298 KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
299 KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
300 KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
301 KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
302 KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
303 KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
304 KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
305 KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
306 KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
307 KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
308 KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
309 KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
310 KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
311 KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
312 KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
313 KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
314 KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC),
315 KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB),
316 KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
317 KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
318 KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
319 KVM_EXT_CFG("zca", ext_zca, KVM_RISCV_ISA_EXT_ZCA),
320 KVM_EXT_CFG("zcb", ext_zcb, KVM_RISCV_ISA_EXT_ZCB),
321 KVM_EXT_CFG("zcd", ext_zcd, KVM_RISCV_ISA_EXT_ZCD),
322 KVM_EXT_CFG("zcf", ext_zcf, KVM_RISCV_ISA_EXT_ZCF),
323 KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
324 KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
325 KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
326 KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR),
327 KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
328 KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
329 KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
330 KVM_EXT_CFG("ztso", ext_ztso, KVM_RISCV_ISA_EXT_ZTSO),
331 KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
332 KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
333 KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),
334 KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN),
335 KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB),
336 KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG),
337 KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED),
338 KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA),
339 KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB),
340 KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
341 KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
342 KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
343 KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
344 KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
345 KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
346 KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
347 KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
348 KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
349 KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
350 KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
351 KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
352 KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
353 KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
354 KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
355 };
356
357 static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
358 {
359 return (void *)&cpu->cfg + kvmcfg->offset;
360 }
361
362 static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
363 uint32_t val)
364 {
365 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
366
367 *ext_enabled = val;
368 }
369
370 static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
371 KVMCPUConfig *multi_ext)
372 {
373 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
374
375 return *ext_enabled;
376 }
377
378 static void kvm_cpu_get_multi_ext_cfg(Object *obj, Visitor *v,
379 const char *name,
380 void *opaque, Error **errp)
381 {
382 KVMCPUConfig *multi_ext_cfg = opaque;
383 RISCVCPU *cpu = RISCV_CPU(obj);
384 bool value = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
385
386 visit_type_bool(v, name, &value, errp);
387 }
388
389 static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
390 const char *name,
391 void *opaque, Error **errp)
392 {
393 KVMCPUConfig *multi_ext_cfg = opaque;
394 RISCVCPU *cpu = RISCV_CPU(obj);
395 bool value, host_val;
396
397 if (!visit_type_bool(v, name, &value, errp)) {
398 return;
399 }
400
401 host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
402
403 /*
404 * Ignore if the user is setting the same value
405 * as the host.
406 */
407 if (value == host_val) {
408 return;
409 }
410
411 if (!multi_ext_cfg->supported) {
412 /*
413 * Error out if the user is trying to enable an
414 * extension that KVM doesn't support. Ignore
415 * option otherwise.
416 */
417 if (value) {
418 error_setg(errp, "KVM does not support disabling extension %s",
419 multi_ext_cfg->name);
420 }
421
422 return;
423 }
424
425 multi_ext_cfg->user_set = true;
426 kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
427 }
428
429 static KVMCPUConfig kvm_cbom_blocksize = {
430 .name = "cbom_blocksize",
431 .offset = CPU_CFG_OFFSET(cbom_blocksize),
432 .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
433 };
434
435 static KVMCPUConfig kvm_cboz_blocksize = {
436 .name = "cboz_blocksize",
437 .offset = CPU_CFG_OFFSET(cboz_blocksize),
438 .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
439 };
440
441 static KVMCPUConfig kvm_v_vlenb = {
442 .name = "vlenb",
443 .offset = CPU_CFG_OFFSET(vlenb),
444 .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR |
445 KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)
446 };
447
448 static KVMCPUConfig kvm_sbi_dbcn = {
449 .name = "sbi_dbcn",
450 .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
451 KVM_REG_RISCV_SBI_EXT | KVM_RISCV_SBI_EXT_DBCN
452 };
453
454 static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
455 {
456 uint64_t id, reg;
457 int i, ret;
458
459 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
460 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
461
462 if (!multi_ext_cfg->user_set) {
463 continue;
464 }
465
466 id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
467 multi_ext_cfg->kvm_reg_id);
468 reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
469 ret = kvm_set_one_reg(cs, id, &reg);
470 if (ret != 0) {
471 if (!reg && ret == -EINVAL) {
472 warn_report("KVM cannot disable extension %s",
473 multi_ext_cfg->name);
474 } else {
475 error_report("Unable to enable extension %s in KVM, error %d",
476 multi_ext_cfg->name, ret);
477 exit(EXIT_FAILURE);
478 }
479 }
480 }
481 }
482
483 static void cpu_get_cfg_unavailable(Object *obj, Visitor *v,
484 const char *name,
485 void *opaque, Error **errp)
486 {
487 bool value = false;
488
489 visit_type_bool(v, name, &value, errp);
490 }
491
492 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
493 const char *name,
494 void *opaque, Error **errp)
495 {
496 const char *propname = opaque;
497 bool value;
498
499 if (!visit_type_bool(v, name, &value, errp)) {
500 return;
501 }
502
503 if (value) {
504 error_setg(errp, "'%s' is not available with KVM",
505 propname);
506 }
507 }
508
509 static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name)
510 {
511 /* Check if KVM created the property already */
512 if (object_property_find(obj, prop_name)) {
513 return;
514 }
515
516 /*
517 * Set the default to disabled for every extension
518 * unknown to KVM and error out if the user attempts
519 * to enable any of them.
520 */
521 object_property_add(obj, prop_name, "bool",
522 cpu_get_cfg_unavailable,
523 cpu_set_cfg_unavailable,
524 NULL, (void *)prop_name);
525 }
526
527 static void riscv_cpu_add_kvm_unavail_prop_array(Object *obj,
528 const RISCVCPUMultiExtConfig *array)
529 {
530 const RISCVCPUMultiExtConfig *prop;
531
532 g_assert(array);
533
534 for (prop = array; prop && prop->name; prop++) {
535 riscv_cpu_add_kvm_unavail_prop(obj, prop->name);
536 }
537 }
538
539 static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
540 {
541 int i;
542
543 riscv_add_satp_mode_properties(cpu_obj);
544
545 for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
546 KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
547 int bit = misa_cfg->offset;
548
549 misa_cfg->name = riscv_get_misa_ext_name(bit);
550 misa_cfg->description = riscv_get_misa_ext_description(bit);
551
552 object_property_add(cpu_obj, misa_cfg->name, "bool",
553 kvm_cpu_get_misa_ext_cfg,
554 kvm_cpu_set_misa_ext_cfg,
555 NULL, misa_cfg);
556 object_property_set_description(cpu_obj, misa_cfg->name,
557 misa_cfg->description);
558 }
559
560 for (i = 0; misa_bits[i] != 0; i++) {
561 const char *ext_name = riscv_get_misa_ext_name(misa_bits[i]);
562 riscv_cpu_add_kvm_unavail_prop(cpu_obj, ext_name);
563 }
564
565 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
566 KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];
567
568 object_property_add(cpu_obj, multi_cfg->name, "bool",
569 kvm_cpu_get_multi_ext_cfg,
570 kvm_cpu_set_multi_ext_cfg,
571 NULL, multi_cfg);
572 }
573
574 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
575 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
576 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);
577
578 /* We don't have the needed KVM support for profiles */
579 for (i = 0; riscv_profiles[i] != NULL; i++) {
580 riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name);
581 }
582 }
583
584 static int kvm_riscv_get_regs_core(CPUState *cs)
585 {
586 int ret = 0;
587 int i;
588 target_ulong reg;
589 CPURISCVState *env = &RISCV_CPU(cs)->env;
590
591 ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
592 if (ret) {
593 return ret;
594 }
595 env->pc = reg;
596
597 for (i = 1; i < 32; i++) {
598 uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
599 ret = kvm_get_one_reg(cs, id, &reg);
600 if (ret) {
601 return ret;
602 }
603 env->gpr[i] = reg;
604 }
605
606 return ret;
607 }
608
609 static int kvm_riscv_put_regs_core(CPUState *cs)
610 {
611 int ret = 0;
612 int i;
613 target_ulong reg;
614 CPURISCVState *env = &RISCV_CPU(cs)->env;
615
616 reg = env->pc;
617 ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
618 if (ret) {
619 return ret;
620 }
621
622 for (i = 1; i < 32; i++) {
623 uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
624 reg = env->gpr[i];
625 ret = kvm_set_one_reg(cs, id, &reg);
626 if (ret) {
627 return ret;
628 }
629 }
630
631 return ret;
632 }
633
634 static int kvm_riscv_get_regs_csr(CPUState *cs)
635 {
636 RISCVCPU *cpu = RISCV_CPU(cs);
637 uint64_t reg;
638 int i, ret;
639
640 for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
641 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
642
643 if (!csr_cfg->supported) {
644 continue;
645 }
646
647 ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
648 if (ret) {
649 return ret;
650 }
651
652 if (csr_cfg->prop_size == sizeof(uint32_t)) {
653 kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg);
654 } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
655 kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
656 } else {
657 g_assert_not_reached();
658 }
659 }
660
661 return 0;
662 }
663
664 static int kvm_riscv_put_regs_csr(CPUState *cs)
665 {
666 RISCVCPU *cpu = RISCV_CPU(cs);
667 uint64_t reg;
668 int i, ret;
669
670 for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
671 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
672
673 if (!csr_cfg->supported) {
674 continue;
675 }
676
677 if (csr_cfg->prop_size == sizeof(uint32_t)) {
678 reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
679 } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
680 reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
681 } else {
682 g_assert_not_reached();
683 }
684
685 ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
686 if (ret) {
687 return ret;
688 }
689 }
690
691 return 0;
692 }
693
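/*
 * Zero the QEMU-side copies of the KVM-managed CSRs; the new values
 * reach KVM on the next kvm_arch_put_registers() via
 * kvm_riscv_put_regs_csr().
 */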
694 static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
695 {
696 env->mstatus = 0;
697 env->mie = 0;
698 env->stvec = 0;
699 env->sscratch = 0;
700 env->sepc = 0;
701 env->scause = 0;
702 env->stval = 0;
703 env->mip = 0;
704 env->satp = 0;
705 env->scounteren = 0;
706 env->senvcfg = 0;
707 }
708
709 static int kvm_riscv_get_regs_fp(CPUState *cs)
710 {
711 int ret = 0;
712 int i;
713 CPURISCVState *env = &RISCV_CPU(cs)->env;
714
715 if (riscv_has_ext(env, RVD)) {
716 uint64_t reg;
717 for (i = 0; i < 32; i++) {
718 ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), &reg);
719 if (ret) {
720 return ret;
721 }
722 env->fpr[i] = reg;
723 }
724 return ret;
725 }
726
727 if (riscv_has_ext(env, RVF)) {
728 uint32_t reg;
729 for (i = 0; i < 32; i++) {
730 ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), &reg);
731 if (ret) {
732 return ret;
733 }
734 env->fpr[i] = reg;
735 }
736 return ret;
737 }
738
739 return ret;
740 }
741
742 static int kvm_riscv_put_regs_fp(CPUState *cs)
743 {
744 int ret = 0;
745 int i;
746 CPURISCVState *env = &RISCV_CPU(cs)->env;
747
748 if (riscv_has_ext(env, RVD)) {
749 uint64_t reg;
750 for (i = 0; i < 32; i++) {
751 reg = env->fpr[i];
752 ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), &reg);
753 if (ret) {
754 return ret;
755 }
756 }
757 return ret;
758 }
759
760 if (riscv_has_ext(env, RVF)) {
761 uint32_t reg;
762 for (i = 0; i < 32; i++) {
763 reg = env->fpr[i];
764 ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), &reg);
765 if (ret) {
766 return ret;
767 }
768 }
769 return ret;
770 }
771
772 return ret;
773 }
774
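/*
 * The guest timer registers are only read back when the VM stops and
 * written again when it resumes (see kvm_riscv_vm_state_change());
 * kvm_timer_dirty guards against saving them twice.
 */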
775 static void kvm_riscv_get_regs_timer(CPUState *cs)
776 {
777 CPURISCVState *env = &RISCV_CPU(cs)->env;
778
779 if (env->kvm_timer_dirty) {
780 return;
781 }
782
783 KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time);
784 KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare);
785 KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state);
786 KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency);
787
788 env->kvm_timer_dirty = true;
789 }
790
791 static void kvm_riscv_put_regs_timer(CPUState *cs)
792 {
793 uint64_t reg;
794 CPURISCVState *env = &RISCV_CPU(cs)->env;
795
796 if (!env->kvm_timer_dirty) {
797 return;
798 }
799
800 KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time);
801 KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare);
802
803 /*
804 * Writing RISCV_TIMER_REG(state) returns an error from KVM when
805 * env->kvm_timer_state == 0. Ideally this would be fixed in KVM,
806 * but for now simply skip the write in QEMU.
807 * TODO: if KVM changes, adapt here.
808 */
809 if (env->kvm_timer_state) {
810 KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state);
811 }
812
813 /*
814 * For now, migration does not work between hosts with different timer
815 * frequencies, so verify during migration that the destination's
816 * frequency matches the source's.
817 */
818 if (migration_is_running()) {
819 KVM_RISCV_GET_TIMER(cs, frequency, reg);
820 if (reg != env->kvm_timer_frequency) {
821 error_report("Dst Hosts timer frequency != Src Hosts");
822 }
823 }
824
825 env->kvm_timer_dirty = false;
826 }
827
828 uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
829 {
830 uint64_t reg;
831
832 KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);
833
834 return reg;
835 }
836
837 static int kvm_riscv_get_regs_vector(CPUState *cs)
838 {
839 RISCVCPU *cpu = RISCV_CPU(cs);
840 CPURISCVState *env = &cpu->env;
841 target_ulong reg;
842 uint64_t vreg_id;
843 int vreg_idx, ret = 0;
844
845 if (!riscv_has_ext(env, RVV)) {
846 return 0;
847 }
848
849 ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
850 if (ret) {
851 return ret;
852 }
853 env->vstart = reg;
854
855 ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
856 if (ret) {
857 return ret;
858 }
859 env->vl = reg;
860
861 ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
862 if (ret) {
863 return ret;
864 }
865 env->vtype = reg;
866
867 if (kvm_v_vlenb.supported) {
868 ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
869 if (ret) {
870 return ret;
871 }
872 cpu->cfg.vlenb = reg;
873
874 for (int i = 0; i < 32; i++) {
875 /*
876 * vreg[] is statically allocated using RV_VLEN_MAX.
877 * Use it instead of vlenb to calculate vreg_idx for
878 * simplicity.
879 */
880 vreg_idx = i * RV_VLEN_MAX / 64;
881 vreg_id = kvm_riscv_vector_reg_id(cpu, i);
882
883 ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
884 if (ret) {
885 return ret;
886 }
887 }
888 }
889
890 return 0;
891 }
892
893 static int kvm_riscv_put_regs_vector(CPUState *cs)
894 {
895 RISCVCPU *cpu = RISCV_CPU(cs);
896 CPURISCVState *env = &cpu->env;
897 target_ulong reg;
898 uint64_t vreg_id;
899 int vreg_idx, ret = 0;
900
901 if (!riscv_has_ext(env, RVV)) {
902 return 0;
903 }
904
905 reg = env->vstart;
906 ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
907 if (ret) {
908 return ret;
909 }
910
911 reg = env->vl;
912 ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
913 if (ret) {
914 return ret;
915 }
916
917 reg = env->vtype;
918 ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
919 if (ret) {
920 return ret;
921 }
922
923 if (kvm_v_vlenb.supported) {
924 reg = cpu->cfg.vlenb;
925 ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
926
927 for (int i = 0; i < 32; i++) {
928 /*
929 * vreg[] is statically allocated using RV_VLEN_MAX.
930 * Use it instead of vlenb to calculate vreg_idx for
931 * simplicity.
932 */
933 vreg_idx = i * RV_VLEN_MAX / 64;
934 vreg_id = kvm_riscv_vector_reg_id(cpu, i);
935
936 ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
937 if (ret) {
938 return ret;
939 }
940 }
941 }
942
943 return ret;
944 }
945
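/*
 * File descriptors of a short-lived "scratch" VM/vcpu used to probe
 * host capabilities before any real vcpu exists.
 */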
946 typedef struct KVMScratchCPU {
947 int kvmfd;
948 int vmfd;
949 int cpufd;
950 } KVMScratchCPU;
951
952 /*
953 * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
954 * from target/arm/kvm.c.
955 */
956 static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
957 {
958 int kvmfd = -1, vmfd = -1, cpufd = -1;
959
960 kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
961 if (kvmfd < 0) {
962 goto err;
963 }
964 do {
965 vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
966 } while (vmfd == -1 && errno == EINTR);
967 if (vmfd < 0) {
968 goto err;
969 }
970 cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
971 if (cpufd < 0) {
972 goto err;
973 }
974
975 scratch->kvmfd = kvmfd;
976 scratch->vmfd = vmfd;
977 scratch->cpufd = cpufd;
978
979 return true;
980
981 err:
982 if (cpufd >= 0) {
983 close(cpufd);
984 }
985 if (vmfd >= 0) {
986 close(vmfd);
987 }
988 if (kvmfd >= 0) {
989 close(kvmfd);
990 }
991
992 return false;
993 }
994
995 static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
996 {
997 close(scratch->cpufd);
998 close(scratch->vmfd);
999 close(scratch->kvmfd);
1000 }
1001
1002 static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
1003 {
1004 struct kvm_one_reg reg;
1005 int ret;
1006
1007 reg.id = RISCV_CONFIG_REG(mvendorid);
1008 reg.addr = (uint64_t)&cpu->cfg.mvendorid;
1009 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1010 if (ret != 0) {
1011 error_report("Unable to retrieve mvendorid from host, error %d", ret);
1012 }
1013
1014 reg.id = RISCV_CONFIG_REG(marchid);
1015 reg.addr = (uint64_t)&cpu->cfg.marchid;
1016 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1017 if (ret != 0) {
1018 error_report("Unable to retrieve marchid from host, error %d", ret);
1019 }
1020
1021 reg.id = RISCV_CONFIG_REG(mimpid);
1022 reg.addr = (uint64_t)&cpu->cfg.mimpid;
1023 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1024 if (ret != 0) {
1025 error_report("Unable to retrieve mimpid from host, error %d", ret);
1026 }
1027 }
1028
1029 static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
1030 KVMScratchCPU *kvmcpu)
1031 {
1032 CPURISCVState *env = &cpu->env;
1033 struct kvm_one_reg reg;
1034 int ret;
1035
1036 reg.id = RISCV_CONFIG_REG(isa);
1037 reg.addr = (uint64_t)&env->misa_ext_mask;
1038 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1039
1040 if (ret) {
1041 error_report("Unable to fetch ISA register from KVM, "
1042 "error %d", ret);
1043 kvm_riscv_destroy_scratch_vcpu(kvmcpu);
1044 exit(EXIT_FAILURE);
1045 }
1046
1047 env->misa_ext = env->misa_ext_mask;
1048 }
1049
1050 static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
1051 KVMCPUConfig *cbomz_cfg)
1052 {
1053 struct kvm_one_reg reg;
1054 int ret;
1055
1056 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
1057 cbomz_cfg->kvm_reg_id);
1058 reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
1059 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1060 if (ret != 0) {
1061 error_report("Unable to read KVM reg %s, error %d",
1062 cbomz_cfg->name, ret);
1063 exit(EXIT_FAILURE);
1064 }
1065 }
1066
1067 static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
1068 KVMScratchCPU *kvmcpu)
1069 {
1070 uint64_t val;
1071 int i, ret;
1072
1073 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
1074 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
1075 struct kvm_one_reg reg;
1076
1077 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
1078 multi_ext_cfg->kvm_reg_id);
1079 reg.addr = (uint64_t)&val;
1080 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1081 if (ret != 0) {
1082 if (errno == EINVAL) {
1083 /* Silently default to 'false' if KVM does not support it. */
1084 multi_ext_cfg->supported = false;
1085 val = false;
1086 } else {
1087 error_report("Unable to read ISA_EXT KVM register %s: %s",
1088 multi_ext_cfg->name, strerror(errno));
1089 exit(EXIT_FAILURE);
1090 }
1091 } else {
1092 multi_ext_cfg->supported = true;
1093 }
1094
1095 kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
1096 }
1097
1098 if (cpu->cfg.ext_zicbom) {
1099 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
1100 }
1101
1102 if (cpu->cfg.ext_zicboz) {
1103 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
1104 }
1105 }
1106
1107 static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
1108 {
1109 uint64_t val;
1110 int i, ret;
1111
1112 for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
1113 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
1114 struct kvm_one_reg reg;
1115
1116 reg.id = csr_cfg->kvm_reg_id;
1117 reg.addr = (uint64_t)&val;
1118 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1119 if (ret != 0) {
1120 if (errno == EINVAL) {
1121 csr_cfg->supported = false;
1122 } else {
1123 error_report("Unable to read KVM CSR %s: %s",
1124 csr_cfg->name, strerror(errno));
1125 exit(EXIT_FAILURE);
1126 }
1127 } else {
1128 csr_cfg->supported = true;
1129 }
1130 }
1131 }
1132
1133 static int uint64_cmp(const void *a, const void *b)
1134 {
1135 uint64_t val1 = *(const uint64_t *)a;
1136 uint64_t val2 = *(const uint64_t *)b;
1137
1138 if (val1 < val2) {
1139 return -1;
1140 }
1141
1142 if (val1 > val2) {
1143 return 1;
1144 }
1145
1146 return 0;
1147 }
1148
1149 static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
1150 struct kvm_reg_list *reglist)
1151 {
1152 struct kvm_reg_list *reg_search;
1153
1154 reg_search = bsearch(&kvm_sbi_dbcn.kvm_reg_id, reglist->reg, reglist->n,
1155 sizeof(uint64_t), uint64_cmp);
1156
1157 if (reg_search) {
1158 kvm_sbi_dbcn.supported = true;
1159 }
1160 }
1161
1162 static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
1163 struct kvm_reg_list *reglist)
1164 {
1165 struct kvm_one_reg reg;
1166 struct kvm_reg_list *reg_search;
1167 uint64_t val;
1168 int ret;
1169
1170 reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n,
1171 sizeof(uint64_t), uint64_cmp);
1172
1173 if (reg_search) {
1174 reg.id = kvm_v_vlenb.kvm_reg_id;
1175 reg.addr = (uint64_t)&val;
1176
1177 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1178 if (ret != 0) {
1179 error_report("Unable to read vlenb register, error code: %d",
1180 errno);
1181 exit(EXIT_FAILURE);
1182 }
1183
1184 kvm_v_vlenb.supported = true;
1185 cpu->cfg.vlenb = val;
1186 }
1187 }
1188
1189 static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
1190 {
1191 struct kvm_reg_list *reg_search;
1192 uint64_t reg_id;
1193
1194 for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
1195 KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
1196
1197 reg_id = csr_cfg->kvm_reg_id;
1198 reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
1199 sizeof(uint64_t), uint64_cmp);
1200 if (!reg_search) {
1201 continue;
1202 }
1203
1204 csr_cfg->supported = true;
1205 }
1206 }
1207
1208 static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
1209 {
1210 g_autofree struct kvm_reg_list *reglist = NULL;
1211 KVMCPUConfig *multi_ext_cfg;
1212 struct kvm_one_reg reg;
1213 struct kvm_reg_list rl_struct;
1214 uint64_t val, reg_id, *reg_search;
1215 int i, ret;
1216
1217 rl_struct.n = 0;
1218 ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, &rl_struct);
1219
1220 /*
1221 * If KVM_GET_REG_LIST isn't supported we'll get errno 22
1222 * (EINVAL). Use read_legacy() in this case.
1223 */
1224 if (errno == EINVAL) {
1225 kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
1226 kvm_riscv_read_csr_cfg_legacy(kvmcpu);
1227 return;
1228 } else if (errno != E2BIG) {
1229 /*
1230 * E2BIG is an expected error message for the API since we
1231 * don't know the number of registers. The right amount will
1232 * be written in rl_struct.n.
1233 *
1234 * Error out if we get any other errno.
1235 */
1236 error_report("Error when accessing get-reg-list: %s",
1237 strerror(errno));
1238 exit(EXIT_FAILURE);
1239 }
1240
1241 reglist = g_malloc(sizeof(struct kvm_reg_list) +
1242 rl_struct.n * sizeof(uint64_t));
1243 reglist->n = rl_struct.n;
1244 ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, reglist);
1245 if (ret) {
1246 error_report("Error when reading KVM_GET_REG_LIST: %s",
1247 strerror(errno));
1248 exit(EXIT_FAILURE);
1249 }
1250
1251 /* sort reglist to use bsearch() */
1252 qsort(&reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp);
1253
1254 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
1255 multi_ext_cfg = &kvm_multi_ext_cfgs[i];
1256 reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
1257 multi_ext_cfg->kvm_reg_id);
1258 reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
1259 sizeof(uint64_t), uint64_cmp);
1260 if (!reg_search) {
1261 continue;
1262 }
1263
1264 reg.id = reg_id;
1265 reg.addr = (uint64_t)&val;
1266 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
1267 if (ret != 0) {
1268 error_report("Unable to read ISA_EXT KVM register %s: %s",
1269 multi_ext_cfg->name, strerror(errno));
1270 exit(EXIT_FAILURE);
1271 }
1272
1273 multi_ext_cfg->supported = true;
1274 kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
1275 }
1276
1277 if (cpu->cfg.ext_zicbom) {
1278 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
1279 }
1280
1281 if (cpu->cfg.ext_zicboz) {
1282 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
1283 }
1284
1285 if (riscv_has_ext(&cpu->env, RVV)) {
1286 kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
1287 }
1288
1289 kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
1290 kvm_riscv_read_csr_cfg(reglist);
1291 }
1292
1293 static void riscv_init_kvm_registers(Object *cpu_obj)
1294 {
1295 RISCVCPU *cpu = RISCV_CPU(cpu_obj);
1296 KVMScratchCPU kvmcpu;
1297
1298 if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
1299 return;
1300 }
1301
1302 kvm_riscv_init_machine_ids(cpu, &kvmcpu);
1303 kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
1304 kvm_riscv_init_cfg(cpu, &kvmcpu);
1305
1306 kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
1307 }
1308
1309 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
1310 KVM_CAP_LAST_INFO
1311 };
1312
1313 int kvm_arch_get_registers(CPUState *cs, Error **errp)
1314 {
1315 int ret = 0;
1316
1317 ret = kvm_riscv_get_regs_core(cs);
1318 if (ret) {
1319 return ret;
1320 }
1321
1322 ret = kvm_riscv_get_regs_csr(cs);
1323 if (ret) {
1324 return ret;
1325 }
1326
1327 ret = kvm_riscv_get_regs_fp(cs);
1328 if (ret) {
1329 return ret;
1330 }
1331
1332 ret = kvm_riscv_get_regs_vector(cs);
1333 if (ret) {
1334 return ret;
1335 }
1336
1337 return ret;
1338 }
1339
1340 int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state)
1341 {
1342 if (cap_has_mp_state) {
1343 struct kvm_mp_state mp_state = {
1344 .mp_state = state
1345 };
1346
1347 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
1348 if (ret) {
1349 fprintf(stderr, "%s: failed to sync MP_STATE %d/%s\n",
1350 __func__, ret, strerror(-ret));
1351 return -1;
1352 }
1353 }
1354
1355 return 0;
1356 }
1357
1358 int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
1359 {
1360 int ret = 0;
1361
1362 ret = kvm_riscv_put_regs_core(cs);
1363 if (ret) {
1364 return ret;
1365 }
1366
1367 ret = kvm_riscv_put_regs_csr(cs);
1368 if (ret) {
1369 return ret;
1370 }
1371
1372 ret = kvm_riscv_put_regs_fp(cs);
1373 if (ret) {
1374 return ret;
1375 }
1376
1377 ret = kvm_riscv_put_regs_vector(cs);
1378 if (ret) {
1379 return ret;
1380 }
1381
1382 if (KVM_PUT_RESET_STATE == level) {
1383 RISCVCPU *cpu = RISCV_CPU(cs);
1384 if (cs->cpu_index == 0) {
1385 ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_RUNNABLE);
1386 } else {
1387 ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_STOPPED);
1388 }
1389 if (ret) {
1390 return ret;
1391 }
1392 }
1393
1394 return ret;
1395 }
1396
1397 int kvm_arch_release_virq_post(int virq)
1398 {
1399 return 0;
1400 }
1401
1402 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
1403 uint64_t address, uint32_t data, PCIDevice *dev)
1404 {
1405 return 0;
1406 }
1407
1408 int kvm_arch_destroy_vcpu(CPUState *cs)
1409 {
1410 return 0;
1411 }
1412
1413 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
1414 {
1415 return cpu->cpu_index;
1416 }
1417
1418 static void kvm_riscv_vm_state_change(void *opaque, bool running,
1419 RunState state)
1420 {
1421 CPUState *cs = opaque;
1422
1423 if (running) {
1424 kvm_riscv_put_regs_timer(cs);
1425 } else {
1426 kvm_riscv_get_regs_timer(cs);
1427 }
1428 }
1429
1430 void kvm_arch_init_irq_routing(KVMState *s)
1431 {
1432 }
1433
1434 static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
1435 {
1436 target_ulong reg;
1437 uint64_t id;
1438 int ret;
1439
1440 id = RISCV_CONFIG_REG(mvendorid);
1441 /*
1442 * cfg.mvendorid is a uint32 but a target_ulong will
1443 * be written. Assign it to a target_ulong var to avoid
1444 * writing pieces of other cpu->cfg fields in the reg.
1445 */
1446 reg = cpu->cfg.mvendorid;
1447 ret = kvm_set_one_reg(cs, id, &reg);
1448 if (ret != 0) {
1449 return ret;
1450 }
1451
1452 id = RISCV_CONFIG_REG(marchid);
1453 ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
1454 if (ret != 0) {
1455 return ret;
1456 }
1457
1458 id = RISCV_CONFIG_REG(mimpid);
1459 ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
1460
1461 return ret;
1462 }
1463
1464 static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
1465 {
1466 target_ulong reg = 1;
1467
1468 if (!kvm_sbi_dbcn.supported) {
1469 return 0;
1470 }
1471
1472 return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
1473 }
1474
1475 int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
1476 {
1477 return 0;
1478 }
1479
1480 int kvm_arch_init_vcpu(CPUState *cs)
1481 {
1482 int ret = 0;
1483 RISCVCPU *cpu = RISCV_CPU(cs);
1484
1485 qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
1486
1487 if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
1488 ret = kvm_vcpu_set_machine_ids(cpu, cs);
1489 if (ret != 0) {
1490 return ret;
1491 }
1492 }
1493
1494 kvm_riscv_update_cpu_misa_ext(cpu, cs);
1495 kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);
1496
1497 ret = kvm_vcpu_enable_sbi_dbcn(cpu, cs);
1498
1499 return ret;
1500 }
1501
1502 int kvm_arch_msi_data_to_gsi(uint32_t data)
1503 {
1504 abort();
1505 }
1506
1507 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
1508 int vector, PCIDevice *dev)
1509 {
1510 return 0;
1511 }
1512
1513 int kvm_arch_get_default_type(MachineState *ms)
1514 {
1515 return 0;
1516 }
1517
1518 int kvm_arch_init(MachineState *ms, KVMState *s)
1519 {
1520 cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
1521 return 0;
1522 }
1523
1524 int kvm_arch_irqchip_create(KVMState *s)
1525 {
1526 /*
1527 * We can create the VAIA using the newer device control API.
1528 */
1529 return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
1530 }
1531
1532 int kvm_arch_process_async_events(CPUState *cs)
1533 {
1534 return 0;
1535 }
1536
1537 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1538 {
1539 }
1540
1541 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1542 {
1543 return MEMTXATTRS_UNSPECIFIED;
1544 }
1545
1546 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
1547 {
1548 return true;
1549 }
1550
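/*
 * SBI Debug Console (DBCN) calls are forwarded by KVM to userspace;
 * QEMU services them using the first serial device, serial_hd(0).
 */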
1551 static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
1552 {
1553 g_autofree uint8_t *buf = NULL;
1554 RISCVCPU *cpu = RISCV_CPU(cs);
1555 target_ulong num_bytes;
1556 uint64_t addr;
1557 unsigned char ch;
1558 int ret;
1559
1560 switch (run->riscv_sbi.function_id) {
1561 case SBI_EXT_DBCN_CONSOLE_READ:
1562 case SBI_EXT_DBCN_CONSOLE_WRITE:
1563 num_bytes = run->riscv_sbi.args[0];
1564
1565 if (num_bytes == 0) {
1566 run->riscv_sbi.ret[0] = SBI_SUCCESS;
1567 run->riscv_sbi.ret[1] = 0;
1568 break;
1569 }
1570
1571 addr = run->riscv_sbi.args[1];
1572
1573 /*
1574 * Handle the case where a 32 bit CPU is running in a
1575 * 64 bit addressing env.
1576 */
1577 if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) {
1578 addr |= (uint64_t)run->riscv_sbi.args[2] << 32;
1579 }
1580
1581 buf = g_malloc0(num_bytes);
1582
1583 if (run->riscv_sbi.function_id == SBI_EXT_DBCN_CONSOLE_READ) {
1584 ret = qemu_chr_fe_read_all(serial_hd(0)->be, buf, num_bytes);
1585 if (ret < 0) {
1586 error_report("SBI_EXT_DBCN_CONSOLE_READ: error when "
1587 "reading chardev");
1588 exit(1);
1589 }
1590
1591 cpu_physical_memory_write(addr, buf, ret);
1592 } else {
1593 cpu_physical_memory_read(addr, buf, num_bytes);
1594
1595 ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes);
1596 if (ret < 0) {
1597 error_report("SBI_EXT_DBCN_CONSOLE_WRITE: error when "
1598 "writing chardev");
1599 exit(1);
1600 }
1601 }
1602
1603 run->riscv_sbi.ret[0] = SBI_SUCCESS;
1604 run->riscv_sbi.ret[1] = ret;
1605 break;
1606 case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
1607 ch = run->riscv_sbi.args[0];
1608 ret = qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
1609
1610 if (ret < 0) {
1611 error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when "
1612 "writing chardev");
1613 exit(1);
1614 }
1615
1616 run->riscv_sbi.ret[0] = SBI_SUCCESS;
1617 run->riscv_sbi.ret[1] = 0;
1618 break;
1619 default:
1620 run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
1621 }
1622 }
1623
1624 static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
1625 {
1626 int ret = 0;
1627 unsigned char ch;
1628 switch (run->riscv_sbi.extension_id) {
1629 case SBI_EXT_0_1_CONSOLE_PUTCHAR:
1630 ch = run->riscv_sbi.args[0];
1631 qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
1632 break;
1633 case SBI_EXT_0_1_CONSOLE_GETCHAR:
1634 ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
1635 if (ret == sizeof(ch)) {
1636 run->riscv_sbi.ret[0] = ch;
1637 } else {
1638 run->riscv_sbi.ret[0] = -1;
1639 }
1640 ret = 0;
1641 break;
1642 case SBI_EXT_DBCN:
1643 kvm_riscv_handle_sbi_dbcn(cs, run);
1644 break;
1645 default:
1646 qemu_log_mask(LOG_UNIMP,
1647 "%s: un-handled SBI EXIT, specific reasons is %lu\n",
1648 __func__, run->riscv_sbi.extension_id);
1649 ret = -1;
1650 break;
1651 }
1652 return ret;
1653 }
1654
1655 static int kvm_riscv_handle_csr(CPUState *cs, struct kvm_run *run)
1656 {
1657 target_ulong csr_num = run->riscv_csr.csr_num;
1658 target_ulong new_value = run->riscv_csr.new_value;
1659 target_ulong write_mask = run->riscv_csr.write_mask;
1660 int ret = 0;
1661
1662 switch (csr_num) {
1663 case CSR_SEED:
1664 run->riscv_csr.ret_value = riscv_new_csr_seed(new_value, write_mask);
1665 break;
1666 default:
1667 qemu_log_mask(LOG_UNIMP,
1668 "%s: un-handled CSR EXIT for CSR %lx\n",
1669 __func__, csr_num);
1670 ret = -1;
1671 break;
1672 }
1673
1674 return ret;
1675 }
1676
1677 static bool kvm_riscv_handle_debug(CPUState *cs)
1678 {
1679 RISCVCPU *cpu = RISCV_CPU(cs);
1680 CPURISCVState *env = &cpu->env;
1681
1682 /* Ensure PC is synchronised */
1683 kvm_cpu_synchronize_state(cs);
1684
1685 if (kvm_find_sw_breakpoint(cs, env->pc)) {
1686 return true;
1687 }
1688
1689 return false;
1690 }
1691
1692 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1693 {
1694 int ret = 0;
1695 switch (run->exit_reason) {
1696 case KVM_EXIT_RISCV_SBI:
1697 ret = kvm_riscv_handle_sbi(cs, run);
1698 break;
1699 case KVM_EXIT_RISCV_CSR:
1700 ret = kvm_riscv_handle_csr(cs, run);
1701 break;
1702 case KVM_EXIT_DEBUG:
1703 if (kvm_riscv_handle_debug(cs)) {
1704 ret = EXCP_DEBUG;
1705 }
1706 break;
1707 default:
1708 qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
1709 __func__, run->exit_reason);
1710 ret = -1;
1711 break;
1712 }
1713 return ret;
1714 }
1715
1716 void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
1717 {
1718 CPURISCVState *env = &cpu->env;
1719 int i;
1720
1721 for (i = 0; i < 32; i++) {
1722 env->gpr[i] = 0;
1723 }
1724 env->pc = cpu->env.kernel_addr;
1725 env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
1726 env->gpr[11] = cpu->env.fdt_addr; /* a1 */
1727
1728 kvm_riscv_reset_regs_csr(env);
1729 }
1730
1731 void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
1732 {
1733 int ret;
1734 unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;
1735
1736 if (irq != IRQ_S_EXT) {
1737 perror("kvm riscv set irq != IRQ_S_EXT\n");
1738 abort();
1739 }
1740
1741 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1742 if (ret < 0) {
1743 perror("Set irq failed");
1744 abort();
1745 }
1746 }
1747
1748 static int aia_mode;
1749
1750 static const char *kvm_aia_mode_str(uint64_t mode)
1751 {
1752 switch (mode) {
1753 case KVM_DEV_RISCV_AIA_MODE_EMUL:
1754 return "emul";
1755 case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
1756 return "hwaccel";
1757 case KVM_DEV_RISCV_AIA_MODE_AUTO:
1758 default:
1759 return "auto";
1760 };
1761 }
1762
1763 static char *riscv_get_kvm_aia(Object *obj, Error **errp)
1764 {
1765 return g_strdup(kvm_aia_mode_str(aia_mode));
1766 }
1767
1768 static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
1769 {
1770 if (!strcmp(val, "emul")) {
1771 aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
1772 } else if (!strcmp(val, "hwaccel")) {
1773 aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
1774 } else if (!strcmp(val, "auto")) {
1775 aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
1776 } else {
1777 error_setg(errp, "Invalid KVM AIA mode");
1778 error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
1779 }
1780 }
1781
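/*
 * The "riscv-aia" property is registered on the KVM accelerator object,
 * so it can be set on the command line, e.g.:
 *   -accel kvm,riscv-aia=hwaccel
 */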
1782 void kvm_arch_accel_class_init(ObjectClass *oc)
1783 {
1784 object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
1785 riscv_set_kvm_aia);
1786 object_class_property_set_description(oc, "riscv-aia",
1787 "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. "
1788 "Changing KVM AIA modes relies on host support. Defaults to 'auto' "
1789 "if the host supports it");
1790 object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
1791 "auto");
1792 }
1793
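/*
 * Create and configure the in-kernel AIA device: negotiate the AIA mode,
 * program the number of wired irqs and MSIs, the group/hart/guest index
 * bits and the per-hart IMSIC addresses, then initialize the device and
 * set up GSI routing.
 */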
1794 void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
1795 uint64_t aia_irq_num, uint64_t aia_msi_num,
1796 uint64_t aplic_base, uint64_t imsic_base,
1797 uint64_t guest_num)
1798 {
1799 int ret, i;
1800 int aia_fd = -1;
1801 uint64_t default_aia_mode;
1802 uint64_t socket_count = riscv_socket_count(machine);
1803 uint64_t max_hart_per_socket = 0;
1804 uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
1805 uint64_t socket_bits, hart_bits, guest_bits;
1806 uint64_t max_group_id;
1807
1808 aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
1809
1810 if (aia_fd < 0) {
1811 error_report("Unable to create in-kernel irqchip");
1812 exit(1);
1813 }
1814
1815 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1816 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1817 &default_aia_mode, false, NULL);
1818 if (ret < 0) {
1819 error_report("KVM AIA: failed to get current KVM AIA mode");
1820 exit(1);
1821 }
1822
1823 if (default_aia_mode != aia_mode) {
1824 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1825 KVM_DEV_RISCV_AIA_CONFIG_MODE,
1826 &aia_mode, true, NULL);
1827 if (ret < 0) {
1828 warn_report("KVM AIA: failed to set KVM AIA mode '%s', using "
1829 "default host mode '%s'",
1830 kvm_aia_mode_str(aia_mode),
1831 kvm_aia_mode_str(default_aia_mode));
1832
1833 /* failed to change AIA mode, use default */
1834 aia_mode = default_aia_mode;
1835 }
1836 }
1837
1838 /*
1839 * Skip APLIC creation in KVM if we're running split mode.
1840 * This is done by leaving KVM_DEV_RISCV_AIA_CONFIG_SRCS
1841 * unset. We can also skip KVM_DEV_RISCV_AIA_ADDR_APLIC
1842 * since KVM won't be using it.
1843 */
1844 if (!kvm_kernel_irqchip_split()) {
1845 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1846 KVM_DEV_RISCV_AIA_CONFIG_SRCS,
1847 &aia_irq_num, true, NULL);
1848 if (ret < 0) {
1849 error_report("KVM AIA: failed to set number of input irq lines");
1850 exit(1);
1851 }
1852
1853 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
1854 KVM_DEV_RISCV_AIA_ADDR_APLIC,
1855 &aplic_base, true, NULL);
1856 if (ret < 0) {
1857 error_report("KVM AIA: failed to set the base address of APLIC");
1858 exit(1);
1859 }
1860 }
1861
1862 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1863 KVM_DEV_RISCV_AIA_CONFIG_IDS,
1864 &aia_msi_num, true, NULL);
1865 if (ret < 0) {
1866 error_report("KVM AIA: failed to set number of msi");
1867 exit(1);
1868 }
1869
1870
1871 if (socket_count > 1) {
1872 max_group_id = socket_count - 1;
1873 socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1;
1874 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1875 KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
1876 &socket_bits, true, NULL);
1877 if (ret < 0) {
1878 error_report("KVM AIA: failed to set group_bits");
1879 exit(1);
1880 }
1881
1882 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1883 KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
1884 &group_shift, true, NULL);
1885 if (ret < 0) {
1886 error_report("KVM AIA: failed to set group_shift");
1887 exit(1);
1888 }
1889 }
1890
1891 guest_bits = guest_num == 0 ? 0 :
1892 find_last_bit(&guest_num, BITS_PER_LONG) + 1;
1893 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1894 KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
1895 &guest_bits, true, NULL);
1896 if (ret < 0) {
1897 error_report("KVM AIA: failed to set guest_bits");
1898 exit(1);
1899 }
1900
1901 for (socket = 0; socket < socket_count; socket++) {
1902 socket_imsic_base = imsic_base + socket * (1U << group_shift);
1903 hart_count = riscv_socket_hart_count(machine, socket);
1904 base_hart = riscv_socket_first_hartid(machine, socket);
1905
1906 if (max_hart_per_socket < hart_count) {
1907 max_hart_per_socket = hart_count;
1908 }
1909
1910 for (i = 0; i < hart_count; i++) {
1911 imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
1912 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
1913 KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
1914 &imsic_addr, true, NULL);
1915 if (ret < 0) {
1916 error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
1917 exit(1);
1918 }
1919 }
1920 }
1921
1922
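    /*
     * hart_bits is the number of bits needed to encode the largest hart
     * index within any single socket.
     */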
1923 if (max_hart_per_socket > 1) {
1924 max_hart_per_socket--;
1925 hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
1926 } else {
1927 hart_bits = 0;
1928 }
1929
1930 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
1931 KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
1932 &hart_bits, true, NULL);
1933 if (ret < 0) {
1934 error_report("KVM AIA: failed to set hart_bits");
1935 exit(1);
1936 }
1937
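    /*
     * Pre-populate a 1:1 GSI-to-APLIC-pin routing table so that irqfd/MSI
     * users later find a valid route for every wired interrupt.
     */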
1938 if (kvm_has_gsi_routing()) {
1939 for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
1940 /* KVM AIA only has one APLIC instance */
1941 kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
1942 }
1943 kvm_gsi_routing_allowed = true;
1944 kvm_irqchip_commit_routes(kvm_state);
1945 }
1946
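    /* All configuration is in place; ask KVM to bring the in-kernel AIA up. */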
1947 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
1948 KVM_DEV_RISCV_AIA_CTRL_INIT,
1949 NULL, true, NULL);
1950 if (ret < 0) {
1951 error_report("KVM AIA: initialization failed");
1952 exit(1);
1953 }
1954
1955 kvm_msi_via_irqfd_allowed = true;
1956 }
1957
1958 static void kvm_cpu_instance_init(CPUState *cs)
1959 {
1960 Object *obj = OBJECT(RISCV_CPU(cs));
1961
1962 riscv_init_kvm_registers(obj);
1963
1964 kvm_riscv_add_cpu_user_properties(obj);
1965 }
1966
1967 /*
1968 * We'll get here via the following path:
1969 *
1970 * riscv_cpu_realize()
1971 * -> cpu_exec_realizefn()
1972 * -> kvm_cpu_realize() (via accel_cpu_common_realize())
1973 */
1974 static bool kvm_cpu_realize(CPUState *cs, Error **errp)
1975 {
1976 RISCVCPU *cpu = RISCV_CPU(cs);
1977 int ret;
1978
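    /*
     * Userspace vector state may be disabled by default on the host;
     * enable it for this process via prctl() so that a vCPU with RVV
     * can be realized.
     */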
1979 if (riscv_has_ext(&cpu->env, RVV)) {
1980 ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
1981 if (ret) {
1982 error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s",
1983 strerrorname_np(errno));
1984 return false;
1985 }
1986 }
1987
1988 return true;
1989 }
1990
1991 void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1992 {
1993 CPURISCVState *env = &cpu->env;
1994 KVMScratchCPU kvmcpu;
1995 struct kvm_one_reg reg;
1996 uint64_t val;
1997 int ret;
1998
1999 /* short-circuit without spinning the scratch CPU */
2000 if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz &&
2001 !riscv_has_ext(env, RVV)) {
2002 return;
2003 }
2004
2005 if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
2006 error_setg(errp, "Unable to create scratch KVM cpu");
2007 return;
2008 }
2009
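    /*
     * The cbom/cboz block sizes and vlenb are fixed by the host. When the
     * user sets them explicitly, all we can do is verify that the requested
     * value matches what the scratch vCPU reports.
     */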
2010 if (cpu->cfg.ext_zicbom &&
2011 riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
2012
2013 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
2014 kvm_cbom_blocksize.kvm_reg_id);
2015 reg.addr = (uint64_t)&val;
2016 ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
2017 if (ret != 0) {
2018 error_setg(errp, "Unable to read cbom_blocksize, error %d", errno);
2019 return;
2020 }
2021
2022 if (cpu->cfg.cbom_blocksize != val) {
2023 error_setg(errp, "Unable to set cbom_blocksize to a different "
2024 "value than the host (%lu)", val);
2025 return;
2026 }
2027 }
2028
2029 if (cpu->cfg.ext_zicboz &&
2030 riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
2031
2032 reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
2033 kvm_cboz_blocksize.kvm_reg_id);
2034 reg.addr = (uint64_t)&val;
2035 ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
2036 if (ret != 0) {
2037 error_setg(errp, "Unable to read cboz_blocksize, error %d", errno);
2038 return;
2039 }
2040
2041 if (cpu->cfg.cboz_blocksize != val) {
2042 error_setg(errp, "Unable to set cboz_blocksize to a different "
2043 "value than the host (%lu)", val);
2044 return;
2045 }
2046 }
2047
2048 /* Users are setting vlen, not vlenb */
2049 if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) {
2050 if (!kvm_v_vlenb.supported) {
2051 error_setg(errp, "Unable to set 'vlenb': register not supported");
2052 return;
2053 }
2054
2055 reg.id = kvm_v_vlenb.kvm_reg_id;
2056 reg.addr = (uint64_t)&val;
2057 ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
2058 if (ret != 0) {
2059 error_setg(errp, "Unable to read vlenb register, error %d", errno);
2060 return;
2061 }
2062
2063 if (cpu->cfg.vlenb != val) {
2064 error_setg(errp, "Unable to set 'vlen' to a different "
2065 "value than the host (%lu)", val * 8);
2066 return;
2067 }
2068 }
2069
2070 kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
2071 }
2072
2073 static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data)
2074 {
2075 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
2076
2077 acc->cpu_instance_init = kvm_cpu_instance_init;
2078 acc->cpu_target_realize = kvm_cpu_realize;
2079 }
2080
2081 static const TypeInfo kvm_cpu_accel_type_info = {
2082 .name = ACCEL_CPU_NAME("kvm"),
2083
2084 .parent = TYPE_ACCEL_CPU,
2085 .class_init = kvm_cpu_accel_class_init,
2086 .abstract = true,
2087 };
2088 static void kvm_cpu_accel_register_types(void)
2089 {
2090 type_register_static(&kvm_cpu_accel_type_info);
2091 }
2092 type_init(kvm_cpu_accel_register_types);
2093
2094 static const TypeInfo riscv_kvm_cpu_type_infos[] = {
2095 {
2096 .name = TYPE_RISCV_CPU_HOST,
2097 .parent = TYPE_RISCV_CPU,
2098 #if defined(TARGET_RISCV32)
2099 .class_data = &(const RISCVCPUDef) {
2100 .misa_mxl_max = MXL_RV32,
2101 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2102 .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
2103 .cfg.max_satp_mode = -1,
2104 },
2105 #elif defined(TARGET_RISCV64)
2106 .class_data = &(const RISCVCPUDef) {
2107 .misa_mxl_max = MXL_RV64,
2108 .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
2109 .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
2110 .cfg.max_satp_mode = -1,
2111 },
2112 #endif
2113 }
2114 };
2115
2116 DEFINE_TYPES(riscv_kvm_cpu_type_infos)
2117
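/*
 * Encodings of the standard EBREAK (32-bit) and compressed C.EBREAK (16-bit)
 * instructions, patched in to implement software breakpoints.
 */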
2118 static const uint32_t ebreak_insn = 0x00100073;
2119 static const uint16_t c_ebreak_insn = 0x9002;
2120
2121 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
2122 {
2123 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 0)) {
2124 return -EINVAL;
2125 }
2126
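    /*
     * Only standard 32-bit instructions have 0b11 in the two least
     * significant opcode bits; anything else is a 16-bit compressed
     * instruction, so patch in the matching breakpoint encoding.
     */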
2127 if ((bp->saved_insn & 0x3) == 0x3) {
2128 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0)
2129 || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak_insn, 4, 1)) {
2130 return -EINVAL;
2131 }
2132 } else {
2133 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak_insn, 2, 1)) {
2134 return -EINVAL;
2135 }
2136 }
2137
2138 return 0;
2139 }
2140
2141 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
2142 {
2143 uint32_t ebreak;
2144 uint16_t c_ebreak;
2145
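    /*
     * Restore the saved instruction only if the breakpoint we inserted is
     * still present at the target address.
     */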
2146 if ((bp->saved_insn & 0x3) == 0x3) {
2147 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak, 4, 0) ||
2148 ebreak != ebreak_insn ||
2149 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
2150 return -EINVAL;
2151 }
2152 } else {
2153 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak, 2, 0) ||
2154 c_ebreak != c_ebreak_insn ||
2155 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 1)) {
2156 return -EINVAL;
2157 }
2158 }
2159
2160 return 0;
2161 }
2162
2163 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
2164 {
2165 /* TODO: To be implemented later. */
2166 return -EINVAL;
2167 }
2168
2169 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
2170 {
2171 /* TODO: To be implemented later. */
2172 return -EINVAL;
2173 }
2174
2175 void kvm_arch_remove_all_hw_breakpoints(void)
2176 {
2177 /* TODO: To be implemented later. */
2178 }
2179
2180 void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
2181 {
2182 if (kvm_sw_breakpoints_active(cs)) {
2183 dbg->control |= KVM_GUESTDBG_ENABLE;
2184 }
2185 }
2186