/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/core/accel-cpu.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "hw/intc/riscv_imsic.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/migration.h"
#include "sysemu/runstate.h"
#include "hw/riscv/numa.h"

/*
 * qemu_irq handler used with the in-kernel AIA: forward the board's
 * IRQ line state to the in-kernel irqchip via KVM_IRQ_LINE.
 */
void riscv_kvm_aplic_request(void *opaque, int irq, int level)
{
    kvm_set_irq(kvm_state, irq, !!level);
}

/* Initialized in kvm_arch_init(): whether KVM_CAP_MP_STATE is available. */
static bool cap_has_mp_state;

/*
 * Build a KVM_[GET|SET]_ONE_REG id for a register whose width follows
 * the guest XLEN: KVM_REG_SIZE_U32 on RV32, KVM_REG_SIZE_U64 on RV64.
 */
static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type,
                                       uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }
    return id;
}

/* Reg id for a register that is always 32 bits wide, regardless of XLEN. */
static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx)
{
    return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx;
}

/* Reg id for a register that is always 64 bits wide, regardless of XLEN. */
static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
{
    return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
}

/* Shorthand reg-id builders for the KVM register classes used below. */
#define RISCV_CORE_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
                           KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \
                           KVM_REG_RISCV_CSR_REG(name))

#define RISCV_CONFIG_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
                           KVM_REG_RISCV_CONFIG_REG(name))

#define RISCV_TIMER_REG(name)  kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \
                 KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(idx)  kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(idx)  kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)

/*
 * CSR accessors that propagate the error to the caller. Only usable
 * inside functions returning int.
 */
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (_ret) { \
            return _ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (_ret) { \
            return _ret; \
        } \
    } while (0)

/* Timer accessors abort on error: a timer sync failure is unrecoverable. */
#define KVM_RISCV_GET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

/*
 * Descriptor tying a QOM CPU property to a KVM register.
 * 'offset' is overloaded: a MISA bit mask for kvm_misa_ext_cfgs[]
 * entries, or an offset into RISCVCPUConfig (CPU_CFG_OFFSET) for
 * kvm_multi_ext_cfgs[] and the blocksize configs.
 */
typedef struct KVMCPUConfig {
    const char *name;
    const char *description;
    target_ulong offset;
    int kvm_reg_id;
    bool user_set;      /* true when the user explicitly set the property */
    bool supported;     /* true when the host KVM exposes the extension */
} KVMCPUConfig;

#define KVM_MISA_CFG(_bit, _reg_id) \
    {.offset = _bit, .kvm_reg_id = _reg_id}

/* KVM ISA extensions */
static KVMCPUConfig kvm_misa_ext_cfgs[] = {
    KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
    KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
    KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
    KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
    KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
    KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
    KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
};

/* QOM getter: reports whether the MISA bit is set in the host's mask. */
static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value = env->misa_ext_mask & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

/*
 * QOM setter for MISA bits. KVM only allows *disabling* a MISA bit
 * that the host has; requests to enable a missing bit are rejected.
 */
static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value, host_bit;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_bit = env->misa_ext_mask & misa_bit;

    /* No-op: requested state matches the host. */
    if (value == host_bit) {
        return;
    }

    if (!value) {
        /* Record the disable request; applied in kvm_arch_init_vcpu(). */
        misa_ext_cfg->user_set = true;
        return;
    }

    /*
     * Forbid users to enable extensions that aren't
     * available in the hart.
     */
    error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
               "enabled in the host", misa_ext_cfg->name);
}

/* Apply every user-requested MISA bit disable to the KVM vCPU. */
static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
{
    CPURISCVState *env = &cpu->env;
    uint64_t id, reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        target_ulong misa_bit = misa_cfg->offset;

        if (!misa_cfg->user_set) {
            continue;
        }

        /* If we're here we're going to disable the MISA bit */
        reg = 0;
        id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
                                    misa_cfg->kvm_reg_id);
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret != 0) {
            /*
             * We're not checking for -EINVAL because if the bit is about
             * to be disabled, it means that it was already enabled by
             * KVM. We determined that by fetching the 'isa' register
             * during init() time. Any error at this point is worth
             * aborting.
             */
            error_report("Unable to set KVM reg %s, error %d",
                         misa_cfg->name, ret);
            exit(EXIT_FAILURE);
        }
        env->misa_ext &= ~misa_bit;
    }
}

#define KVM_EXT_CFG(_name, _prop, _reg_id) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .kvm_reg_id = _reg_id}

/* Multi-letter extensions exposed through KVM_REG_RISCV_ISA_EXT. */
static KVMCPUConfig kvm_multi_ext_cfgs[] = {
    KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
    KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
    KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
    KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
    KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
    KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
    KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
    KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
    KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
    KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA), 256 KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC), 257 KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL), 258 KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT), 259 KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT), 260 }; 261 262 static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg) 263 { 264 return (void *)&cpu->cfg + kvmcfg->offset; 265 } 266 267 static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext, 268 uint32_t val) 269 { 270 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext); 271 272 *ext_enabled = val; 273 } 274 275 static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu, 276 KVMCPUConfig *multi_ext) 277 { 278 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext); 279 280 return *ext_enabled; 281 } 282 283 static void kvm_cpu_get_multi_ext_cfg(Object *obj, Visitor *v, 284 const char *name, 285 void *opaque, Error **errp) 286 { 287 KVMCPUConfig *multi_ext_cfg = opaque; 288 RISCVCPU *cpu = RISCV_CPU(obj); 289 bool value = kvm_cpu_cfg_get(cpu, multi_ext_cfg); 290 291 visit_type_bool(v, name, &value, errp); 292 } 293 294 static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v, 295 const char *name, 296 void *opaque, Error **errp) 297 { 298 KVMCPUConfig *multi_ext_cfg = opaque; 299 RISCVCPU *cpu = RISCV_CPU(obj); 300 bool value, host_val; 301 302 if (!visit_type_bool(v, name, &value, errp)) { 303 return; 304 } 305 306 host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg); 307 308 /* 309 * Ignore if the user is setting the same value 310 * as the host. 311 */ 312 if (value == host_val) { 313 return; 314 } 315 316 if (!multi_ext_cfg->supported) { 317 /* 318 * Error out if the user is trying to enable an 319 * extension that KVM doesn't support. Ignore 320 * option otherwise. 
321 */ 322 if (value) { 323 error_setg(errp, "KVM does not support disabling extension %s", 324 multi_ext_cfg->name); 325 } 326 327 return; 328 } 329 330 multi_ext_cfg->user_set = true; 331 kvm_cpu_cfg_set(cpu, multi_ext_cfg, value); 332 } 333 334 static KVMCPUConfig kvm_cbom_blocksize = { 335 .name = "cbom_blocksize", 336 .offset = CPU_CFG_OFFSET(cbom_blocksize), 337 .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) 338 }; 339 340 static KVMCPUConfig kvm_cboz_blocksize = { 341 .name = "cboz_blocksize", 342 .offset = CPU_CFG_OFFSET(cboz_blocksize), 343 .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) 344 }; 345 346 static void kvm_cpu_set_cbomz_blksize(Object *obj, Visitor *v, 347 const char *name, 348 void *opaque, Error **errp) 349 { 350 KVMCPUConfig *cbomz_cfg = opaque; 351 RISCVCPU *cpu = RISCV_CPU(obj); 352 uint16_t value, *host_val; 353 354 if (!visit_type_uint16(v, name, &value, errp)) { 355 return; 356 } 357 358 host_val = kvmconfig_get_cfg_addr(cpu, cbomz_cfg); 359 360 if (value != *host_val) { 361 error_report("Unable to set %s to a different value than " 362 "the host (%u)", 363 cbomz_cfg->name, *host_val); 364 exit(EXIT_FAILURE); 365 } 366 367 cbomz_cfg->user_set = true; 368 } 369 370 static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) 371 { 372 CPURISCVState *env = &cpu->env; 373 uint64_t id, reg; 374 int i, ret; 375 376 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { 377 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i]; 378 379 if (!multi_ext_cfg->user_set) { 380 continue; 381 } 382 383 id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, 384 multi_ext_cfg->kvm_reg_id); 385 reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg); 386 ret = kvm_set_one_reg(cs, id, ®); 387 if (ret != 0) { 388 error_report("Unable to %s extension %s in KVM, error %d", 389 reg ? 
"enable" : "disable", 390 multi_ext_cfg->name, ret); 391 exit(EXIT_FAILURE); 392 } 393 } 394 } 395 396 static void cpu_get_cfg_unavailable(Object *obj, Visitor *v, 397 const char *name, 398 void *opaque, Error **errp) 399 { 400 bool value = false; 401 402 visit_type_bool(v, name, &value, errp); 403 } 404 405 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v, 406 const char *name, 407 void *opaque, Error **errp) 408 { 409 const char *propname = opaque; 410 bool value; 411 412 if (!visit_type_bool(v, name, &value, errp)) { 413 return; 414 } 415 416 if (value) { 417 error_setg(errp, "'%s' is not available with KVM", 418 propname); 419 } 420 } 421 422 static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name) 423 { 424 /* Check if KVM created the property already */ 425 if (object_property_find(obj, prop_name)) { 426 return; 427 } 428 429 /* 430 * Set the default to disabled for every extension 431 * unknown to KVM and error out if the user attempts 432 * to enable any of them. 
433 */ 434 object_property_add(obj, prop_name, "bool", 435 cpu_get_cfg_unavailable, 436 cpu_set_cfg_unavailable, 437 NULL, (void *)prop_name); 438 } 439 440 static void riscv_cpu_add_kvm_unavail_prop_array(Object *obj, 441 const RISCVCPUMultiExtConfig *array) 442 { 443 const RISCVCPUMultiExtConfig *prop; 444 445 g_assert(array); 446 447 for (prop = array; prop && prop->name; prop++) { 448 riscv_cpu_add_kvm_unavail_prop(obj, prop->name); 449 } 450 } 451 452 static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj) 453 { 454 int i; 455 456 riscv_add_satp_mode_properties(cpu_obj); 457 458 for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) { 459 KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i]; 460 int bit = misa_cfg->offset; 461 462 misa_cfg->name = riscv_get_misa_ext_name(bit); 463 misa_cfg->description = riscv_get_misa_ext_description(bit); 464 465 object_property_add(cpu_obj, misa_cfg->name, "bool", 466 kvm_cpu_get_misa_ext_cfg, 467 kvm_cpu_set_misa_ext_cfg, 468 NULL, misa_cfg); 469 object_property_set_description(cpu_obj, misa_cfg->name, 470 misa_cfg->description); 471 } 472 473 for (i = 0; misa_bits[i] != 0; i++) { 474 const char *ext_name = riscv_get_misa_ext_name(misa_bits[i]); 475 riscv_cpu_add_kvm_unavail_prop(cpu_obj, ext_name); 476 } 477 478 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { 479 KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i]; 480 481 object_property_add(cpu_obj, multi_cfg->name, "bool", 482 kvm_cpu_get_multi_ext_cfg, 483 kvm_cpu_set_multi_ext_cfg, 484 NULL, multi_cfg); 485 } 486 487 object_property_add(cpu_obj, "cbom_blocksize", "uint16", 488 NULL, kvm_cpu_set_cbomz_blksize, 489 NULL, &kvm_cbom_blocksize); 490 491 object_property_add(cpu_obj, "cboz_blocksize", "uint16", 492 NULL, kvm_cpu_set_cbomz_blksize, 493 NULL, &kvm_cboz_blocksize); 494 495 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions); 496 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts); 497 
riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts); 498 499 /* We don't have the needed KVM support for profiles */ 500 for (i = 0; riscv_profiles[i] != NULL; i++) { 501 riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name); 502 } 503 } 504 505 static int kvm_riscv_get_regs_core(CPUState *cs) 506 { 507 int ret = 0; 508 int i; 509 target_ulong reg; 510 CPURISCVState *env = &RISCV_CPU(cs)->env; 511 512 ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®); 513 if (ret) { 514 return ret; 515 } 516 env->pc = reg; 517 518 for (i = 1; i < 32; i++) { 519 uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); 520 ret = kvm_get_one_reg(cs, id, ®); 521 if (ret) { 522 return ret; 523 } 524 env->gpr[i] = reg; 525 } 526 527 return ret; 528 } 529 530 static int kvm_riscv_put_regs_core(CPUState *cs) 531 { 532 int ret = 0; 533 int i; 534 target_ulong reg; 535 CPURISCVState *env = &RISCV_CPU(cs)->env; 536 537 reg = env->pc; 538 ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®); 539 if (ret) { 540 return ret; 541 } 542 543 for (i = 1; i < 32; i++) { 544 uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); 545 reg = env->gpr[i]; 546 ret = kvm_set_one_reg(cs, id, ®); 547 if (ret) { 548 return ret; 549 } 550 } 551 552 return ret; 553 } 554 555 static int kvm_riscv_get_regs_csr(CPUState *cs) 556 { 557 CPURISCVState *env = &RISCV_CPU(cs)->env; 558 559 KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus); 560 KVM_RISCV_GET_CSR(cs, env, sie, env->mie); 561 KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec); 562 KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch); 563 KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc); 564 KVM_RISCV_GET_CSR(cs, env, scause, env->scause); 565 KVM_RISCV_GET_CSR(cs, env, stval, env->stval); 566 KVM_RISCV_GET_CSR(cs, env, sip, env->mip); 567 KVM_RISCV_GET_CSR(cs, env, satp, env->satp); 568 569 return 0; 570 } 571 572 static int kvm_riscv_put_regs_csr(CPUState *cs) 573 { 574 CPURISCVState *env = 
&RISCV_CPU(cs)->env; 575 576 KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus); 577 KVM_RISCV_SET_CSR(cs, env, sie, env->mie); 578 KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec); 579 KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch); 580 KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc); 581 KVM_RISCV_SET_CSR(cs, env, scause, env->scause); 582 KVM_RISCV_SET_CSR(cs, env, stval, env->stval); 583 KVM_RISCV_SET_CSR(cs, env, sip, env->mip); 584 KVM_RISCV_SET_CSR(cs, env, satp, env->satp); 585 586 return 0; 587 } 588 589 static int kvm_riscv_get_regs_fp(CPUState *cs) 590 { 591 int ret = 0; 592 int i; 593 CPURISCVState *env = &RISCV_CPU(cs)->env; 594 595 if (riscv_has_ext(env, RVD)) { 596 uint64_t reg; 597 for (i = 0; i < 32; i++) { 598 ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), ®); 599 if (ret) { 600 return ret; 601 } 602 env->fpr[i] = reg; 603 } 604 return ret; 605 } 606 607 if (riscv_has_ext(env, RVF)) { 608 uint32_t reg; 609 for (i = 0; i < 32; i++) { 610 ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), ®); 611 if (ret) { 612 return ret; 613 } 614 env->fpr[i] = reg; 615 } 616 return ret; 617 } 618 619 return ret; 620 } 621 622 static int kvm_riscv_put_regs_fp(CPUState *cs) 623 { 624 int ret = 0; 625 int i; 626 CPURISCVState *env = &RISCV_CPU(cs)->env; 627 628 if (riscv_has_ext(env, RVD)) { 629 uint64_t reg; 630 for (i = 0; i < 32; i++) { 631 reg = env->fpr[i]; 632 ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), ®); 633 if (ret) { 634 return ret; 635 } 636 } 637 return ret; 638 } 639 640 if (riscv_has_ext(env, RVF)) { 641 uint32_t reg; 642 for (i = 0; i < 32; i++) { 643 reg = env->fpr[i]; 644 ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), ®); 645 if (ret) { 646 return ret; 647 } 648 } 649 return ret; 650 } 651 652 return ret; 653 } 654 655 static void kvm_riscv_get_regs_timer(CPUState *cs) 656 { 657 CPURISCVState *env = &RISCV_CPU(cs)->env; 658 659 if (env->kvm_timer_dirty) { 660 return; 661 } 662 663 KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time); 664 KVM_RISCV_GET_TIMER(cs, 
compare, env->kvm_timer_compare); 665 KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state); 666 KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency); 667 668 env->kvm_timer_dirty = true; 669 } 670 671 static void kvm_riscv_put_regs_timer(CPUState *cs) 672 { 673 uint64_t reg; 674 CPURISCVState *env = &RISCV_CPU(cs)->env; 675 676 if (!env->kvm_timer_dirty) { 677 return; 678 } 679 680 KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time); 681 KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare); 682 683 /* 684 * To set register of RISCV_TIMER_REG(state) will occur a error from KVM 685 * on env->kvm_timer_state == 0, It's better to adapt in KVM, but it 686 * doesn't matter that adaping in QEMU now. 687 * TODO If KVM changes, adapt here. 688 */ 689 if (env->kvm_timer_state) { 690 KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state); 691 } 692 693 /* 694 * For now, migration will not work between Hosts with different timer 695 * frequency. Therefore, we should check whether they are the same here 696 * during the migration. 697 */ 698 if (migration_is_running(migrate_get_current()->state)) { 699 KVM_RISCV_GET_TIMER(cs, frequency, reg); 700 if (reg != env->kvm_timer_frequency) { 701 error_report("Dst Hosts timer frequency != Src Hosts"); 702 } 703 } 704 705 env->kvm_timer_dirty = false; 706 } 707 708 typedef struct KVMScratchCPU { 709 int kvmfd; 710 int vmfd; 711 int cpufd; 712 } KVMScratchCPU; 713 714 /* 715 * Heavily inspired by kvm_arm_create_scratch_host_vcpu() 716 * from target/arm/kvm.c. 
717 */ 718 static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch) 719 { 720 int kvmfd = -1, vmfd = -1, cpufd = -1; 721 722 kvmfd = qemu_open_old("/dev/kvm", O_RDWR); 723 if (kvmfd < 0) { 724 goto err; 725 } 726 do { 727 vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0); 728 } while (vmfd == -1 && errno == EINTR); 729 if (vmfd < 0) { 730 goto err; 731 } 732 cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0); 733 if (cpufd < 0) { 734 goto err; 735 } 736 737 scratch->kvmfd = kvmfd; 738 scratch->vmfd = vmfd; 739 scratch->cpufd = cpufd; 740 741 return true; 742 743 err: 744 if (cpufd >= 0) { 745 close(cpufd); 746 } 747 if (vmfd >= 0) { 748 close(vmfd); 749 } 750 if (kvmfd >= 0) { 751 close(kvmfd); 752 } 753 754 return false; 755 } 756 757 static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch) 758 { 759 close(scratch->cpufd); 760 close(scratch->vmfd); 761 close(scratch->kvmfd); 762 } 763 764 static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) 765 { 766 CPURISCVState *env = &cpu->env; 767 struct kvm_one_reg reg; 768 int ret; 769 770 reg.id = RISCV_CONFIG_REG(env, mvendorid); 771 reg.addr = (uint64_t)&cpu->cfg.mvendorid; 772 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); 773 if (ret != 0) { 774 error_report("Unable to retrieve mvendorid from host, error %d", ret); 775 } 776 777 reg.id = RISCV_CONFIG_REG(env, marchid); 778 reg.addr = (uint64_t)&cpu->cfg.marchid; 779 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); 780 if (ret != 0) { 781 error_report("Unable to retrieve marchid from host, error %d", ret); 782 } 783 784 reg.id = RISCV_CONFIG_REG(env, mimpid); 785 reg.addr = (uint64_t)&cpu->cfg.mimpid; 786 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); 787 if (ret != 0) { 788 error_report("Unable to retrieve mimpid from host, error %d", ret); 789 } 790 } 791 792 static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, 793 KVMScratchCPU *kvmcpu) 794 { 795 CPURISCVState *env = &cpu->env; 796 struct kvm_one_reg reg; 797 int ret; 798 799 reg.id = 
RISCV_CONFIG_REG(env, isa); 800 reg.addr = (uint64_t)&env->misa_ext_mask; 801 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); 802 803 if (ret) { 804 error_report("Unable to fetch ISA register from KVM, " 805 "error %d", ret); 806 kvm_riscv_destroy_scratch_vcpu(kvmcpu); 807 exit(EXIT_FAILURE); 808 } 809 810 env->misa_ext = env->misa_ext_mask; 811 } 812 813 static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, 814 KVMCPUConfig *cbomz_cfg) 815 { 816 CPURISCVState *env = &cpu->env; 817 struct kvm_one_reg reg; 818 int ret; 819 820 reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, 821 cbomz_cfg->kvm_reg_id); 822 reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg); 823 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); 824 if (ret != 0) { 825 error_report("Unable to read KVM reg %s, error %d", 826 cbomz_cfg->name, ret); 827 exit(EXIT_FAILURE); 828 } 829 } 830 831 static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, 832 KVMScratchCPU *kvmcpu) 833 { 834 CPURISCVState *env = &cpu->env; 835 uint64_t val; 836 int i, ret; 837 838 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { 839 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i]; 840 struct kvm_one_reg reg; 841 842 reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, 843 multi_ext_cfg->kvm_reg_id); 844 reg.addr = (uint64_t)&val; 845 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); 846 if (ret != 0) { 847 if (errno == EINVAL) { 848 /* Silently default to 'false' if KVM does not support it. 
*/ 849 multi_ext_cfg->supported = false; 850 val = false; 851 } else { 852 error_report("Unable to read ISA_EXT KVM register %s: %s", 853 multi_ext_cfg->name, strerror(errno)); 854 exit(EXIT_FAILURE); 855 } 856 } else { 857 multi_ext_cfg->supported = true; 858 } 859 860 kvm_cpu_cfg_set(cpu, multi_ext_cfg, val); 861 } 862 863 if (cpu->cfg.ext_zicbom) { 864 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize); 865 } 866 867 if (cpu->cfg.ext_zicboz) { 868 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize); 869 } 870 } 871 872 static int uint64_cmp(const void *a, const void *b) 873 { 874 uint64_t val1 = *(const uint64_t *)a; 875 uint64_t val2 = *(const uint64_t *)b; 876 877 if (val1 < val2) { 878 return -1; 879 } 880 881 if (val1 > val2) { 882 return 1; 883 } 884 885 return 0; 886 } 887 888 static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) 889 { 890 KVMCPUConfig *multi_ext_cfg; 891 struct kvm_one_reg reg; 892 struct kvm_reg_list rl_struct; 893 struct kvm_reg_list *reglist; 894 uint64_t val, reg_id, *reg_search; 895 int i, ret; 896 897 rl_struct.n = 0; 898 ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, &rl_struct); 899 900 /* 901 * If KVM_GET_REG_LIST isn't supported we'll get errno 22 902 * (EINVAL). Use read_legacy() in this case. 903 */ 904 if (errno == EINVAL) { 905 return kvm_riscv_read_multiext_legacy(cpu, kvmcpu); 906 } else if (errno != E2BIG) { 907 /* 908 * E2BIG is an expected error message for the API since we 909 * don't know the number of registers. The right amount will 910 * be written in rl_struct.n. 911 * 912 * Error out if we get any other errno. 
913 */ 914 error_report("Error when accessing get-reg-list: %s", 915 strerror(errno)); 916 exit(EXIT_FAILURE); 917 } 918 919 reglist = g_malloc(sizeof(struct kvm_reg_list) + 920 rl_struct.n * sizeof(uint64_t)); 921 reglist->n = rl_struct.n; 922 ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, reglist); 923 if (ret) { 924 error_report("Error when reading KVM_GET_REG_LIST: %s", 925 strerror(errno)); 926 exit(EXIT_FAILURE); 927 } 928 929 /* sort reglist to use bsearch() */ 930 qsort(®list->reg, reglist->n, sizeof(uint64_t), uint64_cmp); 931 932 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { 933 multi_ext_cfg = &kvm_multi_ext_cfgs[i]; 934 reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT, 935 multi_ext_cfg->kvm_reg_id); 936 reg_search = bsearch(®_id, reglist->reg, reglist->n, 937 sizeof(uint64_t), uint64_cmp); 938 if (!reg_search) { 939 continue; 940 } 941 942 reg.id = reg_id; 943 reg.addr = (uint64_t)&val; 944 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); 945 if (ret != 0) { 946 error_report("Unable to read ISA_EXT KVM register %s: %s", 947 multi_ext_cfg->name, strerror(errno)); 948 exit(EXIT_FAILURE); 949 } 950 951 multi_ext_cfg->supported = true; 952 kvm_cpu_cfg_set(cpu, multi_ext_cfg, val); 953 } 954 955 if (cpu->cfg.ext_zicbom) { 956 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize); 957 } 958 959 if (cpu->cfg.ext_zicboz) { 960 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize); 961 } 962 } 963 964 static void riscv_init_kvm_registers(Object *cpu_obj) 965 { 966 RISCVCPU *cpu = RISCV_CPU(cpu_obj); 967 KVMScratchCPU kvmcpu; 968 969 if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) { 970 return; 971 } 972 973 kvm_riscv_init_machine_ids(cpu, &kvmcpu); 974 kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu); 975 kvm_riscv_init_multiext_cfg(cpu, &kvmcpu); 976 977 kvm_riscv_destroy_scratch_vcpu(&kvmcpu); 978 } 979 980 const KVMCapabilityInfo kvm_arch_required_capabilities[] = { 981 KVM_CAP_LAST_INFO 982 }; 983 984 int 
kvm_arch_get_registers(CPUState *cs) 985 { 986 int ret = 0; 987 988 ret = kvm_riscv_get_regs_core(cs); 989 if (ret) { 990 return ret; 991 } 992 993 ret = kvm_riscv_get_regs_csr(cs); 994 if (ret) { 995 return ret; 996 } 997 998 ret = kvm_riscv_get_regs_fp(cs); 999 if (ret) { 1000 return ret; 1001 } 1002 1003 return ret; 1004 } 1005 1006 int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state) 1007 { 1008 if (cap_has_mp_state) { 1009 struct kvm_mp_state mp_state = { 1010 .mp_state = state 1011 }; 1012 1013 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); 1014 if (ret) { 1015 fprintf(stderr, "%s: failed to sync MP_STATE %d/%s\n", 1016 __func__, ret, strerror(-ret)); 1017 return -1; 1018 } 1019 } 1020 1021 return 0; 1022 } 1023 1024 int kvm_arch_put_registers(CPUState *cs, int level) 1025 { 1026 int ret = 0; 1027 1028 ret = kvm_riscv_put_regs_core(cs); 1029 if (ret) { 1030 return ret; 1031 } 1032 1033 ret = kvm_riscv_put_regs_csr(cs); 1034 if (ret) { 1035 return ret; 1036 } 1037 1038 ret = kvm_riscv_put_regs_fp(cs); 1039 if (ret) { 1040 return ret; 1041 } 1042 1043 if (KVM_PUT_RESET_STATE == level) { 1044 RISCVCPU *cpu = RISCV_CPU(cs); 1045 if (cs->cpu_index == 0) { 1046 ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_RUNNABLE); 1047 } else { 1048 ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_STOPPED); 1049 } 1050 if (ret) { 1051 return ret; 1052 } 1053 } 1054 1055 return ret; 1056 } 1057 1058 int kvm_arch_release_virq_post(int virq) 1059 { 1060 return 0; 1061 } 1062 1063 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, 1064 uint64_t address, uint32_t data, PCIDevice *dev) 1065 { 1066 return 0; 1067 } 1068 1069 int kvm_arch_destroy_vcpu(CPUState *cs) 1070 { 1071 return 0; 1072 } 1073 1074 unsigned long kvm_arch_vcpu_id(CPUState *cpu) 1075 { 1076 return cpu->cpu_index; 1077 } 1078 1079 static void kvm_riscv_vm_state_change(void *opaque, bool running, 1080 RunState state) 1081 { 1082 CPUState *cs = opaque; 1083 1084 if 
(running) { 1085 kvm_riscv_put_regs_timer(cs); 1086 } else { 1087 kvm_riscv_get_regs_timer(cs); 1088 } 1089 } 1090 1091 void kvm_arch_init_irq_routing(KVMState *s) 1092 { 1093 } 1094 1095 static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) 1096 { 1097 CPURISCVState *env = &cpu->env; 1098 target_ulong reg; 1099 uint64_t id; 1100 int ret; 1101 1102 id = RISCV_CONFIG_REG(env, mvendorid); 1103 /* 1104 * cfg.mvendorid is an uint32 but a target_ulong will 1105 * be written. Assign it to a target_ulong var to avoid 1106 * writing pieces of other cpu->cfg fields in the reg. 1107 */ 1108 reg = cpu->cfg.mvendorid; 1109 ret = kvm_set_one_reg(cs, id, ®); 1110 if (ret != 0) { 1111 return ret; 1112 } 1113 1114 id = RISCV_CONFIG_REG(env, marchid); 1115 ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid); 1116 if (ret != 0) { 1117 return ret; 1118 } 1119 1120 id = RISCV_CONFIG_REG(env, mimpid); 1121 ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid); 1122 1123 return ret; 1124 } 1125 1126 int kvm_arch_init_vcpu(CPUState *cs) 1127 { 1128 int ret = 0; 1129 RISCVCPU *cpu = RISCV_CPU(cs); 1130 1131 qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs); 1132 1133 if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) { 1134 ret = kvm_vcpu_set_machine_ids(cpu, cs); 1135 if (ret != 0) { 1136 return ret; 1137 } 1138 } 1139 1140 kvm_riscv_update_cpu_misa_ext(cpu, cs); 1141 kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs); 1142 1143 return ret; 1144 } 1145 1146 int kvm_arch_msi_data_to_gsi(uint32_t data) 1147 { 1148 abort(); 1149 } 1150 1151 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, 1152 int vector, PCIDevice *dev) 1153 { 1154 return 0; 1155 } 1156 1157 int kvm_arch_get_default_type(MachineState *ms) 1158 { 1159 return 0; 1160 } 1161 1162 int kvm_arch_init(MachineState *ms, KVMState *s) 1163 { 1164 cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); 1165 return 0; 1166 } 1167 1168 int kvm_arch_irqchip_create(KVMState *s) 1169 { 1170 if 
(kvm_kernel_irqchip_split()) { 1171 error_report("-machine kernel_irqchip=split is not supported on RISC-V."); 1172 exit(1); 1173 } 1174 1175 /* 1176 * We can create the VAIA using the newer device control API. 1177 */ 1178 return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL); 1179 } 1180 1181 int kvm_arch_process_async_events(CPUState *cs) 1182 { 1183 return 0; 1184 } 1185 1186 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) 1187 { 1188 } 1189 1190 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) 1191 { 1192 return MEMTXATTRS_UNSPECIFIED; 1193 } 1194 1195 bool kvm_arch_stop_on_emulation_error(CPUState *cs) 1196 { 1197 return true; 1198 } 1199 1200 static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run) 1201 { 1202 int ret = 0; 1203 unsigned char ch; 1204 switch (run->riscv_sbi.extension_id) { 1205 case SBI_EXT_0_1_CONSOLE_PUTCHAR: 1206 ch = run->riscv_sbi.args[0]; 1207 qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch)); 1208 break; 1209 case SBI_EXT_0_1_CONSOLE_GETCHAR: 1210 ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch)); 1211 if (ret == sizeof(ch)) { 1212 run->riscv_sbi.ret[0] = ch; 1213 } else { 1214 run->riscv_sbi.ret[0] = -1; 1215 } 1216 ret = 0; 1217 break; 1218 default: 1219 qemu_log_mask(LOG_UNIMP, 1220 "%s: un-handled SBI EXIT, specific reasons is %lu\n", 1221 __func__, run->riscv_sbi.extension_id); 1222 ret = -1; 1223 break; 1224 } 1225 return ret; 1226 } 1227 1228 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) 1229 { 1230 int ret = 0; 1231 switch (run->exit_reason) { 1232 case KVM_EXIT_RISCV_SBI: 1233 ret = kvm_riscv_handle_sbi(cs, run); 1234 break; 1235 default: 1236 qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n", 1237 __func__, run->exit_reason); 1238 ret = -1; 1239 break; 1240 } 1241 return ret; 1242 } 1243 1244 void kvm_riscv_reset_vcpu(RISCVCPU *cpu) 1245 { 1246 CPURISCVState *env = &cpu->env; 1247 int i; 1248 1249 if (!kvm_enabled()) { 1250 return; 1251 } 1252 for (i = 0; i 
< 32; i++) { 1253 env->gpr[i] = 0; 1254 } 1255 env->pc = cpu->env.kernel_addr; 1256 env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */ 1257 env->gpr[11] = cpu->env.fdt_addr; /* a1 */ 1258 env->satp = 0; 1259 env->mie = 0; 1260 env->stvec = 0; 1261 env->sscratch = 0; 1262 env->sepc = 0; 1263 env->scause = 0; 1264 env->stval = 0; 1265 env->mip = 0; 1266 } 1267 1268 void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level) 1269 { 1270 int ret; 1271 unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET; 1272 1273 if (irq != IRQ_S_EXT) { 1274 perror("kvm riscv set irq != IRQ_S_EXT\n"); 1275 abort(); 1276 } 1277 1278 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq); 1279 if (ret < 0) { 1280 perror("Set irq failed"); 1281 abort(); 1282 } 1283 } 1284 1285 bool kvm_arch_cpu_check_are_resettable(void) 1286 { 1287 return true; 1288 } 1289 1290 static int aia_mode; 1291 1292 static const char *kvm_aia_mode_str(uint64_t mode) 1293 { 1294 switch (mode) { 1295 case KVM_DEV_RISCV_AIA_MODE_EMUL: 1296 return "emul"; 1297 case KVM_DEV_RISCV_AIA_MODE_HWACCEL: 1298 return "hwaccel"; 1299 case KVM_DEV_RISCV_AIA_MODE_AUTO: 1300 default: 1301 return "auto"; 1302 }; 1303 } 1304 1305 static char *riscv_get_kvm_aia(Object *obj, Error **errp) 1306 { 1307 return g_strdup(kvm_aia_mode_str(aia_mode)); 1308 } 1309 1310 static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp) 1311 { 1312 if (!strcmp(val, "emul")) { 1313 aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL; 1314 } else if (!strcmp(val, "hwaccel")) { 1315 aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL; 1316 } else if (!strcmp(val, "auto")) { 1317 aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO; 1318 } else { 1319 error_setg(errp, "Invalid KVM AIA mode"); 1320 error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n"); 1321 } 1322 } 1323 1324 void kvm_arch_accel_class_init(ObjectClass *oc) 1325 { 1326 object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia, 1327 riscv_set_kvm_aia); 1328 
object_class_property_set_description(oc, "riscv-aia", 1329 "Set KVM AIA mode. Valid values are " 1330 "emul, hwaccel, and auto. Default " 1331 "is auto."); 1332 object_property_set_default_str(object_class_property_find(oc, "riscv-aia"), 1333 "auto"); 1334 } 1335 1336 void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, 1337 uint64_t aia_irq_num, uint64_t aia_msi_num, 1338 uint64_t aplic_base, uint64_t imsic_base, 1339 uint64_t guest_num) 1340 { 1341 int ret, i; 1342 int aia_fd = -1; 1343 uint64_t default_aia_mode; 1344 uint64_t socket_count = riscv_socket_count(machine); 1345 uint64_t max_hart_per_socket = 0; 1346 uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr; 1347 uint64_t socket_bits, hart_bits, guest_bits; 1348 1349 aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false); 1350 1351 if (aia_fd < 0) { 1352 error_report("Unable to create in-kernel irqchip"); 1353 exit(1); 1354 } 1355 1356 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1357 KVM_DEV_RISCV_AIA_CONFIG_MODE, 1358 &default_aia_mode, false, NULL); 1359 if (ret < 0) { 1360 error_report("KVM AIA: failed to get current KVM AIA mode"); 1361 exit(1); 1362 } 1363 qemu_log("KVM AIA: default mode is %s\n", 1364 kvm_aia_mode_str(default_aia_mode)); 1365 1366 if (default_aia_mode != aia_mode) { 1367 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1368 KVM_DEV_RISCV_AIA_CONFIG_MODE, 1369 &aia_mode, true, NULL); 1370 if (ret < 0) 1371 warn_report("KVM AIA: failed to set KVM AIA mode"); 1372 else 1373 qemu_log("KVM AIA: set current mode to %s\n", 1374 kvm_aia_mode_str(aia_mode)); 1375 } 1376 1377 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1378 KVM_DEV_RISCV_AIA_CONFIG_SRCS, 1379 &aia_irq_num, true, NULL); 1380 if (ret < 0) { 1381 error_report("KVM AIA: failed to set number of input irq lines"); 1382 exit(1); 1383 } 1384 1385 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1386 
KVM_DEV_RISCV_AIA_CONFIG_IDS, 1387 &aia_msi_num, true, NULL); 1388 if (ret < 0) { 1389 error_report("KVM AIA: failed to set number of msi"); 1390 exit(1); 1391 } 1392 1393 1394 if (socket_count > 1) { 1395 socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1; 1396 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1397 KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS, 1398 &socket_bits, true, NULL); 1399 if (ret < 0) { 1400 error_report("KVM AIA: failed to set group_bits"); 1401 exit(1); 1402 } 1403 1404 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1405 KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT, 1406 &group_shift, true, NULL); 1407 if (ret < 0) { 1408 error_report("KVM AIA: failed to set group_shift"); 1409 exit(1); 1410 } 1411 } 1412 1413 guest_bits = guest_num == 0 ? 0 : 1414 find_last_bit(&guest_num, BITS_PER_LONG) + 1; 1415 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1416 KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS, 1417 &guest_bits, true, NULL); 1418 if (ret < 0) { 1419 error_report("KVM AIA: failed to set guest_bits"); 1420 exit(1); 1421 } 1422 1423 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR, 1424 KVM_DEV_RISCV_AIA_ADDR_APLIC, 1425 &aplic_base, true, NULL); 1426 if (ret < 0) { 1427 error_report("KVM AIA: failed to set the base address of APLIC"); 1428 exit(1); 1429 } 1430 1431 for (socket = 0; socket < socket_count; socket++) { 1432 socket_imsic_base = imsic_base + socket * (1U << group_shift); 1433 hart_count = riscv_socket_hart_count(machine, socket); 1434 base_hart = riscv_socket_first_hartid(machine, socket); 1435 1436 if (max_hart_per_socket < hart_count) { 1437 max_hart_per_socket = hart_count; 1438 } 1439 1440 for (i = 0; i < hart_count; i++) { 1441 imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits); 1442 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR, 1443 KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart), 1444 &imsic_addr, true, NULL); 1445 if (ret < 0) { 1446 error_report("KVM AIA: 
failed to set the IMSIC address for hart %d", i); 1447 exit(1); 1448 } 1449 } 1450 } 1451 1452 hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1; 1453 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, 1454 KVM_DEV_RISCV_AIA_CONFIG_HART_BITS, 1455 &hart_bits, true, NULL); 1456 if (ret < 0) { 1457 error_report("KVM AIA: failed to set hart_bits"); 1458 exit(1); 1459 } 1460 1461 if (kvm_has_gsi_routing()) { 1462 for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) { 1463 /* KVM AIA only has one APLIC instance */ 1464 kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx); 1465 } 1466 kvm_gsi_routing_allowed = true; 1467 kvm_irqchip_commit_routes(kvm_state); 1468 } 1469 1470 ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL, 1471 KVM_DEV_RISCV_AIA_CTRL_INIT, 1472 NULL, true, NULL); 1473 if (ret < 0) { 1474 error_report("KVM AIA: initialized fail"); 1475 exit(1); 1476 } 1477 1478 kvm_msi_via_irqfd_allowed = true; 1479 } 1480 1481 static void kvm_cpu_instance_init(CPUState *cs) 1482 { 1483 Object *obj = OBJECT(RISCV_CPU(cs)); 1484 DeviceState *dev = DEVICE(obj); 1485 1486 riscv_init_kvm_registers(obj); 1487 1488 kvm_riscv_add_cpu_user_properties(obj); 1489 1490 for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { 1491 /* Check if we have a specific KVM handler for the option */ 1492 if (object_property_find(obj, prop->name)) { 1493 continue; 1494 } 1495 qdev_property_add_static(dev, prop); 1496 } 1497 } 1498 1499 static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) 1500 { 1501 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1502 1503 acc->cpu_instance_init = kvm_cpu_instance_init; 1504 } 1505 1506 static const TypeInfo kvm_cpu_accel_type_info = { 1507 .name = ACCEL_CPU_NAME("kvm"), 1508 1509 .parent = TYPE_ACCEL_CPU, 1510 .class_init = kvm_cpu_accel_class_init, 1511 .abstract = true, 1512 }; 1513 static void kvm_cpu_accel_register_types(void) 1514 { 1515 type_register_static(&kvm_cpu_accel_type_info); 1516 } 1517 
type_init(kvm_cpu_accel_register_types);

/* Instance init for the "host" CPU model: pin MXL to the build's XLEN. */
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;

#if defined(TARGET_RISCV32)
    env->misa_mxl_max = env->misa_mxl = MXL_RV32;
#elif defined(TARGET_RISCV64)
    env->misa_mxl_max = env->misa_mxl = MXL_RV64;
#endif
}

/* QOM registration for the KVM-only "host" CPU type. */
static const TypeInfo riscv_kvm_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU_HOST,
        .parent = TYPE_RISCV_CPU,
        .instance_init = riscv_host_cpu_init,
    }
};

DEFINE_TYPES(riscv_kvm_cpu_type_infos)