/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"

static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
                                 uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }
    return id;
}

#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
                                                   KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
                                                  KVM_REG_RISCV_CSR_REG(name))

#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)

#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

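    /*
     * KVM exposes the guest's supervisor-level CSRs. QEMU stores sstatus,
     * sie and sip in its machine-level fields (mstatus, mie, mip), which
     * back the corresponding supervisor views of the CPU state.
     */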
    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);

    return ret;
}

static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return ret;
}

static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}


const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

int kvm_arch_get_registers(CPUState *cs)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

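/*
 * No in-kernel irqchip routing is configured for RISC-V here; the supervisor
 * external interrupt is injected per-vcpu through the KVM_INTERRUPT ioctl
 * (see kvm_riscv_set_irq() below), so the routing hooks are stubs.
 */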
void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    target_ulong isa;
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    uint64_t id;

    id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
                          KVM_REG_RISCV_CONFIG_REG(isa));
    ret = kvm_get_one_reg(cs, id, &isa);
    if (ret) {
        return ret;
    }
    env->misa_ext = isa;

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    return 0;
}

void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;

    if (!kvm_enabled()) {
        return;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */
    env->satp = 0;
}

void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}