xref: /qemu/target/riscv/kvm/kvm-cpu.c (revision 4eb471258bd0e331678bece4c894c477928b3b0b)
/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"

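/*
 * Build a KVM "one reg" id for a RISC-V register: the KVM_REG_RISCV
 * architecture tag, the register group (core, CSR, FP, config, ...) and
 * the register index are OR'ed together with a size field derived from
 * the guest MXLEN (32-bit accesses on RV32, 64-bit on RV64).
 */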
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
                                 uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }
    return id;
}

#define RISCV_CORE_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
                 KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
                 KVM_REG_RISCV_CSR_REG(name))

#define RISCV_FP_F_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)

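/*
 * Read/write a single CSR through KVM_GET_ONE_REG/KVM_SET_ONE_REG.
 * Note the hidden control flow: on error these macros return from the
 * *calling* function, so they may only be used in functions returning int.
 */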
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

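/*
 * Sync the core state: index 0 of the CORE register group is regs.pc,
 * indices 1..31 are x1..x31. x0 is hardwired to zero and never transferred,
 * which is why the GPR loops below start at 1. kvm_riscv_put_regs_core()
 * mirrors this layout in the other direction.
 */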
static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

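/*
 * KVM exposes the guest's S-mode CSRs. QEMU keeps them in the machine-level
 * fields it also uses for TCG, hence the mappings sstatus -> env->mstatus,
 * sie -> env->mie and sip -> env->mip below.
 */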
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
    return ret;
}

static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return ret;
}

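/*
 * Sync the floating-point register file: use the 64-bit D-register view
 * when the vCPU has RVD, fall back to the 32-bit F-register view with RVF
 * only, and transfer nothing if neither extension is present.
 */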
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

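/*
 * Entry points called by the generic KVM accel code to copy the full vCPU
 * state (pc/GPRs, S-mode CSRs, FP registers) between the kernel and env.
 */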
int kvm_arch_get_registers(CPUState *cs)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

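/*
 * vCPU init: read the ISA config register from KVM and mirror its extension
 * bits into env->misa_ext, so the rest of QEMU sees the same guest ISA that
 * the host kernel provides.
 */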
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    target_ulong isa;
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    uint64_t id;

    id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
                          KVM_REG_RISCV_CONFIG_REG(isa));
    ret = kvm_get_one_reg(cs, id, &isa);
    if (ret) {
        return ret;
    }
    env->misa_ext = isa;

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

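/*
 * SBI calls that the in-kernel SBI implementation does not handle are
 * forwarded to user space as KVM_EXIT_RISCV_SBI. The legacy (v0.1) console
 * putchar/getchar calls are serviced through the first serial chardev;
 * anything else is logged as unimplemented.
 */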
static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;
    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.args[0] = ch;
        } else {
            run->riscv_sbi.args[0] = -1;
        }
        /* Don't leak the byte count from the read above as the exit status. */
        ret = 0;
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: un-handled SBI EXIT, specific reason is %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}

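/*
 * Reset a vCPU into the standard RISC-V Linux boot state: pc at the kernel
 * entry point, a0 holding the vCPU id (used as the hartid), a1 holding the
 * device tree address, and satp cleared so translation is off.
 */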
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;

    if (!kvm_enabled()) {
        return;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */
    env->satp = 0;
}

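/*
 * Only the supervisor external interrupt (IRQ_S_EXT) can be injected from
 * user space here; level changes are forwarded to the vCPU with the
 * KVM_INTERRUPT ioctl.
 */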
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}
449