1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include "qemu/osdep.h"
13 #include <sys/ioctl.h>
14
15 #include <linux/kvm.h>
16
17 #include "cpu.h"
18 #include "internal.h"
19 #include "qemu/error-report.h"
20 #include "qemu/main-loop.h"
21 #include "system/kvm.h"
22 #include "system/kvm_int.h"
23 #include "system/runstate.h"
24 #include "kvm_mips.h"
25 #include "hw/boards.h"
26 #include "fpu_helper.h"
27
/* Set DEBUG_KVM to 1 to enable the DPRINTF debug traces below. */
#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

/*
 * Whether the host kernel advertises the optional FPU/MSA capabilities.
 * Probed once in kvm_arch_init(); cleared again in kvm_arch_init_vcpu()
 * if enabling the capability on a vcpu fails.
 */
static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

/* VM run-state change handler, registered per vcpu in kvm_arch_init_vcpu() */
static void kvm_mips_update_state(void *opaque, bool running, RunState state);
41
unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    /* KVM vcpu ids map 1:1 onto QEMU's cpu index. */
    unsigned long vcpu_id = cs->cpu_index;

    return vcpu_id;
}
46
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /*
     * MIPS has 128 signals, so the sigmask handed to the kernel is
     * 16 bytes wide.
     */
    kvm_set_sigmask_len(s, 16);

    /* Probe the optional vector/float capabilities once, up front. */
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);

    DPRINTF("%s\n", __func__);
    return 0;
}
58
int kvm_arch_irqchip_create(KVMState *s)
{
    /* No in-kernel irqchip on MIPS; nothing to create. */
    return 0;
}
63
kvm_arch_pre_create_vcpu(CPUState * cpu,Error ** errp)64 int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
65 {
66 return 0;
67 }
68
/*
 * Per-vcpu KVM initialisation: register the VM clock state handler and
 * try to enable the optional FPU/MSA capabilities the CPU model wants.
 * A failure to enable a capability is not fatal; the feature is simply
 * marked unsupported so kvm_mips_reset_vcpu() strips it from the guest.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && (env->CP0_Config1 & (1 << CP0C1_FP))) {
        if (kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0) < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_fpu_cap = 0;
        }
    }

    if (kvm_mips_msa_cap && ase_msa_available(env)) {
        if (kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0) < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_msa_cap = 0;
        }
    }

    DPRINTF("%s\n", __func__);
    return 0;
}
97
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    /* Nothing vcpu-specific to tear down on MIPS. */
    return 0;
}
102
/*
 * On vcpu reset, strip CPU features from the guest configuration that the
 * host KVM turned out not to support (see kvm_arch_init_vcpu()).
 */
void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if ((env->CP0_Config1 & (1 << CP0C1_FP)) && !kvm_mips_fpu_cap) {
        warn_report("KVM does not support FPU, disabling");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (ase_msa_available(env) && !kvm_mips_msa_cap) {
        warn_report("KVM does not support MSA, disabling");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}
118
/* Software breakpoints are not implemented for MIPS KVM; report success. */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
124
/* Counterpart of kvm_arch_insert_sw_breakpoint(); also a no-op. */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
130
/* Non-zero when the I/O interrupt line (CP0_Cause IP2) is asserted. */
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    return cpu->env.CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}
137
138
/*
 * Called with the BQL released just before entering the guest; inject a
 * pending I/O interrupt (IRQ 2) into KVM if one is asserted.
 */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);

    bql_lock();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_mips_io_interrupts_pending(cpu)) {
        struct kvm_mips_interrupt intr = {
            .cpu = -1,  /* deliver to this vcpu */
            .irq = 2,
        };

        if (kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr) < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    bql_unlock();
}
160
kvm_arch_post_run(CPUState * cs,struct kvm_run * run)161 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
162 {
163 return MEMTXATTRS_UNSPECIFIED;
164 }
165
int kvm_arch_process_async_events(CPUState *cs)
{
    /* Nothing asynchronous to handle; just report the halted state. */
    return cs->halted;
}
170
/*
 * Handle a KVM exit QEMU's generic code did not consume. No MIPS-specific
 * exit reasons are recognised here, so every exit is reported as unknown.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    DPRINTF("%s\n", __func__);

    error_report("%s: unknown exit reason %d",
                 __func__, run->exit_reason);
    return -1;
}
186
kvm_arch_stop_on_emulation_error(CPUState * cs)187 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
188 {
189 DPRINTF("%s\n", __func__);
190 return true;
191 }
192
/* No in-kernel irqchip on MIPS, so there is no IRQ routing to set up. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
196
/*
 * Raise (level != 0) or lower (level == 0) an interrupt line on @cpu.
 * A negative irq number tells KVM to deassert the line.
 */
int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    struct kvm_mips_interrupt intr = {
        .cpu = -1,  /* target this vcpu itself */
        .irq = level ? irq : -irq,
    };

    assert(kvm_enabled());

    kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &intr);

    return 0;
}
216
/*
 * Raise or lower an IPI on @cpu. The ioctl is issued through the vcpu fd
 * of the *requesting* cpu (current_cpu) while intr.cpu names the target.
 */
int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    struct kvm_mips_interrupt intr = {
        .cpu = CPU(cpu)->cpu_index,
        .irq = level ? irq : -irq,
    };

    assert(kvm_enabled());

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(current_cpu, KVM_INTERRUPT, &intr);

    return 0;
}
239
/*
 * ONE_REG ids for CP0 registers: encoded as 8 * register + select,
 * tagged with the access size (U32 or U64) the kernel expects.
 */
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_RANDOM         MIPS_CP0_32(1, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN      MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_PWBASE         MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD        MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE         MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL          MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE          MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6        MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_XCONTEXT       MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1      MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2      MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3     MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4     MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5     MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6     MIPS_CP0_64(31, 7)
282
/* Write a signed 32-bit value to a ONE_REG register. */
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
293
/* Write an unsigned 32-bit value to a ONE_REG register. */
static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
304
/*
 * Write a target_ulong to a 64-bit ONE_REG register, widening through a
 * local uint64_t so the size matches what the kernel expects.
 */
static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t wide = *addr;
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)&wide;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
316
/* Write a signed 64-bit value to a ONE_REG register. */
static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
327
/* Write an unsigned 64-bit value to a ONE_REG register. */
static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
338
/* Read a signed 32-bit value from a ONE_REG register. */
static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
349
/* Read an unsigned 32-bit value from a ONE_REG register. */
static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
360
/*
 * Read a 64-bit ONE_REG register into a target_ulong, narrowing through a
 * local uint64_t. *addr is only written when the ioctl succeeds.
 */
static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t wide = 0;
    struct kvm_one_reg reg;
    int ret;

    reg.id = reg_id;
    reg.addr = (uintptr_t)&wide;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret >= 0) {
        *addr = wide;
    }
    return ret;
}
377
/* Read a signed 64-bit value from a ONE_REG register. */
static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
388
/* Read an unsigned 64-bit value from a ONE_REG register. */
static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg reg;

    reg.id = reg_id;
    reg.addr = (uintptr_t)addr;
    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
}
399
/*
 * Bit masks selecting the Config register bits that userspace is allowed
 * to change; used with kvm_mips_change_one_reg() so only these bits are
 * read-modify-written, leaving the kernel's other Config bits alone.
 */
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))
#define KVM_REG_MIPS_CP0_CONFIG6_MASK   ((1U << CP0C6_BPPASS) | \
                                         (0x3fU << CP0C6_KPOS) | \
                                         (1U << CP0C6_KE) | \
                                         (1U << CP0C6_VTLBONLY) | \
                                         (1U << CP0C6_LASX) | \
                                         (1U << CP0C6_SSEN) | \
                                         (1U << CP0C6_DISDRTIME) | \
                                         (1U << CP0C6_PIXNUEN) | \
                                         (1U << CP0C6_SCRAND) | \
                                         (1U << CP0C6_LLEXCEN) | \
                                         (1U << CP0C6_DISVC) | \
                                         (1U << CP0C6_VCLRU) | \
                                         (1U << CP0C6_DCLRU) | \
                                         (1U << CP0C6_PIXUEN) | \
                                         (1U << CP0C6_DISBLKLYEN) | \
                                         (1U << CP0C6_UMEMUALEN) | \
                                         (1U << CP0C6_SFBEN) | \
                                         (1U << CP0C6_FLTINT) | \
                                         (1U << CP0C6_VLTINT) | \
                                         (1U << CP0C6_DISBTB) | \
                                         (3U << CP0C6_STPREFCTL) | \
                                         (1U << CP0C6_INSTPREF) | \
                                         (1U << CP0C6_DATAPREF))
434
/*
 * Read-modify-write a ONE_REG register so that only the bits selected by
 * @mask are updated to match *addr; all other bits keep the kernel value.
 * Skips the write entirely when the masked bits already agree.
 */
static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int32_t cur, delta;
    int err = kvm_mips_get_one_reg(cs, reg_id, &cur);

    if (err < 0) {
        return err;
    }

    /* only change bits in mask */
    delta = (*addr ^ cur) & mask;
    if (!delta) {
        return 0;
    }

    cur ^= delta;
    return kvm_mips_put_one_reg(cs, reg_id, &cur);
}
455
456 /*
457 * We freeze the KVM timer when either the VM clock is stopped or the state is
458 * saved (the state is dirty).
459 */
460
461 /*
462 * Save the state of the KVM timer when VM clock is stopped or state is synced
463 * to QEMU.
464 */
static int kvm_mips_save_count(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    uint64_t count_ctl;
    int err, ret = 0;

    /*
     * Freeze the KVM timer first (set COUNT_CTL.DC) so that the Cause and
     * Count values read below form a consistent snapshot. On any failure
     * we keep going and remember the last error in ret.
     */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause (carries the timer interrupt pending bit) */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
501
502 /*
503 * Restore the state of the KVM timer when VM clock is restarted or state is
504 * synced to KVM.
505 */
static int kvm_mips_restore_count(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /*
     * Ensure the timer is frozen (COUNT_CTL.DC set) before loading Cause
     * and Count, so the loaded values are not immediately overtaken.
     * err_dc is kept separately: if reading COUNT_CTL failed we must not
     * attempt to resume the timer at the end with a garbage value.
     */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer (clear DC), but only if COUNT_CTL was read OK */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}
553
554 /*
555 * Handle the VM clock being started or stopped
556 */
/*
 * VM run-state change callback: freeze/save the KVM timer when the VM
 * stops, and restore/resume it when the VM starts running again. When the
 * vcpu state is already dirty (synced to QEMU) the timer was saved along
 * with it and will be restored on the next sync back to KVM.
 */
static void kvm_mips_update_state(void *opaque, bool running, RunState state)
{
    CPUState *cs = opaque;
    uint64_t count_resume;

    if (!running) {
        if (!cs->vcpu_dirty && kvm_mips_save_count(cs) < 0) {
            warn_report("Failed saving count");
        }
        return;
    }

    /* Set clock restore time to now */
    count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                &count_resume) < 0) {
        warn_report("Failed setting COUNT_RESUME");
        return;
    }

    if (!cs->vcpu_dirty && kvm_mips_restore_count(cs) < 0) {
        warn_report("Failed restoring count");
    }
}
592
/*
 * Write QEMU's FPU/MSA state out to KVM.
 *
 * @level: KVM_PUT_FULL_STATE additionally writes the read-mostly
 *         implementation registers (FCR_IR / MSA_IR).
 *
 * Returns 0 on success or the last negative error seen; an error on one
 * register does not stop the remaining registers from being written.
 *
 * Bug fix: the 32-bit FPR branch previously called kvm_mips_get_one_ureg(),
 * which *read* the register from KVM and clobbered QEMU's FPU state instead
 * of writing it. It now uses kvm_mips_put_one_ureg(), matching the 64-bit
 * branch and mirroring kvm_mips_get_fpu_registers().
 */
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!ase_msa_available(env)) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    /* was get_one_ureg: that read from KVM, losing state */
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (ase_msa_available(env)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
671
/*
 * Read the FPU/MSA state from KVM into QEMU's CPUMIPSState.
 * Returns 0 on success or the last negative error seen; an error on one
 * register does not stop the remaining registers from being read.
 */
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* rebuild softfloat flags from the freshly read FCSR */
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't read
         * the FPU registers separately if we're emulating a CPU with MSA.
         */
        if (!ase_msa_available(env)) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (ase_msa_available(env)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            /* rebuild MSA softfloat flags from the freshly read MSACSR */
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
750
751
/*
 * Write the guest CP0 (coprocessor 0) state held in QEMU's CPUMIPSState
 * out to KVM via the ONE_REG interface.
 *
 * Returns 0 on success or the last negative error seen; a failure on one
 * register does not stop the remaining registers from being written.
 */
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    CPUMIPSState *env = cpu_env(cs);
    int err, ret = 0;

    /* level is currently unused: every register is written each time */
    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
                               &env->CP0_PageGrain);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
                                 &env->CP0_PWBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
                                 &env->CP0_PWField);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
                                 &env->CP0_PWSize);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
        ret = err;
    }
    /*
     * The Config registers are read-modify-written so only the bits KVM
     * lets userspace change (the *_MASK bits) are touched.
     */
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
                                  &env->CP0_Config6,
                                  KVM_REG_MIPS_CP0_CONFIG6_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
                                 &env->CP0_XContext);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
                                 &env->CP0_KScratch[0]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
                                 &env->CP0_KScratch[1]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
                                 &env->CP0_KScratch[2]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
                                 &env->CP0_KScratch[3]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
                                 &env->CP0_KScratch[4]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
                                 &env->CP0_KScratch[5]);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
973
kvm_mips_get_cp0_registers(CPUState * cs)974 static int kvm_mips_get_cp0_registers(CPUState *cs)
975 {
976 CPUMIPSState *env = cpu_env(cs);
977 int err, ret = 0;
978
979 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
980 if (err < 0) {
981 DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
982 ret = err;
983 }
984 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
985 if (err < 0) {
986 DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
987 ret = err;
988 }
989 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
990 &env->CP0_Context);
991 if (err < 0) {
992 DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
993 ret = err;
994 }
995 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
996 &env->active_tc.CP0_UserLocal);
997 if (err < 0) {
998 DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
999 ret = err;
1000 }
1001 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
1002 &env->CP0_PageMask);
1003 if (err < 0) {
1004 DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
1005 ret = err;
1006 }
1007 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
1008 &env->CP0_PageGrain);
1009 if (err < 0) {
1010 DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
1011 ret = err;
1012 }
1013 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
1014 &env->CP0_PWBase);
1015 if (err < 0) {
1016 DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
1017 ret = err;
1018 }
1019 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
1020 &env->CP0_PWField);
1021 if (err < 0) {
1022 DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
1023 ret = err;
1024 }
1025 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
1026 &env->CP0_PWSize);
1027 if (err < 0) {
1028 DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
1029 ret = err;
1030 }
1031 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
1032 if (err < 0) {
1033 DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
1034 ret = err;
1035 }
1036 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
1037 if (err < 0) {
1038 DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
1039 ret = err;
1040 }
1041 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
1042 if (err < 0) {
1043 DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
1044 ret = err;
1045 }
1046 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
1047 &env->CP0_BadVAddr);
1048 if (err < 0) {
1049 DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
1050 ret = err;
1051 }
1052 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
1053 &env->CP0_EntryHi);
1054 if (err < 0) {
1055 DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
1056 ret = err;
1057 }
1058 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
1059 &env->CP0_Compare);
1060 if (err < 0) {
1061 DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
1062 ret = err;
1063 }
1064 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
1065 if (err < 0) {
1066 DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
1067 ret = err;
1068 }
1069
1070 /* If VM clock stopped then state was already saved when it was stopped */
1071 if (runstate_is_running()) {
1072 err = kvm_mips_save_count(cs);
1073 if (err < 0) {
1074 ret = err;
1075 }
1076 }
1077
1078 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
1079 if (err < 0) {
1080 DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
1081 ret = err;
1082 }
1083 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
1084 if (err < 0) {
1085 DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
1086 ret = err;
1087 }
1088 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
1089 if (err < 0) {
1090 DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
1091 ret = err;
1092 }
1093 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
1094 if (err < 0) {
1095 DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
1096 ret = err;
1097 }
1098 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
1099 if (err < 0) {
1100 DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
1101 ret = err;
1102 }
1103 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
1104 if (err < 0) {
1105 DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
1106 ret = err;
1107 }
1108 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
1109 if (err < 0) {
1110 DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
1111 ret = err;
1112 }
1113 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
1114 if (err < 0) {
1115 DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
1116 ret = err;
1117 }
1118 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
1119 if (err < 0) {
1120 DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
1121 ret = err;
1122 }
1123 err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
1124 if (err < 0) {
1125 DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
1126 ret = err;
1127 }
1128 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
1129 &env->CP0_XContext);
1130 if (err < 0) {
1131 DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
1132 ret = err;
1133 }
1134 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
1135 &env->CP0_ErrorEPC);
1136 if (err < 0) {
1137 DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
1138 ret = err;
1139 }
1140 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
1141 &env->CP0_KScratch[0]);
1142 if (err < 0) {
1143 DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
1144 ret = err;
1145 }
1146 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
1147 &env->CP0_KScratch[1]);
1148 if (err < 0) {
1149 DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
1150 ret = err;
1151 }
1152 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
1153 &env->CP0_KScratch[2]);
1154 if (err < 0) {
1155 DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
1156 ret = err;
1157 }
1158 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
1159 &env->CP0_KScratch[3]);
1160 if (err < 0) {
1161 DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
1162 ret = err;
1163 }
1164 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
1165 &env->CP0_KScratch[4]);
1166 if (err < 0) {
1167 DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
1168 ret = err;
1169 }
1170 err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
1171 &env->CP0_KScratch[5]);
1172 if (err < 0) {
1173 DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
1174 ret = err;
1175 }
1176
1177 return ret;
1178 }
1179
kvm_arch_put_registers(CPUState * cs,int level,Error ** errp)1180 int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
1181 {
1182 CPUMIPSState *env = cpu_env(cs);
1183 struct kvm_regs regs;
1184 int ret;
1185 int i;
1186
1187 /* Set the registers based on QEMU's view of things */
1188 for (i = 0; i < 32; i++) {
1189 regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
1190 }
1191
1192 regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
1193 regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
1194 regs.pc = (int64_t)(target_long)env->active_tc.PC;
1195
1196 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s);
1197
1198 if (ret < 0) {
1199 return ret;
1200 }
1201
1202 ret = kvm_mips_put_cp0_registers(cs, level);
1203 if (ret < 0) {
1204 return ret;
1205 }
1206
1207 ret = kvm_mips_put_fpu_registers(cs, level);
1208 if (ret < 0) {
1209 return ret;
1210 }
1211
1212 return ret;
1213 }
1214
kvm_arch_get_registers(CPUState * cs,Error ** errp)1215 int kvm_arch_get_registers(CPUState *cs, Error **errp)
1216 {
1217 CPUMIPSState *env = cpu_env(cs);
1218 int ret = 0;
1219 struct kvm_regs regs;
1220 int i;
1221
1222 /* Get the current register set as KVM seems it */
1223 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s);
1224
1225 if (ret < 0) {
1226 return ret;
1227 }
1228
1229 for (i = 0; i < 32; i++) {
1230 env->active_tc.gpr[i] = regs.gpr[i];
1231 }
1232
1233 env->active_tc.HI[0] = regs.hi;
1234 env->active_tc.LO[0] = regs.lo;
1235 env->active_tc.PC = regs.pc;
1236
1237 kvm_mips_get_cp0_registers(cs);
1238 kvm_mips_get_fpu_registers(cs);
1239
1240 return ret;
1241 }
1242
/*
 * MSI routing fixup hook: MIPS needs no address/data translation for
 * MSI routes, so this is a no-op that always reports success.
 */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
1248
/*
 * Post-add hook for MSI routes: nothing to do on MIPS; always succeeds.
 */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
1254
/* Virtual IRQ release hook: no per-arch cleanup needed on MIPS. */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
1259
/*
 * MSI data to GSI translation is not supported on MIPS; reaching this
 * path indicates a programming error, so abort rather than guess.
 */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
1264
/*
 * Pick the default KVM VM type for this machine: MIPS only supports the
 * VZ (hardware virtualization) VM type.  Returns KVM_VM_MIPS_VZ when the
 * kernel advertises KVM_CAP_MIPS_VZ, otherwise reports an error and
 * returns -1.
 */
int kvm_arch_get_default_type(MachineState *machine)
{
#if defined(KVM_CAP_MIPS_VZ)
    KVMState *s = KVM_STATE(machine->accelerator);

    if (kvm_check_extension(s, KVM_CAP_MIPS_VZ) > 0) {
        return KVM_VM_MIPS_VZ;
    }
#endif

    error_report("KVM_VM_MIPS_VZ type is not available");
    return -1;
}
1280
/* Accelerator class init hook: no MIPS-specific properties to register. */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
1284