xref: /qemu/target/loongarch/kvm/kvm.c (revision 98721058d6d50ef218e0c26e4f67c8ef96965859)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * QEMU LoongArch KVM
4  *
5  * Copyright (c) 2023 Loongson Technology Corporation Limited
6  */
7 
8 #include "qemu/osdep.h"
9 #include <sys/ioctl.h>
10 #include <linux/kvm.h>
11 #include "asm-loongarch/kvm_para.h"
12 #include "qapi/error.h"
13 #include "qemu/timer.h"
14 #include "qemu/error-report.h"
15 #include "qemu/main-loop.h"
16 #include "system/system.h"
17 #include "system/kvm.h"
18 #include "system/kvm_int.h"
19 #include "hw/pci/pci.h"
20 #include "exec/memattrs.h"
21 #include "system/address-spaces.h"
22 #include "hw/boards.h"
23 #include "hw/irq.h"
24 #include "hw/loongarch/virt.h"
25 #include "qemu/log.h"
26 #include "hw/loader.h"
27 #include "system/runstate.h"
28 #include "cpu-csr.h"
29 #include "kvm_loongarch.h"
30 #include "trace.h"
31 
32 static bool cap_has_mp_state;
33 static unsigned int brk_insn;
34 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
35     KVM_CAP_LAST_INFO
36 };
37 
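/*
 * Read back the guest physical address registered for PV steal time via
 * the PVTIME vCPU device attribute.  Returns 0 when the host does not
 * expose the attribute at all.
 */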
38 static int kvm_get_stealtime(CPUState *cs)
39 {
40     CPULoongArchState *env = cpu_env(cs);
41     int err;
42     struct kvm_device_attr attr = {
43         .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
44         .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
45         .addr = (uint64_t)&env->stealtime.guest_addr,
46     };
47 
48     err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
49     if (err) {
50         return 0;
51     }
52 
53     err = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, attr);
54     if (err) {
55         error_report("PVTIME: KVM_GET_DEVICE_ATTR: %s", strerror(errno));
56         return err;
57     }
58 
59     return 0;
60 }
61 
62 static int kvm_set_stealtime(CPUState *cs)
63 {
64     CPULoongArchState *env = cpu_env(cs);
65     int err;
66     struct kvm_device_attr attr = {
67         .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
68         .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA,
69         .addr = (uint64_t)&env->stealtime.guest_addr,
70     };
71 
72     err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
73     if (err) {
74         return 0;
75     }
76 
77     err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
78     if (err) {
79         error_report("PVTIME: KVM_SET_DEVICE_ATTR %s with gpa "TARGET_FMT_lx,
80                       strerror(errno), env->stealtime.guest_addr);
81         return err;
82     }
83 
84     return 0;
85 }
86 
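/*
 * Push the paravirt feature bits selected in env->pv_features to KVM
 * through the CPUCFG_KVM_FEATURE vCPU attribute; silently skipped on
 * hosts that do not expose the attribute.
 */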
87 static int kvm_set_pv_features(CPUState *cs)
88 {
89     CPULoongArchState *env = cpu_env(cs);
90     int err;
91     uint64_t val;
92     struct kvm_device_attr attr = {
93         .group = KVM_LOONGARCH_VCPU_CPUCFG,
94         .attr = CPUCFG_KVM_FEATURE,
95         .addr = (uint64_t)&val,
96     };
97 
98     err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
99     if (err) {
100         return 0;
101     }
102 
103     val = env->pv_features;
104     err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
105     if (err) {
106         error_report("Failed to set pv feature "TARGET_FMT_lx " with error %s",
107                       val, strerror(errno));
108         return err;
109     }
110 
111     return 0;
112 }
113 
114 static int kvm_loongarch_get_regs_core(CPUState *cs)
115 {
116     int ret = 0;
117     int i;
118     struct kvm_regs regs;
119     CPULoongArchState *env = cpu_env(cs);
120 
121     /* Get the current register set as KVM sees it */
122     ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
123     if (ret < 0) {
124         trace_kvm_failed_get_regs_core(strerror(errno));
125         return ret;
126     }
127     /* gpr[0] value is always 0 */
128     env->gpr[0] = 0;
129     for (i = 1; i < 32; i++) {
130         env->gpr[i] = regs.gpr[i];
131     }
132 
133     env->pc = regs.pc;
134     return ret;
135 }
136 
137 static int kvm_loongarch_put_regs_core(CPUState *cs)
138 {
139     int ret = 0;
140     int i;
141     struct kvm_regs regs;
142     CPULoongArchState *env = cpu_env(cs);
143 
144     /* Set the registers based on QEMU's view of things */
145     for (i = 0; i < 32; i++) {
146         regs.gpr[i] = env->gpr[i];
147     }
148 
149     regs.pc = env->pc;
150     ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
151     if (ret < 0) {
152         trace_kvm_failed_put_regs_core(strerror(errno));
153     }
154 
155     return ret;
156 }
157 
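/*
 * Read the guest CSR state out of KVM, one KVM_GET_ONE_REG call per CSR.
 * The individual return codes are OR-ed together, so a non-zero result
 * only tells us that at least one access failed.
 */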
158 static int kvm_loongarch_get_csr(CPUState *cs)
159 {
160     int ret = 0;
161     CPULoongArchState *env = cpu_env(cs);
162 
163     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
164                            &env->CSR_CRMD);
165 
166     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
167                            &env->CSR_PRMD);
168 
169     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
170                            &env->CSR_EUEN);
171 
172     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
173                            &env->CSR_MISC);
174 
175     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
176                            &env->CSR_ECFG);
177 
178     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
179                            &env->CSR_ESTAT);
180 
181     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
182                            &env->CSR_ERA);
183 
184     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
185                            &env->CSR_BADV);
186 
187     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
188                            &env->CSR_BADI);
189 
190     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
191                            &env->CSR_EENTRY);
192 
193     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
194                            &env->CSR_TLBIDX);
195 
196     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
197                            &env->CSR_TLBEHI);
198 
199     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
200                            &env->CSR_TLBELO0);
201 
202     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
203                            &env->CSR_TLBELO1);
204 
205     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
206                            &env->CSR_ASID);
207 
208     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
209                            &env->CSR_PGDL);
210 
211     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
212                            &env->CSR_PGDH);
213 
214     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
215                            &env->CSR_PGD);
216 
217     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
218                            &env->CSR_PWCL);
219 
220     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
221                            &env->CSR_PWCH);
222 
223     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
224                            &env->CSR_STLBPS);
225 
226     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
227                            &env->CSR_RVACFG);
228 
229     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
230                            &env->CSR_CPUID);
231 
232     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
233                            &env->CSR_PRCFG1);
234 
235     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
236                            &env->CSR_PRCFG2);
237 
238     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
239                            &env->CSR_PRCFG3);
240 
241     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
242                            &env->CSR_SAVE[0]);
243 
244     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
245                            &env->CSR_SAVE[1]);
246 
247     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
248                            &env->CSR_SAVE[2]);
249 
250     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
251                            &env->CSR_SAVE[3]);
252 
253     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
254                            &env->CSR_SAVE[4]);
255 
256     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
257                            &env->CSR_SAVE[5]);
258 
259     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
260                            &env->CSR_SAVE[6]);
261 
262     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
263                            &env->CSR_SAVE[7]);
264 
265     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
266                            &env->CSR_TID);
267 
268     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
269                            &env->CSR_CNTC);
270 
271     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
272                            &env->CSR_TICLR);
273 
274     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
275                            &env->CSR_LLBCTL);
276 
277     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
278                            &env->CSR_IMPCTL1);
279 
280     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
281                            &env->CSR_IMPCTL2);
282 
283     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
284                            &env->CSR_TLBRENTRY);
285 
286     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
287                            &env->CSR_TLBRBADV);
288 
289     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
290                            &env->CSR_TLBRERA);
291 
292     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
293                            &env->CSR_TLBRSAVE);
294 
295     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
296                            &env->CSR_TLBRELO0);
297 
298     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
299                            &env->CSR_TLBRELO1);
300 
301     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
302                            &env->CSR_TLBREHI);
303 
304     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
305                            &env->CSR_TLBRPRMD);
306 
307     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
308                            &env->CSR_DMW[0]);
309 
310     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
311                            &env->CSR_DMW[1]);
312 
313     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
314                            &env->CSR_DMW[2]);
315 
316     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
317                            &env->CSR_DMW[3]);
318 
319     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
320                            &env->CSR_TVAL);
321 
322     ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
323                            &env->CSR_TCFG);
324 
325     return ret;
326 }
327 
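/*
 * Write QEMU's view of the CSR state back into KVM.  Ordering matters for
 * a few registers: CPUID is written only for a full state update, and the
 * timer registers are written last.
 */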
328 static int kvm_loongarch_put_csr(CPUState *cs, int level)
329 {
330     int ret = 0;
331     CPULoongArchState *env = cpu_env(cs);
332 
333     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
334                            &env->CSR_CRMD);
335 
336     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
337                            &env->CSR_PRMD);
338 
339     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
340                            &env->CSR_EUEN);
341 
342     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
343                            &env->CSR_MISC);
344 
345     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
346                            &env->CSR_ECFG);
347 
348     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
349                            &env->CSR_ESTAT);
350 
351     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
352                            &env->CSR_ERA);
353 
354     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
355                            &env->CSR_BADV);
356 
357     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
358                            &env->CSR_BADI);
359 
360     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
361                            &env->CSR_EENTRY);
362 
363     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
364                            &env->CSR_TLBIDX);
365 
366     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
367                            &env->CSR_TLBEHI);
368 
369     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
370                            &env->CSR_TLBELO0);
371 
372     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
373                            &env->CSR_TLBELO1);
374 
375     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
376                            &env->CSR_ASID);
377 
378     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
379                            &env->CSR_PGDL);
380 
381     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
382                            &env->CSR_PGDH);
383 
384     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
385                            &env->CSR_PGD);
386 
387     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
388                            &env->CSR_PWCL);
389 
390     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
391                            &env->CSR_PWCH);
392 
393     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
394                            &env->CSR_STLBPS);
395 
396     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
397                            &env->CSR_RVACFG);
398 
399     /* CPUID is constant after power-on; it should be set only once */
400     if (level >= KVM_PUT_FULL_STATE) {
401         ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
402                            &env->CSR_CPUID);
403     }
404 
405     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
406                            &env->CSR_PRCFG1);
407 
408     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
409                            &env->CSR_PRCFG2);
410 
411     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
412                            &env->CSR_PRCFG3);
413 
414     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
415                            &env->CSR_SAVE[0]);
416 
417     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
418                            &env->CSR_SAVE[1]);
419 
420     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
421                            &env->CSR_SAVE[2]);
422 
423     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
424                            &env->CSR_SAVE[3]);
425 
426     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
427                            &env->CSR_SAVE[4]);
428 
429     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
430                            &env->CSR_SAVE[5]);
431 
432     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
433                            &env->CSR_SAVE[6]);
434 
435     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
436                            &env->CSR_SAVE[7]);
437 
438     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
439                            &env->CSR_TID);
440 
441     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
442                            &env->CSR_CNTC);
443 
444     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
445                            &env->CSR_TICLR);
446 
447     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
448                            &env->CSR_LLBCTL);
449 
450     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
451                            &env->CSR_IMPCTL1);
452 
453     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
454                            &env->CSR_IMPCTL2);
455 
456     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
457                            &env->CSR_TLBRENTRY);
458 
459     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
460                            &env->CSR_TLBRBADV);
461 
462     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
463                            &env->CSR_TLBRERA);
464 
465     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
466                            &env->CSR_TLBRSAVE);
467 
468     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
469                            &env->CSR_TLBRELO0);
470 
471     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
472                            &env->CSR_TLBRELO1);
473 
474     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
475                            &env->CSR_TLBREHI);
476 
477     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
478                            &env->CSR_TLBRPRMD);
479 
480     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
481                            &env->CSR_DMW[0]);
482 
483     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
484                            &env->CSR_DMW[1]);
485 
486     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
487                            &env->CSR_DMW[2]);
488 
489     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
490                            &env->CSR_DMW[3]);
491     /*
492      * The timer configuration must be written last, since TCFG is used
493      * to enable the guest timer
494      */
495     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
496                            &env->CSR_TVAL);
497 
498     ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
499                            &env->CSR_TCFG);
500     return ret;
501 }
502 
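/*
 * Fetch the FPU/vector state.  KVM packs the eight condition flags into
 * one byte each of the 64-bit fcc field, so they are unpacked into cf[]
 * here (and packed again in the put path below).
 */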
503 static int kvm_loongarch_get_regs_fp(CPUState *cs)
504 {
505     int ret, i;
506     struct kvm_fpu fpu;
507     CPULoongArchState *env = cpu_env(cs);
508 
509     ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
510     if (ret < 0) {
511         trace_kvm_failed_get_fpu(strerror(errno));
512         return ret;
513     }
514 
515     env->fcsr0 = fpu.fcsr;
516     for (i = 0; i < 32; i++) {
517         env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0];
518         env->fpr[i].vreg.UD[1] = fpu.fpr[i].val64[1];
519         env->fpr[i].vreg.UD[2] = fpu.fpr[i].val64[2];
520         env->fpr[i].vreg.UD[3] = fpu.fpr[i].val64[3];
521     }
522     for (i = 0; i < 8; i++) {
523         env->cf[i] = fpu.fcc & 0xFF;
524         fpu.fcc = fpu.fcc >> 8;
525     }
526 
527     return ret;
528 }
529 
530 static int kvm_loongarch_put_regs_fp(CPUState *cs)
531 {
532     int ret, i;
533     struct kvm_fpu fpu;
534     CPULoongArchState *env = cpu_env(cs);
535 
536     fpu.fcsr = env->fcsr0;
537     fpu.fcc = 0;
538     for (i = 0; i < 32; i++) {
539         fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0];
540         fpu.fpr[i].val64[1] = env->fpr[i].vreg.UD[1];
541         fpu.fpr[i].val64[2] = env->fpr[i].vreg.UD[2];
542         fpu.fpr[i].val64[3] = env->fpr[i].vreg.UD[3];
543     }
544 
545     for (i = 0; i < 8; i++) {
546         fpu.fcc |= env->cf[i] << (8 * i);
547     }
548 
549     ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
550     if (ret < 0) {
551         trace_kvm_failed_put_fpu(strerror(errno));
552     }
553 
554     return ret;
555 }
556 
557 static int kvm_loongarch_put_lbt(CPUState *cs)
558 {
559     CPULoongArchState *env = cpu_env(cs);
560     uint64_t val;
561     int ret;
562 
563     /* Check whether the VM supports LBT first */
564     if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) {
565         return 0;
566     }
567 
568     /* Set the six LBT registers: scr0-scr3, eflags and ftop */
569     ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0);
570     ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1);
571     ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2);
572     ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3);
573     /*
574      * Be careful: KVM_REG_LOONGARCH_LBT_FTOP is defined as 64-bit, but
575      * lbt.ftop is 32-bit; the same applies to KVM_REG_LOONGARCH_LBT_EFLAGS
576      */
577     val = env->lbt.eflags;
578     ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val);
579     val = env->lbt.ftop;
580     ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val);
581 
582     return ret;
583 }
584 
585 static int kvm_loongarch_get_lbt(CPUState *cs)
586 {
587     CPULoongArchState *env = cpu_env(cs);
588     uint64_t val;
589     int ret;
590 
591     /* Check whether the VM supports LBT first */
592     if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) {
593         return 0;
594     }
595 
596     /* Get the six LBT registers: scr0-scr3, eflags and ftop */
597     ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0);
598     ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1);
599     ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2);
600     ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3);
601     ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val);
602     env->lbt.eflags = (uint32_t)val;
603     ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val);
604     env->lbt.ftop = (uint32_t)val;
605 
606     return ret;
607 }
608 
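/*
 * Reset the vCPU state inside KVM.  The value written to
 * KVM_REG_LOONGARCH_VCPU_RESET is apparently ignored, hence the dummy
 * "unused" payload.
 */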
609 void kvm_arch_reset_vcpu(CPUState *cs)
610 {
611     CPULoongArchState *env = cpu_env(cs);
612     int ret = 0;
613     uint64_t unused = 0;
614 
615     env->mp_state = KVM_MP_STATE_RUNNABLE;
616     ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_VCPU_RESET, &unused);
617     if (ret) {
618         error_report("Failed to set KVM_REG_LOONGARCH_VCPU_RESET: %s",
619                      strerror(errno));
620         exit(EXIT_FAILURE);
621     }
622 }
623 
624 static int kvm_loongarch_get_mpstate(CPUState *cs)
625 {
626     int ret = 0;
627     struct kvm_mp_state mp_state;
628     CPULoongArchState *env = cpu_env(cs);
629 
630     if (cap_has_mp_state) {
631         ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
632         if (ret) {
633             trace_kvm_failed_get_mpstate(strerror(errno));
634             return ret;
635         }
636         env->mp_state = mp_state.mp_state;
637     }
638 
639     return ret;
640 }
641 
642 static int kvm_loongarch_put_mpstate(CPUState *cs)
643 {
644     int ret = 0;
645     struct kvm_mp_state mp_state = {
646         .mp_state = cpu_env(cs)->mp_state
647     };
648 
649     if (cap_has_mp_state) {
650         ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state);
651         if (ret) {
652             trace_kvm_failed_put_mpstate(strerror(errno));
653         }
654     }
655 
656     return ret;
657 }
658 
659 static int kvm_loongarch_get_cpucfg(CPUState *cs)
660 {
661     int i, ret = 0;
662     uint64_t val;
663     CPULoongArchState *env = cpu_env(cs);
664 
665     for (i = 0; i < 21; i++) {
666         ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
667         if (ret < 0) {
668             trace_kvm_failed_get_cpucfg(strerror(errno));
669         }
670         env->cpucfg[i] = (uint32_t)val;
671     }
672     return ret;
673 }
674 
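/*
 * Mask the configured cpucfg[2] with what the host KVM actually supports,
 * then pin the FP/LLFTP version fields to their minimal values.
 */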
675 static int kvm_check_cpucfg2(CPUState *cs)
676 {
677     int ret;
678     uint64_t val;
679     struct kvm_device_attr attr = {
680         .group = KVM_LOONGARCH_VCPU_CPUCFG,
681         .attr = 2,
682         .addr = (uint64_t)&val,
683     };
684     CPULoongArchState *env = cpu_env(cs);
685 
686     ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
687 
688     if (!ret) {
689         kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
690         env->cpucfg[2] &= val;
691 
692         if (FIELD_EX32(env->cpucfg[2], CPUCFG2, FP)) {
693             /* The FP minimal version is 1. */
694             env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, FP_VER, 1);
695         }
696 
697         if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LLFTP)) {
698             /* The LLFTP minimal version is 1. */
699             env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LLFTP_VER, 1);
700         }
701     }
702 
703     return ret;
704 }
705 
706 static int kvm_loongarch_put_cpucfg(CPUState *cs)
707 {
708     int i, ret = 0;
709     CPULoongArchState *env = cpu_env(cs);
710     uint64_t val;
711 
712     for (i = 0; i < 21; i++) {
713         if (i == 2) {
714             ret = kvm_check_cpucfg2(cs);
715             if (ret) {
716                 return ret;
717             }
718         }
719         val = env->cpucfg[i];
720         ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
721         if (ret < 0) {
722             trace_kvm_failed_put_cpucfg(strerror(errno));
723         }
724     }
725     return ret;
726 }
727 
728 int kvm_arch_get_registers(CPUState *cs, Error **errp)
729 {
730     int ret;
731 
732     ret = kvm_loongarch_get_regs_core(cs);
733     if (ret) {
734         return ret;
735     }
736 
737     ret = kvm_loongarch_get_cpucfg(cs);
738     if (ret) {
739         return ret;
740     }
741 
742     ret = kvm_loongarch_get_csr(cs);
743     if (ret) {
744         return ret;
745     }
746 
747     ret = kvm_loongarch_get_regs_fp(cs);
748     if (ret) {
749         return ret;
750     }
751 
752     ret = kvm_loongarch_get_lbt(cs);
753     if (ret) {
754         return ret;
755     }
756 
757     ret = kvm_get_stealtime(cs);
758     if (ret) {
759         return ret;
760     }
761 
762     ret = kvm_loongarch_get_mpstate(cs);
763     return ret;
764 }
765 
766 int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
767 {
768     int ret;
769     static int once;
770 
771     ret = kvm_loongarch_put_regs_core(cs);
772     if (ret) {
773         return ret;
774     }
775 
776     ret = kvm_loongarch_put_cpucfg(cs);
777     if (ret) {
778         return ret;
779     }
780 
781     ret = kvm_loongarch_put_csr(cs, level);
782     if (ret) {
783         return ret;
784     }
785 
786     ret = kvm_loongarch_put_regs_fp(cs);
787     if (ret) {
788         return ret;
789     }
790 
791     ret = kvm_loongarch_put_lbt(cs);
792     if (ret) {
793         return ret;
794     }
795 
796     if (!once) {
797         ret = kvm_set_pv_features(cs);
798         if (ret) {
799             return ret;
800         }
801         once = 1;
802     }
803 
804     if (level >= KVM_PUT_FULL_STATE) {
805         /*
806          * Only KVM_PUT_FULL_STATE is required; the KVM kernel will clear
807          * guest_addr on KVM_PUT_RESET_STATE
808          */
809         ret = kvm_set_stealtime(cs);
810         if (ret) {
811             return ret;
812         }
813     }
814 
815     ret = kvm_loongarch_put_mpstate(cs);
816     return ret;
817 }
818 
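/*
 * VM state change handler, presumably meant to keep the guest's constant
 * timer consistent across stop/start: the counter is saved when the VM
 * stops and restored when it resumes.
 */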
819 static void kvm_loongarch_vm_stage_change(void *opaque, bool running,
820                                           RunState state)
821 {
822     int ret;
823     CPUState *cs = opaque;
824     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
825 
826     if (running) {
827         ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
828                               &cpu->kvm_state_counter);
829         if (ret < 0) {
830             trace_kvm_failed_put_counter(strerror(errno));
831         }
832     } else {
833         ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
834                               &cpu->kvm_state_counter);
835         if (ret < 0) {
836             trace_kvm_failed_get_counter(strerror(errno));
837         }
838     }
839 }
840 
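/*
 * Probe whether the host KVM supports an optional LoongArch feature,
 * preferring the VM-level feature-control attributes and falling back to
 * the older per-vCPU CPUCFG probing interface for LSX/LASX.
 */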
841 static bool kvm_feature_supported(CPUState *cs, enum loongarch_features feature)
842 {
843     int ret;
844     struct kvm_device_attr attr;
845     uint64_t val;
846 
847     switch (feature) {
848     case LOONGARCH_FEATURE_LSX:
849         attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
850         attr.attr = KVM_LOONGARCH_VM_FEAT_LSX;
851         ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
852         if (ret == 0) {
853             return true;
854         }
855 
856         /* Fall back to the old kernel detection interface */
857         val = 0;
858         attr.group = KVM_LOONGARCH_VCPU_CPUCFG;
859         /* Cpucfg2 */
860         attr.attr  = 2;
861         attr.addr = (uint64_t)&val;
862         ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
863         if (!ret) {
864             ret = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
865             if (ret) {
866                 return false;
867             }
868 
869             ret = FIELD_EX32((uint32_t)val, CPUCFG2, LSX);
870             return (ret != 0);
871         }
872         return false;
873 
874     case LOONGARCH_FEATURE_LASX:
875         attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
876         attr.attr = KVM_LOONGARCH_VM_FEAT_LASX;
877         ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
878         if (ret == 0) {
879             return true;
880         }
881 
882         /* Fall back to the old kernel detection interface */
883         val = 0;
884         attr.group = KVM_LOONGARCH_VCPU_CPUCFG;
885         /* Cpucfg2 */
886         attr.attr  = 2;
887         attr.addr = (uint64_t)&val;
888         ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);
889         if (!ret) {
890             ret = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
891             if (ret) {
892                 return false;
893             }
894 
895             ret = FIELD_EX32((uint32_t)val, CPUCFG2, LASX);
896             return (ret != 0);
897         }
898         return false;
899 
900     case LOONGARCH_FEATURE_LBT:
901         /*
902          * Return true only if all of the LBT features are supported:
903          *  KVM_LOONGARCH_VM_FEAT_X86BT
904          *  KVM_LOONGARCH_VM_FEAT_ARMBT
905          *  KVM_LOONGARCH_VM_FEAT_MIPSBT
906          */
907         attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
908         attr.attr = KVM_LOONGARCH_VM_FEAT_X86BT;
909         ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
910         attr.attr = KVM_LOONGARCH_VM_FEAT_ARMBT;
911         ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
912         attr.attr = KVM_LOONGARCH_VM_FEAT_MIPSBT;
913         ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
914         return (ret == 0);
915 
916     case LOONGARCH_FEATURE_PMU:
917         attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
918         attr.attr = KVM_LOONGARCH_VM_FEAT_PMU;
919         ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
920         return (ret == 0);
921 
922     case LOONGARCH_FEATURE_PV_IPI:
923         attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
924         attr.attr = KVM_LOONGARCH_VM_FEAT_PV_IPI;
925         ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
926         return (ret == 0);
927 
928     case LOONGARCH_FEATURE_STEALTIME:
929         attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;
930         attr.attr = KVM_LOONGARCH_VM_FEAT_PV_STEALTIME;
931         ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr);
932         return (ret == 0);
933 
934     default:
935         return false;
936     }
937 
938     return false;
939 }
940 
941 static int kvm_cpu_check_lsx(CPUState *cs, Error **errp)
942 {
943     CPULoongArchState *env = cpu_env(cs);
944     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
945     bool kvm_supported;
946 
947     kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LSX);
948     env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 0);
949     if (cpu->lsx == ON_OFF_AUTO_ON) {
950         if (kvm_supported) {
951             env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 1);
952         } else {
953             error_setg(errp, "'lsx' feature not supported by KVM on this host");
954             return -ENOTSUP;
955         }
956     } else if ((cpu->lsx == ON_OFF_AUTO_AUTO) && kvm_supported) {
957         env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 1);
958     }
959 
960     return 0;
961 }
962 
963 static int kvm_cpu_check_lasx(CPUState *cs, Error **errp)
964 {
965     CPULoongArchState *env = cpu_env(cs);
966     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
967     bool kvm_supported;
968 
969     kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LASX);
970     env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 0);
971     if (cpu->lasx == ON_OFF_AUTO_ON) {
972         if (kvm_supported) {
973             env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 1);
974         } else {
975             error_setg(errp, "'lasx' feature not supported by KVM on host");
976             return -ENOTSUP;
977         }
978     } else if ((cpu->lasx == ON_OFF_AUTO_AUTO) && kvm_supported) {
979         env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 1);
980     }
981 
982     return 0;
983 }
984 
985 static int kvm_cpu_check_lbt(CPUState *cs, Error **errp)
986 {
987     CPULoongArchState *env = cpu_env(cs);
988     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
989     bool kvm_supported;
990 
991     kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LBT);
992     if (cpu->lbt == ON_OFF_AUTO_ON) {
993         if (kvm_supported) {
994             env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7);
995         } else {
996             error_setg(errp, "'lbt' feature not supported by KVM on this host");
997             return -ENOTSUP;
998         }
999     } else if ((cpu->lbt == ON_OFF_AUTO_AUTO) && kvm_supported) {
1000         env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7);
1001     }
1002 
1003     return 0;
1004 }
1005 
1006 static int kvm_cpu_check_pmu(CPUState *cs, Error **errp)
1007 {
1008     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
1009     CPULoongArchState *env = cpu_env(cs);
1010     bool kvm_supported;
1011 
1012     kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_PMU);
1013     if (cpu->pmu == ON_OFF_AUTO_ON) {
1014         if (!kvm_supported) {
1015             error_setg(errp, "'pmu' feature not supported by KVM on the host");
1016             return -ENOTSUP;
1017         }
1018     } else if (cpu->pmu != ON_OFF_AUTO_AUTO) {
1019         /* disable pmu if ON_OFF_AUTO_OFF is set */
1020         kvm_supported = false;
1021     }
1022 
1023     if (kvm_supported) {
1024         env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMP, 1);
1025         env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMNUM, 3);
1026         env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMBITS, 63);
1027         env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, UPM, 1);
1028     }
1029     return 0;
1030 }
1031 
1032 static int kvm_cpu_check_pv_features(CPUState *cs, Error **errp)
1033 {
1034     MachineState *ms = MACHINE(qdev_get_machine());
1035     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
1036     CPULoongArchState *env = cpu_env(cs);
1037     bool kvm_supported;
1038 
1039     kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_PV_IPI);
1040     if (cpu->kvm_pv_ipi == ON_OFF_AUTO_ON) {
1041         if (!kvm_supported) {
1042             error_setg(errp, "'pv_ipi' feature not supported by KVM host");
1043             return -ENOTSUP;
1044         }
1045     } else if (cpu->kvm_pv_ipi != ON_OFF_AUTO_AUTO) {
1046         kvm_supported = false;
1047     }
1048 
1049     if (kvm_supported) {
1050         env->pv_features |= BIT(KVM_FEATURE_IPI);
1051     }
1052 
1053     kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_STEALTIME);
1054     if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
1055         if (!kvm_supported) {
1056             error_setg(errp, "'kvm stealtime' feature not supported by KVM host");
1057             return -ENOTSUP;
1058         }
1059     } else if (cpu->kvm_steal_time != ON_OFF_AUTO_AUTO) {
1060         kvm_supported = false;
1061     }
1062 
1063     if (kvm_supported) {
1064         env->pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
1065     }
1066 
1067     if (object_dynamic_cast(OBJECT(ms), TYPE_LOONGARCH_VIRT_MACHINE)) {
1068         LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms);
1069 
1070         if (virt_is_veiointc_enabled(lvms)) {
1071             env->pv_features |= BIT(KVM_FEATURE_VIRT_EXTIOI);
1072         }
1073     }
1074     return 0;
1075 }
1076 
1077 int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
1078 {
1079     return 0;
1080 }
1081 
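/*
 * Per-vCPU KVM initialization: install the VM state change handler, cache
 * the software breakpoint instruction advertised by KVM, and validate the
 * requested LSX/LASX/LBT/PMU/paravirt features against the host.
 */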
1082 int kvm_arch_init_vcpu(CPUState *cs)
1083 {
1084     uint64_t val;
1085     int ret;
1086     Error *local_err = NULL;
1087     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
1088 
1089     cpu->vmsentry = qemu_add_vm_change_state_handler(
1090                     kvm_loongarch_vm_stage_change, cs);
1091 
1092     if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &val)) {
1093         brk_insn = val;
1094     }
1095 
1096     ret = kvm_cpu_check_lsx(cs, &local_err);
1097     if (ret < 0) {
1098         error_report_err(local_err);
1099         return ret;
1100     }
1101 
1102     ret = kvm_cpu_check_lasx(cs, &local_err);
1103     if (ret < 0) {
1104         error_report_err(local_err);
1105         return ret;
1106     }
1107 
1108     ret = kvm_cpu_check_lbt(cs, &local_err);
1109     if (ret < 0) {
1110         error_report_err(local_err);
1111         return ret;
1112     }
1113 
1114     ret = kvm_cpu_check_pmu(cs, &local_err);
1115     if (ret < 0) {
1116         error_report_err(local_err);
1117         return ret;
1118     }
1119 
1120     ret = kvm_cpu_check_pv_features(cs, &local_err);
1121     if (ret < 0) {
1122         error_report_err(local_err);
1123         return ret;
1124     }
1125 
1126     return 0;
1127 }
1128 
1129 static bool loongarch_get_lbt(Object *obj, Error **errp)
1130 {
1131     return LOONGARCH_CPU(obj)->lbt != ON_OFF_AUTO_OFF;
1132 }
1133 
1134 static void loongarch_set_lbt(Object *obj, bool value, Error **errp)
1135 {
1136     LoongArchCPU *cpu = LOONGARCH_CPU(obj);
1137 
1138     cpu->lbt = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
1139 }
1140 
1141 static bool loongarch_get_pmu(Object *obj, Error **errp)
1142 {
1143     return LOONGARCH_CPU(obj)->pmu != ON_OFF_AUTO_OFF;
1144 }
1145 
1146 static void loongarch_set_pmu(Object *obj, bool value, Error **errp)
1147 {
1148     LoongArchCPU *cpu = LOONGARCH_CPU(obj);
1149 
1150     cpu->pmu = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
1151 }
1152 
1153 static bool kvm_pv_ipi_get(Object *obj, Error **errp)
1154 {
1155     return LOONGARCH_CPU(obj)->kvm_pv_ipi != ON_OFF_AUTO_OFF;
1156 }
1157 
1158 static void kvm_pv_ipi_set(Object *obj, bool value, Error **errp)
1159 {
1160     LoongArchCPU *cpu = LOONGARCH_CPU(obj);
1161 
1162     cpu->kvm_pv_ipi = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
1163 }
1164 
1165 static bool kvm_steal_time_get(Object *obj, Error **errp)
1166 {
1167     return LOONGARCH_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
1168 }
1169 
1170 static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
1171 {
1172     LoongArchCPU *cpu = LOONGARCH_CPU(obj);
1173 
1174     cpu->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
1175 }
1176 
1177 void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu)
1178 {
1179     cpu->lbt = ON_OFF_AUTO_AUTO;
1180     object_property_add_bool(OBJECT(cpu), "lbt", loongarch_get_lbt,
1181                              loongarch_set_lbt);
1182     object_property_set_description(OBJECT(cpu), "lbt",
1183                                    "Set off to disable Binary Translation.");
1184 
1185     cpu->pmu = ON_OFF_AUTO_AUTO;
1186     object_property_add_bool(OBJECT(cpu), "pmu", loongarch_get_pmu,
1187                              loongarch_set_pmu);
1188     object_property_set_description(OBJECT(cpu), "pmu",
1189                                "Set off to disable performance monitor unit.");
1190 
1191     cpu->kvm_pv_ipi = ON_OFF_AUTO_AUTO;
1192     object_property_add_bool(OBJECT(cpu), "kvm-pv-ipi", kvm_pv_ipi_get,
1193                              kvm_pv_ipi_set);
1194     object_property_set_description(OBJECT(cpu), "kvm-pv-ipi",
1195                                     "Set off to disable KVM paravirt IPI.");
1196 
1197     cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
1198     object_property_add_bool(OBJECT(cpu), "kvm-steal-time", kvm_steal_time_get,
1199                              kvm_steal_time_set);
1200     object_property_set_description(OBJECT(cpu), "kvm-steal-time",
1201                                     "Set off to disable KVM steal time.");
1202 }
1203 
1204 int kvm_arch_destroy_vcpu(CPUState *cs)
1205 {
1206     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
1207 
1208     qemu_del_vm_change_state_handler(cpu->vmsentry);
1209     return 0;
1210 }
1211 
1212 unsigned long kvm_arch_vcpu_id(CPUState *cs)
1213 {
1214     return cs->cpu_index;
1215 }
1216 
1217 int kvm_arch_release_virq_post(int virq)
1218 {
1219     return 0;
1220 }
1221 
1222 int kvm_arch_msi_data_to_gsi(uint32_t data)
1223 {
1224     abort();
1225 }
1226 
1227 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
1228                              uint64_t address, uint32_t data, PCIDevice *dev)
1229 {
1230     return 0;
1231 }
1232 
1233 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
1234                                 int vector, PCIDevice *dev)
1235 {
1236     return 0;
1237 }
1238 
1239 void kvm_arch_init_irq_routing(KVMState *s)
1240 {
1241 }
1242 
1243 int kvm_arch_get_default_type(MachineState *ms)
1244 {
1245     return 0;
1246 }
1247 
1248 int kvm_arch_init(MachineState *ms, KVMState *s)
1249 {
1250     cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
1251     return 0;
1252 }
1253 
1254 int kvm_arch_irqchip_create(KVMState *s)
1255 {
1256     return 0;
1257 }
1258 
1259 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1260 {
1261 }
1262 
1263 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1264 {
1265     return MEMTXATTRS_UNSPECIFIED;
1266 }
1267 
1268 int kvm_arch_process_async_events(CPUState *cs)
1269 {
1270     return cs->halted;
1271 }
1272 
1273 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
1274 {
1275     return true;
1276 }
1277 
1278 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
1279 {
1280     if (kvm_sw_breakpoints_active(cpu)) {
1281         dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
1282     }
1283 }
1284 
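/*
 * Software breakpoints: save the original instruction and patch in the
 * break instruction obtained from KVM_REG_LOONGARCH_DEBUG_INST.
 */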
1285 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1286 {
1287     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
1288         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
1289         error_report("%s failed", __func__);
1290         return -EINVAL;
1291     }
1292     return 0;
1293 }
1294 
1295 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1296 {
1297     static uint32_t brk;
1298 
1299     if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
1300         brk != brk_insn ||
1301         cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
1302         error_report("%s failed", __func__);
1303         return -EINVAL;
1304     }
1305     return 0;
1306 }
1307 
1308 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
1309 {
1310     return -ENOSYS;
1311 }
1312 
1313 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
1314 {
1315     return -ENOSYS;
1316 }
1317 
1318 void kvm_arch_remove_all_hw_breakpoints(void)
1319 {
1320 }
1321 
1322 static bool kvm_loongarch_handle_debug(CPUState *cs, struct kvm_run *run)
1323 {
1324     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
1325     CPULoongArchState *env = &cpu->env;
1326 
1327     kvm_cpu_synchronize_state(cs);
1328     if (cs->singlestep_enabled) {
1329         return true;
1330     }
1331 
1332     if (kvm_find_sw_breakpoint(cs, env->pc)) {
1333         return true;
1334     }
1335 
1336     return false;
1337 }
1338 
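/*
 * Main KVM exit handler: forward IOCSR accesses to the IOCSR address
 * space, and turn debug exits into EXCP_DEBUG when they correspond to a
 * single-step or one of our software breakpoints.
 */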
1339 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1340 {
1341     int ret = 0;
1342     CPULoongArchState *env = cpu_env(cs);
1343     MemTxAttrs attrs = {};
1344 
1345     attrs.requester_id = env_cpu(env)->cpu_index;
1346 
1347     trace_kvm_arch_handle_exit(run->exit_reason);
1348     switch (run->exit_reason) {
1349     case KVM_EXIT_LOONGARCH_IOCSR:
1350         address_space_rw(env->address_space_iocsr,
1351                          run->iocsr_io.phys_addr,
1352                          attrs,
1353                          run->iocsr_io.data,
1354                          run->iocsr_io.len,
1355                          run->iocsr_io.is_write);
1356         break;
1357 
1358     case KVM_EXIT_DEBUG:
1359         if (kvm_loongarch_handle_debug(cs, run)) {
1360             ret = EXCP_DEBUG;
1361         }
1362         break;
1363 
1364     default:
1365         ret = -1;
1366         warn_report("KVM: unknown exit reason %d", run->exit_reason);
1367         break;
1368     }
1369     return ret;
1370 }
1371 
1372 int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level)
1373 {
1374     struct kvm_interrupt intr;
1375     CPUState *cs = CPU(cpu);
1376 
1377     if (level) {
1378         intr.irq = irq;
1379     } else {
1380         intr.irq = -irq;
1381     }
1382 
1383     trace_kvm_set_intr(irq, level);
1384     return kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
1385 }
1386 
1387 void kvm_arch_accel_class_init(ObjectClass *oc)
1388 {
1389 }
1390