1 /*
2 * RISC-V VMState Description
3 *
4 * Copyright (c) 2020 Huawei Technologies Co., Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "qemu/error-report.h"
22 #include "system/kvm.h"
23 #include "migration/cpu.h"
24 #include "exec/icount.h"
25 #include "debug.h"
26
pmp_needed(void * opaque)27 static bool pmp_needed(void *opaque)
28 {
29 RISCVCPU *cpu = opaque;
30
31 return cpu->cfg.pmp;
32 }
33
pmp_post_load(void * opaque,int version_id)34 static int pmp_post_load(void *opaque, int version_id)
35 {
36 RISCVCPU *cpu = opaque;
37 CPURISCVState *env = &cpu->env;
38 int i;
39
40 for (i = 0; i < MAX_RISCV_PMPS; i++) {
41 pmp_update_rule_addr(env, i);
42 }
43 pmp_update_rule_nums(env);
44
45 return 0;
46 }
47
/*
 * Wire format of a single PMP entry: only the raw address and config
 * registers are migrated; cached rule state is rebuilt in pmp_post_load().
 */
static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};
58
/*
 * "cpu/pmp" subsection: the full PMP entry array.  Sent only when
 * pmp_needed() is true; post_load rebuilds derived rule state.
 */
static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};
71
hyper_needed(void * opaque)72 static bool hyper_needed(void *opaque)
73 {
74 RISCVCPU *cpu = opaque;
75 CPURISCVState *env = &cpu->env;
76
77 return riscv_has_ext(env, RVH);
78 }
79
/*
 * "cpu/hyper" subsection: hypervisor (H extension) CSR state.
 * Field order is part of the migration wire format — append only,
 * bumping version_id, never reorder.
 */
static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = hyper_needed,
    .fields = (const VMStateField[]) {
        /* HS-mode CSRs */
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINT64(env.hideleg, RISCVCPU),
        VMSTATE_UINT32(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINTTL(env.hgeie, RISCVCPU),
        VMSTATE_UINTTL(env.hgeip, RISCVCPU),
        VMSTATE_UINT64(env.hvien, RISCVCPU),
        VMSTATE_UINT64(env.hvip, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),
        VMSTATE_UINT64(env.vstimecmp, RISCVCPU),

        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
        VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),

        /* VS-mode (guest) CSRs */
        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),
        VMSTATE_UINT64(env.vsie, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        /* HS-mode shadow copies of supervisor CSRs (_hs suffix) */
        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};
127
vector_needed(void * opaque)128 static bool vector_needed(void *opaque)
129 {
130 RISCVCPU *cpu = opaque;
131 CPURISCVState *env = &cpu->env;
132
133 return riscv_has_ext(env, RVV);
134 }
135
/*
 * "cpu/vector" subsection: the vector register file (flattened to
 * 64-bit words) and the vector CSR state.
 */
static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = vector_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_BOOL(env.vill, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
152
/*
 * The pointer-masking subsection is never sent; the predicate is
 * unconditionally false.
 */
static bool pointermasking_needed(void *opaque)
{
    (void)opaque;
    return false;
}
157
/*
 * "cpu/pointer_masking" subsection: has no fields and needed() always
 * returns false, so it is never transmitted.  NOTE(review): presumably
 * retained as a placeholder for incoming-stream compatibility with older
 * versions — confirm before removing.
 */
static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = pointermasking_needed,
    .fields = (const VMStateField[]) {

        VMSTATE_END_OF_LIST()
    }
};
168
rv128_needed(void * opaque)169 static bool rv128_needed(void *opaque)
170 {
171 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque);
172
173 return mcc->def->misa_mxl_max == MXL_RV128;
174 }
175
/*
 * "cpu/rv128" subsection: the upper 64 bits of the RV128 GPRs and the
 * high halves of the machine/supervisor scratch registers.
 */
static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
188
189 #ifdef CONFIG_KVM
/* Subsection predicate: KVM timer state exists only under the KVM accel. */
static bool kvmtimer_needed(void *opaque)
{
    return kvm_enabled();
}
194
cpu_kvmtimer_post_load(void * opaque,int version_id)195 static int cpu_kvmtimer_post_load(void *opaque, int version_id)
196 {
197 RISCVCPU *cpu = opaque;
198 CPURISCVState *env = &cpu->env;
199
200 env->kvm_timer_dirty = true;
201 return 0;
202 }
203
/*
 * "cpu/kvmtimer" subsection: timer state owned by KVM; post_load flags
 * it dirty so it is written back to the kernel on resume.
 */
static const VMStateDescription vmstate_kvmtimer = {
    .name = "cpu/kvmtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = kvmtimer_needed,
    .post_load = cpu_kvmtimer_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
217 #endif
218
debug_needed(void * opaque)219 static bool debug_needed(void *opaque)
220 {
221 RISCVCPU *cpu = opaque;
222
223 return cpu->cfg.debug;
224 }
225
debug_post_load(void * opaque,int version_id)226 static int debug_post_load(void *opaque, int version_id)
227 {
228 RISCVCPU *cpu = opaque;
229 CPURISCVState *env = &cpu->env;
230
231 if (icount_enabled()) {
232 env->itrigger_enabled = riscv_itrigger_enabled(env);
233 }
234
235 return 0;
236 }
237
/*
 * "cpu/debug" subsection: debug trigger (Sdtrig) CSR state.  Derived
 * itrigger state is recomputed in debug_post_load().
 */
static const VMStateDescription vmstate_debug = {
    .name = "cpu/debug",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = debug_needed,
    .post_load = debug_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.trigger_cur, RISCVCPU),
        VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata3, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_END_OF_LIST()
    }
};
252
riscv_cpu_post_load(void * opaque,int version_id)253 static int riscv_cpu_post_load(void *opaque, int version_id)
254 {
255 RISCVCPU *cpu = opaque;
256 CPURISCVState *env = &cpu->env;
257
258 env->xl = cpu_recompute_xl(env);
259 return 0;
260 }
261
smstateen_needed(void * opaque)262 static bool smstateen_needed(void *opaque)
263 {
264 RISCVCPU *cpu = opaque;
265
266 return cpu->cfg.ext_smstateen;
267 }
268
/*
 * Smstateen CSR state (mstateen/hstateen/sstateen arrays).
 * NOTE(review): the name "cpu/smtateen" looks like a typo for
 * "cpu/smstateen", but it is part of the migration wire format and
 * must not be changed without breaking cross-version migration.
 */
static const VMStateDescription vmstate_smstateen = {
    .name = "cpu/smtateen",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smstateen_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
281
envcfg_needed(void * opaque)282 static bool envcfg_needed(void *opaque)
283 {
284 RISCVCPU *cpu = opaque;
285 CPURISCVState *env = &cpu->env;
286
287 return (env->priv_ver >= PRIV_VERSION_1_12_0 ? 1 : 0);
288 }
289
/*
 * "cpu/envcfg" subsection: machine/supervisor/hypervisor environment
 * configuration CSRs (priv spec >= 1.12, see envcfg_needed()).
 */
static const VMStateDescription vmstate_envcfg = {
    .name = "cpu/envcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = envcfg_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.menvcfg, RISCVCPU),
        VMSTATE_UINTTL(env.senvcfg, RISCVCPU),
        VMSTATE_UINT64(env.henvcfg, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
302
ctr_needed(void * opaque)303 static bool ctr_needed(void *opaque)
304 {
305 RISCVCPU *cpu = opaque;
306
307 return cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr;
308 }
309
/*
 * "cpu/ctr" subsection: control transfer records (Smctr/Ssctr) CSRs and
 * the record buffers, sized for the maximum depth.
 */
static const VMStateDescription vmstate_ctr = {
    .name = "cpu/ctr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ctr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(env.mctrctl, RISCVCPU),
        VMSTATE_UINT32(env.sctrdepth, RISCVCPU),
        VMSTATE_UINT32(env.sctrstatus, RISCVCPU),
        VMSTATE_UINT64(env.vsctrctl, RISCVCPU),
        VMSTATE_UINT64_ARRAY(env.ctr_src, RISCVCPU, 16 << SCTRDEPTH_MAX),
        VMSTATE_UINT64_ARRAY(env.ctr_dst, RISCVCPU, 16 << SCTRDEPTH_MAX),
        VMSTATE_UINT64_ARRAY(env.ctr_data, RISCVCPU, 16 << SCTRDEPTH_MAX),
        VMSTATE_END_OF_LIST()
    }
};
326
pmu_needed(void * opaque)327 static bool pmu_needed(void *opaque)
328 {
329 RISCVCPU *cpu = opaque;
330
331 return (cpu->cfg.pmu_mask > 0);
332 }
333
/*
 * Wire format of one PMU counter (PMUCTRState): current and previous
 * low/high counter values.  Used per-element by the pmu_ctrs
 * VMSTATE_STRUCT_ARRAY in vmstate_riscv_cpu.
 */
static const VMStateDescription vmstate_pmu_ctr_state = {
    .name = "cpu/pmu",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = pmu_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
        VMSTATE_END_OF_LIST()
    }
};
347
jvt_needed(void * opaque)348 static bool jvt_needed(void *opaque)
349 {
350 RISCVCPU *cpu = opaque;
351
352 return cpu->cfg.ext_zcmt;
353 }
354
/* "cpu/jvt" subsection: the Zcmt jump-vector-table CSR. */
static const VMStateDescription vmstate_jvt = {
    .name = "cpu/jvt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = jvt_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.jvt, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
365
elp_needed(void * opaque)366 static bool elp_needed(void *opaque)
367 {
368 RISCVCPU *cpu = opaque;
369
370 return cpu->cfg.ext_zicfilp;
371 }
372
/* "cpu/elp" subsection: Zicfilp expected-landing-pad flag. */
static const VMStateDescription vmstate_elp = {
    .name = "cpu/elp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = elp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(env.elp, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
383
ssp_needed(void * opaque)384 static bool ssp_needed(void *opaque)
385 {
386 RISCVCPU *cpu = opaque;
387
388 return cpu->cfg.ext_zicfiss;
389 }
390
/* "cpu/ssp" subsection: Zicfiss shadow-stack pointer CSR. */
static const VMStateDescription vmstate_ssp = {
    .name = "cpu/ssp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ssp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.ssp, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};
401
/*
 * Top-level migration description for the RISC-V CPU.  The base fields
 * are always sent; optional state lives in the .subsections, each gated
 * by its own needed() predicate.  Field order and types are the wire
 * format: append new fields at the end and bump version_id; never
 * reorder or remove existing entries.
 */
const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 10,
    .minimum_version_id = 10,
    .post_load = riscv_cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
        VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        /*
         * NOTE(review): 4-byte hole keeping the stream layout stable where
         * a field was presumably removed — confirm before reusing it.
         */
        VMSTATE_UNUSED(4),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_BOOL(env.virt_enabled, RISCVCPU),
        VMSTATE_UINT64(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINT64(env.mip, RISCVCPU),
        VMSTATE_UINT64(env.miclaim, RISCVCPU),
        VMSTATE_UINT64(env.mie, RISCVCPU),
        VMSTATE_UINT64(env.mvien, RISCVCPU),
        VMSTATE_UINT64(env.mvip, RISCVCPU),
        VMSTATE_UINT64(env.sie, RISCVCPU),
        VMSTATE_UINT64(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.miselect, RISCVCPU),
        VMSTATE_UINTTL(env.siselect, RISCVCPU),
        VMSTATE_UINT32(env.scounteren, RISCVCPU),
        VMSTATE_UINT32(env.mcounteren, RISCVCPU),
        VMSTATE_UINT32(env.scountinhibit, RISCVCPU),
        VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
        /* PMU counters; per-element layout defined by vmstate_pmu_ctr_state */
        VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
                             vmstate_pmu_ctr_state, PMUCTRState),
        VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL_ARRAY(env.mhpmeventh_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.stimecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,
        &vmstate_rv128,
#ifdef CONFIG_KVM
        &vmstate_kvmtimer,
#endif
        &vmstate_envcfg,
        &vmstate_debug,
        &vmstate_smstateen,
        &vmstate_jvt,
        &vmstate_elp,
        &vmstate_ssp,
        &vmstate_ctr,
        NULL
    }
};
481