#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "kvm_arm.h"
#include "internals.h"
#include "cpu-features.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "target/arm/gtimer.h"

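/*
 * Each *_needed() predicate below is the .needed hook of an optional
 * vmstate subsection: it decides whether that subsection is written
 * into the outgoing migration stream.
 */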
static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
            ? cpu_isar_feature(aa64_fp_simd, cpu)
            : cpu_isar_feature(aa32_vfp_simd, cpu));
}

static bool vfp_fpcr_fpsr_needed(void *opaque)
{
    /*
     * If either the FPCR or the FPSR include set bits that are not
     * visible in the AArch32 FPSCR view of floating point control/status
     * then we must send the FPCR and FPSR as two separate fields in the
     * cpu/vfp/fpcr_fpsr subsection, and we will send a 0 for the old
     * FPSCR field in cpu/vfp.
     *
     * If all the set bits are representable in an AArch32 FPSCR then we
     * send that value as the cpu/vfp FPSCR field, and don't send the
     * cpu/vfp/fpcr_fpsr subsection.
     *
     * On incoming migration, if the cpu/vfp FPSCR field is non-zero we
     * use it, and if the fpcr_fpsr subsection is present we use that.
     * (The subsection will never be present with a non-zero FPSCR field,
     * and if FPSCR is zero and the subsection is not present that means
     * that FPSCR/FPSR/FPCR are zero.)
     *
     * This preserves migration compatibility with older QEMU versions,
     * in both directions.
     */
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return (vfp_get_fpcr(env) & ~FPSCR_FPCR_MASK) ||
        (vfp_get_fpsr(env) & ~FPSCR_FPSR_MASK);
}

static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (val) {
        /* 0 means we might have the data in the fpcr_fpsr subsection */
        vfp_set_fpscr(env, val);
    }
    return 0;
}

static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t fpscr = vfp_fpcr_fpsr_needed(opaque) ? 0 : vfp_get_fpscr(env);

    qemu_put_be32(f, fpscr);
    return 0;
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

static int get_fpcr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint64_t val = qemu_get_be64(f);

    vfp_set_fpcr(env, val);
    return 0;
}

static int put_fpcr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be64(f, vfp_get_fpcr(env));
    return 0;
}

static const VMStateInfo vmstate_fpcr = {
    .name = "fpcr",
    .get = get_fpcr,
    .put = put_fpcr,
};

static int get_fpsr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint64_t val = qemu_get_be64(f);

    vfp_set_fpsr(env, val);
    return 0;
}

static int put_fpsr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be64(f, vfp_get_fpsr(env));
    return 0;
}

static const VMStateInfo vmstate_fpsr = {
    .name = "fpsr",
    .get = get_fpsr,
    .put = put_fpsr,
};

static const VMStateDescription vmstate_vfp_fpcr_fpsr = {
    .name = "cpu/vfp/fpcr_fpsr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_fpcr_fpsr_needed,
    .fields = (const VMStateField[]) {
        {
            .name = "fpcr",
            .version_id = 0,
            .size = sizeof(uint64_t),
            .info = &vmstate_fpcr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        {
            .name = "fpsr",
            .version_id = 0,
            .size = sizeof(uint64_t),
            .info = &vmstate_fpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        /* For compatibility, store Qn out of Zn here.  */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp_fpcr_fpsr,
        NULL
    }
};

static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

/* The expression ARM_MAX_VQ - 2 is 0 for a pure AArch32 build,
 * and ARMPredicateReg is effectively empty.  This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two words of each Zreg are stored in VFP state.  */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};

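/* A full-width vector register, used by vmstate_za below for each ZA row. */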
static const VMStateDescription vmstate_vreg = {
    .name = "vreg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool za_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * When ZA storage is disabled, its contents are discarded.
     * It will be zeroed when ZA storage is re-enabled.
     */
    return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
}

static const VMStateDescription vmstate_za = {
    .name = "cpu/sme",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = za_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
                             vmstate_vreg, ARMVectorReg),
        VMSTATE_END_OF_LIST()
    }
};

static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

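/*
 * irq-line-state is always sent; keeping it in a subsection (rather than
 * bumping the top-level version) means streams from older QEMU that lack
 * it can still be accepted, which cpu_post_load() detects via the
 * UINT32_MAX sentinel set in cpu_pre_load().
 */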
static bool irq_line_state_needed(void *opaque)
{
    return true;
}

static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool wfxt_timer_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    /* We'll only have the timer object if FEAT_WFxT is implemented */
    return cpu->wfxt_timer;
}

static const VMStateDescription vmstate_wfxt_timer = {
    .name = "cpu/wfxt-timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = wfxt_timer_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_TIMER_PTR(wfxt_timer, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa32_mve, cpu);
}

static const VMStateDescription vmstate_m_mve = {
    .name = "cpu/m/mve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mve_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
        VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        &vmstate_m_mve,
        NULL
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.h. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav8r_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8) &&
        !arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav8r = {
    .name = "cpu/pmsav8/pmsav8r",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8r_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU,
                        pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pmsav8r,
        NULL
    }
};

static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

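/*
 * The CPU power state is migrated as a single boolean byte. Only the
 * stable PSCI_ON/PSCI_OFF states are migratable; put_power() fails the
 * migration if a power-state transition is in progress.
 */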
static int get_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
        return 1;
    }
}

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

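/*
 * Before saving, synchronize the coprocessor/system register state (from
 * KVM or from the TCG-maintained CPUARMState) into the cpreg_vmstate_*
 * arrays that vmstate_arm_cpu migrates.
 */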
static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_start(&cpu->env);
    }

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            g_assert_not_reached();
        }

        /*
         * kvm_arm_cpu_pre_save() must be called after
         * write_kvmstate_to_list()
         */
        kvm_arm_cpu_pre_save(cpu);
    } else {
        if (!write_cpustate_to_list(cpu, false)) {
            /* This should never fail. */
            g_assert_not_reached();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

static int cpu_post_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}

static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * In an inbound migration where on the source FPSCR/FPSR/FPCR are 0,
     * there will be no fpcr_fpsr subsection so we won't call vfp_set_fpcr()
     * and vfp_set_fpsr() from get_fpcr() and get_fpsr(); also the get_fpscr()
     * function will not call vfp_set_fpscr() because it will see a 0 in the
     * inbound data. Ensure that in this case we have a correctly set up
     * zero FPSCR/FPCR/FPSR.
     *
     * This is not strictly needed because FPSCR is zero out of reset, but
     * it avoids the possibility of future confusing migration bugs if some
     * future architecture change makes the reset value non-zero.
     */
    vfp_set_fpscr(env, 0);

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    if (!kvm_enabled()) {
        pmu_op_start(env);
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!kvm_arm_cpu_post_load(cpu)) {
            return -1;
        }
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /*
     * Misaligned thumb pc is architecturally impossible. Fail the
     * incoming migration. For TCG it would trigger the assert in
     * thumb_tr_translate_insn().
     */
    if (!is_a64(env) && env->thumb && (env->regs[15] & 1)) {
        return -1;
    }

    if (tcg_enabled()) {
        hw_breakpoint_update_all(cpu);
        hw_watchpoint_update_all(cpu);
    }

    /*
     * TCG gen_update_fp_context() relies on the invariant that
     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
     * forbid bogus incoming data with some other value.
     */
    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
        if (extract32(env->v7m.fpdscr[M_REG_NS],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
            extract32(env->v7m.fpdscr[M_REG_S],
                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
            return -1;
        }
    }

    if (!kvm_enabled()) {
        pmu_op_finish(env);
    }

    if (tcg_enabled()) {
        arm_rebuild_hflags(env);
    }

    return 0;
}

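/*
 * Top-level CPU migration description; optional and feature-dependent
 * state lives in the subsections listed at the end.
 */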
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UNUSED(sizeof(uint64_t)),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
        &vmstate_sve,
        &vmstate_za,
        &vmstate_serror,
        &vmstate_irq_line_state,
        &vmstate_wfxt_timer,
        NULL
    }
};