/* /qemu/target/ppc/machine.c (revision a6af54434400099b8afd59ba036cf9a662006d1e) */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "kvm_ppc.h"
#include "power8-pmu.h"
#include "system/replay.h"

static void post_load_update_msr(CPUPPCState *env)
{
    target_ulong msr = env->msr;

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring.  Note that this recomputes hflags.
     */
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);
}
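
/*
 * Why post_load_update_msr() flips the bits before storing: the XOR
 * guarantees that ppc_store_msr() sees every writable bit as a
 * transition, so any per-bit side effects are re-run for the restored
 * value.  MSR_TGPR is left untouched, presumably so the GPR/TGPR swap
 * logic is not triggered spuriously; MSR_HVB is excluded because
 * hypervisor state is not under mtmsr control.
 */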

static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get  = get_avr,
    .put  = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                             \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
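
/*
 * Wire-format note: each Altivec register travels as two big-endian
 * 64-bit words (u64[0], then u64[1]), independent of host endianness.
 * The VMSTATE_SUB_ARRAY start index of 32 selects env->vsr[32..63],
 * where the VMX registers live in the unified VSR register file.
 */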

static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get  = get_fpr,
    .put  = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                             \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
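
/*
 * Only the most-significant doubleword, VsrD(0), of vsr[0..31] is
 * migrated here: that half holds the classic FPRs.  The least
 * significant half is covered separately by the cpu/vsx subsection.
 */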

static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get  = get_vsr,
    .put  = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                             \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
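
/*
 * Taken together, the cpu/fpu subsection (VsrD(0) of vsr[0..31]), the
 * cpu/vsx subsection (VsrD(1) of vsr[0..31]) and the cpu/altivec
 * subsection (vsr[32..63]) reconstruct the full 64-entry, 128-bit VSR
 * file on the destination.
 */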

static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

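    /*
     * The BAT SPRs are laid out as interleaved upper/lower pairs
     * (DBAT0U, DBAT0L, DBAT1U, DBAT1L, ...), which is why the loops
     * below address them as SPR_DBAT0U + 2 * i and SPR_DBAT0U + 2 * i + 1.
     * BATs 4-7, where implemented, live in the separate
     * SPR_DBAT4U/SPR_IBAT4U ranges.
     */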
    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Used to retain migration compatibility with pre-6.0 601 machines. */
    env->hflags_compat_nmsr = 0;

    if (tcg_enabled()) {
        /*
         * TCG does not maintain the DECR spr (unlike KVM), so we have
         * to save it here.
         */
        env->spr[SPR_DECR] = cpu_ppc_load_decr(env);
    }

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object.  For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match.  However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU.  The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode.  However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr, true);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;
        int ret;

        cpu->compat_pvr = 0;
        ret = ppc_set_compat(cpu, compat_pvr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -EINVAL;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the host
     * kernel does not have the capability of dealing with a PVR other
     * than the exact host PVR in KVM_SET_SREGS.  If that happens, the
     * guest freezes after migration.
     *
     * kvmppc_pvr_workaround_required() performs this check: it returns
     * false immediately if the kernel has that capability, and
     * otherwise checks whether we're running in KVM PR.  If the kernel
     * lacks the capability and we're not running KVM-PR (so, it is
     * KVM-HV), we need to ensure that KVM_SET_SREGS will receive the
     * PVR it expects as a workaround.
     */
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    post_load_update_msr(env);

    if (tcg_enabled()) {
        /* Re-set breakpoints and watchpoints from the restored SPRs */
#if defined(TARGET_PPC64)
        ppc_update_ciabr(env);
        ppc_update_daw(env, 0);
        ppc_update_daw(env, 1);
#endif
        /*
         * TCG needs to re-start the decrementer timer and/or raise the
         * interrupt.  This works for a level-triggered decrementer;
         * edge-triggered types (including HDEC) would need to carry
         * more state.
         */
        cpu_ppc_store_decr(env, env->spr[SPR_DECR]);
        pmu_mmcr01a_updated(env);
    }

    return 0;
}

static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_FLOAT;
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_ALTIVEC;
}

static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    ppc_store_vscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, ppc_get_vscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architected value of the vscr, not the internally
         * expanded version.  Since this architected value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping.  We want OFFSET=0 so that we effectively pass the
         * CPU pointer to the helper functions (the vmstate core hands
         * them opaque + offset).
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags2 & PPC2_VSX;
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return FIELD_EX64(env->msr, MSR, TS);
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tm_needed,
    .fields = (const VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif
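
/*
 * MSR[TS] is non-zero only while a transaction is active or suspended,
 * so the cpu/tm subsection only hits the wire when there is actually
 * transactional state to preserve.
 */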

static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !mmu_is_64bit(cpu->env.mmu_model);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get  = get_slbe,
    .put  = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                             \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)
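
/*
 * Each SLB entry travels as two big-endian 64-bit words (esid, then
 * vsid).  Only the raw register values are migrated; the derived
 * per-entry page-size information is rebuilt by slb_post_load() below.
 */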

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return mmu_is_64bit(cpu->env.mmu_model);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 2,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
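
/*
 * The four TGPRs ride along with the 6xx TLB state because the 6xx
 * software TLB miss handlers (which run with MSR[TGPR] set) use them
 * as scratch registers.
 */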

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlbemb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool reservation_needed(void *opaque)
{
    return (replay_mode != REPLAY_MODE_NONE);
}
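
/*
 * The larx/stcx. reservation only needs to be migrated for
 * record/replay, where execution must be reproduced exactly.  On an
 * ordinary migration it is safe to drop the reservation: the next
 * stcx. simply fails and well-formed guest code retries.
 */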

static const VMStateDescription vmstate_reservation = {
    .name = "cpu/reservation",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = reservation_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
        VMSTATE_UINTTL(env.reserve_length, PowerPCCPU),
        VMSTATE_UINTTL(env.reserve_val, PowerPCCPU),
#if defined(TARGET_PPC64)
        VMSTATE_UINTTL(env.reserve_val2, PowerPCCPU),
#endif
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_PPC64
static bool bhrb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    return (cpu->env.flags & POWERPC_FLAG_BHRB) != 0;
}

static const VMStateDescription vmstate_bhrb = {
    .name = "cpu/bhrb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = bhrb_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.bhrb_offset, PowerPCCPU),
        VMSTATE_UINT64_ARRAY(env.bhrb, PowerPCCPU, BHRB_MAX_NUM_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif

const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        VMSTATE_UNUSED(sizeof(target_ulong)), /* was env.reserve_addr */

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Backward compatible internal state */
        VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
        &vmstate_bhrb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        &vmstate_reservation,
        NULL
    }
};
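
/*
 * With version_id == minimum_version_id == 5, streams produced by
 * older versions of this VMSD are rejected outright.  All optional
 * state travels in subsections, each guarded by its .needed callback,
 * so a subsection that is absent on the wire is simply skipped on the
 * destination.
 */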