Lines Matching full:env
222 CPUX86State *env = &cpu->env; in cpu_pre_save() local
224 env->v_tpr = env->int_ctl & V_TPR_MASK; in cpu_pre_save()
226 env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; in cpu_pre_save()
227 env->fptag_vmstate = 0; in cpu_pre_save()
229 env->fptag_vmstate |= ((!env->fptags[i]) << i); in cpu_pre_save()
232 env->fpregs_format_vmstate = 0; in cpu_pre_save()
241 if (!(env->cr[0] & CR0_PE_MASK) && in cpu_pre_save()
242 (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { in cpu_pre_save()
243 env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); in cpu_pre_save()
244 env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); in cpu_pre_save()
245 env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK); in cpu_pre_save()
246 env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK); in cpu_pre_save()
247 env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK); in cpu_pre_save()
248 env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK); in cpu_pre_save()
264 if (kvm_enabled() && cpu_vmx_maybe_enabled(env) && in cpu_pre_save()
265 (!env->nested_state || in cpu_pre_save()
266 (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) && in cpu_pre_save()
267 env->exception_injected))) { in cpu_pre_save()
297 if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) { in cpu_pre_save()
298 env->exception_pending = 0; in cpu_pre_save()
299 env->exception_injected = 1; in cpu_pre_save()
301 if (env->exception_has_payload) { in cpu_pre_save()
302 if (env->exception_nr == EXCP01_DB) { in cpu_pre_save()
303 env->dr[6] = env->exception_payload; in cpu_pre_save()
304 } else if (env->exception_nr == EXCP0E_PAGE) { in cpu_pre_save()
305 env->cr[2] = env->exception_payload; in cpu_pre_save()
317 CPUX86State *env = &cpu->env; in cpu_post_load() local
320 if (env->tsc_khz && env->user_tsc_khz && in cpu_post_load()
321 env->tsc_khz != env->user_tsc_khz) { in cpu_post_load()
327 if (env->fpregs_format_vmstate) { in cpu_post_load()
339 if (!(env->cr[0] & CR0_PE_MASK) && in cpu_post_load()
340 (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { in cpu_post_load()
341 env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); in cpu_post_load()
342 env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); in cpu_post_load()
343 env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK); in cpu_post_load()
344 env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK); in cpu_post_load()
345 env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK); in cpu_post_load()
346 env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK); in cpu_post_load()
354 env->hflags &= ~HF_CPL_MASK; in cpu_post_load()
355 env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; in cpu_post_load()
358 if ((env->hflags & HF_GUEST_MASK) && in cpu_post_load()
359 (!env->nested_state || in cpu_post_load()
360 !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) { in cpu_post_load()
379 if ((env->exception_nr != -1) && in cpu_post_load()
380 !env->exception_pending && !env->exception_injected) { in cpu_post_load()
381 env->exception_injected = 1; in cpu_post_load()
384 env->fpstt = (env->fpus_vmstate >> 11) & 7; in cpu_post_load()
385 env->fpus = env->fpus_vmstate & ~0x3800; in cpu_post_load()
386 env->fptag_vmstate ^= 0xff; in cpu_post_load()
388 env->fptags[i] = (env->fptag_vmstate >> i) & 1; in cpu_post_load()
392 update_fp_status(env); in cpu_post_load()
393 update_mxcsr_status(env); in cpu_post_load()
400 dr7 = env->dr[7]; in cpu_post_load()
401 env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK); in cpu_post_load()
402 cpu_x86_update_dr7(env, dr7); in cpu_post_load()
412 return cpu->env.async_pf_en_msr != 0; in async_pf_msr_needed()
419 return cpu->env.async_pf_int_msr != 0; in async_pf_int_msr_needed()
426 return cpu->env.pv_eoi_en_msr != 0; in pv_eoi_msr_needed()
433 return cpu->env.steal_time_msr != 0; in steal_time_msr_needed()
439 CPUX86State *env = &cpu->env; in exception_info_needed() local
448 return env->exception_pending && (env->hflags & HF_GUEST_MASK); in exception_info_needed()
457 VMSTATE_UINT8(env.exception_pending, X86CPU),
458 VMSTATE_UINT8(env.exception_injected, X86CPU),
459 VMSTATE_UINT8(env.exception_has_payload, X86CPU),
460 VMSTATE_UINT64(env.exception_payload, X86CPU),
470 return cpu->env.poll_control_msr != 1; in poll_control_msr_needed()
479 VMSTATE_UINT64(env.steal_time_msr, X86CPU),
490 VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
501 VMSTATE_UINT64(env.async_pf_int_msr, X86CPU),
512 VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
523 VMSTATE_UINT64(env.poll_control_msr, X86CPU),
531 CPUX86State *env = &cpu->env; in fpop_ip_dp_needed() local
533 return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0; in fpop_ip_dp_needed()
542 VMSTATE_UINT16(env.fpop, X86CPU),
543 VMSTATE_UINT64(env.fpip, X86CPU),
544 VMSTATE_UINT64(env.fpdp, X86CPU),
552 CPUX86State *env = &cpu->env; in tsc_adjust_needed() local
554 return env->tsc_adjust != 0; in tsc_adjust_needed()
563 VMSTATE_UINT64(env.tsc_adjust, X86CPU),
571 CPUX86State *env = &cpu->env; in msr_smi_count_needed() local
573 return cpu->migrate_smi_count && env->msr_smi_count != 0; in msr_smi_count_needed()
582 VMSTATE_UINT64(env.msr_smi_count, X86CPU),
590 CPUX86State *env = &cpu->env; in tscdeadline_needed() local
592 return env->tsc_deadline != 0; in tscdeadline_needed()
601 VMSTATE_UINT64(env.tsc_deadline, X86CPU),
609 CPUX86State *env = &cpu->env; in misc_enable_needed() local
611 return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT; in misc_enable_needed()
617 CPUX86State *env = &cpu->env; in feature_control_needed() local
619 return env->msr_ia32_feature_control != 0; in feature_control_needed()
628 VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
639 VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
647 CPUX86State *env = &cpu->env; in pmu_enable_needed() local
650 if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl || in pmu_enable_needed()
651 env->msr_global_status || env->msr_global_ovf_ctrl) { in pmu_enable_needed()
655 if (env->msr_fixed_counters[i]) { in pmu_enable_needed()
660 if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) { in pmu_enable_needed()
674 VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
675 VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
676 VMSTATE_UINT64(env.msr_global_status, X86CPU),
677 VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
678 VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
679 VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
680 VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
688 CPUX86State *env = &cpu->env; in mpx_needed() local
692 if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) { in mpx_needed()
697 if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) { in mpx_needed()
701 return !!env->msr_bndcfgs; in mpx_needed()
710 VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
711 VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
712 VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
713 VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
721 CPUX86State *env = &cpu->env; in hyperv_hypercall_enable_needed() local
723 return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0; in hyperv_hypercall_enable_needed()
732 VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
733 VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
741 CPUX86State *env = &cpu->env; in hyperv_vapic_enable_needed() local
743 return env->msr_hv_vapic != 0; in hyperv_vapic_enable_needed()
752 VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
760 CPUX86State *env = &cpu->env; in hyperv_time_enable_needed() local
762 return env->msr_hv_tsc != 0; in hyperv_time_enable_needed()
771 VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
779 CPUX86State *env = &cpu->env; in hyperv_crash_enable_needed() local
783 if (env->msr_hv_crash_params[i]) { in hyperv_crash_enable_needed()
796 VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
804 CPUX86State *env = &cpu->env; in hyperv_runtime_enable_needed() local
810 return env->msr_hv_runtime != 0; in hyperv_runtime_enable_needed()
819 VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
827 CPUX86State *env = &cpu->env; in hyperv_synic_enable_needed() local
830 if (env->msr_hv_synic_control != 0 || in hyperv_synic_enable_needed()
831 env->msr_hv_synic_evt_page != 0 || in hyperv_synic_enable_needed()
832 env->msr_hv_synic_msg_page != 0) { in hyperv_synic_enable_needed()
836 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { in hyperv_synic_enable_needed()
837 if (env->msr_hv_synic_sint[i] != 0) { in hyperv_synic_enable_needed()
859 VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
860 VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
861 VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
862 VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
870 CPUX86State *env = &cpu->env; in hyperv_stimer_enable_needed() local
873 for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) { in hyperv_stimer_enable_needed()
874 if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) { in hyperv_stimer_enable_needed()
887 VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
889 VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
897 CPUX86State *env = &cpu->env; in hyperv_reenlightenment_enable_needed() local
899 return env->msr_hv_reenlightenment_control != 0 || in hyperv_reenlightenment_enable_needed()
900 env->msr_hv_tsc_emulation_control != 0 || in hyperv_reenlightenment_enable_needed()
901 env->msr_hv_tsc_emulation_status != 0; in hyperv_reenlightenment_enable_needed()
907 CPUX86State *env = &cpu->env; in hyperv_reenlightenment_post_load() local
913 if ((env->msr_hv_reenlightenment_control & HV_REENLIGHTENMENT_ENABLE_BIT) && in hyperv_reenlightenment_post_load()
914 !env->user_tsc_khz) { in hyperv_reenlightenment_post_load()
930 VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
931 VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
932 VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
940 CPUX86State *env = &cpu->env; in avx512_needed() local
944 if (env->opmask_regs[i]) { in avx512_needed()
950 #define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field)) in avx512_needed()
974 VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
975 VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
977 VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
986 CPUX86State *env = &cpu->env; in xss_needed() local
988 return env->xss != 0; in xss_needed()
997 VMSTATE_UINT64(env.xss, X86CPU),
1005 CPUX86State *env = &cpu->env; in umwait_needed() local
1007 return env->umwait != 0; in umwait_needed()
1016 VMSTATE_UINT32(env.umwait, X86CPU),
1024 CPUX86State *env = &cpu->env; in pkru_needed() local
1026 return env->pkru != 0; in pkru_needed()
1035 VMSTATE_UINT32(env.pkru, X86CPU),
1043 CPUX86State *env = &cpu->env; in pkrs_needed() local
1045 return env->pkrs != 0; in pkrs_needed()
1054 VMSTATE_UINT32(env.pkrs, X86CPU),
1062 CPUX86State *env = &cpu->env; in tsc_khz_needed() local
1064 return env->tsc_khz; in tsc_khz_needed()
1073 VMSTATE_INT64(env.tsc_khz, X86CPU),
1176 CPUX86State *env = &cpu->env; in nested_state_needed() local
1178 return (env->nested_state && in nested_state_needed()
1179 (vmx_nested_state_needed(env->nested_state) || in nested_state_needed()
1180 svm_nested_state_needed(env->nested_state))); in nested_state_needed()
1186 CPUX86State *env = &cpu->env; in nested_state_post_load() local
1187 struct kvm_nested_state *nested_state = env->nested_state; in nested_state_post_load()
1254 VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
1272 VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
1273 VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
1274 VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
1275 VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
1276 VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
1277 VMSTATE_UINT16_ARRAY(env.xen_virq, X86CPU, XEN_NR_VIRQS),
1278 VMSTATE_UINT64(env.xen_singleshot_timer_ns, X86CPU),
1279 VMSTATE_UINT64(env.xen_periodic_timer_period, X86CPU),
1288 CPUX86State *env = &cpu->env; in mcg_ext_ctl_needed() local
1289 return cpu->enable_lmce && env->mcg_ext_ctl; in mcg_ext_ctl_needed()
1298 VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
1306 CPUX86State *env = &cpu->env; in spec_ctrl_needed() local
1308 return env->spec_ctrl != 0; in spec_ctrl_needed()
1317 VMSTATE_UINT64(env.spec_ctrl, X86CPU),
1326 CPUX86State *env = &cpu->env; in amd_tsc_scale_msr_needed() local
1328 return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE); in amd_tsc_scale_msr_needed()
1337 VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU),
1346 CPUX86State *env = &cpu->env; in intel_pt_enable_needed() local
1349 if (env->msr_rtit_ctrl || env->msr_rtit_status || in intel_pt_enable_needed()
1350 env->msr_rtit_output_base || env->msr_rtit_output_mask || in intel_pt_enable_needed()
1351 env->msr_rtit_cr3_match) { in intel_pt_enable_needed()
1356 if (env->msr_rtit_addrs[i]) { in intel_pt_enable_needed()
1370 VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
1371 VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
1372 VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
1373 VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
1374 VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
1375 VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
1383 CPUX86State *env = &cpu->env; in virt_ssbd_needed() local
1385 return env->virt_ssbd != 0; in virt_ssbd_needed()
1394 VMSTATE_UINT64(env.virt_ssbd, X86CPU),
1402 CPUX86State *env = &cpu->env; in svm_npt_needed() local
1404 return !!(env->hflags2 & HF2_NPT_MASK); in svm_npt_needed()
1413 VMSTATE_UINT64(env.nested_cr3, X86CPU),
1414 VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
1422 CPUX86State *env = &cpu->env; in svm_guest_needed() local
1424 return tcg_enabled() && env->int_ctl; in svm_guest_needed()
1433 VMSTATE_UINT32(env.int_ctl, X86CPU),
1442 CPUX86State *env = &cpu->env; in intel_efer32_needed() local
1444 return env->efer != 0; in intel_efer32_needed()
1453 VMSTATE_UINT64(env.efer, X86CPU),
1462 CPUX86State *env = &cpu->env; in msr_tsx_ctrl_needed() local
1464 return env->features[FEAT_ARCH_CAPABILITIES] & ARCH_CAP_TSX_CTRL_MSR; in msr_tsx_ctrl_needed()
1473 VMSTATE_UINT32(env.tsx_ctrl, X86CPU),
1481 CPUX86State *env = &cpu->env; in intel_sgx_msrs_needed() local
1483 return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC); in intel_sgx_msrs_needed()
1492 VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4),
1500 CPUX86State *env = &cpu->env; in pdptrs_needed() local
1501 return env->pdptrs_valid; in pdptrs_needed()
1507 CPUX86State *env = &cpu->env; in pdptrs_post_load() local
1508 env->pdptrs_valid = true; in pdptrs_post_load()
1520 VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4),
1528 CPUX86State *env = &cpu->env; in xfd_msrs_needed() local
1530 return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD); in xfd_msrs_needed()
1539 VMSTATE_UINT64(env.msr_xfd, X86CPU),
1540 VMSTATE_UINT64(env.msr_xfd_err, X86CPU),
1548 CPUX86State *env = &cpu->env; in msr_hwcr_needed() local
1550 return env->msr_hwcr != 0; in msr_hwcr_needed()
1559 VMSTATE_UINT64(env.msr_hwcr, X86CPU),
1568 CPUX86State *env = &cpu->env; in intel_fred_msrs_needed() local
1570 return !!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED); in intel_fred_msrs_needed()
1579 VMSTATE_UINT64(env.fred_rsp0, X86CPU),
1580 VMSTATE_UINT64(env.fred_rsp1, X86CPU),
1581 VMSTATE_UINT64(env.fred_rsp2, X86CPU),
1582 VMSTATE_UINT64(env.fred_rsp3, X86CPU),
1583 VMSTATE_UINT64(env.fred_stklvls, X86CPU),
1584 VMSTATE_UINT64(env.fred_ssp1, X86CPU),
1585 VMSTATE_UINT64(env.fred_ssp2, X86CPU),
1586 VMSTATE_UINT64(env.fred_ssp3, X86CPU),
1587 VMSTATE_UINT64(env.fred_config, X86CPU),
1595 CPUX86State *env = &cpu->env; in amx_xtile_needed() local
1597 return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE); in amx_xtile_needed()
1606 VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64),
1607 VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192),
1616 CPUX86State *env = &cpu->env; in arch_lbr_needed() local
1618 return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR); in arch_lbr_needed()
1627 VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU),
1628 VMSTATE_UINT64(env.msr_lbr_depth, X86CPU),
1629 VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1),
1637 CPUX86State *env = &cpu->env; in triple_fault_needed() local
1639 return env->triple_fault_pending; in triple_fault_needed()
1648 VMSTATE_UINT8(env.triple_fault_pending, X86CPU),
1660 VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
1661 VMSTATE_UINTTL(env.eip, X86CPU),
1662 VMSTATE_UINTTL(env.eflags, X86CPU),
1663 VMSTATE_UINT32(env.hflags, X86CPU),
1665 VMSTATE_UINT16(env.fpuc, X86CPU),
1666 VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
1667 VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
1668 VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
1670 VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),
1672 VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
1673 VMSTATE_SEGMENT(env.ldt, X86CPU),
1674 VMSTATE_SEGMENT(env.tr, X86CPU),
1675 VMSTATE_SEGMENT(env.gdt, X86CPU),
1676 VMSTATE_SEGMENT(env.idt, X86CPU),
1678 VMSTATE_UINT32(env.sysenter_cs, X86CPU),
1679 VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
1680 VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
1682 VMSTATE_UINTTL(env.cr[0], X86CPU),
1683 VMSTATE_UINTTL(env.cr[2], X86CPU),
1684 VMSTATE_UINTTL(env.cr[3], X86CPU),
1685 VMSTATE_UINTTL(env.cr[4], X86CPU),
1686 VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
1688 VMSTATE_INT32(env.a20_mask, X86CPU),
1690 VMSTATE_UINT32(env.mxcsr, X86CPU),
1691 VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),
1694 VMSTATE_UINT64(env.efer, X86CPU),
1695 VMSTATE_UINT64(env.star, X86CPU),
1696 VMSTATE_UINT64(env.lstar, X86CPU),
1697 VMSTATE_UINT64(env.cstar, X86CPU),
1698 VMSTATE_UINT64(env.fmask, X86CPU),
1699 VMSTATE_UINT64(env.kernelgsbase, X86CPU),
1701 VMSTATE_UINT32(env.smbase, X86CPU),
1703 VMSTATE_UINT64(env.pat, X86CPU),
1704 VMSTATE_UINT32(env.hflags2, X86CPU),
1706 VMSTATE_UINT64(env.vm_hsave, X86CPU),
1707 VMSTATE_UINT64(env.vm_vmcb, X86CPU),
1708 VMSTATE_UINT64(env.tsc_offset, X86CPU),
1709 VMSTATE_UINT64(env.intercept, X86CPU),
1710 VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
1711 VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
1712 VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
1713 VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
1714 VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
1715 VMSTATE_UINT8(env.v_tpr, X86CPU),
1717 VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
1718 VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
1719 VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
1721 VMSTATE_INT32(env.interrupt_injected, X86CPU),
1722 VMSTATE_UINT32(env.mp_state, X86CPU),
1723 VMSTATE_UINT64(env.tsc, X86CPU),
1724 VMSTATE_INT32(env.exception_nr, X86CPU),
1725 VMSTATE_UINT8(env.soft_interrupt, X86CPU),
1726 VMSTATE_UINT8(env.nmi_injected, X86CPU),
1727 VMSTATE_UINT8(env.nmi_pending, X86CPU),
1728 VMSTATE_UINT8(env.has_error_code, X86CPU),
1729 VMSTATE_UINT32(env.sipi_vector, X86CPU),
1731 VMSTATE_UINT64(env.mcg_cap, X86CPU),
1732 VMSTATE_UINT64(env.mcg_status, X86CPU),
1733 VMSTATE_UINT64(env.mcg_ctl, X86CPU),
1734 VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
1736 VMSTATE_UINT64(env.tsc_aux, X86CPU),
1738 VMSTATE_UINT64(env.system_time_msr, X86CPU),
1739 VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
1741 VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
1742 VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
1743 VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
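For reference, nearly every match above comes from the same optional-subsection pattern used throughout QEMU's x86 migration code (target/i386/machine.c): a *_needed() predicate inspects CPUX86State and decides whether a VMStateDescription subsection, whose fields reference env.* members of X86CPU, is migrated at all. Below is a minimal sketch of that pairing, reconstructed from the pkru matches above (file lines 1026 and 1035); the field names come from the listing, while the subsection name, version numbers, and exact field-array cast are assumptions rather than a verbatim copy of the source.

#include "qemu/osdep.h"
#include "migration/vmstate.h"
#include "cpu.h"

/* Predicate: only send the subsection when the guest state is non-default
 * (assumed to mirror the pkru_needed() shown in the listing). */
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

/* Optional subsection carrying the single env.pkru field. */
static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",            /* assumed name */
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

Such subsections are then attached to the top-level vmstate_x86_cpu through its .subsections list, which is why an identifier search for env surfaces mainly the needed() return statements and the VMSTATE_* field macros rather than whole descriptions.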