Lines matching defs:vmx (occurrences of the vmx identifier, typically the local struct vcpu_vmx *vmx pointer; each entry is prefixed with its line number in the source file, which appears to be KVM's arch/x86/kvm/vmx/vmx.c)

52 #include <asm/vmx.h>
71 #include "vmx.h"
356 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
360 if (!vmx->disable_fb_clear)
367 vmx->msr_ia32_mcu_opt_ctrl = msr;
370 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
372 if (!vmx->disable_fb_clear)
375 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
376 native_wrmsrq(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
379 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
389 vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
405 vmx->disable_fb_clear = false;
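
A minimal user-space sketch of the cache-and-toggle pattern used by vmx_disable_fb_clear()/vmx_enable_fb_clear() above: a shadow copy of MSR_IA32_MCU_OPT_CTRL is kept in the vCPU state and one bit is flipped around VM-entry. The struct name, the simulated rdmsr/wrmsr helpers, and the exact bit position are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FB_CLEAR_DIS_BIT (1ULL << 3)      /* illustrative bit position */

static uint64_t fake_mcu_opt_ctrl;        /* stands in for the hardware MSR */

static uint64_t rdmsr_sim(void)         { return fake_mcu_opt_ctrl; }
static void     wrmsr_sim(uint64_t val) { fake_mcu_opt_ctrl = val; }

struct vcpu_state {
        bool     disable_fb_clear;        /* policy decided when the vCPU is configured */
        uint64_t msr_mcu_opt_ctrl;        /* cached copy of the MSR */
};

/* Mirror of the disable path: read once, set the bit, cache the result. */
static void disable_fb_clear(struct vcpu_state *v)
{
        if (!v->disable_fb_clear)
                return;
        v->msr_mcu_opt_ctrl = rdmsr_sim() | FB_CLEAR_DIS_BIT;
        wrmsr_sim(v->msr_mcu_opt_ctrl);
}

/* Mirror of the enable path: clear the bit in the cache, write it back. */
static void enable_fb_clear(struct vcpu_state *v)
{
        if (!v->disable_fb_clear)
                return;
        v->msr_mcu_opt_ctrl &= ~FB_CLEAR_DIS_BIT;
        wrmsr_sim(v->msr_mcu_opt_ctrl);
}

int main(void)
{
        struct vcpu_state v = { .disable_fb_clear = true };

        disable_fb_clear(&v);             /* before "VM-entry" */
        printf("guest running: MSR=%#llx\n", (unsigned long long)fake_mcu_opt_ctrl);
        enable_fb_clear(&v);              /* after "VM-exit" */
        printf("back in host: MSR=%#llx\n", (unsigned long long)fake_mcu_opt_ctrl);
        return 0;
}
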
652 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
658 return &vmx->guest_uret_msrs[i];
662 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
665 unsigned int slot = msr - vmx->guest_uret_msrs;
765 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
771 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
772 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
773 vmx->segment_cache.bitmask = 0;
775 ret = vmx->segment_cache.bitmask & mask;
776 vmx->segment_cache.bitmask |= mask;
780 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
782 u16 *p = &vmx->segment_cache.seg[seg].selector;
784 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
789 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
791 ulong *p = &vmx->segment_cache.seg[seg].base;
793 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
798 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
800 u32 *p = &vmx->segment_cache.seg[seg].limit;
802 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
807 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
809 u32 *p = &vmx->segment_cache.seg[seg].ar;
811 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
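
The vmx_segment_cache_test_set()/vmx_read_guest_seg_*() helpers above implement a per-field read cache over the VMCS: a bitmask records which (segment, field) pairs were already fetched, so each is read at most once per cache generation. A compilable sketch of the same test-and-set idea, assuming a made-up field numbering and a simulated vmcs_read():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum seg_field { SEG_FIELD_SEL, SEG_FIELD_BASE, SEG_FIELD_LIMIT, SEG_FIELD_AR, SEG_FIELD_NR };

struct seg_cache {
        bool     valid;                   /* whole cache validity, reset elsewhere */
        uint32_t bitmask;                 /* which (seg, field) pairs are populated */
        struct { uint16_t selector; uint64_t base; uint32_t limit; uint32_t ar; } seg[8];
};

static unsigned int vmcs_reads;           /* counts simulated hardware accesses */

static uint64_t vmcs_read_sim(unsigned int seg, enum seg_field field)
{
        vmcs_reads++;
        return (seg << 8) | field;        /* arbitrary deterministic value */
}

/* Returns true if the field was already cached; always marks it cached. */
static bool seg_cache_test_set(struct seg_cache *c, unsigned int seg, enum seg_field field)
{
        uint32_t mask = 1u << (seg * SEG_FIELD_NR + field);
        bool hit;

        if (!c->valid) {
                c->valid = true;
                c->bitmask = 0;
        }
        hit = c->bitmask & mask;
        c->bitmask |= mask;
        return hit;
}

static uint64_t read_seg_base(struct seg_cache *c, unsigned int seg)
{
        if (!seg_cache_test_set(c, seg, SEG_FIELD_BASE))
                c->seg[seg].base = vmcs_read_sim(seg, SEG_FIELD_BASE);
        return c->seg[seg].base;
}

int main(void)
{
        struct seg_cache cache = { 0 };

        read_seg_base(&cache, 1);
        read_seg_base(&cache, 1);         /* second call hits the cache */
        printf("VMCS reads: %u (expected 1)\n", vmcs_reads);
        return 0;
}
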
884 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
886 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
889 return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
892 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
896 if (vmx->loaded_vmcs->launched)
902 * it after vmexit and store it in vmx->spec_ctrl.
904 if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
908 kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
914 static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
917 vm_entry_controls_clearbit(vmx, entry);
918 vm_exit_controls_clearbit(vmx, exit);
932 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
935 struct msr_autoload *m = &vmx->msr_autoload;
940 clear_atomic_switch_msr_special(vmx,
948 clear_atomic_switch_msr_special(vmx,
972 static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
980 vm_entry_controls_setbit(vmx, entry);
981 vm_exit_controls_setbit(vmx, exit);
984 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
988 struct msr_autoload *m = &vmx->msr_autoload;
993 add_atomic_switch_msr_special(vmx,
1004 add_atomic_switch_msr_special(vmx,
1050 static bool update_transition_efer(struct vcpu_vmx *vmx)
1052 u64 guest_efer = vmx->vcpu.arch.efer;
1077 (enable_ept && ((vmx->vcpu.arch.efer ^ kvm_host.efer) & EFER_NX))) {
1081 add_atomic_switch_msr(vmx, MSR_EFER,
1084 clear_atomic_switch_msr(vmx, MSR_EFER);
1092 clear_atomic_switch_msr(vmx, MSR_EFER);
1097 vmx->guest_uret_msrs[i].data = guest_efer;
1098 vmx->guest_uret_msrs[i].mask = ~ignore_bits;
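
clear_atomic_switch_msr()/add_atomic_switch_msr() above maintain the VMCS MSR autoload lists used, for example, by update_transition_efer(). A standalone sketch of that bookkeeping, assuming a fixed-size array where removal swaps the last entry into the freed slot; the list size, MSR numbers and error handling are illustrative:

#include <stdint.h>
#include <stdio.h>

#define AUTOLOAD_MAX 8                    /* stand-in for the real list size */

struct autoload_entry { uint32_t index; uint64_t value; };

struct autoload_list {
        unsigned int nr;
        struct autoload_entry val[AUTOLOAD_MAX];
};

static int find_msr_slot(const struct autoload_list *l, uint32_t msr)
{
        for (unsigned int i = 0; i < l->nr; i++)
                if (l->val[i].index == msr)
                        return (int)i;
        return -1;
}

static int add_switch_msr(struct autoload_list *l, uint32_t msr, uint64_t guest_val)
{
        int i = find_msr_slot(l, msr);

        if (i < 0) {
                if (l->nr == AUTOLOAD_MAX)
                        return -1;        /* list full, caller must fall back */
                i = (int)l->nr++;
                l->val[i].index = msr;
        }
        l->val[i].value = guest_val;      /* update in place if already present */
        return 0;
}

static void clear_switch_msr(struct autoload_list *l, uint32_t msr)
{
        int i = find_msr_slot(l, msr);

        if (i < 0)
                return;
        l->val[i] = l->val[--l->nr];      /* swap last entry into the hole */
}

int main(void)
{
        struct autoload_list guest = { 0 };

        add_switch_msr(&guest, 0xC0000080 /* EFER */, 0x500);
        add_switch_msr(&guest, 0x00000048 /* SPEC_CTRL */, 0x1);
        clear_switch_msr(&guest, 0xC0000080);
        printf("entries: %u, first index: %#x\n", guest.nr, guest.val[0].index);
        return 0;
}
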
1132 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1135 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1172 static void pt_guest_enter(struct vcpu_vmx *vmx)
1181 rdmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1182 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1184 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1185 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1189 static void pt_guest_exit(struct vcpu_vmx *vmx)
1194 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1195 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1196 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1203 if (vmx->pt_desc.host.ctl)
1204 wrmsrq(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
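
pt_guest_enter()/pt_guest_exit() above swap the host and guest Intel PT register images around VM-entry, touching only as many address-range MSRs as the CPU reports. A simplified sketch under the assumption that the "hardware" is a plain struct; in the real code the guest RTIT_CTL is loaded by VM-entry from the VMCS rather than by this helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PT_MAX_RANGES 4
#define RTIT_CTL_TRACEEN_BIT (1ULL << 0)

struct pt_ctx {
        uint64_t ctl;
        uint64_t addr[PT_MAX_RANGES * 2]; /* addr_a/addr_b pairs, flattened */
};

static struct pt_ctx hw;                  /* simulated hardware state */

static void pt_load(const struct pt_ctx *src, unsigned int nr_ranges)
{
        memcpy(hw.addr, src->addr, sizeof(uint64_t) * nr_ranges * 2);
}

static void pt_save(struct pt_ctx *dst, unsigned int nr_ranges)
{
        memcpy(dst->addr, hw.addr, sizeof(uint64_t) * nr_ranges * 2);
}

static void pt_guest_enter(struct pt_ctx *host, const struct pt_ctx *guest,
                           unsigned int nr_ranges)
{
        host->ctl = hw.ctl;               /* save the host RTIT_CTL value */
        if (guest->ctl & RTIT_CTL_TRACEEN_BIT) {
                hw.ctl = 0;               /* stop tracing before swapping range MSRs */
                pt_save(host, nr_ranges);
                pt_load(guest, nr_ranges);
        }
}

static void pt_guest_exit(struct pt_ctx *host, struct pt_ctx *guest,
                          unsigned int nr_ranges)
{
        if (guest->ctl & RTIT_CTL_TRACEEN_BIT) {
                pt_save(guest, nr_ranges);
                pt_load(host, nr_ranges);
        }
        if (host->ctl)                    /* re-enable host tracing only if it was on */
                hw.ctl = host->ctl;
}

int main(void)
{
        struct pt_ctx host = { 0 };
        struct pt_ctx guest = { .ctl = RTIT_CTL_TRACEEN_BIT, .addr = { 0x1000, 0x2000 } };

        hw.addr[0] = 0xdead;              /* host's address-range setting */
        pt_guest_enter(&host, &guest, 1);
        printf("guest context loaded: hw.addr[0]=%#llx\n", (unsigned long long)hw.addr[0]);
        pt_guest_exit(&host, &guest, 1);
        printf("host context restored: hw.addr[0]=%#llx\n", (unsigned long long)hw.addr[0]);
        return 0;
}
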
1236 struct vcpu_vmx *vmx = to_vmx(vcpu);
1251 if (!vmx->guest_uret_msrs_loaded) {
1252 vmx->guest_uret_msrs_loaded = true;
1254 if (!vmx->guest_uret_msrs[i].load_into_hardware)
1258 vmx->guest_uret_msrs[i].data,
1259 vmx->guest_uret_msrs[i].mask);
1263 if (vmx->nested.need_vmcs12_to_shadow_sync)
1269 host_state = &vmx->loaded_vmcs->host_state;
1295 wrmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1307 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1311 if (!vmx->vt.guest_state_loaded)
1314 host_state = &vmx->loaded_vmcs->host_state;
1316 ++vmx->vcpu.stat.host_state_reload;
1319 rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1339 wrmsrq(MSR_KERNEL_GS_BASE, vmx->vt.msr_host_kernel_gs_base);
1342 vmx->vt.guest_state_loaded = false;
1343 vmx->guest_uret_msrs_loaded = false;
1347 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1350 if (vmx->vt.guest_state_loaded)
1351 rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1353 return vmx->msr_guest_kernel_gs_base;
1356 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1359 if (vmx->vt.guest_state_loaded)
1362 vmx->msr_guest_kernel_gs_base = data;
1368 struct vcpu_vmx *vmx = to_vmx(vcpu);
1369 unsigned int old = vmx->ple_window;
1371 vmx->ple_window = __grow_ple_window(old, ple_window,
1375 if (vmx->ple_window != old) {
1376 vmx->ple_window_dirty = true;
1378 vmx->ple_window, old);
1384 struct vcpu_vmx *vmx = to_vmx(vcpu);
1385 unsigned int old = vmx->ple_window;
1387 vmx->ple_window = __shrink_ple_window(old, ple_window,
1391 if (vmx->ple_window != old) {
1392 vmx->ple_window_dirty = true;
1394 vmx->ple_window, old);
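
The grow/shrink sites above adjust the PLE (pause-loop exiting) window and set ple_window_dirty when the value changes. The __grow_ple_window()/__shrink_ple_window() helpers themselves are not in this listing; the sketch below shows a plausible grow/shrink-and-clamp scheme (multiply or divide for small modifiers, add or subtract for large ones), which is an assumption rather than the exact kernel rule:

#include <stdint.h>
#include <stdio.h>

static unsigned int grow_window(unsigned int val, unsigned int base,
                                unsigned int modifier, unsigned int max)
{
        uint64_t ret = val;

        if (modifier < 1)
                return base;
        if (modifier < base)
                ret *= modifier;
        else
                ret += modifier;
        return ret > max ? max : (unsigned int)ret;
}

static unsigned int shrink_window(unsigned int val, unsigned int base,
                                  unsigned int modifier, unsigned int min)
{
        if (modifier < 1)
                return base;
        if (modifier < base)
                val /= modifier;
        else
                val = val > modifier ? val - modifier : 0;
        return val < min ? min : val;
}

int main(void)
{
        unsigned int ple_window = 4096;   /* illustrative default */
        unsigned int old = ple_window;
        int dirty;

        ple_window = grow_window(old, 4096, 2, 65536);
        dirty = ple_window != old;        /* mirrors vmx->ple_window_dirty */
        printf("grown to %u (dirty=%d)\n", ple_window, dirty);

        ple_window = shrink_window(ple_window, 4096, 2, 4096);
        printf("shrunk to %u\n", ple_window);
        return 0;
}
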
1400 struct vcpu_vmx *vmx = to_vmx(vcpu);
1401 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1405 loaded_vmcs_clear(vmx->loaded_vmcs);
1416 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1422 if (prev != vmx->loaded_vmcs->vmcs) {
1423 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1424 vmcs_load(vmx->loaded_vmcs->vmcs);
1450 vmx->loaded_vmcs->cpu = cpu;
1482 struct vcpu_vmx *vmx = to_vmx(vcpu);
1488 if (vmx->rmode.vm86_active) {
1490 save_rflags = vmx->rmode.save_rflags;
1493 vmx->rflags = rflags;
1495 return vmx->rflags;
1500 struct vcpu_vmx *vmx = to_vmx(vcpu);
1510 vmx->rflags = rflags;
1516 vmx->rflags = rflags;
1517 if (vmx->rmode.vm86_active) {
1518 vmx->rmode.save_rflags = rflags;
1523 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1524 vmx->vt.emulation_required = vmx_emulation_required(vcpu);
1563 struct vcpu_vmx *vmx = to_vmx(vcpu);
1570 if (data & vmx->pt_desc.ctl_bitmask)
1577 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1579 data != vmx->pt_desc.guest.ctl)
1589 !intel_pt_validate_cap(vmx->pt_desc.caps,
1597 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1598 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1602 value = intel_pt_validate_cap(vmx->pt_desc.caps,
1604 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1608 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1609 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1619 if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
1622 if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
1625 if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
1628 if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
1728 struct vcpu_vmx *vmx = to_vmx(vcpu);
1749 vmx->nested.mtf_pending = true;
1752 vmx->nested.mtf_pending = false;
1779 struct vcpu_vmx *vmx = to_vmx(vcpu);
1798 if (vmx->rmode.vm86_active) {
1806 WARN_ON_ONCE(vmx->vt.emulation_required);
1810 vmx->vcpu.arch.event_exit_inst_len);
1820 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
1825 uret_msr = vmx_find_uret_msr(vmx, msr);
1838 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
1847 load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1848 (vmx->vcpu.arch.efer & EFER_SCE);
1850 vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
1851 vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
1852 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
1854 vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
1856 vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
1857 guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1858 guest_cpu_cap_has(&vmx->vcpu, X86_FEATURE_RDPID));
1866 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
1872 vmx->guest_uret_msrs_loaded = false;
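
vmx_find_uret_msr(), vmx_set_guest_uret_msr() and vmx_setup_uret_msrs() above manage the table of user-return MSRs that must hold guest values while the vCPU runs and only need loading when the guest can actually use the feature. A sketch of that table, assuming a tiny fixed array and deferring the real hardware write; the entries and the load criterion shown are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct uret_msr {
        uint32_t index;                   /* MSR number */
        uint64_t data;                    /* value to run the guest with */
        uint64_t mask;                    /* bits the guest is allowed to set */
        bool     load_into_hardware;
};

static struct uret_msr uret_msrs[] = {
        { .index = 0xc0000081 /* STAR  */, .mask = ~0ULL },
        { .index = 0xc0000082 /* LSTAR */, .mask = ~0ULL },
        { .index = 0xc0000080 /* EFER  */, .mask = ~0ULL },
};

static struct uret_msr *find_uret_msr(uint32_t msr)
{
        for (size_t i = 0; i < sizeof(uret_msrs) / sizeof(uret_msrs[0]); i++)
                if (uret_msrs[i].index == msr)
                        return &uret_msrs[i];
        return NULL;
}

static void setup_uret_msr(uint32_t msr, bool load)
{
        struct uret_msr *m = find_uret_msr(msr);

        if (m)
                m->load_into_hardware = load;
}

static int set_guest_uret_msr(struct uret_msr *m, uint64_t data)
{
        /* The kernel first writes the masked value to the real MSR when this
         * vCPU's user-return MSRs are loaded, and caches it only on success. */
        m->data = data;
        return 0;
}

int main(void)
{
        bool guest_has_syscall = true;    /* e.g. long mode with EFER.SCE set */

        setup_uret_msr(0xc0000081, guest_has_syscall);
        setup_uret_msr(0xc0000082, guest_has_syscall);
        set_guest_uret_msr(find_uret_msr(0xc0000080), 0xd01);
        printf("EFER slot: data=%#llx load=%d\n",
               (unsigned long long)find_uret_msr(0xc0000080)->data,
               find_uret_msr(0xc0000080)->load_into_hardware);
        return 0;
}
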
1920 static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
1929 WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits &
1933 (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED))
1939 valid_bits = vmx->msr_ia32_feature_control_valid_bits;
1963 struct vcpu_vmx *vmx = to_vmx(vcpu);
1976 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
1987 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
1990 msr_info->data = vmx->msr_ia32_umwait_control;
2017 !(vmx->msr_ia32_feature_control &
2023 msr_info->data = vmx->msr_ia32_feature_control;
2035 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
2054 msr_info->data = vmx->pt_desc.guest.ctl;
2059 msr_info->data = vmx->pt_desc.guest.status;
2063 !intel_pt_validate_cap(vmx->pt_desc.caps,
2066 msr_info->data = vmx->pt_desc.guest.cr3_match;
2070 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2072 !intel_pt_validate_cap(vmx->pt_desc.caps,
2075 msr_info->data = vmx->pt_desc.guest.output_base;
2079 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2081 !intel_pt_validate_cap(vmx->pt_desc.caps,
2084 msr_info->data = vmx->pt_desc.guest.output_mask;
2089 (index >= 2 * vmx->pt_desc.num_address_ranges))
2092 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2094 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2101 msr = vmx_find_uret_msr(vmx, msr_info->index);
2160 struct vcpu_vmx *vmx = to_vmx(vcpu);
2173 vmx_segment_cache_clear(vmx);
2177 vmx_segment_cache_clear(vmx);
2181 vmx_write_guest_kernel_gs_base(vmx, data);
2247 ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||
2248 (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))
2254 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2261 vmx->msr_ia32_umwait_control = data;
2271 vmx->spec_ctrl = data;
2319 if (!is_vmx_feature_control_msr_valid(vmx, msr_info))
2322 vmx->msr_ia32_feature_control = data;
2343 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
2344 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
2346 vmx->msr_ia32_sgxlepubkeyhash
2358 vmx->nested.vmxon)
2361 vmx->pt_desc.guest.ctl = data;
2365 if (!pt_can_write_msr(vmx))
2369 vmx->pt_desc.guest.status = data;
2372 if (!pt_can_write_msr(vmx))
2374 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2377 vmx->pt_desc.guest.cr3_match = data;
2380 if (!pt_can_write_msr(vmx))
2382 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2384 !intel_pt_validate_cap(vmx->pt_desc.caps,
2389 vmx->pt_desc.guest.output_base = data;
2392 if (!pt_can_write_msr(vmx))
2394 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2396 !intel_pt_validate_cap(vmx->pt_desc.caps,
2399 vmx->pt_desc.guest.output_mask = data;
2402 if (!pt_can_write_msr(vmx))
2405 if (index >= 2 * vmx->pt_desc.num_address_ranges)
2410 vmx->pt_desc.guest.addr_b[index / 2] = data;
2412 vmx->pt_desc.guest.addr_a[index / 2] = data;
2438 msr = vmx_find_uret_msr(vmx, msr_index);
2440 ret = vmx_set_guest_uret_msr(vmx, msr, data);
2447 vmx_update_fb_clear_dis(vcpu, vmx);
2997 struct vcpu_vmx *vmx = to_vmx(vcpu);
3003 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3004 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3005 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3006 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3007 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3008 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3010 vmx->rmode.vm86_active = 0;
3012 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3016 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3024 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3025 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3026 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3027 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3028 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3029 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3067 struct vcpu_vmx *vmx = to_vmx(vcpu);
3079 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3080 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3081 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3082 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3083 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3084 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3085 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3087 vmx->rmode.vm86_active = 1;
3089 vmx_segment_cache_clear(vmx);
3096 vmx->rmode.save_rflags = flags;
3104 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3105 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3106 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3107 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3108 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3109 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3114 struct vcpu_vmx *vmx = to_vmx(vcpu);
3117 if (!vmx_find_uret_msr(vmx, MSR_EFER))
3123 vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3125 vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3131 vmx_setup_uret_msrs(vmx);
3163 struct vcpu_vmx *vmx = to_vmx(vcpu);
3178 vpid_sync_vcpu_single(vmx->vpid);
3179 vpid_sync_vcpu_single(vmx->nested.vpid02);
3274 struct vcpu_vmx *vmx = to_vmx(vcpu);
3288 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3291 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3335 exec_controls_setbit(vmx, CR3_EXITING_BITS);
3337 exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3339 tmp = exec_controls_get(vmx);
3342 exec_controls_set(vmx, tmp);
3351 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3358 vmx->vt.emulation_required = vmx_emulation_required(vcpu);
3430 struct vcpu_vmx *vmx = to_vmx(vcpu);
3441 else if (vmx->rmode.vm86_active)
3448 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3452 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3493 struct vcpu_vmx *vmx = to_vmx(vcpu);
3496 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3497 *var = vmx->rmode.segs[seg];
3499 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3501 var->base = vmx_read_guest_seg_base(vmx, seg);
3502 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3505 var->base = vmx_read_guest_seg_base(vmx, seg);
3506 var->limit = vmx_read_guest_seg_limit(vmx, seg);
3507 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3508 ar = vmx_read_guest_seg_ar(vmx, seg);
3540 struct vcpu_vmx *vmx = to_vmx(vcpu);
3543 if (unlikely(vmx->rmode.vm86_active))
3549 ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3582 struct vcpu_vmx *vmx = to_vmx(vcpu);
3585 vmx_segment_cache_clear(vmx);
3587 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3588 vmx->rmode.segs[seg] = *var;
3592 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3952 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
3960 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3967 vmx->nested.force_msr_bitmap_recalc = true;
3972 struct vcpu_vmx *vmx = to_vmx(vcpu);
3973 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3978 vmx_msr_bitmap_l01_changed(vmx);
4004 struct vcpu_vmx *vmx = to_vmx(vcpu);
4005 u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap;
4012 (secondary_exec_controls_get(vmx) &
4021 if (mode == vmx->x2apic_msr_bitmap_mode)
4024 vmx->x2apic_msr_bitmap_mode = mode;
4057 struct vcpu_vmx *vmx = to_vmx(vcpu);
4058 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4065 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4127 struct vcpu_vmx *vmx = to_vmx(vcpu);
4137 vector == vmx->nested.posted_intr_nv) {
4142 vmx->nested.pi_pending = true;
4208 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4224 vmx->loaded_vmcs->host_state.cr3 = cr3;
4229 vmx->loaded_vmcs->host_state.cr4 = cr4;
4275 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4277 struct kvm_vcpu *vcpu = &vmx->vcpu;
4285 if (is_guest_mode(&vmx->vcpu))
4291 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4295 if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4345 struct vcpu_vmx *vmx = to_vmx(vcpu);
4348 vmx->nested.update_vmcs01_apicv_status = true;
4352 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4355 secondary_exec_controls_setbit(vmx,
4359 tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4361 secondary_exec_controls_clearbit(vmx,
4365 tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4371 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4388 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4391 if (!cpu_need_tpr_shadow(&vmx->vcpu))
4407 if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4410 if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4415 static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
4423 if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4435 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4454 kvm_check_has_quirk(vmx->vcpu.kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
4463 vmx->nested.msrs.secondary_ctls_high |= control;
4465 vmx->nested.msrs.secondary_ctls_high &= ~control;
4474 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
4476 struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \
4481 vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
4487 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4488 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4490 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4491 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4493 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4495 struct kvm_vcpu *vcpu = &vmx->vcpu;
4503 if (vmx->vpid == 0)
4512 if (kvm_pause_in_guest(vmx->vcpu.kvm))
4544 vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
4559 vmx_adjust_secondary_exec_control(vmx, &exec_control,
4564 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4566 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4567 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4569 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4613 static void init_vmcs(struct vcpu_vmx *vmx)
4615 struct kvm *kvm = vmx->vcpu.kvm;
4622 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4627 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4629 exec_controls_set(vmx, vmx_exec_control(vmx));
4632 secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
4633 if (vmx->ve_info)
4635 __pa(vmx->ve_info));
4639 tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
4641 if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4650 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->vt.pi_desc)));
4653 if (vmx_can_use_ipiv(&vmx->vcpu)) {
4660 vmx->ple_window = ple_window;
4661 vmx->ple_window_dirty = true;
4673 vmx_set_constant_host_state(vmx);
4682 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4684 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4687 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4689 vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4692 vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4694 vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4695 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4697 set_cr4_guest_host_mask(vmx);
4699 if (vmx->vpid != 0)
4700 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4706 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4710 vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4713 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4715 vmx->pt_desc.guest.output_mask = 0x7F;
4723 vmx_guest_debugctl_write(&vmx->vcpu, 0);
4727 if (cpu_need_tpr_shadow(&vmx->vcpu))
4729 __pa(vmx->vcpu.arch.apic->regs));
4733 vmx_setup_uret_msrs(vmx);
4738 struct vcpu_vmx *vmx = to_vmx(vcpu);
4740 init_vmcs(vmx);
4744 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
4748 vmx->nested.posted_intr_nv = -1;
4749 vmx->nested.vmxon_ptr = INVALID_GPA;
4750 vmx->nested.current_vmptr = INVALID_GPA;
4753 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
4758 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
4764 vmx->vt.pi_desc.nv = POSTED_INTR_VECTOR;
4765 __pi_set_sn(&vmx->vt.pi_desc);
4770 struct vcpu_vmx *vmx = to_vmx(vcpu);
4775 vmx->rmode.vm86_active = 0;
4776 vmx->spec_ctrl = 0;
4778 vmx->msr_ia32_umwait_control = 0;
4780 vmx->hv_deadline_tsc = -1;
4809 vmx_segment_cache_clear(vmx);
4822 vpid_sync_context(vmx->vpid);
4824 vmx_update_fb_clear_dis(vcpu, vmx);
4845 struct vcpu_vmx *vmx = to_vmx(vcpu);
4852 if (vmx->rmode.vm86_active) {
4863 vmx->vcpu.arch.event_exit_inst_len);
4873 struct vcpu_vmx *vmx = to_vmx(vcpu);
4884 vmx->loaded_vmcs->soft_vnmi_blocked = 1;
4885 vmx->loaded_vmcs->vnmi_blocked_time = 0;
4889 vmx->loaded_vmcs->nmi_known_unmasked = false;
4891 if (vmx->rmode.vm86_active) {
4904 struct vcpu_vmx *vmx = to_vmx(vcpu);
4908 return vmx->loaded_vmcs->soft_vnmi_blocked;
4909 if (vmx->loaded_vmcs->nmi_known_unmasked)
4912 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
4918 struct vcpu_vmx *vmx = to_vmx(vcpu);
4921 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
4922 vmx->loaded_vmcs->soft_vnmi_blocked = masked;
4923 vmx->loaded_vmcs->vnmi_blocked_time = 0;
4926 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5107 struct vcpu_vmx *vmx = to_vmx(vcpu);
5113 vect_info = vmx->idt_vectoring_info;
5140 struct vmx_ve_information *ve_info = vmx->ve_info;
5153 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
5201 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5251 vmx->vcpu.arch.event_exit_inst_len =
5619 struct vcpu_vmx *vmx = to_vmx(vcpu);
5626 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5627 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5628 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5644 if (vmx->idt_vectoring_info &
5747 struct vcpu_vmx *vmx = to_vmx(vcpu);
5749 if (!vmx->vt.emulation_required)
5760 if (vmx->nested.nested_run_pending)
5768 return !vmx->rmode.vm86_active &&
5774 struct vcpu_vmx *vmx = to_vmx(vcpu);
5778 intr_window_requested = exec_controls_get(vmx) &
5781 while (vmx->vt.emulation_required && count-- != 0) {
5783 return handle_interrupt_window(&vmx->vcpu);
5906 struct vcpu_vmx *vmx = to_vmx(vcpu);
5913 if (unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled))
6072 struct vcpu_vmx *vmx = to_vmx(vcpu);
6074 *reason = vmx->vt.exit_reason.full;
6076 if (!(vmx->vt.exit_reason.failed_vmentry)) {
6077 *info2 = vmx->idt_vectoring_info;
6099 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
6101 if (vmx->pml_pg) {
6102 __free_page(vmx->pml_pg);
6103 vmx->pml_pg = NULL;
6109 struct vcpu_vmx *vmx = to_vmx(vcpu);
6132 pml_buf = page_address(vmx->pml_pg);
6174 struct vcpu_vmx *vmx = to_vmx(vcpu);
6203 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6234 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6239 vmx->msr_autoload.guest.val[efer_slot].value);
6264 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6266 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
6297 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6346 struct vmx_ve_information *ve_info = vmx->ve_info;
6371 struct vcpu_vmx *vmx = to_vmx(vcpu);
6373 u32 vectoring_info = vmx->idt_vectoring_info;
6393 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
6428 if (vmx->vt.emulation_required) {
6438 if (vmx->vt.emulation_required)
6450 if (unlikely(vmx->fail)) {
6472 vmx->loaded_vmcs->soft_vnmi_blocked)) {
6474 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6475 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6486 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6518 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6637 struct vcpu_vmx *vmx = to_vmx(vcpu);
6649 vmx->nested.change_vmcs01_virtual_apic_mode = true;
6653 sec_exec_control = secondary_exec_controls_get(vmx);
6684 secondary_exec_controls_set(vmx, sec_exec_control);
6963 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
6970 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6973 if (vmx->loaded_vmcs->nmi_known_unmasked)
6976 exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
6994 vmx->loaded_vmcs->nmi_known_unmasked =
6997 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
6998 vmx->loaded_vmcs->vnmi_blocked_time +=
7000 vmx->loaded_vmcs->entry_time));
7061 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7063 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7078 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7082 struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7095 clear_atomic_switch_msr(vmx, msrs[i].msr);
7097 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7103 struct vcpu_vmx *vmx = to_vmx(vcpu);
7109 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7110 } else if (vmx->hv_deadline_tsc != -1) {
7112 if (vmx->hv_deadline_tsc > tscl)
7114 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
7120 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7121 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
7123 vmx->loaded_vmcs->hv_timer_soft_disabled = true;
7127 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
7129 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7130 vmx->loaded_vmcs->host_state.rsp = host_rsp;
7135 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
7144 vmx->spec_ctrl = native_rdmsrq(MSR_IA32_SPEC_CTRL);
7154 vmx->spec_ctrl != hostval)
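
__vmx_vcpu_run_flags() and vmx_spec_ctrl_restore_host() above cooperate so that MSR_IA32_SPEC_CTRL is read back only when the guest can write it without an intercept, and rewritten only when the host value differs. A sketch of that save/restore decision with made-up flag names and a simulated MSR; the real code also has an always-write case for legacy IBRS that is omitted here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RUN_FLAG_SAVE_SPEC_CTRL (1u << 0)

static uint64_t hw_spec_ctrl;             /* simulated MSR_IA32_SPEC_CTRL */
static unsigned int wrmsr_count;

static void wrmsr_sim(uint64_t val) { hw_spec_ctrl = val; wrmsr_count++; }

struct vcpu { bool spec_ctrl_intercepted; uint64_t spec_ctrl; };

static unsigned int vcpu_run_flags(const struct vcpu *v)
{
        unsigned int flags = 0;

        /* Only pay for the post-exit RDMSR if the guest owns the MSR. */
        if (!v->spec_ctrl_intercepted)
                flags |= RUN_FLAG_SAVE_SPEC_CTRL;
        return flags;
}

static void spec_ctrl_restore_host(struct vcpu *v, unsigned int flags,
                                   uint64_t hostval)
{
        if (flags & RUN_FLAG_SAVE_SPEC_CTRL)
                v->spec_ctrl = hw_spec_ctrl;  /* what the guest left behind */

        if (v->spec_ctrl != hostval)          /* skip the WRMSR when equal */
                wrmsr_sim(hostval);
}

int main(void)
{
        struct vcpu v = { .spec_ctrl_intercepted = false };
        unsigned int flags = vcpu_run_flags(&v);

        hw_spec_ctrl = 0x1;                   /* pretend the guest set IBRS */
        spec_ctrl_restore_host(&v, flags, 0x0);
        printf("saved guest value %#llx, wrmsrs=%u\n",
               (unsigned long long)v.spec_ctrl, wrmsr_count);
        return 0;
}
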
7200 struct vcpu_vmx *vmx = to_vmx(vcpu);
7220 vmx_disable_fb_clear(vmx);
7225 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7231 vmx->idt_vectoring_info = 0;
7233 vmx_enable_fb_clear(vmx);
7235 if (unlikely(vmx->fail)) {
7236 vmx->vt.exit_reason.full = 0xdead;
7240 vmx->vt.exit_reason.full = vmcs_read32(VM_EXIT_REASON);
7242 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7253 struct vcpu_vmx *vmx = to_vmx(vcpu);
7258 vmx->loaded_vmcs->soft_vnmi_blocked))
7259 vmx->loaded_vmcs->entry_time = ktime_get();
7266 if (unlikely(vmx->vt.emulation_required)) {
7267 vmx->fail = 0;
7269 vmx->vt.exit_reason.full = EXIT_REASON_INVALID_STATE;
7270 vmx->vt.exit_reason.failed_vmentry = 1;
7272 vmx->vt.exit_qualification = ENTRY_FAIL_DEFAULT;
7274 vmx->vt.exit_intr_info = 0;
7280 if (vmx->ple_window_dirty) {
7281 vmx->ple_window_dirty = false;
7282 vmcs_write32(PLE_WINDOW, vmx->ple_window);
7289 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
7311 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
7313 vmx->loaded_vmcs->host_state.cr3 = cr3;
7317 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
7319 vmx->loaded_vmcs->host_state.cr4 = cr4;
7332 pt_guest_enter(vmx);
7334 atomic_switch_perf_msrs(vmx);
7346 vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7373 pt_guest_exit(vmx);
7382 if (vmx->nested.nested_run_pending &&
7386 vmx->nested.nested_run_pending = 0;
7389 if (unlikely(vmx->fail))
7400 vmx->loaded_vmcs->launched = 1;
7402 vmx_recover_nmi_blocking(vmx);
7403 vmx_complete_interrupts(vmx);
7410 struct vcpu_vmx *vmx = to_vmx(vcpu);
7413 vmx_destroy_pml_buffer(vmx);
7414 free_vpid(vmx->vpid);
7416 free_loaded_vmcs(vmx->loaded_vmcs);
7417 free_page((unsigned long)vmx->ve_info);
7423 struct vcpu_vmx *vmx;
7427 vmx = to_vmx(vcpu);
7429 INIT_LIST_HEAD(&vmx->vt.pi_wakeup_list);
7433 vmx->vpid = allocate_vpid();
7442 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7443 if (!vmx->pml_pg)
7448 vmx->guest_uret_msrs[i].mask = -1ull;
7455 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7460 err = alloc_loaded_vmcs(&vmx->vmcs01);
7472 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7477 vmx->loaded_vmcs = &vmx->vmcs01;
7495 BUILD_BUG_ON(sizeof(*vmx->ve_info) > PAGE_SIZE);
7502 vmx->ve_info = page_to_virt(page);
7507 __pa(&vmx->vt.pi_desc) | PID_TABLE_ENTRY_VALID);
7512 free_loaded_vmcs(vmx->loaded_vmcs);
7514 vmx_destroy_pml_buffer(vmx);
7516 free_vpid(vmx->vpid);
7585 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
7599 u32 cur_ctl = secondary_exec_controls_get(vmx);
7601 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
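
vmcs_set_secondary_exec_control() above merges a freshly computed control word with the currently programmed one, keeping the bits named in a mask untouched (in the real code, controls that are toggled dynamically elsewhere and must not be overwritten here). A tiny sketch of that merge with placeholder bit values:

#include <stdint.h>
#include <stdio.h>

/* Bits in "mask" keep their current value; everything else comes from new_ctl. */
static uint32_t merge_controls(uint32_t cur_ctl, uint32_t new_ctl, uint32_t mask)
{
        return (new_ctl & ~mask) | (cur_ctl & mask);
}

int main(void)
{
        uint32_t cur_ctl = 0x00000005;    /* currently programmed controls */
        uint32_t new_ctl = 0x00000003;    /* recomputed after a guest CPUID update */
        uint32_t mask    = 0x00000004;    /* bits that must be preserved as-is */

        printf("merged controls: %#x\n", merge_controls(cur_ctl, new_ctl, mask));
        return 0;
}
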
7610 struct vcpu_vmx *vmx = to_vmx(vcpu);
7613 vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7614 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7618 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
7653 struct vcpu_vmx *vmx = to_vmx(vcpu);
7661 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7662 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7663 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7664 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7668 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps,
7672 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7680 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7681 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7687 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7688 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7694 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7695 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7699 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7700 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7704 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7705 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7708 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7709 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7712 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7713 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7716 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++)
7717 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7722 struct vcpu_vmx *vmx = to_vmx(vcpu);
7732 vmx_setup_uret_msrs(vmx);
7735 vmcs_set_secondary_exec_control(vmx,
7736 vmx_secondary_exec_control(vmx));
7739 vmx->msr_ia32_feature_control_valid_bits |=
7743 vmx->msr_ia32_feature_control_valid_bits &=
7756 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7759 vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7763 set_cr4_guest_host_mask(vmx);
7767 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
7769 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
7772 vmx->msr_ia32_feature_control_valid_bits |=
7775 vmx->msr_ia32_feature_control_valid_bits &=
8053 struct vcpu_vmx *vmx;
8057 vmx = to_vmx(vcpu);
8085 vmx->hv_deadline_tsc = tscl + delta_tsc;
8098 struct vcpu_vmx *vmx = to_vmx(vcpu);
8104 vmx->nested.update_vmcs01_cpu_dirty_logging = true;
8114 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8116 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8140 struct vcpu_vmx *vmx = to_vmx(vcpu);
8149 vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8150 if (vmx->nested.smm.guest_mode)
8153 vmx->nested.smm.vmxon = vmx->nested.vmxon;
8154 vmx->nested.vmxon = false;
8161 struct vcpu_vmx *vmx = to_vmx(vcpu);
8164 if (vmx->nested.smm.vmxon) {
8165 vmx->nested.vmxon = true;
8166 vmx->nested.smm.vmxon = false;
8169 if (vmx->nested.smm.guest_mode) {
8174 vmx->nested.nested_run_pending = 1;
8175 vmx->nested.smm.guest_mode = false;