1 // SPDX-License-Identifier: GPL-2.0
31 * Hyper-V requires all of these, so mark them as supported even though
32 * they are just treated the same as all-context.
177 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; in nested_vmx_failValid()
184 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_failValid()
197 if (vmx->nested.current_vmptr == INVALID_GPA && in nested_vmx_fail()
225 vmx->nested.need_vmcs12_to_shadow_sync = false; in vmx_disable_shadow_vmcs()
234 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map); in nested_release_evmcs()
235 vmx->nested.hv_evmcs = NULL; in nested_release_evmcs()
236 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; in nested_release_evmcs()
239 hv_vcpu->nested.pa_page_gpa = INVALID_GPA; in nested_release_evmcs()
240 hv_vcpu->nested.vm_id = 0; in nested_release_evmcs()
241 hv_vcpu->nested.vp_id = 0; in nested_release_evmcs()
254 * writing to the non-existent 'launch_state' field. The area doesn't in nested_evmcs_handle_vmclear()
256 * nothing KVM has to do to transition it from 'active' to 'non-active' in nested_evmcs_handle_vmclear()
258 * vmx->nested.hv_evmcs but this shouldn't be a problem. in nested_evmcs_handle_vmclear()
264 if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr) in nested_evmcs_handle_vmclear()
278 if (unlikely(!vmx->guest_state_loaded)) in vmx_sync_vmcs_host_state()
281 src = &prev->host_state; in vmx_sync_vmcs_host_state()
282 dest = &vmx->loaded_vmcs->host_state; in vmx_sync_vmcs_host_state()
284 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base); in vmx_sync_vmcs_host_state()
285 dest->ldt_sel = src->ldt_sel; in vmx_sync_vmcs_host_state()
287 dest->ds_sel = src->ds_sel; in vmx_sync_vmcs_host_state()
288 dest->es_sel = src->es_sel; in vmx_sync_vmcs_host_state()
298 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) in vmx_switch_vmcs()
302 prev = vmx->loaded_vmcs; in vmx_switch_vmcs()
303 vmx->loaded_vmcs = vmcs; in vmx_switch_vmcs()
308 vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET; in vmx_switch_vmcs()
314 vcpu->arch.regs_dirty = 0; in vmx_switch_vmcs()
321 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map); in nested_put_vmcs12_pages()
322 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map); in nested_put_vmcs12_pages()
323 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map); in nested_put_vmcs12_pages()
324 vmx->nested.pi_desc = NULL; in nested_put_vmcs12_pages()
328 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
335 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) in free_nested()
336 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in free_nested()
338 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
343 vmx->nested.vmxon = false; in free_nested()
344 vmx->nested.smm.vmxon = false; in free_nested()
345 vmx->nested.vmxon_ptr = INVALID_GPA; in free_nested()
346 free_vpid(vmx->nested.vpid02); in free_nested()
347 vmx->nested.posted_intr_nv = -1; in free_nested()
348 vmx->nested.current_vmptr = INVALID_GPA; in free_nested()
351 vmcs_clear(vmx->vmcs01.shadow_vmcs); in free_nested()
352 free_vmcs(vmx->vmcs01.shadow_vmcs); in free_nested()
353 vmx->vmcs01.shadow_vmcs = NULL; in free_nested()
355 kfree(vmx->nested.cached_vmcs12); in free_nested()
356 vmx->nested.cached_vmcs12 = NULL; in free_nested()
357 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
358 vmx->nested.cached_shadow_vmcs12 = NULL; in free_nested()
362 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); in free_nested()
366 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
398 cached_root = &vcpu->arch.mmu->prev_roots[i]; in nested_ept_invalidate_addr()
400 if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd, in nested_ept_invalidate_addr()
405 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots); in nested_ept_invalidate_addr()
416 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
418 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
421 * It should be impossible to trigger a nested PML Full VM-Exit in nested_ept_inject_page_fault()
426 * VM-Exits as writes. in nested_ept_inject_page_fault()
428 WARN_ON_ONCE(vmx->exit_reason.basic != EXIT_REASON_EPT_VIOLATION); in nested_ept_inject_page_fault()
431 * PML Full and EPT Violation VM-Exits both use bit 12 to report in nested_ept_inject_page_fault()
433 * as-is from the original EXIT_QUALIFICATION. in nested_ept_inject_page_fault()
437 if (fault->error_code & PFERR_RSVD_MASK) { in nested_ept_inject_page_fault()
441 exit_qualification = fault->exit_qualification; in nested_ept_inject_page_fault()
455 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer, in nested_ept_inject_page_fault()
456 fault->address); in nested_ept_inject_page_fault()
460 vmcs12->guest_physical_address = fault->address; in nested_ept_inject_page_fault()
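For reference, a minimal stand-alone C sketch of the bit-12 carry-over described in the comment above: only the "NMI unblocking due to IRET" flag of the EPT Violation's Exit Qualification survives into the synthesized PML Full exit. The helper name and constant are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define EXIT_QUAL_NMI_UNBLOCK_IRET (1ULL << 12)

static uint64_t pml_full_exit_qual(uint64_t ept_violation_qual)
{
	/* Only bit 12 carries over; all other EPT Violation bits are dropped. */
	return ept_violation_qual & EXIT_QUAL_NMI_UNBLOCK_IRET;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)pml_full_exit_qual(0x1082)); /* 0x1000 */
	return 0;
}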
466 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT; in nested_ept_new_eptp()
467 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps); in nested_ept_new_eptp()
478 vcpu->arch.mmu = &vcpu->arch.guest_mmu; in nested_ept_init_mmu_context()
480 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp; in nested_ept_init_mmu_context()
481 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; in nested_ept_init_mmu_context()
482 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; in nested_ept_init_mmu_context()
484 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
489 vcpu->arch.mmu = &vcpu->arch.root_mmu; in nested_ept_uninit_mmu_context()
490 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_ept_uninit_mmu_context()
498 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; in nested_vmx_is_page_fault_vmexit()
500 (error_code & vmcs12->page_fault_error_code_mask) != in nested_vmx_is_page_fault_vmexit()
501 vmcs12->page_fault_error_code_match; in nested_vmx_is_page_fault_vmexit()
512 * check. All VMCS fields involved are 32 bits, but Intel CPUs never in nested_vmx_is_exception_vmexit()
514 * error code. Including the to-be-dropped bits in the check might in nested_vmx_is_exception_vmexit()
520 return (vmcs12->exception_bitmap & (1u << vector)); in nested_vmx_is_exception_vmexit()
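A self-contained sketch of the #PF reflection rule implemented by the checks above: a page fault is forwarded to L1 as a VM-Exit iff bit 14 of the exception bitmap XOR'd with the PFEC mask/match inequality is set. The function and parameter names below are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR 14

static bool is_page_fault_vmexit(uint32_t exception_bitmap, uint32_t pfec_mask,
				 uint32_t pfec_match, uint32_t error_code)
{
	bool bit = exception_bitmap & (1u << PF_VECTOR);
	bool inequality = (error_code & pfec_mask) != pfec_match;

	return bit ^ inequality;
}

int main(void)
{
	/* bit 14 set, mask/match configured to select write faults (PFEC bit 1) */
	printf("%d\n", is_page_fault_vmexit(1u << PF_VECTOR, 0x2, 0x2, 0x3)); /* 1 */
	printf("%d\n", is_page_fault_vmexit(1u << PF_VECTOR, 0x2, 0x2, 0x1)); /* 0 */
	return 0;
}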
529 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) || in nested_vmx_check_io_bitmap_controls()
530 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b))) in nested_vmx_check_io_bitmap_controls()
531 return -EINVAL; in nested_vmx_check_io_bitmap_controls()
542 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap))) in nested_vmx_check_msr_bitmap_controls()
543 return -EINVAL; in nested_vmx_check_msr_bitmap_controls()
554 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))) in nested_vmx_check_tpr_shadow_controls()
555 return -EINVAL; in nested_vmx_check_tpr_shadow_controls()
594 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
617 * Merge L0's and L1's MSR bitmap, return false to indicate that
626 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap; in nested_vmx_prepare_msr_bitmap()
636 * - MSR bitmap for L1 hasn't changed. in nested_vmx_prepare_msr_bitmap()
637 * - Nested hypervisor (L1) is attempting to launch the same L2 as in nested_vmx_prepare_msr_bitmap()
639 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature in nested_vmx_prepare_msr_bitmap()
642 if (!vmx->nested.force_msr_bitmap_recalc) { in nested_vmx_prepare_msr_bitmap()
645 if (evmcs && evmcs->hv_enlightenments_control.msr_bitmap && in nested_vmx_prepare_msr_bitmap()
646 evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP) in nested_vmx_prepare_msr_bitmap()
650 if (kvm_vcpu_map_readonly(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), &map)) in nested_vmx_prepare_msr_bitmap()
656 * To keep the control flow simple, pay eight 8-byte writes (sixteen in nested_vmx_prepare_msr_bitmap()
657 * 4-byte writes on 32-bit systems) up front to enable intercepts for in nested_vmx_prepare_msr_bitmap()
667 * from the virtual-APIC page; take those 256 bits in nested_vmx_prepare_msr_bitmap()
696 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through. in nested_vmx_prepare_msr_bitmap()
719 vmx->nested.force_msr_bitmap_recalc = false; in nested_vmx_prepare_msr_bitmap()
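As a rough, stand-alone illustration of the merge described in the comments above: conceptually, the vmcs02 bitmap intercepts an MSR whenever either L0's (vmcs01) or L1's (vmcs12) bitmap intercepts it, i.e. a bitwise OR (KVM implements this selectively per MSR range rather than OR-ing whole pages, and the real bitmap is split into read/write halves for low and high MSR ranges). The array size and helper names below are illustrative.

#include <stddef.h>
#include <stdio.h>

#define BITMAP_LONGS 16

static void merge_msr_bitmaps(const unsigned long *l0, const unsigned long *l1,
			      unsigned long *l2, size_t longs)
{
	for (size_t i = 0; i < longs; i++)
		l2[i] = l0[i] | l1[i];	/* intercept if either L0 or L1 wants it */
}

int main(void)
{
	unsigned long l0[BITMAP_LONGS] = { 0x1 }, l1[BITMAP_LONGS] = { 0x4 };
	unsigned long l2[BITMAP_LONGS];

	merge_msr_bitmaps(l0, l1, l2, BITMAP_LONGS);
	printf("%#lx\n", l2[0]);	/* 0x5 */
	return 0;
}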
728 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_cache_shadow_vmcs12()
731 vmcs12->vmcs_link_pointer == INVALID_GPA) in nested_cache_shadow_vmcs12()
734 if (ghc->gpa != vmcs12->vmcs_link_pointer && in nested_cache_shadow_vmcs12()
735 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, in nested_cache_shadow_vmcs12()
736 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) in nested_cache_shadow_vmcs12()
739 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), in nested_cache_shadow_vmcs12()
747 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_flush_cached_shadow_vmcs12()
750 vmcs12->vmcs_link_pointer == INVALID_GPA) in nested_flush_cached_shadow_vmcs12()
753 if (ghc->gpa != vmcs12->vmcs_link_pointer && in nested_flush_cached_shadow_vmcs12()
754 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, in nested_flush_cached_shadow_vmcs12()
755 vmcs12->vmcs_link_pointer, VMCS12_SIZE)) in nested_flush_cached_shadow_vmcs12()
758 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), in nested_flush_cached_shadow_vmcs12()
768 return get_vmcs12(vcpu)->vm_exit_controls & in nested_exit_intr_ack_set()
776 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr))) in nested_vmx_check_apic_access_controls()
777 return -EINVAL; in nested_vmx_check_apic_access_controls()
797 return -EINVAL; in nested_vmx_check_apicv_controls()
804 return -EINVAL; in nested_vmx_check_apicv_controls()
816 CC((vmcs12->posted_intr_nv & 0xff00)) || in nested_vmx_check_apicv_controls()
817 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64)))) in nested_vmx_check_apicv_controls()
818 return -EINVAL; in nested_vmx_check_apicv_controls()
822 return -EINVAL; in nested_vmx_check_apicv_controls()
834 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1))) in nested_vmx_check_msr_switch()
835 return -EINVAL; in nested_vmx_check_msr_switch()
844 vmcs12->vm_exit_msr_load_count, in nested_vmx_check_exit_msr_switch_controls()
845 vmcs12->vm_exit_msr_load_addr)) || in nested_vmx_check_exit_msr_switch_controls()
847 vmcs12->vm_exit_msr_store_count, in nested_vmx_check_exit_msr_switch_controls()
848 vmcs12->vm_exit_msr_store_addr))) in nested_vmx_check_exit_msr_switch_controls()
849 return -EINVAL; in nested_vmx_check_exit_msr_switch_controls()
858 vmcs12->vm_entry_msr_load_count, in nested_vmx_check_entry_msr_switch_controls()
859 vmcs12->vm_entry_msr_load_addr))) in nested_vmx_check_entry_msr_switch_controls()
860 return -EINVAL; in nested_vmx_check_entry_msr_switch_controls()
872 CC(!page_address_valid(vcpu, vmcs12->pml_address))) in nested_vmx_check_pml_controls()
873 return -EINVAL; in nested_vmx_check_pml_controls()
883 return -EINVAL; in nested_vmx_check_unrestricted_guest_controls()
892 return -EINVAL; in nested_vmx_check_mode_based_ept_exec_controls()
902 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) || in nested_vmx_check_shadow_vmcs_controls()
903 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap))) in nested_vmx_check_shadow_vmcs_controls()
904 return -EINVAL; in nested_vmx_check_shadow_vmcs_controls()
913 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) in nested_vmx_msr_check_common()
914 return -EINVAL; in nested_vmx_msr_check_common()
915 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */ in nested_vmx_msr_check_common()
916 CC(e->index == MSR_IA32_UCODE_REV)) in nested_vmx_msr_check_common()
917 return -EINVAL; in nested_vmx_msr_check_common()
918 if (CC(e->reserved != 0)) in nested_vmx_msr_check_common()
919 return -EINVAL; in nested_vmx_msr_check_common()
926 if (CC(e->index == MSR_FS_BASE) || in nested_vmx_load_msr_check()
927 CC(e->index == MSR_GS_BASE) || in nested_vmx_load_msr_check()
928 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */ in nested_vmx_load_msr_check()
930 return -EINVAL; in nested_vmx_load_msr_check()
937 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */ in nested_vmx_store_msr_check()
939 return -EINVAL; in nested_vmx_store_msr_check()
946 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in nested_vmx_max_atomic_switch_msrs()
947 vmx->nested.msrs.misc_high); in nested_vmx_max_atomic_switch_msrs()
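nested_vmx_max_atomic_switch_msrs() derives its limit from IA32_VMX_MISC; per the SDM, bits 27:25 encode N and the recommended maximum number of MSR-list entries is 512 * (N + 1). A stand-alone sketch with an illustrative name:

#include <stdint.h>
#include <stdio.h>

static uint32_t max_atomic_switch_msrs(uint64_t vmx_misc)
{
	uint32_t n = (vmx_misc >> 25) & 0x7;	/* IA32_VMX_MISC bits 27:25 */

	return (n + 1) * 512;
}

int main(void)
{
	printf("%u\n", max_atomic_switch_msrs(0));	/* 512 */
	return 0;
}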
1005 * does not include the time taken for emulation of the L2->L1 in nested_vmx_get_vmexit_msr_value()
1006 * VM-exit in L0, use the more accurate value. in nested_vmx_get_vmexit_msr_value()
1009 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, in nested_vmx_get_vmexit_msr_value()
1013 u64 val = vmx->msr_autostore.guest.val[i].value; in nested_vmx_get_vmexit_msr_value()
1042 __func__, i, e->index, e->reserved); in read_and_check_msr_entry()
1057 return -EINVAL; in nested_vmx_store_msr()
1060 return -EINVAL; in nested_vmx_store_msr()
1063 return -EINVAL; in nested_vmx_store_msr()
1072 return -EINVAL; in nested_vmx_store_msr()
1081 u32 count = vmcs12->vm_exit_msr_store_count; in nested_msr_store_list_has_msr()
1082 u64 gpa = vmcs12->vm_exit_msr_store_addr; in nested_msr_store_list_has_msr()
1100 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; in prepare_vmx_msr_autostore_list()
1111 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) { in prepare_vmx_msr_autostore_list()
1117 * the vmcs02 VMExit MSR-store area. in prepare_vmx_msr_autostore_list()
1124 last = autostore->nr++; in prepare_vmx_msr_autostore_list()
1125 autostore->val[last].index = msr_index; in prepare_vmx_msr_autostore_list()
1127 last = --autostore->nr; in prepare_vmx_msr_autostore_list()
1128 autostore->val[msr_autostore_slot] = autostore->val[last]; in prepare_vmx_msr_autostore_list()
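The surrounding code manages a small fixed-size MSR list: append into the next free slot, and remove by overwriting the victim with the last entry and shrinking the count (order is not preserved). A stand-alone sketch of that pattern, with illustrative types and names rather than the kernel's vmx_msrs layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 8

struct msr_list {
	unsigned int nr;
	uint32_t index[MAX_ENTRIES];
};

static bool list_add(struct msr_list *l, uint32_t msr)
{
	if (l->nr == MAX_ENTRIES)
		return false;		/* full: caller must fall back to intercepting */
	l->index[l->nr++] = msr;
	return true;
}

static void list_del(struct msr_list *l, unsigned int slot)
{
	l->index[slot] = l->index[--l->nr];	/* swap with last, shrink */
}

int main(void)
{
	struct msr_list l = { 0 };

	list_add(&l, 0x10);	/* e.g. IA32_TIME_STAMP_COUNTER */
	list_add(&l, 0x1b);
	list_del(&l, 0);
	printf("nr=%u first=%#x\n", l.nr, l.index[0]);	/* nr=1 first=0x1b */
	return 0;
}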
1134 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1135 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1144 return -EINVAL; in nested_vmx_load_cr3()
1154 return -EINVAL; in nested_vmx_load_cr3()
1157 vcpu->arch.cr3 = cr3; in nested_vmx_load_cr3()
1160 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */ in nested_vmx_load_cr3()
1179 * with different VPID (L1 entries are tagged with vmx->vpid
1180 * while L2 entries are tagged with vmx->nested.vpid02).
1187 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); in nested_has_guest_tlb_tag()
1196 /* Handle pending Hyper-V TLB flush requests */ in nested_vmx_transition_tlb_flush()
1202 * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM in nested_vmx_transition_tlb_flush()
1210 * EPT is a special snowflake, as guest-physical mappings aren't in nested_vmx_transition_tlb_flush()
1211 * flushed on VPID invalidations, including VM-Enter or VM-Exit with in nested_vmx_transition_tlb_flush()
1213 * entries on VM-Enter because L1 can't rely on VM-Enter to flush in nested_vmx_transition_tlb_flush()
1221 /* L2 should never have a VPID if VPID is disabled. */ in nested_vmx_transition_tlb_flush()
1228 * that the new vpid12 has never been used and thus represents a new in nested_vmx_transition_tlb_flush()
1231 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in nested_vmx_transition_tlb_flush()
1232 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in nested_vmx_transition_tlb_flush()
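A minimal sketch of the rule in the comment above: KVM cannot map an arbitrary vpid12 chosen by L1 onto a hardware VPID, so whenever vmcs12's virtual_processor_id differs from the last value seen it is treated as a brand-new TLB tag and the guest-side TLB is flushed. Names below are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct nested_tlb_state {
	uint16_t last_vpid;
};

static bool vmenter_needs_guest_tlb_flush(struct nested_tlb_state *s,
					   uint16_t vpid12)
{
	if (vpid12 == s->last_vpid)
		return false;		/* same tag as last VM-Enter: no flush */
	s->last_vpid = vpid12;
	return true;			/* new tag: flush guest TLB entries */
}

int main(void)
{
	struct nested_tlb_state s = { .last_vpid = 0 };

	printf("%d\n", vmenter_needs_guest_tlb_flush(&s, 5));	/* 1 */
	printf("%d\n", vmenter_needs_guest_tlb_flush(&s, 5));	/* 0 */
	return 0;
}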
1270 * Except for 32BIT_PHYS_ADDR_ONLY, which is an anti-feature bit (has in vmx_restore_vmx_basic()
1273 * multi-bit values, are explicitly checked below. in vmx_restore_vmx_basic()
1276 return -EINVAL; in vmx_restore_vmx_basic()
1280 * addresses of VMX structures (e.g. VMCS) to 32-bits. in vmx_restore_vmx_basic()
1283 return -EINVAL; in vmx_restore_vmx_basic()
1287 return -EINVAL; in vmx_restore_vmx_basic()
1290 return -EINVAL; in vmx_restore_vmx_basic()
1292 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
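The restore path relies on a subset check: within the checked mask, userspace may only clear feature bits relative to what KVM reports, never set new ones. A stand-alone sketch with the same semantics as the is_bitwise_subset() helper used above (the name is reused for clarity, but this is an illustration, not the kernel's implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_bitwise_subset(uint64_t superset, uint64_t subset, uint64_t mask)
{
	/* every bit set in subset (within mask) must also be set in superset */
	return !((subset & ~superset) & mask);
}

int main(void)
{
	uint64_t supported = 0x00f0;

	printf("%d\n", is_bitwise_subset(supported, 0x0030, ~0ULL));	/* 1 */
	printf("%d\n", is_bitwise_subset(supported, 0x0130, ~0ULL));	/* 0 */
	return 0;
}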
1301 *low = &msrs->pinbased_ctls_low; in vmx_get_control_msr()
1302 *high = &msrs->pinbased_ctls_high; in vmx_get_control_msr()
1305 *low = &msrs->procbased_ctls_low; in vmx_get_control_msr()
1306 *high = &msrs->procbased_ctls_high; in vmx_get_control_msr()
1309 *low = &msrs->exit_ctls_low; in vmx_get_control_msr()
1310 *high = &msrs->exit_ctls_high; in vmx_get_control_msr()
1313 *low = &msrs->entry_ctls_low; in vmx_get_control_msr()
1314 *high = &msrs->entry_ctls_high; in vmx_get_control_msr()
1317 *low = &msrs->secondary_ctls_low; in vmx_get_control_msr()
1318 *high = &msrs->secondary_ctls_high; in vmx_get_control_msr()
1335 /* Check must-be-1 bits are still 1. */ in vmx_restore_control_msr()
1337 return -EINVAL; in vmx_restore_control_msr()
1339 /* Check must-be-0 bits are still 0. */ in vmx_restore_control_msr()
1341 return -EINVAL; in vmx_restore_control_msr()
1343 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); in vmx_restore_control_msr()
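A VMX control capability MSR packs the "allowed 0-settings" in its low 32 bits (a bit set there must be 1 in the control) and the "allowed 1-settings" in its high 32 bits (a bit clear there must be 0). The checks above enforce exactly that when userspace restores such an MSR. A stand-alone illustration with names that are not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool control_msr_restore_ok(uint64_t supported, uint64_t data)
{
	uint32_t supp_lo = (uint32_t)supported, data_lo = (uint32_t)data;
	uint32_t supp_hi = supported >> 32, data_hi = data >> 32;

	/* must-be-1 bits (set in the supported low half) must remain set */
	if ((data_lo & supp_lo) != supp_lo)
		return false;
	/* must-be-0 bits (clear in the supported high half) must remain clear */
	if (data_hi & ~supp_hi)
		return false;
	return true;
}

int main(void)
{
	uint64_t supported = ((uint64_t)0x0000ffff << 32) | 0x00000016;

	printf("%d\n", control_msr_restore_ok(supported, ((uint64_t)0x16 << 32) | 0x16)); /* 1 */
	printf("%d\n", control_msr_restore_ok(supported, ((uint64_t)0x16 << 32) | 0x06)); /* 0 */
	return 0;
}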
1370 * aren't allowed/supported by KVM. Fields, i.e. multi-bit values, are in vmx_restore_vmx_misc()
1374 return -EINVAL; in vmx_restore_vmx_misc()
1376 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
1380 return -EINVAL; in vmx_restore_vmx_misc()
1383 return -EINVAL; in vmx_restore_vmx_misc()
1386 return -EINVAL; in vmx_restore_vmx_misc()
1389 return -EINVAL; in vmx_restore_vmx_misc()
1391 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
1392 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
1403 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) in vmx_restore_vmx_ept_vpid_cap()
1404 return -EINVAL; in vmx_restore_vmx_ept_vpid_cap()
1406 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
1407 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
1415 return &msrs->cr0_fixed0; in vmx_get_fixed0_msr()
1417 return &msrs->cr4_fixed0; in vmx_get_fixed0_msr()
1428 * 1 bits (which indicates bits which "must-be-1" during VMX operation) in vmx_restore_fixed0_msr()
1431 if (!is_bitwise_subset(data, *msr, -1ULL)) in vmx_restore_fixed0_msr()
1432 return -EINVAL; in vmx_restore_fixed0_msr()
1434 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; in vmx_restore_fixed0_msr()
1441 * Returns 0 on success, non-0 otherwise.
1451 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
1452 return -EBUSY; in vmx_set_vmx_msr()
1462 * The "non-true" VMX capability MSRs are generated from the in vmx_set_vmx_msr()
1466 * should restore the "true" MSRs with the must-be-1 bits in vmx_set_vmx_msr()
1470 return -EINVAL; in vmx_set_vmx_msr()
1488 return -EINVAL; in vmx_set_vmx_msr()
1492 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
1496 return -EINVAL; in vmx_set_vmx_msr()
1497 vmx->nested.msrs.vmfunc_controls = data; in vmx_set_vmx_msr()
1503 return -EINVAL; in vmx_set_vmx_msr()
1507 /* Returns 0 on success, non-0 otherwise. */
1512 *pdata = msrs->basic; in vmx_get_vmx_msr()
1517 msrs->pinbased_ctls_low, in vmx_get_vmx_msr()
1518 msrs->pinbased_ctls_high); in vmx_get_vmx_msr()
1525 msrs->procbased_ctls_low, in vmx_get_vmx_msr()
1526 msrs->procbased_ctls_high); in vmx_get_vmx_msr()
1533 msrs->exit_ctls_low, in vmx_get_vmx_msr()
1534 msrs->exit_ctls_high); in vmx_get_vmx_msr()
1541 msrs->entry_ctls_low, in vmx_get_vmx_msr()
1542 msrs->entry_ctls_high); in vmx_get_vmx_msr()
1548 msrs->misc_low, in vmx_get_vmx_msr()
1549 msrs->misc_high); in vmx_get_vmx_msr()
1552 *pdata = msrs->cr0_fixed0; in vmx_get_vmx_msr()
1555 *pdata = msrs->cr0_fixed1; in vmx_get_vmx_msr()
1558 *pdata = msrs->cr4_fixed0; in vmx_get_vmx_msr()
1561 *pdata = msrs->cr4_fixed1; in vmx_get_vmx_msr()
1564 *pdata = msrs->vmcs_enum; in vmx_get_vmx_msr()
1568 msrs->secondary_ctls_low, in vmx_get_vmx_msr()
1569 msrs->secondary_ctls_high); in vmx_get_vmx_msr()
1572 *pdata = msrs->ept_caps | in vmx_get_vmx_msr()
1573 ((u64)msrs->vpid_caps << 32); in vmx_get_vmx_msr()
1576 *pdata = msrs->vmfunc_controls; in vmx_get_vmx_msr()
1589 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1590 * VM-exit information fields (which are actually writable if the vCPU is
1595 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_shadow_to_vmcs12()
1596 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_shadow_to_vmcs12()
1615 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
1630 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_vmcs12_to_shadow()
1631 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_vmcs12_to_shadow()
1651 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
1657 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_enlightened_to_vmcs12()
1659 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu); in copy_enlightened_to_vmcs12()
1662 vmcs12->tpr_threshold = evmcs->tpr_threshold; in copy_enlightened_to_vmcs12()
1663 vmcs12->guest_rip = evmcs->guest_rip; in copy_enlightened_to_vmcs12()
1667 hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page; in copy_enlightened_to_vmcs12()
1668 hv_vcpu->nested.vm_id = evmcs->hv_vm_id; in copy_enlightened_to_vmcs12()
1669 hv_vcpu->nested.vp_id = evmcs->hv_vp_id; in copy_enlightened_to_vmcs12()
1674 vmcs12->guest_rsp = evmcs->guest_rsp; in copy_enlightened_to_vmcs12()
1675 vmcs12->guest_rflags = evmcs->guest_rflags; in copy_enlightened_to_vmcs12()
1676 vmcs12->guest_interruptibility_info = in copy_enlightened_to_vmcs12()
1677 evmcs->guest_interruptibility_info; in copy_enlightened_to_vmcs12()
1680 * vmcs12->guest_ssp = evmcs->guest_ssp; in copy_enlightened_to_vmcs12()
1686 vmcs12->cpu_based_vm_exec_control = in copy_enlightened_to_vmcs12()
1687 evmcs->cpu_based_vm_exec_control; in copy_enlightened_to_vmcs12()
1692 vmcs12->exception_bitmap = evmcs->exception_bitmap; in copy_enlightened_to_vmcs12()
1697 vmcs12->vm_entry_controls = evmcs->vm_entry_controls; in copy_enlightened_to_vmcs12()
1702 vmcs12->vm_entry_intr_info_field = in copy_enlightened_to_vmcs12()
1703 evmcs->vm_entry_intr_info_field; in copy_enlightened_to_vmcs12()
1704 vmcs12->vm_entry_exception_error_code = in copy_enlightened_to_vmcs12()
1705 evmcs->vm_entry_exception_error_code; in copy_enlightened_to_vmcs12()
1706 vmcs12->vm_entry_instruction_len = in copy_enlightened_to_vmcs12()
1707 evmcs->vm_entry_instruction_len; in copy_enlightened_to_vmcs12()
1712 vmcs12->host_ia32_pat = evmcs->host_ia32_pat; in copy_enlightened_to_vmcs12()
1713 vmcs12->host_ia32_efer = evmcs->host_ia32_efer; in copy_enlightened_to_vmcs12()
1714 vmcs12->host_cr0 = evmcs->host_cr0; in copy_enlightened_to_vmcs12()
1715 vmcs12->host_cr3 = evmcs->host_cr3; in copy_enlightened_to_vmcs12()
1716 vmcs12->host_cr4 = evmcs->host_cr4; in copy_enlightened_to_vmcs12()
1717 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; in copy_enlightened_to_vmcs12()
1718 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; in copy_enlightened_to_vmcs12()
1719 vmcs12->host_rip = evmcs->host_rip; in copy_enlightened_to_vmcs12()
1720 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; in copy_enlightened_to_vmcs12()
1721 vmcs12->host_es_selector = evmcs->host_es_selector; in copy_enlightened_to_vmcs12()
1722 vmcs12->host_cs_selector = evmcs->host_cs_selector; in copy_enlightened_to_vmcs12()
1723 vmcs12->host_ss_selector = evmcs->host_ss_selector; in copy_enlightened_to_vmcs12()
1724 vmcs12->host_ds_selector = evmcs->host_ds_selector; in copy_enlightened_to_vmcs12()
1725 vmcs12->host_fs_selector = evmcs->host_fs_selector; in copy_enlightened_to_vmcs12()
1726 vmcs12->host_gs_selector = evmcs->host_gs_selector; in copy_enlightened_to_vmcs12()
1727 vmcs12->host_tr_selector = evmcs->host_tr_selector; in copy_enlightened_to_vmcs12()
1728 vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl; in copy_enlightened_to_vmcs12()
1731 * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet; in copy_enlightened_to_vmcs12()
1732 * vmcs12->host_ssp = evmcs->host_ssp; in copy_enlightened_to_vmcs12()
1733 * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr; in copy_enlightened_to_vmcs12()
1739 vmcs12->pin_based_vm_exec_control = in copy_enlightened_to_vmcs12()
1740 evmcs->pin_based_vm_exec_control; in copy_enlightened_to_vmcs12()
1741 vmcs12->vm_exit_controls = evmcs->vm_exit_controls; in copy_enlightened_to_vmcs12()
1742 vmcs12->secondary_vm_exec_control = in copy_enlightened_to_vmcs12()
1743 evmcs->secondary_vm_exec_control; in copy_enlightened_to_vmcs12()
1748 vmcs12->io_bitmap_a = evmcs->io_bitmap_a; in copy_enlightened_to_vmcs12()
1749 vmcs12->io_bitmap_b = evmcs->io_bitmap_b; in copy_enlightened_to_vmcs12()
1754 vmcs12->msr_bitmap = evmcs->msr_bitmap; in copy_enlightened_to_vmcs12()
1759 vmcs12->guest_es_base = evmcs->guest_es_base; in copy_enlightened_to_vmcs12()
1760 vmcs12->guest_cs_base = evmcs->guest_cs_base; in copy_enlightened_to_vmcs12()
1761 vmcs12->guest_ss_base = evmcs->guest_ss_base; in copy_enlightened_to_vmcs12()
1762 vmcs12->guest_ds_base = evmcs->guest_ds_base; in copy_enlightened_to_vmcs12()
1763 vmcs12->guest_fs_base = evmcs->guest_fs_base; in copy_enlightened_to_vmcs12()
1764 vmcs12->guest_gs_base = evmcs->guest_gs_base; in copy_enlightened_to_vmcs12()
1765 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; in copy_enlightened_to_vmcs12()
1766 vmcs12->guest_tr_base = evmcs->guest_tr_base; in copy_enlightened_to_vmcs12()
1767 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; in copy_enlightened_to_vmcs12()
1768 vmcs12->guest_idtr_base = evmcs->guest_idtr_base; in copy_enlightened_to_vmcs12()
1769 vmcs12->guest_es_limit = evmcs->guest_es_limit; in copy_enlightened_to_vmcs12()
1770 vmcs12->guest_cs_limit = evmcs->guest_cs_limit; in copy_enlightened_to_vmcs12()
1771 vmcs12->guest_ss_limit = evmcs->guest_ss_limit; in copy_enlightened_to_vmcs12()
1772 vmcs12->guest_ds_limit = evmcs->guest_ds_limit; in copy_enlightened_to_vmcs12()
1773 vmcs12->guest_fs_limit = evmcs->guest_fs_limit; in copy_enlightened_to_vmcs12()
1774 vmcs12->guest_gs_limit = evmcs->guest_gs_limit; in copy_enlightened_to_vmcs12()
1775 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; in copy_enlightened_to_vmcs12()
1776 vmcs12->guest_tr_limit = evmcs->guest_tr_limit; in copy_enlightened_to_vmcs12()
1777 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; in copy_enlightened_to_vmcs12()
1778 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; in copy_enlightened_to_vmcs12()
1779 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; in copy_enlightened_to_vmcs12()
1780 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; in copy_enlightened_to_vmcs12()
1781 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; in copy_enlightened_to_vmcs12()
1782 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; in copy_enlightened_to_vmcs12()
1783 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; in copy_enlightened_to_vmcs12()
1784 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; in copy_enlightened_to_vmcs12()
1785 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; in copy_enlightened_to_vmcs12()
1786 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; in copy_enlightened_to_vmcs12()
1787 vmcs12->guest_es_selector = evmcs->guest_es_selector; in copy_enlightened_to_vmcs12()
1788 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; in copy_enlightened_to_vmcs12()
1789 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; in copy_enlightened_to_vmcs12()
1790 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; in copy_enlightened_to_vmcs12()
1791 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; in copy_enlightened_to_vmcs12()
1792 vmcs12->guest_gs_selector = evmcs->guest_gs_selector; in copy_enlightened_to_vmcs12()
1793 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; in copy_enlightened_to_vmcs12()
1794 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; in copy_enlightened_to_vmcs12()
1799 vmcs12->tsc_offset = evmcs->tsc_offset; in copy_enlightened_to_vmcs12()
1800 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; in copy_enlightened_to_vmcs12()
1801 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; in copy_enlightened_to_vmcs12()
1802 vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap; in copy_enlightened_to_vmcs12()
1803 vmcs12->tsc_multiplier = evmcs->tsc_multiplier; in copy_enlightened_to_vmcs12()
1808 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; in copy_enlightened_to_vmcs12()
1809 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; in copy_enlightened_to_vmcs12()
1810 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; in copy_enlightened_to_vmcs12()
1811 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; in copy_enlightened_to_vmcs12()
1812 vmcs12->guest_cr0 = evmcs->guest_cr0; in copy_enlightened_to_vmcs12()
1813 vmcs12->guest_cr3 = evmcs->guest_cr3; in copy_enlightened_to_vmcs12()
1814 vmcs12->guest_cr4 = evmcs->guest_cr4; in copy_enlightened_to_vmcs12()
1815 vmcs12->guest_dr7 = evmcs->guest_dr7; in copy_enlightened_to_vmcs12()
1820 vmcs12->host_fs_base = evmcs->host_fs_base; in copy_enlightened_to_vmcs12()
1821 vmcs12->host_gs_base = evmcs->host_gs_base; in copy_enlightened_to_vmcs12()
1822 vmcs12->host_tr_base = evmcs->host_tr_base; in copy_enlightened_to_vmcs12()
1823 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; in copy_enlightened_to_vmcs12()
1824 vmcs12->host_idtr_base = evmcs->host_idtr_base; in copy_enlightened_to_vmcs12()
1825 vmcs12->host_rsp = evmcs->host_rsp; in copy_enlightened_to_vmcs12()
1830 vmcs12->ept_pointer = evmcs->ept_pointer; in copy_enlightened_to_vmcs12()
1831 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; in copy_enlightened_to_vmcs12()
1836 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; in copy_enlightened_to_vmcs12()
1837 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; in copy_enlightened_to_vmcs12()
1838 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; in copy_enlightened_to_vmcs12()
1839 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; in copy_enlightened_to_vmcs12()
1840 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; in copy_enlightened_to_vmcs12()
1841 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; in copy_enlightened_to_vmcs12()
1842 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; in copy_enlightened_to_vmcs12()
1843 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; in copy_enlightened_to_vmcs12()
1844 vmcs12->guest_pending_dbg_exceptions = in copy_enlightened_to_vmcs12()
1845 evmcs->guest_pending_dbg_exceptions; in copy_enlightened_to_vmcs12()
1846 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; in copy_enlightened_to_vmcs12()
1847 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; in copy_enlightened_to_vmcs12()
1848 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; in copy_enlightened_to_vmcs12()
1849 vmcs12->guest_activity_state = evmcs->guest_activity_state; in copy_enlightened_to_vmcs12()
1850 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; in copy_enlightened_to_vmcs12()
1851 vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl; in copy_enlightened_to_vmcs12()
1854 * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet; in copy_enlightened_to_vmcs12()
1855 * vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl; in copy_enlightened_to_vmcs12()
1856 * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr; in copy_enlightened_to_vmcs12()
1862 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; in copy_enlightened_to_vmcs12()
1863 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; in copy_enlightened_to_vmcs12()
1864 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; in copy_enlightened_to_vmcs12()
1865 * vmcs12->page_fault_error_code_mask = in copy_enlightened_to_vmcs12()
1866 * evmcs->page_fault_error_code_mask; in copy_enlightened_to_vmcs12()
1867 * vmcs12->page_fault_error_code_match = in copy_enlightened_to_vmcs12()
1868 * evmcs->page_fault_error_code_match; in copy_enlightened_to_vmcs12()
1869 * vmcs12->cr3_target_count = evmcs->cr3_target_count; in copy_enlightened_to_vmcs12()
1870 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; in copy_enlightened_to_vmcs12()
1871 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; in copy_enlightened_to_vmcs12()
1872 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; in copy_enlightened_to_vmcs12()
1877 * vmcs12->guest_physical_address = evmcs->guest_physical_address; in copy_enlightened_to_vmcs12()
1878 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; in copy_enlightened_to_vmcs12()
1879 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; in copy_enlightened_to_vmcs12()
1880 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; in copy_enlightened_to_vmcs12()
1881 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; in copy_enlightened_to_vmcs12()
1882 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; in copy_enlightened_to_vmcs12()
1883 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; in copy_enlightened_to_vmcs12()
1884 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; in copy_enlightened_to_vmcs12()
1885 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; in copy_enlightened_to_vmcs12()
1886 * vmcs12->exit_qualification = evmcs->exit_qualification; in copy_enlightened_to_vmcs12()
1887 * vmcs12->guest_linear_address = evmcs->guest_linear_address; in copy_enlightened_to_vmcs12()
1890 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; in copy_enlightened_to_vmcs12()
1891 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; in copy_enlightened_to_vmcs12()
1892 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; in copy_enlightened_to_vmcs12()
1893 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; in copy_enlightened_to_vmcs12()
1898 KVM_BUG_ON(1, vmx->vcpu.kvm); in copy_enlightened_to_vmcs12()
1905 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_vmcs12_to_enlightened()
1911 * evmcs->host_es_selector = vmcs12->host_es_selector; in copy_vmcs12_to_enlightened()
1912 * evmcs->host_cs_selector = vmcs12->host_cs_selector; in copy_vmcs12_to_enlightened()
1913 * evmcs->host_ss_selector = vmcs12->host_ss_selector; in copy_vmcs12_to_enlightened()
1914 * evmcs->host_ds_selector = vmcs12->host_ds_selector; in copy_vmcs12_to_enlightened()
1915 * evmcs->host_fs_selector = vmcs12->host_fs_selector; in copy_vmcs12_to_enlightened()
1916 * evmcs->host_gs_selector = vmcs12->host_gs_selector; in copy_vmcs12_to_enlightened()
1917 * evmcs->host_tr_selector = vmcs12->host_tr_selector; in copy_vmcs12_to_enlightened()
1918 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; in copy_vmcs12_to_enlightened()
1919 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; in copy_vmcs12_to_enlightened()
1920 * evmcs->host_cr0 = vmcs12->host_cr0; in copy_vmcs12_to_enlightened()
1921 * evmcs->host_cr3 = vmcs12->host_cr3; in copy_vmcs12_to_enlightened()
1922 * evmcs->host_cr4 = vmcs12->host_cr4; in copy_vmcs12_to_enlightened()
1923 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; in copy_vmcs12_to_enlightened()
1924 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; in copy_vmcs12_to_enlightened()
1925 * evmcs->host_rip = vmcs12->host_rip; in copy_vmcs12_to_enlightened()
1926 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; in copy_vmcs12_to_enlightened()
1927 * evmcs->host_fs_base = vmcs12->host_fs_base; in copy_vmcs12_to_enlightened()
1928 * evmcs->host_gs_base = vmcs12->host_gs_base; in copy_vmcs12_to_enlightened()
1929 * evmcs->host_tr_base = vmcs12->host_tr_base; in copy_vmcs12_to_enlightened()
1930 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; in copy_vmcs12_to_enlightened()
1931 * evmcs->host_idtr_base = vmcs12->host_idtr_base; in copy_vmcs12_to_enlightened()
1932 * evmcs->host_rsp = vmcs12->host_rsp; in copy_vmcs12_to_enlightened()
1934 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; in copy_vmcs12_to_enlightened()
1935 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; in copy_vmcs12_to_enlightened()
1936 * evmcs->msr_bitmap = vmcs12->msr_bitmap; in copy_vmcs12_to_enlightened()
1937 * evmcs->ept_pointer = vmcs12->ept_pointer; in copy_vmcs12_to_enlightened()
1938 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; in copy_vmcs12_to_enlightened()
1939 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; in copy_vmcs12_to_enlightened()
1940 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; in copy_vmcs12_to_enlightened()
1941 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; in copy_vmcs12_to_enlightened()
1942 * evmcs->tpr_threshold = vmcs12->tpr_threshold; in copy_vmcs12_to_enlightened()
1943 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; in copy_vmcs12_to_enlightened()
1944 * evmcs->exception_bitmap = vmcs12->exception_bitmap; in copy_vmcs12_to_enlightened()
1945 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; in copy_vmcs12_to_enlightened()
1946 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; in copy_vmcs12_to_enlightened()
1947 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; in copy_vmcs12_to_enlightened()
1948 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; in copy_vmcs12_to_enlightened()
1949 * evmcs->page_fault_error_code_mask = in copy_vmcs12_to_enlightened()
1950 * vmcs12->page_fault_error_code_mask; in copy_vmcs12_to_enlightened()
1951 * evmcs->page_fault_error_code_match = in copy_vmcs12_to_enlightened()
1952 * vmcs12->page_fault_error_code_match; in copy_vmcs12_to_enlightened()
1953 * evmcs->cr3_target_count = vmcs12->cr3_target_count; in copy_vmcs12_to_enlightened()
1954 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; in copy_vmcs12_to_enlightened()
1955 * evmcs->tsc_offset = vmcs12->tsc_offset; in copy_vmcs12_to_enlightened()
1956 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; in copy_vmcs12_to_enlightened()
1957 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; in copy_vmcs12_to_enlightened()
1958 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; in copy_vmcs12_to_enlightened()
1959 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; in copy_vmcs12_to_enlightened()
1960 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; in copy_vmcs12_to_enlightened()
1961 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; in copy_vmcs12_to_enlightened()
1962 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; in copy_vmcs12_to_enlightened()
1963 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; in copy_vmcs12_to_enlightened()
1964 * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl; in copy_vmcs12_to_enlightened()
1965 * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl; in copy_vmcs12_to_enlightened()
1966 * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap; in copy_vmcs12_to_enlightened()
1967 * evmcs->tsc_multiplier = vmcs12->tsc_multiplier; in copy_vmcs12_to_enlightened()
1970 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; in copy_vmcs12_to_enlightened()
1971 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; in copy_vmcs12_to_enlightened()
1972 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; in copy_vmcs12_to_enlightened()
1973 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; in copy_vmcs12_to_enlightened()
1974 * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet; in copy_vmcs12_to_enlightened()
1975 * evmcs->host_ssp = vmcs12->host_ssp; in copy_vmcs12_to_enlightened()
1976 * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr; in copy_vmcs12_to_enlightened()
1977 * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet; in copy_vmcs12_to_enlightened()
1978 * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl; in copy_vmcs12_to_enlightened()
1979 * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr; in copy_vmcs12_to_enlightened()
1980 * evmcs->guest_ssp = vmcs12->guest_ssp; in copy_vmcs12_to_enlightened()
1983 evmcs->guest_es_selector = vmcs12->guest_es_selector; in copy_vmcs12_to_enlightened()
1984 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; in copy_vmcs12_to_enlightened()
1985 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; in copy_vmcs12_to_enlightened()
1986 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; in copy_vmcs12_to_enlightened()
1987 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; in copy_vmcs12_to_enlightened()
1988 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; in copy_vmcs12_to_enlightened()
1989 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; in copy_vmcs12_to_enlightened()
1990 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; in copy_vmcs12_to_enlightened()
1992 evmcs->guest_es_limit = vmcs12->guest_es_limit; in copy_vmcs12_to_enlightened()
1993 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; in copy_vmcs12_to_enlightened()
1994 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; in copy_vmcs12_to_enlightened()
1995 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; in copy_vmcs12_to_enlightened()
1996 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; in copy_vmcs12_to_enlightened()
1997 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; in copy_vmcs12_to_enlightened()
1998 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; in copy_vmcs12_to_enlightened()
1999 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; in copy_vmcs12_to_enlightened()
2000 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; in copy_vmcs12_to_enlightened()
2001 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; in copy_vmcs12_to_enlightened()
2003 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; in copy_vmcs12_to_enlightened()
2004 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; in copy_vmcs12_to_enlightened()
2005 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; in copy_vmcs12_to_enlightened()
2006 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; in copy_vmcs12_to_enlightened()
2007 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; in copy_vmcs12_to_enlightened()
2008 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; in copy_vmcs12_to_enlightened()
2009 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; in copy_vmcs12_to_enlightened()
2010 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; in copy_vmcs12_to_enlightened()
2012 evmcs->guest_es_base = vmcs12->guest_es_base; in copy_vmcs12_to_enlightened()
2013 evmcs->guest_cs_base = vmcs12->guest_cs_base; in copy_vmcs12_to_enlightened()
2014 evmcs->guest_ss_base = vmcs12->guest_ss_base; in copy_vmcs12_to_enlightened()
2015 evmcs->guest_ds_base = vmcs12->guest_ds_base; in copy_vmcs12_to_enlightened()
2016 evmcs->guest_fs_base = vmcs12->guest_fs_base; in copy_vmcs12_to_enlightened()
2017 evmcs->guest_gs_base = vmcs12->guest_gs_base; in copy_vmcs12_to_enlightened()
2018 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; in copy_vmcs12_to_enlightened()
2019 evmcs->guest_tr_base = vmcs12->guest_tr_base; in copy_vmcs12_to_enlightened()
2020 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; in copy_vmcs12_to_enlightened()
2021 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; in copy_vmcs12_to_enlightened()
2023 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; in copy_vmcs12_to_enlightened()
2024 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; in copy_vmcs12_to_enlightened()
2026 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; in copy_vmcs12_to_enlightened()
2027 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; in copy_vmcs12_to_enlightened()
2028 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; in copy_vmcs12_to_enlightened()
2029 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; in copy_vmcs12_to_enlightened()
2031 evmcs->guest_pending_dbg_exceptions = in copy_vmcs12_to_enlightened()
2032 vmcs12->guest_pending_dbg_exceptions; in copy_vmcs12_to_enlightened()
2033 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; in copy_vmcs12_to_enlightened()
2034 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; in copy_vmcs12_to_enlightened()
2036 evmcs->guest_activity_state = vmcs12->guest_activity_state; in copy_vmcs12_to_enlightened()
2037 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; in copy_vmcs12_to_enlightened()
2039 evmcs->guest_cr0 = vmcs12->guest_cr0; in copy_vmcs12_to_enlightened()
2040 evmcs->guest_cr3 = vmcs12->guest_cr3; in copy_vmcs12_to_enlightened()
2041 evmcs->guest_cr4 = vmcs12->guest_cr4; in copy_vmcs12_to_enlightened()
2042 evmcs->guest_dr7 = vmcs12->guest_dr7; in copy_vmcs12_to_enlightened()
2044 evmcs->guest_physical_address = vmcs12->guest_physical_address; in copy_vmcs12_to_enlightened()
2046 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; in copy_vmcs12_to_enlightened()
2047 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; in copy_vmcs12_to_enlightened()
2048 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; in copy_vmcs12_to_enlightened()
2049 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; in copy_vmcs12_to_enlightened()
2050 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; in copy_vmcs12_to_enlightened()
2051 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; in copy_vmcs12_to_enlightened()
2052 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; in copy_vmcs12_to_enlightened()
2053 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; in copy_vmcs12_to_enlightened()
2055 evmcs->exit_qualification = vmcs12->exit_qualification; in copy_vmcs12_to_enlightened()
2057 evmcs->guest_linear_address = vmcs12->guest_linear_address; in copy_vmcs12_to_enlightened()
2058 evmcs->guest_rsp = vmcs12->guest_rsp; in copy_vmcs12_to_enlightened()
2059 evmcs->guest_rflags = vmcs12->guest_rflags; in copy_vmcs12_to_enlightened()
2061 evmcs->guest_interruptibility_info = in copy_vmcs12_to_enlightened()
2062 vmcs12->guest_interruptibility_info; in copy_vmcs12_to_enlightened()
2063 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; in copy_vmcs12_to_enlightened()
2064 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; in copy_vmcs12_to_enlightened()
2065 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; in copy_vmcs12_to_enlightened()
2066 evmcs->vm_entry_exception_error_code = in copy_vmcs12_to_enlightened()
2067 vmcs12->vm_entry_exception_error_code; in copy_vmcs12_to_enlightened()
2068 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; in copy_vmcs12_to_enlightened()
2070 evmcs->guest_rip = vmcs12->guest_rip; in copy_vmcs12_to_enlightened()
2072 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; in copy_vmcs12_to_enlightened()
2076 KVM_BUG_ON(1, vmx->vcpu.kvm); in copy_vmcs12_to_enlightened()
2101 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_handle_enlightened_vmptrld()
2102 vmx->nested.current_vmptr = INVALID_GPA; in nested_vmx_handle_enlightened_vmptrld()
2107 &vmx->nested.hv_evmcs_map)) in nested_vmx_handle_enlightened_vmptrld()
2110 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; in nested_vmx_handle_enlightened_vmptrld()
2123 * However, it turns out that Microsoft Hyper-V fails to comply in nested_vmx_handle_enlightened_vmptrld()
2124 * with its own invented interface: when Hyper-V uses eVMCS, it in nested_vmx_handle_enlightened_vmptrld()
2130 * To work around this Hyper-V bug, we accept here either a supported in nested_vmx_handle_enlightened_vmptrld()
2134 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && in nested_vmx_handle_enlightened_vmptrld()
2135 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { in nested_vmx_handle_enlightened_vmptrld()
2140 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; in nested_vmx_handle_enlightened_vmptrld()
2152 vmcs12->hdr.revision_id = VMCS12_REVISION; in nested_vmx_handle_enlightened_vmptrld()
2162 vmx->nested.hv_evmcs->hv_clean_fields &= in nested_vmx_handle_enlightened_vmptrld()
2165 vmx->nested.force_msr_bitmap_recalc = true; in nested_vmx_handle_enlightened_vmptrld()
2183 vmx->nested.need_vmcs12_to_shadow_sync = false; in nested_sync_vmcs12_to_shadow()
2191 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
2192 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
2193 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
2206 if (!vmx->nested.has_preemption_timer_deadline) { in vmx_calc_preemption_timer_value()
2207 vmx->nested.preemption_timer_deadline = in vmx_calc_preemption_timer_value()
2208 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2209 vmx->nested.has_preemption_timer_deadline = true; in vmx_calc_preemption_timer_value()
2211 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; in vmx_calc_preemption_timer_value()
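A stand-alone sketch of the deadline bookkeeping shown above: the first nested run fixes the deadline at the vmcs12 timer value plus L1's current scaled TSC, and later runs return the remaining budget relative to that saved deadline. Names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ptimer {
	bool has_deadline;
	uint64_t deadline;
};

static uint64_t calc_preemption_timer_value(struct ptimer *t, uint64_t timer_value,
					    uint64_t l1_scaled_tsc)
{
	if (!t->has_deadline) {
		t->deadline = timer_value + l1_scaled_tsc;
		t->has_deadline = true;
	}
	return t->deadline - l1_scaled_tsc;	/* remaining budget */
}

int main(void)
{
	struct ptimer t = { 0 };

	printf("%llu\n", (unsigned long long)calc_preemption_timer_value(&t, 1000, 5000)); /* 1000 */
	printf("%llu\n", (unsigned long long)calc_preemption_timer_value(&t, 1000, 5600)); /* 400 */
	return 0;
}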
2224 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
2228 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
2233 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); in vmx_start_preemption_timer()
2234 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
2241 if (vmx->nested.nested_run_pending && in nested_vmx_calc_efer()
2242 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) in nested_vmx_calc_efer()
2243 return vmcs12->guest_ia32_efer; in nested_vmx_calc_efer()
2244 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) in nested_vmx_calc_efer()
2245 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2247 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
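A simplified, stand-alone sketch of the EFER selection shown above (it drops the nested_run_pending qualifier): if VM-Entry loads IA32_EFER, use vmcs12's guest EFER; otherwise force LMA/LME on or off according to the "IA-32e mode guest" entry control. The EFER bit positions are architectural; the helper name is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)

static uint64_t calc_l2_efer(bool load_efer, bool ia32e_mode,
			     uint64_t vmcs12_guest_efer, uint64_t l1_efer)
{
	if (load_efer)
		return vmcs12_guest_efer;
	if (ia32e_mode)
		return l1_efer | (EFER_LMA | EFER_LME);
	return l1_efer & ~(EFER_LMA | EFER_LME);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)calc_l2_efer(false, true, 0, 0x1));    /* 0x501 */
	printf("%#llx\n", (unsigned long long)calc_l2_efer(false, false, 0, 0xd01)); /* 0x801 */
	return 0;
}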
2252 struct kvm *kvm = vmx->vcpu.kvm; in prepare_vmcs02_constant_state()
2260 if (vmx->nested.vmcs02_initialized) in prepare_vmcs02_constant_state()
2262 vmx->nested.vmcs02_initialized = true; in prepare_vmcs02_constant_state()
2271 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2273 if (vmx->ve_info) in prepare_vmcs02_constant_state()
2274 vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info)); in prepare_vmcs02_constant_state()
2284 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_constant_state()
2287 * PML is emulated for L2, but never enabled in hardware as the MMU in prepare_vmcs02_constant_state()
2293 vmcs_write16(GUEST_PML_INDEX, -1); in prepare_vmcs02_constant_state()
2300 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window); in prepare_vmcs02_constant_state()
2307 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); in prepare_vmcs02_constant_state()
2308 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in prepare_vmcs02_constant_state()
2309 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in prepare_vmcs02_constant_state()
2324 * if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter in prepare_vmcs02_early_rare()
2325 * and VM-Exit are architecturally required to flush VPID=0, but *only* in prepare_vmcs02_early_rare()
2327 * required flushes), but doing so would cause KVM to over-flush. E.g. in prepare_vmcs02_early_rare()
2333 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_early_rare()
2334 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_early_rare()
2336 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02_early_rare()
2346 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) in prepare_vmcs02_early()
2353 exec_control |= (vmcs12->pin_based_vm_exec_control & in prepare_vmcs02_early()
2357 vmx->nested.pi_pending = false; in prepare_vmcs02_early()
2359 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02_early()
2361 vmx->nested.posted_intr_nv = -1; in prepare_vmcs02_early()
2373 exec_control |= vmcs12->cpu_based_vm_exec_control; in prepare_vmcs02_early()
2375 vmx->nested.l1_tpr_threshold = -1; in prepare_vmcs02_early()
2377 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); in prepare_vmcs02_early()
2422 exec_control |= vmcs12->secondary_vm_exec_control; in prepare_vmcs02_early()
2424 /* PML is emulated and never enabled in hardware for L2. */ in prepare_vmcs02_early()
2434 if (vmx_umip_emulated() && (vmcs12->guest_cr4 & X86_CR4_UMIP)) in prepare_vmcs02_early()
2439 vmcs12->guest_intr_status); in prepare_vmcs02_early()
2445 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); in prepare_vmcs02_early()
2463 exec_control |= (vmcs12->vm_entry_controls & in prepare_vmcs02_early()
2477 * L2->L1 exit controls are emulated - the hardware exit is to L0 so in prepare_vmcs02_early()
2491 if (vmx->nested.nested_run_pending) { in prepare_vmcs02_early()
2493 vmcs12->vm_entry_intr_info_field); in prepare_vmcs02_early()
2495 vmcs12->vm_entry_exception_error_code); in prepare_vmcs02_early()
2497 vmcs12->vm_entry_instruction_len); in prepare_vmcs02_early()
2499 vmcs12->guest_interruptibility_info); in prepare_vmcs02_early()
2500 vmx->loaded_vmcs->nmi_known_unmasked = in prepare_vmcs02_early()
2501 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); in prepare_vmcs02_early()
2511 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & in prepare_vmcs02_rare()
2514 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); in prepare_vmcs02_rare()
2515 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); in prepare_vmcs02_rare()
2516 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); in prepare_vmcs02_rare()
2517 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); in prepare_vmcs02_rare()
2518 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); in prepare_vmcs02_rare()
2519 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); in prepare_vmcs02_rare()
2520 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); in prepare_vmcs02_rare()
2521 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); in prepare_vmcs02_rare()
2522 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); in prepare_vmcs02_rare()
2523 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); in prepare_vmcs02_rare()
2524 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); in prepare_vmcs02_rare()
2525 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); in prepare_vmcs02_rare()
2526 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); in prepare_vmcs02_rare()
2527 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); in prepare_vmcs02_rare()
2528 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); in prepare_vmcs02_rare()
2529 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); in prepare_vmcs02_rare()
2530 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); in prepare_vmcs02_rare()
2531 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); in prepare_vmcs02_rare()
2532 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); in prepare_vmcs02_rare()
2533 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); in prepare_vmcs02_rare()
2534 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); in prepare_vmcs02_rare()
2535 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); in prepare_vmcs02_rare()
2536 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); in prepare_vmcs02_rare()
2537 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); in prepare_vmcs02_rare()
2538 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); in prepare_vmcs02_rare()
2539 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); in prepare_vmcs02_rare()
2540 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); in prepare_vmcs02_rare()
2541 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); in prepare_vmcs02_rare()
2542 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); in prepare_vmcs02_rare()
2543 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); in prepare_vmcs02_rare()
2544 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); in prepare_vmcs02_rare()
2545 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); in prepare_vmcs02_rare()
2546 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); in prepare_vmcs02_rare()
2547 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); in prepare_vmcs02_rare()
2548 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); in prepare_vmcs02_rare()
2549 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); in prepare_vmcs02_rare()
2554 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & in prepare_vmcs02_rare()
2556 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); in prepare_vmcs02_rare()
2558 vmcs12->guest_pending_dbg_exceptions); in prepare_vmcs02_rare()
2559 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); in prepare_vmcs02_rare()
2560 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); in prepare_vmcs02_rare()
2567 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); in prepare_vmcs02_rare()
2568 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); in prepare_vmcs02_rare()
2569 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); in prepare_vmcs02_rare()
2570 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); in prepare_vmcs02_rare()
2573 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && in prepare_vmcs02_rare()
2574 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) in prepare_vmcs02_rare()
2575 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); in prepare_vmcs02_rare()
2579 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); in prepare_vmcs02_rare()
2582 * Whether page-faults are trapped is determined by a combination of in prepare_vmcs02_rare()
2586 * is not easy (if at all possible?) to merge L0 and L1's desires, we in prepare_vmcs02_rare()
2594 if (vmx_need_pf_intercept(&vmx->vcpu)) { in prepare_vmcs02_rare()
2602 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask); in prepare_vmcs02_rare()
2603 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match); in prepare_vmcs02_rare()
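/*
 * Illustrative sketch, not in-tree code: the SDM rule that decides whether a
 * guest #PF is intercepted.  A #PF causes a VM-Exit iff the exception-bitmap
 * PF bit equals the mask/match predicate, which is why programming the mask
 * and match to 0 while setting the PF bit intercepts every page fault.  The
 * helper name is hypothetical.
 */
static bool pf_would_cause_vmexit(u32 exception_bitmap, u32 error_code,
				  u32 pfec_mask, u32 pfec_match)
{
	bool eb_pf = exception_bitmap & (1u << PF_VECTOR);
	bool matches = (error_code & pfec_mask) == pfec_match;

	return eb_pf == matches;
}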
2607 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); in prepare_vmcs02_rare()
2608 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); in prepare_vmcs02_rare()
2609 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); in prepare_vmcs02_rare()
2610 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); in prepare_vmcs02_rare()
2617 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); in prepare_vmcs02_rare()
2619 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); in prepare_vmcs02_rare()
2620 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in prepare_vmcs02_rare()
2621 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in prepare_vmcs02_rare()
2632 * function also has additional necessary side-effects, like setting various
2633 * vcpu->arch fields.
2645 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) { in prepare_vmcs02()
2647 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
2650 !(evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); in prepare_vmcs02()
2653 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2654 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { in prepare_vmcs02()
2655 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); in prepare_vmcs02()
2656 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); in prepare_vmcs02()
2658 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); in prepare_vmcs02()
2659 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl); in prepare_vmcs02()
2661 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2662 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) in prepare_vmcs02()
2663 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs); in prepare_vmcs02()
2664 vmx_set_rflags(vcpu, vmcs12->guest_rflags); in prepare_vmcs02()
2667 * bitwise-or of what L1 wants to trap for L2, and what we want to in prepare_vmcs02()
2668 * trap. Note that CR0.TS also needs updating - we do this later. in prepare_vmcs02()
2671 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; in prepare_vmcs02()
2672 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in prepare_vmcs02()
2674 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2675 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { in prepare_vmcs02()
2676 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); in prepare_vmcs02()
2677 vcpu->arch.pat = vmcs12->guest_ia32_pat; in prepare_vmcs02()
2679 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
2682 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in prepare_vmcs02()
2683 vcpu->arch.l1_tsc_offset, in prepare_vmcs02()
2687 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in prepare_vmcs02()
2688 vcpu->arch.l1_tsc_scaling_ratio, in prepare_vmcs02()
2691 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); in prepare_vmcs02()
2693 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); in prepare_vmcs02()
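/*
 * Minimal sketch of the composition performed by kvm_calc_nested_tsc_offset()
 * and kvm_calc_nested_tsc_multiplier(), assuming Intel's 48 fractional bits
 * for the multiplier and ignoring the signed handling of the offset: with
 * L1_TSC = scale(host_TSC, r1) + o1 and L2_TSC = scale(L1_TSC, r2) + o2, the
 * combined parameters programmed for L2 are r1*r2 and scale(o1, r2) + o2.
 * The macro and helper names below are hypothetical stand-ins.
 */
#define TSC_RATIO_FRAC_BITS	48	/* stand-in for the kvm_caps value */

static u64 sketch_nested_tsc_multiplier(u64 r1, u64 r2)
{
	return (u64)(((unsigned __int128)r1 * r2) >> TSC_RATIO_FRAC_BITS);
}

static u64 sketch_nested_tsc_offset(u64 o1, u64 o2, u64 r2)
{
	return (u64)(((unsigned __int128)o1 * r2) >> TSC_RATIO_FRAC_BITS) + o2;
}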
2705 vmx_set_cr0(vcpu, vmcs12->guest_cr0); in prepare_vmcs02()
2708 vmx_set_cr4(vcpu, vmcs12->guest_cr4); in prepare_vmcs02()
2711 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02()
2713 vmx_set_efer(vcpu, vcpu->arch.efer); in prepare_vmcs02()
2727 return -EINVAL; in prepare_vmcs02()
2731 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), in prepare_vmcs02()
2733 return -EINVAL; in prepare_vmcs02()
2737 * on nested VM-Exit, which can occur without actually running L2 and in prepare_vmcs02()
2743 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); in prepare_vmcs02()
2748 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); in prepare_vmcs02()
2749 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); in prepare_vmcs02()
2750 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); in prepare_vmcs02()
2751 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); in prepare_vmcs02()
2754 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && in prepare_vmcs02()
2757 vmcs12->guest_ia32_perf_global_ctrl))) { in prepare_vmcs02()
2759 return -EINVAL; in prepare_vmcs02()
2762 kvm_rsp_write(vcpu, vmcs12->guest_rsp); in prepare_vmcs02()
2763 kvm_rip_write(vcpu, vmcs12->guest_rip); in prepare_vmcs02()
2766 * It was observed that genuine Hyper-V running in L1 doesn't reset in prepare_vmcs02()
2772 evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; in prepare_vmcs02()
2781 return -EINVAL; in nested_vmx_check_nmi_controls()
2785 return -EINVAL; in nested_vmx_check_nmi_controls()
2797 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) in nested_vmx_check_eptp()
2801 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) in nested_vmx_check_eptp()
2808 /* Page-walk levels validity. */ in nested_vmx_check_eptp()
2811 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) in nested_vmx_check_eptp()
2815 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) in nested_vmx_check_eptp()
2828 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) in nested_vmx_check_eptp()
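/*
 * Illustrative decode of the EPTP fields validated above (layout per the SDM;
 * the struct and helper are hypothetical, not kernel code).
 */
struct eptp_fields {
	u8  memtype;		/* bits 2:0, 0 = UC, 6 = WB            */
	u8  walk_levels;	/* bits 5:3 hold (page-walk levels - 1) */
	bool ad_enabled;	/* bit 6, accessed/dirty flag enable    */
	u64 root_pfn;		/* bits 51:12, root table page frame    */
};

static struct eptp_fields decode_eptp(u64 eptp)
{
	struct eptp_fields f = {
		.memtype     = eptp & 0x7,
		.walk_levels = ((eptp >> 3) & 0x7) + 1,
		.ad_enabled  = eptp & (1ull << 6),
		.root_pfn    = (eptp & GENMASK_ULL(51, 12)) >> 12,
	};

	return f;
}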
2836 * Checks related to VM-Execution Control Fields
2843 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, in nested_check_vm_execution_controls()
2844 vmx->nested.msrs.pinbased_ctls_low, in nested_check_vm_execution_controls()
2845 vmx->nested.msrs.pinbased_ctls_high)) || in nested_check_vm_execution_controls()
2846 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, in nested_check_vm_execution_controls()
2847 vmx->nested.msrs.procbased_ctls_low, in nested_check_vm_execution_controls()
2848 vmx->nested.msrs.procbased_ctls_high))) in nested_check_vm_execution_controls()
2849 return -EINVAL; in nested_check_vm_execution_controls()
2852 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, in nested_check_vm_execution_controls()
2853 vmx->nested.msrs.secondary_ctls_low, in nested_check_vm_execution_controls()
2854 vmx->nested.msrs.secondary_ctls_high))) in nested_check_vm_execution_controls()
2855 return -EINVAL; in nested_check_vm_execution_controls()
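/*
 * Sketch of the allowed-0/allowed-1 check behind vmx_control_verify(),
 * inferred from the VMX capability MSR semantics and simplified here: every
 * bit required by the low (allowed-0) word must be set, and no bit outside
 * the high (allowed-1) word may be set.  The helper name is illustrative.
 */
static bool control_field_is_valid(u32 control, u32 low, u32 high)
{
	return (control & low) == low && (control & ~high) == 0;
}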
2857 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || in nested_check_vm_execution_controls()
2868 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) in nested_check_vm_execution_controls()
2869 return -EINVAL; in nested_check_vm_execution_controls()
2873 return -EINVAL; in nested_check_vm_execution_controls()
2876 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) in nested_check_vm_execution_controls()
2877 return -EINVAL; in nested_check_vm_execution_controls()
2880 if (CC(vmcs12->vm_function_control & in nested_check_vm_execution_controls()
2881 ~vmx->nested.msrs.vmfunc_controls)) in nested_check_vm_execution_controls()
2882 return -EINVAL; in nested_check_vm_execution_controls()
2886 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) in nested_check_vm_execution_controls()
2887 return -EINVAL; in nested_check_vm_execution_controls()
2895 * Checks related to VM-Exit Control Fields
2902 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, in nested_check_vm_exit_controls()
2903 vmx->nested.msrs.exit_ctls_low, in nested_check_vm_exit_controls()
2904 vmx->nested.msrs.exit_ctls_high)) || in nested_check_vm_exit_controls()
2906 return -EINVAL; in nested_check_vm_exit_controls()
2912 * Checks related to VM-Entry Control Fields
2919 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, in nested_check_vm_entry_controls()
2920 vmx->nested.msrs.entry_ctls_low, in nested_check_vm_entry_controls()
2921 vmx->nested.msrs.entry_ctls_high))) in nested_check_vm_entry_controls()
2922 return -EINVAL; in nested_check_vm_entry_controls()
2926 * Fields relevant to VM-entry event injection must be set properly. in nested_check_vm_entry_controls()
2927 * These fields are the VM-entry interruption-information field, the in nested_check_vm_entry_controls()
2928 * VM-entry exception error code, and the VM-entry instruction length. in nested_check_vm_entry_controls()
2930 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { in nested_check_vm_entry_controls()
2931 u32 intr_info = vmcs12->vm_entry_intr_info_field; in nested_check_vm_entry_controls()
2938 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; in nested_check_vm_entry_controls()
2940 /* VM-entry interruption-info field: interruption type */ in nested_check_vm_entry_controls()
2944 return -EINVAL; in nested_check_vm_entry_controls()
2946 /* VM-entry interruption-info field: vector */ in nested_check_vm_entry_controls()
2950 return -EINVAL; in nested_check_vm_entry_controls()
2952 /* VM-entry interruption-info field: deliver error code */ in nested_check_vm_entry_controls()
2957 return -EINVAL; in nested_check_vm_entry_controls()
2959 /* VM-entry exception error code */ in nested_check_vm_entry_controls()
2961 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) in nested_check_vm_entry_controls()
2962 return -EINVAL; in nested_check_vm_entry_controls()
2964 /* VM-entry interruption-info field: reserved bits */ in nested_check_vm_entry_controls()
2966 return -EINVAL; in nested_check_vm_entry_controls()
2968 /* VM-entry instruction length */ in nested_check_vm_entry_controls()
2973 if (CC(vmcs12->vm_entry_instruction_len > X86_MAX_INSTRUCTION_LENGTH) || in nested_check_vm_entry_controls()
2974 CC(vmcs12->vm_entry_instruction_len == 0 && in nested_check_vm_entry_controls()
2976 return -EINVAL; in nested_check_vm_entry_controls()
2981 return -EINVAL; in nested_check_vm_entry_controls()
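/*
 * Illustrative breakdown of the VM-entry interruption-information field
 * checked above (layout per the SDM; the macro names are hypothetical).
 */
#define EVT_VECTOR(info)		((info) & 0xff)		/* bits 7:0            */
#define EVT_TYPE(info)			(((info) >> 8) & 0x7)	/* bits 10:8           */
#define EVT_DELIVER_ERROR_CODE(info)	(((info) >> 11) & 0x1)	/* bit 11              */
#define EVT_RESERVED(info)		((info) & 0x7ffff000)	/* bits 30:12, must be 0 */
#define EVT_VALID(info)			(((info) >> 31) & 0x1)	/* bit 31              */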
2992 return -EINVAL; in nested_vmx_check_controls()
3006 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != in nested_vmx_check_address_space_size()
3007 !!(vcpu->arch.efer & EFER_LMA))) in nested_vmx_check_address_space_size()
3008 return -EINVAL; in nested_vmx_check_address_space_size()
3019 u8 l1_address_bits_on_exit = (vmcs12->host_cr4 & X86_CR4_LA57) ? 57 : 48; in is_l1_noncanonical_address_on_vmexit()
3027 bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); in nested_vmx_check_host_state()
3029 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || in nested_vmx_check_host_state()
3030 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || in nested_vmx_check_host_state()
3031 CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3))) in nested_vmx_check_host_state()
3032 return -EINVAL; in nested_vmx_check_host_state()
3034 if (CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || in nested_vmx_check_host_state()
3035 CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_eip, vcpu))) in nested_vmx_check_host_state()
3036 return -EINVAL; in nested_vmx_check_host_state()
3038 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && in nested_vmx_check_host_state()
3039 CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) in nested_vmx_check_host_state()
3040 return -EINVAL; in nested_vmx_check_host_state()
3042 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && in nested_vmx_check_host_state()
3044 vmcs12->host_ia32_perf_global_ctrl))) in nested_vmx_check_host_state()
3045 return -EINVAL; in nested_vmx_check_host_state()
3048 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) in nested_vmx_check_host_state()
3049 return -EINVAL; in nested_vmx_check_host_state()
3051 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || in nested_vmx_check_host_state()
3052 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || in nested_vmx_check_host_state()
3053 CC((vmcs12->host_rip) >> 32)) in nested_vmx_check_host_state()
3054 return -EINVAL; in nested_vmx_check_host_state()
3057 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3058 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3059 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3060 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3061 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3062 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3063 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || in nested_vmx_check_host_state()
3064 CC(vmcs12->host_cs_selector == 0) || in nested_vmx_check_host_state()
3065 CC(vmcs12->host_tr_selector == 0) || in nested_vmx_check_host_state()
3066 CC(vmcs12->host_ss_selector == 0 && !ia32e)) in nested_vmx_check_host_state()
3067 return -EINVAL; in nested_vmx_check_host_state()
3069 if (CC(is_noncanonical_base_address(vmcs12->host_fs_base, vcpu)) || in nested_vmx_check_host_state()
3070 CC(is_noncanonical_base_address(vmcs12->host_gs_base, vcpu)) || in nested_vmx_check_host_state()
3071 CC(is_noncanonical_base_address(vmcs12->host_gdtr_base, vcpu)) || in nested_vmx_check_host_state()
3072 CC(is_noncanonical_base_address(vmcs12->host_idtr_base, vcpu)) || in nested_vmx_check_host_state()
3073 CC(is_noncanonical_base_address(vmcs12->host_tr_base, vcpu)) || in nested_vmx_check_host_state()
3074 CC(is_l1_noncanonical_address_on_vmexit(vmcs12->host_rip, vmcs12))) in nested_vmx_check_host_state()
3075 return -EINVAL; in nested_vmx_check_host_state()
3078 * If the load IA32_EFER VM-exit control is 1, bits reserved in the in nested_vmx_check_host_state()
3081 * the host address-space size VM-exit control. in nested_vmx_check_host_state()
3083 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { in nested_vmx_check_host_state()
3084 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || in nested_vmx_check_host_state()
3085 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || in nested_vmx_check_host_state()
3086 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) in nested_vmx_check_host_state()
3087 return -EINVAL; in nested_vmx_check_host_state()
3097 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_vmx_check_vmcs_link_ptr()
3100 if (vmcs12->vmcs_link_pointer == INVALID_GPA) in nested_vmx_check_vmcs_link_ptr()
3103 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) in nested_vmx_check_vmcs_link_ptr()
3104 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
3106 if (ghc->gpa != vmcs12->vmcs_link_pointer && in nested_vmx_check_vmcs_link_ptr()
3107 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, in nested_vmx_check_vmcs_link_ptr()
3108 vmcs12->vmcs_link_pointer, VMCS12_SIZE))) in nested_vmx_check_vmcs_link_ptr()
3109 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
3111 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, in nested_vmx_check_vmcs_link_ptr()
3114 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
3118 return -EINVAL; in nested_vmx_check_vmcs_link_ptr()
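/*
 * Sketch of the header validation that follows the cached read above: the
 * linked VMCS must carry vmcs12's revision id, and its shadow-VMCS indicator
 * must agree with whether vmcs12 enables VMCS shadowing.  The helper is
 * illustrative, not the in-tree code.
 */
static bool shadow_vmcs_hdr_is_valid(struct vmcs_hdr hdr, bool l1_uses_shadowing)
{
	return hdr.revision_id == VMCS12_REVISION &&
	       hdr.shadow_vmcs == l1_uses_shadowing;
}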
3124 * Checks related to Guest Non-register State
3128 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && in nested_check_guest_non_reg_state()
3129 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT && in nested_check_guest_non_reg_state()
3130 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI)) in nested_check_guest_non_reg_state()
3131 return -EINVAL; in nested_check_guest_non_reg_state()
3140 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); in nested_vmx_check_guest_state()
3144 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || in nested_vmx_check_guest_state()
3145 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) in nested_vmx_check_guest_state()
3146 return -EINVAL; in nested_vmx_check_guest_state()
3148 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && in nested_vmx_check_guest_state()
3149 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) in nested_vmx_check_guest_state()
3150 return -EINVAL; in nested_vmx_check_guest_state()
3152 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && in nested_vmx_check_guest_state()
3153 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) in nested_vmx_check_guest_state()
3154 return -EINVAL; in nested_vmx_check_guest_state()
3158 return -EINVAL; in nested_vmx_check_guest_state()
3161 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && in nested_vmx_check_guest_state()
3163 vmcs12->guest_ia32_perf_global_ctrl))) in nested_vmx_check_guest_state()
3164 return -EINVAL; in nested_vmx_check_guest_state()
3166 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) in nested_vmx_check_guest_state()
3167 return -EINVAL; in nested_vmx_check_guest_state()
3169 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || in nested_vmx_check_guest_state()
3170 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) in nested_vmx_check_guest_state()
3171 return -EINVAL; in nested_vmx_check_guest_state()
3174 * If the load IA32_EFER VM-entry control is 1, the following checks in nested_vmx_check_guest_state()
3176 * - Bits reserved in the IA32_EFER MSR must be 0. in nested_vmx_check_guest_state()
3177 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of in nested_vmx_check_guest_state()
3178 * the IA-32e mode guest VM-exit control. It must also be identical in nested_vmx_check_guest_state()
3182 if (to_vmx(vcpu)->nested.nested_run_pending && in nested_vmx_check_guest_state()
3183 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { in nested_vmx_check_guest_state()
3184 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || in nested_vmx_check_guest_state()
3185 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || in nested_vmx_check_guest_state()
3186 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && in nested_vmx_check_guest_state()
3187 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) in nested_vmx_check_guest_state()
3188 return -EINVAL; in nested_vmx_check_guest_state()
3191 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && in nested_vmx_check_guest_state()
3192 (CC(is_noncanonical_msr_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || in nested_vmx_check_guest_state()
3193 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) in nested_vmx_check_guest_state()
3194 return -EINVAL; in nested_vmx_check_guest_state()
3197 return -EINVAL; in nested_vmx_check_guest_state()
3211 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3213 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3229 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in nested_vmx_check_vmentry_hw()
3231 vmx->loaded_vmcs->host_state.cr3 = cr3; in nested_vmx_check_vmentry_hw()
3235 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in nested_vmx_check_vmentry_hw()
3237 vmx->loaded_vmcs->host_state.cr4 = cr4; in nested_vmx_check_vmentry_hw()
3240 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, in nested_vmx_check_vmentry_hw()
3243 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3244 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_check_vmentry_hw()
3245 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3246 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_check_vmentry_hw()
3254 "early hardware check VM-instruction error: ", error); in nested_vmx_check_vmentry_hw()
3268 * A non-failing VMEntry means we somehow entered guest mode with in nested_vmx_check_vmentry_hw()
3291 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { in nested_get_evmcs_page()
3300			 * Post migration, VMCS12 always provides the most up-to-date	 in nested_get_evmcs_page()
3303 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_get_evmcs_page()
3316 if (!vcpu->arch.pdptrs_from_userspace && in nested_get_vmcs12_pages()
3323 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) in nested_get_vmcs12_pages()
3329 map = &vmx->nested.apic_access_page_map; in nested_get_vmcs12_pages()
3331 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) { in nested_get_vmcs12_pages()
3332 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn)); in nested_get_vmcs12_pages()
3334 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n", in nested_get_vmcs12_pages()
3336 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in nested_get_vmcs12_pages()
3337 vcpu->run->internal.suberror = in nested_get_vmcs12_pages()
3339 vcpu->run->internal.ndata = 0; in nested_get_vmcs12_pages()
3345 map = &vmx->nested.virtual_apic_map; in nested_get_vmcs12_pages()
3347 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { in nested_get_vmcs12_pages()
3348 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); in nested_get_vmcs12_pages()
3353 * The processor will never use the TPR shadow, simply in nested_get_vmcs12_pages()
3364 * force VM-Entry to fail. in nested_get_vmcs12_pages()
3371 map = &vmx->nested.pi_desc_map; in nested_get_vmcs12_pages()
3373 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { in nested_get_vmcs12_pages()
3374 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
3375 (struct pi_desc *)(((void *)map->hva) + in nested_get_vmcs12_pages()
3376 offset_in_page(vmcs12->posted_intr_desc_addr)); in nested_get_vmcs12_pages()
3378 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); in nested_get_vmcs12_pages()
3386 vmx->nested.pi_desc = NULL; in nested_get_vmcs12_pages()
3404 * to make nested_evmcs_l2_tlb_flush_enabled() work correctly post in vmx_get_nested_state_pages()
3410 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in vmx_get_nested_state_pages()
3411 vcpu->run->internal.suberror = in vmx_get_nested_state_pages()
3413 vcpu->run->internal.ndata = 0; in vmx_get_nested_state_pages()
3434 if (WARN_ON_ONCE(vmx->nested.pml_full)) in nested_vmx_write_pml_buffer()
3445 if (vmcs12->guest_pml_index >= PML_LOG_NR_ENTRIES) { in nested_vmx_write_pml_buffer()
3446 vmx->nested.pml_full = true; in nested_vmx_write_pml_buffer()
3451 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; in nested_vmx_write_pml_buffer()
3453 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, in nested_vmx_write_pml_buffer()
3457 vmcs12->guest_pml_index--; in nested_vmx_write_pml_buffer()
3471 if (!to_vmx(vcpu)->nested.vmxon) { in nested_vmx_check_permission()
3492 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3510 vmx->nested.current_vmptr, in nested_vmx_enter_non_root_mode()
3511 vmcs12->guest_rip, in nested_vmx_enter_non_root_mode()
3512 vmcs12->guest_intr_status, in nested_vmx_enter_non_root_mode()
3513 vmcs12->vm_entry_intr_info_field, in nested_vmx_enter_non_root_mode()
3514 vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT, in nested_vmx_enter_non_root_mode()
3515 vmcs12->ept_pointer, in nested_vmx_enter_non_root_mode()
3516 vmcs12->guest_cr3, in nested_vmx_enter_non_root_mode()
3521 if (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3522 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) in nested_vmx_enter_non_root_mode()
3523 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_enter_non_root_mode()
3525 (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3526 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) in nested_vmx_enter_non_root_mode()
3527 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in nested_vmx_enter_non_root_mode()
3531 * nested early checks are disabled. In the event of a "late" VM-Fail, in nested_vmx_enter_non_root_mode()
3532 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its in nested_vmx_enter_non_root_mode()
3533 * software model to the pre-VMEntry host state. When EPT is disabled, in nested_vmx_enter_non_root_mode()
3535 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing in nested_vmx_enter_non_root_mode()
3538 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is in nested_vmx_enter_non_root_mode()
3539 * guaranteed to be overwritten with a shadow CR3 prior to re-entering in nested_vmx_enter_non_root_mode()
3541 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks in nested_vmx_enter_non_root_mode()
3542 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail in nested_vmx_enter_non_root_mode()
3546 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); in nested_vmx_enter_non_root_mode()
3548 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in nested_vmx_enter_non_root_mode()
3550 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); in nested_vmx_enter_non_root_mode()
3554 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3559 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3566 vmcs12->exit_qualification = entry_failure_code; in nested_vmx_enter_non_root_mode()
3575 vmcs12->exit_qualification = entry_failure_code; in nested_vmx_enter_non_root_mode()
3581 vmcs12->vm_entry_msr_load_addr, in nested_vmx_enter_non_root_mode()
3582 vmcs12->vm_entry_msr_load_count); in nested_vmx_enter_non_root_mode()
3585 vmcs12->exit_qualification = failed_index; in nested_vmx_enter_non_root_mode()
3593 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs in nested_vmx_enter_non_root_mode()
3600 * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI in nested_vmx_enter_non_root_mode()
3601 * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can in nested_vmx_enter_non_root_mode()
3602 * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit in nested_vmx_enter_non_root_mode()
3606 if ((__exec_controls_get(&vmx->vmcs01) & (CPU_BASED_INTR_WINDOW_EXITING | in nested_vmx_enter_non_root_mode()
3617 vmx->nested.preemption_timer_expired = false; in nested_vmx_enter_non_root_mode()
3634 * 26.7 "VM-entry failures during or after loading guest state". in nested_vmx_enter_non_root_mode()
3637 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) in nested_vmx_enter_non_root_mode()
3638 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; in nested_vmx_enter_non_root_mode()
3642 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3648 vmcs12->vm_exit_reason = exit_reason.full; in nested_vmx_enter_non_root_mode()
3650 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_enter_non_root_mode()
3681 vmx->nested.current_vmptr == INVALID_GPA)) in nested_vmx_run()
3690 * VM-instruction error field. in nested_vmx_run()
3692 if (CC(vmcs12->hdr.shadow_vmcs)) in nested_vmx_run()
3698 copy_enlightened_to_vmcs12(vmx, evmcs->hv_clean_fields); in nested_vmx_run()
3700 vmcs12->launch_state = !launch; in nested_vmx_run()
3718 if (CC(vmcs12->launch_state == launch)) in nested_vmx_run()
3736 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
3737 vmx->nested.has_preemption_timer_deadline = false; in nested_vmx_run()
3743 vmx->vcpu.arch.l1tf_flush_l1d = true; in nested_vmx_run()
3757 switch (vmcs12->guest_activity_state) { in nested_vmx_run()
3761 * awakened by event injection or by an NMI-window VM-exit or in nested_vmx_run()
3762 * by an interrupt-window VM-exit, halt the vcpu. in nested_vmx_run()
3764 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && in nested_vmx_run()
3767 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { in nested_vmx_run()
3768 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3773 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3783 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3793 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3806 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3813 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | in vmcs12_guest_cr0()
3814 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | in vmcs12_guest_cr0()
3815 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | in vmcs12_guest_cr0()
3816 vcpu->arch.cr0_guest_owned_bits)); in vmcs12_guest_cr0()
3823 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | in vmcs12_guest_cr4()
3824 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | in vmcs12_guest_cr4()
3825 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | in vmcs12_guest_cr4()
3826 vcpu->arch.cr4_guest_owned_bits)); in vmcs12_guest_cr4()
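/*
 * Generic form of the three-way merge above (hypothetical helper, shown only
 * to make the bit ownership explicit): bits L0 lets the guest own are taken
 * from hardware, bits L1 intercepts come from vmcs12, and everything else
 * comes from the read shadow KVM maintained while L2 ran.
 */
static unsigned long merge_cr_bits(unsigned long hw_cr, unsigned long vmcs12_cr,
				   unsigned long read_shadow,
				   unsigned long l0_guest_owned,
				   unsigned long l1_guest_host_mask)
{
	return (hw_cr & l0_guest_owned) |
	       (vmcs12_cr & l1_guest_host_mask) |
	       (read_shadow & ~(l1_guest_host_mask | l0_guest_owned));
}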
3837 * Per the SDM, VM-Exits due to double and triple faults are never in vmcs12_save_pending_event()
3842 * event results in a double-fault exception". It's unclear why the in vmcs12_save_pending_event()
3844 * while vectoring a different exception (injected events are never in vmcs12_save_pending_event()
3856 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3857 } else if (vcpu->arch.exception.injected) { in vmcs12_save_pending_event()
3858 nr = vcpu->arch.exception.vector; in vmcs12_save_pending_event()
3862 vmcs12->vm_exit_instruction_len = in vmcs12_save_pending_event()
3863 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
3868 if (vcpu->arch.exception.has_error_code) { in vmcs12_save_pending_event()
3870 vmcs12->idt_vectoring_error_code = in vmcs12_save_pending_event()
3871 vcpu->arch.exception.error_code; in vmcs12_save_pending_event()
3874 vmcs12->idt_vectoring_info_field = idt_vectoring; in vmcs12_save_pending_event()
3875 } else if (vcpu->arch.nmi_injected) { in vmcs12_save_pending_event()
3876 vmcs12->idt_vectoring_info_field = in vmcs12_save_pending_event()
3878 } else if (vcpu->arch.interrupt.injected) { in vmcs12_save_pending_event()
3879 nr = vcpu->arch.interrupt.nr; in vmcs12_save_pending_event()
3882 if (vcpu->arch.interrupt.soft) { in vmcs12_save_pending_event()
3884 vmcs12->vm_entry_instruction_len = in vmcs12_save_pending_event()
3885 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
3889 vmcs12->idt_vectoring_info_field = idt_vectoring; in vmcs12_save_pending_event()
3891 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3902 * Don't need to mark the APIC access page dirty; it is never in nested_mark_vmcs12_pages_dirty()
3907 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; in nested_mark_vmcs12_pages_dirty()
3912 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; in nested_mark_vmcs12_pages_dirty()
3924 if (!vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
3927 if (!vmx->nested.pi_desc) in vmx_complete_nested_posted_interrupt()
3930 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
3932 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
3935 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); in vmx_complete_nested_posted_interrupt()
3937 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_complete_nested_posted_interrupt()
3941 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
3956 return -ENXIO; in vmx_complete_nested_posted_interrupt()
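/*
 * Rough shape of the posted-interrupt descriptor consumed above (layout per
 * the SDM; KVM's real struct pi_desc also exposes the notification vector
 * and destination fields).  This struct is illustrative only.
 */
struct pi_desc_sketch {
	u32 pir[8];	/* 256-bit Posted-Interrupt Request bitmap, one bit per vector */
	u32 control;	/* bit 0: ON (outstanding notification), bit 1: SN (suppress)  */
	u32 rsvd[7];	/* descriptor is 64 bytes total                                 */
};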
3961 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; in nested_vmx_inject_exception_vmexit()
3962 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK; in nested_vmx_inject_exception_vmexit()
3966 if (ex->has_payload) { in nested_vmx_inject_exception_vmexit()
3967 exit_qual = ex->payload; in nested_vmx_inject_exception_vmexit()
3968 } else if (ex->vector == PF_VECTOR) { in nested_vmx_inject_exception_vmexit()
3969 exit_qual = vcpu->arch.cr2; in nested_vmx_inject_exception_vmexit()
3970 } else if (ex->vector == DB_VECTOR) { in nested_vmx_inject_exception_vmexit()
3971 exit_qual = vcpu->arch.dr6; in nested_vmx_inject_exception_vmexit()
3980 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the in nested_vmx_inject_exception_vmexit()
3981 * "has error code" flags on VM-Exit if the CPU is in Real Mode. in nested_vmx_inject_exception_vmexit()
3983 if (ex->has_error_code && is_protmode(vcpu)) { in nested_vmx_inject_exception_vmexit()
3987 * injected error code for VM-Entry. Drop the bits to mimic in nested_vmx_inject_exception_vmexit()
3988 * hardware and avoid inducing failure on nested VM-Entry if L1 in nested_vmx_inject_exception_vmexit()
3990 * generate "full" 32-bit error codes, so KVM allows userspace in nested_vmx_inject_exception_vmexit()
3993 vmcs12->vm_exit_intr_error_code = (u16)ex->error_code; in nested_vmx_inject_exception_vmexit()
3997 if (kvm_exception_is_soft(ex->vector)) in nested_vmx_inject_exception_vmexit()
4002 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && in nested_vmx_inject_exception_vmexit()
4011 * of a #DB (trap-like vs. fault-like) from the exception payload (to-be-DR6).
4012 * Using the payload is flawed because code breakpoints (fault-like) and data
4013 * breakpoints (trap-like) set the same bits in DR6 (breakpoint detected), i.e.
4014 * this will return false positives if a to-be-injected code breakpoint #DB is
4017 * too is trap-like.
4022 * from the emulator (because such #DBs are fault-like and thus don't trigger
4027 if (!ex->pending || ex->vector != DB_VECTOR) in vmx_get_pending_dbg_trap()
4030 /* General Detect #DBs are always fault-like. */ in vmx_get_pending_dbg_trap()
4031 return ex->payload & ~DR6_BD; in vmx_get_pending_dbg_trap()
4036 * a pending Monitor Trap Flag VM-Exit. TSS T-flag #DBs are not emulated by
4046 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
4047 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
4050 * field if a VM-exit is delivered before the debug trap.
4056 pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception); in nested_vmx_update_pending_dbg()
4064 to_vmx(vcpu)->nested.preemption_timer_expired; in nested_vmx_preemption_timer_pending()
4070 void *vapic = vmx->nested.virtual_apic_map.hva; in vmx_has_nested_events()
4074 vmx->nested.mtf_pending) in vmx_has_nested_events()
4080 * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move in vmx_has_nested_events()
4099 if (vmx->nested.pi_pending && vmx->nested.pi_desc && in vmx_has_nested_events()
4100 pi_test_on(vmx->nested.pi_desc)) { in vmx_has_nested_events()
4101 max_irr = pi_find_highest_vector(vmx->nested.pi_desc); in vmx_has_nested_events()
4111 * edits to fill in missing examples, e.g. #DB due to split-lock accesses,
4112 * and less minor edits to splice in the priority of VMX Non-Root specific
4113 * events, e.g. MTF and NMI/INTR-window exiting.
4116 * - RESET
4117 * - Machine Check
4120 * - T flag in TSS is set (on task switch)
4123 * - FLUSH
4124 * - STOPCLK
4125 * - SMI
4126 * - INIT
4128 * 3.5 Monitor Trap Flag (MTF) VM-exit[1]
4131 * - Breakpoints
4132 * - Trap-class Debug Exceptions (#DB due to TF flag set, data/I-O
4133 * breakpoint, or #DB due to a split-lock access)
4135 * 4.3 VMX-preemption timer expired VM-exit
4137 * 4.6 NMI-window exiting VM-exit[2]
4141 * 5.5 Interrupt-window exiting VM-exit and Virtual-interrupt delivery
4148 * - Code-Segment Limit Violation
4149 * - Code Page Fault
4150 * - Control protection exception (missing ENDBRANCH at target of indirect
4154 * - Instruction length > 15 bytes
4155 * - Invalid Opcode
4156 * - Coprocessor Not Available
4159 * - Overflow
4160 * - Bound error
4161 * - Invalid TSS
4162 * - Segment Not Present
4163 * - Stack fault
4164 * - General Protection
4165 * - Data Page Fault
4166 * - Alignment Check
4167 * - x86 FPU Floating-point exception
4168 * - SIMD floating-point exception
4169 * - Virtualization exception
4170 * - Control protection exception
4172 * [1] Per the "Monitor Trap Flag" section: System-management interrupts (SMIs),
4174 * MTF VM exits take priority over debug-trap exceptions and lower priority
4177 * [2] Debug-trap exceptions and higher priority events take priority over VM exits
4178 * caused by the VMX-preemption timer. VM exits caused by the VMX-preemption
4179 * timer take priority over VM exits caused by the "NMI-window exiting"
4180 * VM-execution control and lower priority events.
4182 * [3] Debug-trap exceptions and higher priority events take priority over VM exits
4183 * caused by "NMI-window exiting". VM exits caused by this control take
4184 * priority over non-maskable interrupts (NMIs) and lower priority events.
4186 * [4] Virtual-interrupt delivery has the same priority as that of VM exits due to
4187 * the 1-setting of the "interrupt-window exiting" VM-execution control. Thus,
4188 * non-maskable interrupts (NMIs) and higher priority events take priority over
4194 struct kvm_lapic *apic = vcpu->arch.apic; in vmx_check_nested_events()
4201 bool block_nested_exceptions = vmx->nested.nested_run_pending; in vmx_check_nested_events()
4204 * hardware, aren't blocked by a pending VM-Enter as KVM doesn't need in vmx_check_nested_events()
4210 * VM-Exit that occurred _during_ instruction execution; new events, in vmx_check_nested_events()
4216	 * Injected events are blocked by nested VM-Enter, as KVM is responsible	 in vmx_check_nested_events()
4218 * wait until after VM-Enter completes to deliver injected events. in vmx_check_nested_events()
4224 test_bit(KVM_APIC_INIT, &apic->pending_events)) { in vmx_check_nested_events()
4226 return -EBUSY; in vmx_check_nested_events()
4228 clear_bit(KVM_APIC_INIT, &apic->pending_events); in vmx_check_nested_events()
4229 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED) in vmx_check_nested_events()
4233 vmx->nested.mtf_pending = false; in vmx_check_nested_events()
4238 test_bit(KVM_APIC_SIPI, &apic->pending_events)) { in vmx_check_nested_events()
4240 return -EBUSY; in vmx_check_nested_events()
4242 clear_bit(KVM_APIC_SIPI, &apic->pending_events); in vmx_check_nested_events()
4243 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vmx_check_nested_events()
4245 apic->sipi_vector & 0xFFUL); in vmx_check_nested_events()
4253 * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but in vmx_check_nested_events()
4256 * TODO: SMIs have higher priority than MTF and trap-like #DBs (except in vmx_check_nested_events()
4259 * prioritize SMI over MTF and trap-like #DBs. in vmx_check_nested_events()
4261 if (vcpu->arch.exception_vmexit.pending && in vmx_check_nested_events()
4262 !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) { in vmx_check_nested_events()
4264 return -EBUSY; in vmx_check_nested_events()
4270 if (vcpu->arch.exception.pending && in vmx_check_nested_events()
4271 !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) { in vmx_check_nested_events()
4273 return -EBUSY; in vmx_check_nested_events()
4277 if (vmx->nested.mtf_pending) { in vmx_check_nested_events()
4279 return -EBUSY; in vmx_check_nested_events()
4285 if (vcpu->arch.exception_vmexit.pending) { in vmx_check_nested_events()
4287 return -EBUSY; in vmx_check_nested_events()
4293 if (vcpu->arch.exception.pending) { in vmx_check_nested_events()
4295 return -EBUSY; in vmx_check_nested_events()
4301 return -EBUSY; in vmx_check_nested_events()
4306 if (vcpu->arch.smi_pending && !is_smm(vcpu)) { in vmx_check_nested_events()
4308 return -EBUSY; in vmx_check_nested_events()
4312 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) { in vmx_check_nested_events()
4314 return -EBUSY; in vmx_check_nested_events()
4322 * The NMI-triggered VM exit counts as injection: in vmx_check_nested_events()
4325 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
4335 return -EBUSY; in vmx_check_nested_events()
4342 return -EBUSY; in vmx_check_nested_events()
4349 if (irq != -1) { in vmx_check_nested_events()
4351 return -EBUSY; in vmx_check_nested_events()
4364 * interrupts for L2 instead of injecting VM-Exit, as the in vmx_check_nested_events()
4368 * and enabling posted interrupts requires ACK-on-exit. in vmx_check_nested_events()
4370 if (irq == vmx->nested.posted_intr_nv) { in vmx_check_nested_events()
4377 return -EBUSY; in vmx_check_nested_events()
4379 vmx->nested.pi_pending = true; in vmx_check_nested_events()
4385 return -EBUSY; in vmx_check_nested_events()
4391 * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must in vmx_check_nested_events()
4392 * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI in vmx_check_nested_events()
4406 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
4412 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; in vmx_get_preemption_timer_value()
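/*
 * Sketch of the remaining-time conversion started above (illustrative, not
 * the in-tree code): the emulated preemption timer ticks once every 2^5 TSC
 * cycles, the rate KVM advertises in its emulated IA32_VMX_MISC, so the
 * remaining nanoseconds are first converted to TSC cycles and then shifted.
 */
static u64 sketch_preemption_timer_value(u64 remaining_ns, u64 tsc_khz)
{
	u64 tsc_cycles = remaining_ns * tsc_khz;

	do_div(tsc_cycles, 1000000);	/* ns * kHz -> TSC cycles */

	return tsc_cycles >> 5;		/* one timer tick per 2^5 cycles */
}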
4469 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4470 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4471 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4472 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4473 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4474 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4475 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4476 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); in sync_vmcs02_to_vmcs12_rare()
4477 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4478 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4479 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4480 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4481 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4482 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4483 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4484 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4485 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4486 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); in sync_vmcs02_to_vmcs12_rare()
4487 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4488 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4489 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4490 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4491 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4492 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); in sync_vmcs02_to_vmcs12_rare()
4493 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); in sync_vmcs02_to_vmcs12_rare()
4494 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); in sync_vmcs02_to_vmcs12_rare()
4495 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); in sync_vmcs02_to_vmcs12_rare()
4496 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); in sync_vmcs02_to_vmcs12_rare()
4497 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); in sync_vmcs02_to_vmcs12_rare()
4498 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); in sync_vmcs02_to_vmcs12_rare()
4499 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); in sync_vmcs02_to_vmcs12_rare()
4500 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); in sync_vmcs02_to_vmcs12_rare()
4501 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); in sync_vmcs02_to_vmcs12_rare()
4502 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); in sync_vmcs02_to_vmcs12_rare()
4503 vmcs12->guest_pending_dbg_exceptions = in sync_vmcs02_to_vmcs12_rare()
4506 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; in sync_vmcs02_to_vmcs12_rare()
4515 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) in copy_vmcs02_to_vmcs12_rare()
4519 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4522 vmx->loaded_vmcs = &vmx->nested.vmcs02; in copy_vmcs02_to_vmcs12_rare()
4523 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4527 vmx->loaded_vmcs = &vmx->vmcs01; in copy_vmcs02_to_vmcs12_rare()
4528 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); in copy_vmcs02_to_vmcs12_rare()
4534 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4535 * VM-entry controls is also updated, since this is really a guest
4545 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = in sync_vmcs02_to_vmcs12()
4548 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); in sync_vmcs02_to_vmcs12()
4549 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); in sync_vmcs02_to_vmcs12()
4551 vmcs12->guest_rsp = kvm_rsp_read(vcpu); in sync_vmcs02_to_vmcs12()
4552 vmcs12->guest_rip = kvm_rip_read(vcpu); in sync_vmcs02_to_vmcs12()
4553 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); in sync_vmcs02_to_vmcs12()
4555 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); in sync_vmcs02_to_vmcs12()
4556 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); in sync_vmcs02_to_vmcs12()
4558 vmcs12->guest_interruptibility_info = in sync_vmcs02_to_vmcs12()
4561 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in sync_vmcs02_to_vmcs12()
4562 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; in sync_vmcs02_to_vmcs12()
4563 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in sync_vmcs02_to_vmcs12()
4564 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI; in sync_vmcs02_to_vmcs12()
4566 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; in sync_vmcs02_to_vmcs12()
4569 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER && in sync_vmcs02_to_vmcs12()
4570 !vmx->nested.nested_run_pending) in sync_vmcs02_to_vmcs12()
4571 vmcs12->vmx_preemption_timer_value = in sync_vmcs02_to_vmcs12()
4583 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); in sync_vmcs02_to_vmcs12()
4585 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); in sync_vmcs02_to_vmcs12()
4586 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); in sync_vmcs02_to_vmcs12()
4587 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); in sync_vmcs02_to_vmcs12()
4588 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); in sync_vmcs02_to_vmcs12()
4592 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); in sync_vmcs02_to_vmcs12()
4595 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); in sync_vmcs02_to_vmcs12()
4597 vmcs12->vm_entry_controls = in sync_vmcs02_to_vmcs12()
4598 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | in sync_vmcs02_to_vmcs12()
4601 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) in sync_vmcs02_to_vmcs12()
4602 vmcs12->guest_dr7 = vcpu->arch.dr7; in sync_vmcs02_to_vmcs12()
4604 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) in sync_vmcs02_to_vmcs12()
4605 vmcs12->guest_ia32_efer = vcpu->arch.efer; in sync_vmcs02_to_vmcs12()
4615 * could have been changed by the L2 guest or the exit - i.e., the guest-state and	 in prepare_vmcs12()
4616 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4624 vmcs12->vm_exit_reason = vm_exit_reason; in prepare_vmcs12()
4625 if (to_vmx(vcpu)->exit_reason.enclave_mode) in prepare_vmcs12()
4626 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; in prepare_vmcs12()
4627 vmcs12->exit_qualification = exit_qualification; in prepare_vmcs12()
4630 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched in prepare_vmcs12()
4634 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { in prepare_vmcs12()
4635 vmcs12->launch_state = 1; in prepare_vmcs12()
4639 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; in prepare_vmcs12()
4648 vmcs12->vm_exit_intr_info = exit_intr_info; in prepare_vmcs12()
4649 vmcs12->vm_exit_instruction_len = exit_insn_len; in prepare_vmcs12()
4650 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); in prepare_vmcs12()
4654 * MSRs if the exit is due to a VM-entry failure that occurs in prepare_vmcs12()
4659 vmcs12->vm_exit_msr_store_addr, in prepare_vmcs12()
4660 vmcs12->vm_exit_msr_store_count)) in prepare_vmcs12()
4671 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4681 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) in load_vmcs12_host_state()
4682 vcpu->arch.efer = vmcs12->host_ia32_efer; in load_vmcs12_host_state()
4683 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) in load_vmcs12_host_state()
4684 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in load_vmcs12_host_state()
4686 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in load_vmcs12_host_state()
4687 vmx_set_efer(vcpu, vcpu->arch.efer); in load_vmcs12_host_state()
4689 kvm_rsp_write(vcpu, vmcs12->host_rsp); in load_vmcs12_host_state()
4690 kvm_rip_write(vcpu, vmcs12->host_rip); in load_vmcs12_host_state()
4701 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); in load_vmcs12_host_state()
4702 vmx_set_cr0(vcpu, vmcs12->host_cr0); in load_vmcs12_host_state()
4704 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ in load_vmcs12_host_state()
4705 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in load_vmcs12_host_state()
4706 vmx_set_cr4(vcpu, vmcs12->host_cr4); in load_vmcs12_host_state()
4714 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored)) in load_vmcs12_host_state()
4719 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); in load_vmcs12_host_state()
4720 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); in load_vmcs12_host_state()
4721 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); in load_vmcs12_host_state()
4722 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); in load_vmcs12_host_state()
4723 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); in load_vmcs12_host_state()
4728 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) in load_vmcs12_host_state()
4731 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { in load_vmcs12_host_state()
4732 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); in load_vmcs12_host_state()
4733 vcpu->arch.pat = vmcs12->host_ia32_pat; in load_vmcs12_host_state()
4735 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && in load_vmcs12_host_state()
4738 vmcs12->host_ia32_perf_global_ctrl)); in load_vmcs12_host_state()
4741 27.5.2 Loading Host Segment and Descriptor-Table Registers */ in load_vmcs12_host_state()
4745 .selector = vmcs12->host_cs_selector, in load_vmcs12_host_state()
4751 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) in load_vmcs12_host_state()
4765 seg.selector = vmcs12->host_ds_selector; in load_vmcs12_host_state()
4767 seg.selector = vmcs12->host_es_selector; in load_vmcs12_host_state()
4769 seg.selector = vmcs12->host_ss_selector; in load_vmcs12_host_state()
4771 seg.selector = vmcs12->host_fs_selector; in load_vmcs12_host_state()
4772 seg.base = vmcs12->host_fs_base; in load_vmcs12_host_state()
4774 seg.selector = vmcs12->host_gs_selector; in load_vmcs12_host_state()
4775 seg.base = vmcs12->host_gs_base; in load_vmcs12_host_state()
4778 .base = vmcs12->host_tr_base, in load_vmcs12_host_state()
4780 .selector = vmcs12->host_tr_selector, in load_vmcs12_host_state()
4793 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, in load_vmcs12_host_state()
4794 vmcs12->vm_exit_msr_load_count)) in load_vmcs12_host_state()
4797 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); in load_vmcs12_host_state()
4811 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4812 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) in nested_vmx_get_vmcs01_guest_efer()
4813 return vmx->msr_autoload.guest.val[i].value; in nested_vmx_get_vmcs01_guest_efer()
4818 return efer_msr->data; in nested_vmx_get_vmcs01_guest_efer()
4831 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); in nested_vmx_restore_host_state()
4833 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { in nested_vmx_restore_host_state()
4837 * and vcpu->arch.dr7 is not squirreled away before the in nested_vmx_restore_host_state()
4840 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in nested_vmx_restore_host_state()
4852 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); in nested_vmx_restore_host_state()
4855 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in nested_vmx_restore_host_state()
4859 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in nested_vmx_restore_host_state()
4866 * software model is up-to-date. in nested_vmx_restore_host_state()
4884 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { in nested_vmx_restore_host_state()
4885 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); in nested_vmx_restore_host_state()
4893 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { in nested_vmx_restore_host_state()
4894 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); in nested_vmx_restore_host_state()
4940 /* Pending MTF traps are discarded on VM-Exit. */ in __nested_vmx_vmexit()
4941 vmx->nested.mtf_pending = false; in __nested_vmx_vmexit()
4944 WARN_ON_ONCE(vmx->nested.nested_run_pending); in __nested_vmx_vmexit()
4951 * do that when something is forcing L2->L1 exit prior to in __nested_vmx_vmexit()
4964 * up-to-date before switching to L1. in __nested_vmx_vmexit()
4972 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in __nested_vmx_vmexit()
4975 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset; in __nested_vmx_vmexit()
4977 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; in __nested_vmx_vmexit()
4980 if (likely(!vmx->fail)) { in __nested_vmx_vmexit()
4983 if (vm_exit_reason != -1) in __nested_vmx_vmexit()
4994 * point it is already assumed by user-space to be in __nested_vmx_vmexit()
5000 * The only expected VM-instruction error is "VM entry with in __nested_vmx_vmexit()
5002 * problem with L0. And we should never get here with a in __nested_vmx_vmexit()
5011 * Drop events/exceptions that were queued for re-injection to L2 in __nested_vmx_vmexit()
5014 * prepare_vmcs12(), events/exceptions queued for re-injection need to in __nested_vmx_vmexit()
5017 vcpu->arch.nmi_injected = false; in __nested_vmx_vmexit()
5021 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in __nested_vmx_vmexit()
5027 * Bare metal isolates VMX root (host) from VMX non-root (guest), but in __nested_vmx_vmexit()
5035 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in __nested_vmx_vmexit()
5036 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in __nested_vmx_vmexit()
5037 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); in __nested_vmx_vmexit()
5039 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio); in __nested_vmx_vmexit()
5041 if (vmx->nested.l1_tpr_threshold != -1) in __nested_vmx_vmexit()
5042 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); in __nested_vmx_vmexit()
5044 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in __nested_vmx_vmexit()
5045 vmx->nested.change_vmcs01_virtual_apic_mode = false; in __nested_vmx_vmexit()
5049 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { in __nested_vmx_vmexit()
5050 vmx->nested.update_vmcs01_cpu_dirty_logging = false; in __nested_vmx_vmexit()
5056 if (vmx->nested.reload_vmcs01_apic_access_page) { in __nested_vmx_vmexit()
5057 vmx->nested.reload_vmcs01_apic_access_page = false; in __nested_vmx_vmexit()
5061 if (vmx->nested.update_vmcs01_apicv_status) { in __nested_vmx_vmexit()
5062 vmx->nested.update_vmcs01_apicv_status = false; in __nested_vmx_vmexit()
5066 if (vmx->nested.update_vmcs01_hwapic_isr) { in __nested_vmx_vmexit()
5067 vmx->nested.update_vmcs01_hwapic_isr = false; in __nested_vmx_vmexit()
5071 if ((vm_exit_reason != -1) && in __nested_vmx_vmexit()
5073 vmx->nested.need_vmcs12_to_shadow_sync = true; in __nested_vmx_vmexit()
5078 if (likely(!vmx->fail)) { in __nested_vmx_vmexit()
5079 if (vm_exit_reason != -1) in __nested_vmx_vmexit()
5080 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, in __nested_vmx_vmexit()
5081 vmcs12->exit_qualification, in __nested_vmx_vmexit()
5082 vmcs12->idt_vectoring_info_field, in __nested_vmx_vmexit()
5083 vmcs12->vm_exit_intr_info, in __nested_vmx_vmexit()
5084 vmcs12->vm_exit_intr_error_code, in __nested_vmx_vmexit()
5091 * if the event is blocked (RFLAGS.IF is cleared on VM-Exit). in __nested_vmx_vmexit()
5095 * non-root mode. INIT/SIPI don't need to be checked as INIT in __nested_vmx_vmexit()
5096 * is blocked post-VMXON, and SIPIs are ignored. in __nested_vmx_vmexit()
5098 if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending) in __nested_vmx_vmexit()
5104 * After an early L2 VM-entry failure, we're now back in __nested_vmx_vmexit()
5107 * flag and the VM-instruction error field of the VMCS in __nested_vmx_vmexit()
5120 vmx->fail = 0; in __nested_vmx_vmexit()
5130 * Decode the memory-address operand of a vmx instruction, as recorded on an
5146 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). in get_vmx_mem_address()
5192 * The virtual/linear address is never truncated in 64-bit in get_vmx_mem_address()
5193 * mode, e.g. a 32-bit address size can yield a 64-bit virtual in get_vmx_mem_address()
5194 * address when using FS/GS with a non-zero base. in get_vmx_mem_address()
5203 * non-canonical form. This is the only check on the memory in get_vmx_mem_address()
5217 * - segment type check (#GP(0) may be thrown) in get_vmx_mem_address()
5218 * - usability check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5219 * - limit check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5223 * read-only data segment or any code segment. in get_vmx_mem_address()
5228 * execute-only code segment in get_vmx_mem_address()
5243 * limit==0xffffffff and of type expand-up data or code. in get_vmx_mem_address()
5247 exn = exn || ((u64)off + len - 1 > s.limit); in get_vmx_mem_address()
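The checks above operate on an operand decoded from the exit qualification (the displacement) and the VM-exit instruction-information field (scaling, address size, segment, base and index registers). A rough, userspace-style decode of that field, following the bit layout get_vmx_mem_address() relies on; the struct and helper are illustrative, and the bit positions are stated as an assumption based on the SDM's instruction-information table:

#include <stdint.h>
#include <stdbool.h>

struct vmx_mem_operand {
	int  scaling;		/* bits 1:0   - index scaled by 1/2/4/8      */
	int  addr_size;		/* bits 9:7   - 0=16-bit, 1=32-bit, 2=64-bit */
	int  seg_reg;		/* bits 17:15 - ES/CS/SS/DS/FS/GS            */
	int  index_reg;		/* bits 21:18                                */
	bool index_valid;	/* bit 22 clear => index register in use     */
	int  base_reg;		/* bits 26:23                                */
	bool base_valid;	/* bit 27 clear => base register in use      */
};

static struct vmx_mem_operand decode_vmx_mem_operand(uint32_t info)
{
	struct vmx_mem_operand op = {
		.scaling	= info & 3,
		.addr_size	= (info >> 7) & 7,
		.seg_reg	= (info >> 15) & 7,
		.index_reg	= (info >> 18) & 0xf,
		.index_valid	= !(info & (1u << 22)),
		.base_reg	= (info >> 23) & 0xf,
		.base_valid	= !(info & (1u << 27)),
	};
	return op;
}

The limit check quoted above then applies to the computed offset: the access faults if off + len - 1 exceeds the segment limit, with the expand-up, limit 0xffffffff case exempted.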
5271 return -EINVAL; in nested_vmx_get_vmptr()
5277 return -EINVAL; in nested_vmx_get_vmptr()
5291 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; in alloc_shadow_vmcs()
5296 * operation. VMXON faults if the CPU is already post-VMXON, so it in alloc_shadow_vmcs()
5301 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs)) in alloc_shadow_vmcs()
5302 return loaded_vmcs->shadow_vmcs; in alloc_shadow_vmcs()
5304 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); in alloc_shadow_vmcs()
5305 if (loaded_vmcs->shadow_vmcs) in alloc_shadow_vmcs()
5306 vmcs_clear(loaded_vmcs->shadow_vmcs); in alloc_shadow_vmcs()
5308 return loaded_vmcs->shadow_vmcs; in alloc_shadow_vmcs()
5316 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
5320 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5321 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
5324 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; in enter_vmx_operation()
5325 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5326 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
5332 hrtimer_setup(&vmx->nested.preemption_timer, vmx_preemption_timer_fn, CLOCK_MONOTONIC, in enter_vmx_operation()
5335 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
5337 vmx->nested.vmcs02_initialized = false; in enter_vmx_operation()
5338 vmx->nested.vmxon = true; in enter_vmx_operation()
5341 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
5348 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
5351 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
5354 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
5357 return -ENOMEM; in enter_vmx_operation()
5373 * which has higher priority than VM-Exit (see Intel SDM's pseudocode in handle_vmxon()
5376 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 in handle_vmxon()
5389 * and has higher priority than the VM-Fail due to being post-VMXON, in handle_vmxon()
5390 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, in handle_vmxon()
5391 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits in handle_vmxon()
5393 * VMX non-root. in handle_vmxon()
5395 * Forwarding the VM-Exit unconditionally, i.e. without performing the in handle_vmxon()
5397 * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's in handle_vmxon()
5406 if (vmx->nested.vmxon) in handle_vmxon()
5412 * have lower priority than the VM-Fail above. in handle_vmxon()
5420 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmxon()
5434 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; in handle_vmxon()
5440 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || in handle_vmxon()
5444 vmx->nested.vmxon_ptr = vmptr; in handle_vmxon()
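Before recording the VMXON pointer, the code above reads the first four bytes of the guest's VMXON region and compares them against the VMCS revision identifier KVM advertises, which lives in bits 30:0 of IA32_VMX_BASIC. A hedged sketch of that comparison with the emulated MSR value passed in directly (helper name is illustrative):

#include <stdint.h>
#include <stdbool.h>

static bool vmxon_revision_ok(uint64_t vmx_basic_msr, uint32_t region_first_dword)
{
	/* Bits 30:0 of IA32_VMX_BASIC hold the VMCS revision identifier
	 * software must write into the VMXON region (and every VMCS). */
	uint32_t revision = (uint32_t)(vmx_basic_msr & 0x7fffffff);

	return region_first_dword == revision;
}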
5456 if (vmx->nested.current_vmptr == INVALID_GPA) in nested_release_vmcs12()
5467 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
5471 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
5472 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5474 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); in nested_release_vmcs12()
5476 vmx->nested.current_vmptr = INVALID_GPA; in nested_release_vmcs12()
5510 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmclear()
5514 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
5570 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, in handle_vmread()
5573 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmread()
5575 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) in handle_vmread()
5585 /* Read the field, zero-extended to a u64 value */ in handle_vmread()
5589 * Hyper-V TLFS (as of 6.0b) explicitly states, that while an in handle_vmread()
5593 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a in handle_vmread()
5594 * workaround, as misbehaving guests will panic on VM-Fail. in handle_vmread()
5605 /* Read the field, zero-extended to a u64 value */ in handle_vmread()
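VMREAD/VMWRITE emulation sizes the access and classifies the field from the field encoding itself: bits 14:13 give the width, bits 11:10 the type (type 1 being the read-only VM-exit information fields), and bit 0 selects the high half of a 64-bit field. A sketch of those helpers, mirroring how the vmcs12 accessors are commonly written; names and the enum are illustrative:

#include <stdint.h>
#include <stdbool.h>

enum field_width { WIDTH_U16 = 0, WIDTH_U64 = 1, WIDTH_U32 = 2, WIDTH_NATURAL = 3 };

static enum field_width vmcs_field_width(uint32_t field)
{
	if (field & 1)			/* access to the high half of a 64-bit field */
		return WIDTH_U32;
	return (enum field_width)((field >> 13) & 0x3);
}

static bool vmcs_field_readonly(uint32_t field)
{
	return ((field >> 10) & 0x3) == 1;	/* type 1 = VM-exit information */
}

As the handle_vmwrite() comment below notes, the read-only classification is relaxed when the "VMWRITE to any VMCS field" misc capability is exposed to L1.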
5670 * possible lengths. The code below first zero-extends the value to 64 in handle_vmwrite()
5680 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA, in handle_vmwrite()
5683 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmwrite()
5685 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA)) in handle_vmwrite()
5708 * VMCS," then the "read-only" fields are actually read/write. in handle_vmwrite()
5715 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties in handle_vmwrite()
5735 * Do not track vmcs12 dirty-state if in guest-mode as we actually in handle_vmwrite()
5743 * shadow VMCS is up-to-date. in handle_vmwrite()
5747 vmcs_load(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5751 vmcs_clear(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5752 vmcs_load(vmx->loaded_vmcs->vmcs); in handle_vmwrite()
5755 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
5763 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
5767 __pa(vmx->vmcs01.shadow_vmcs)); in set_current_vmptr()
5768 vmx->nested.need_vmcs12_to_shadow_sync = true; in set_current_vmptr()
5770 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
5771 vmx->nested.force_msr_bitmap_recalc = true; in set_current_vmptr()
5790 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmptrld()
5797 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5798 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; in handle_vmptrld()
5801 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { in handle_vmptrld()
5812 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, in handle_vmptrld()
5832 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, in handle_vmptrld()
5849 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; in handle_vmptrst()
5886 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
5888 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
5900 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
5919 mmu = &vcpu->arch.guest_mmu; in handle_invept()
5928 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd, in handle_invept()
5933 if (nested_ept_root_matches(mmu->prev_roots[i].hpa, in handle_invept()
5934 mmu->prev_roots[i].pgd, in handle_invept()
5948 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); in handle_invept()
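The supported-types check earlier in handle_invept() comes straight from the emulated IA32_VMX_EPT_VPID_CAP value: bit 25 advertises single-context INVEPT (type 1) and bit 26 all-context INVEPT (type 2), so shifting the capabilities right by 24 and masking with 6 yields a bitmap of permitted types. A sketch of that derivation; the bit positions are restated here as an assumption from the capability MSR layout:

#include <stdint.h>
#include <stdbool.h>

static bool invept_type_allowed(uint64_t ept_caps, uint32_t type)
{
	/* bit 25 -> type 1 (single-context), bit 26 -> type 2 (all-context) */
	uint32_t types = (uint32_t)(ept_caps >> 24) & 6;

	return type < 32 && (types & (1u << type));
}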
5967 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
5969 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
5981 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
6003 * Always flush the effective vpid02, i.e. never flush the current VPID in handle_invvpid()
6004 * and never explicitly flush vpid01. INVVPID targets a VPID, not a in handle_invvpid()
6047 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu); in handle_invvpid()
6063 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, in nested_vmx_eptp_switching()
6071 if (vmcs12->ept_pointer != new_eptp) { in nested_vmx_eptp_switching()
6075 vmcs12->ept_pointer = new_eptp; in nested_vmx_eptp_switching()
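The EPTP-switching VM function picks its candidate root from a guest page holding 512 eight-byte EPT pointers; the read above fetches entry 'index' from vmcs12->eptp_list_address before validating it and, if it differs, updating vmcs12->ept_pointer. A one-line sketch of the offset computation (helper name is illustrative):

#include <stdint.h>

static inline uint64_t eptp_list_entry_gpa(uint64_t eptp_list_address, uint32_t index)
{
	/* 4KiB list page, 512 entries of 8 bytes each */
	return eptp_list_address + (uint64_t)index * sizeof(uint64_t);
}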
6092 * VMFUNC should never execute cleanly while L1 is active; KVM supports in handle_vmfunc()
6103 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC in handle_vmfunc()
6111 if (!(vmcs12->vm_function_control & BIT_ULL(function))) in handle_vmfunc()
6126 * This is effectively a reflected VM-Exit, as opposed to a synthesized in handle_vmfunc()
6127 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode in handle_vmfunc()
6130 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, in handle_vmfunc()
6138 * a VM-exit into L1.
6148 b = -1; in nested_vmx_check_io_bitmaps()
6152 bitmap = vmcs12->io_bitmap_a; in nested_vmx_check_io_bitmaps()
6154 bitmap = vmcs12->io_bitmap_b; in nested_vmx_check_io_bitmaps()
6166 size--; in nested_vmx_check_io_bitmaps()
6208 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, in nested_vmx_exit_handled_msr()
6212 bitmap = vmcs12->msr_bitmap; in nested_vmx_exit_handled_msr()
6216 msr_index -= 0xc0000000; in nested_vmx_exit_handled_msr()
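As the comment above says, the 4KiB MSR bitmap holds four 1024-byte sub-bitmaps: read-low, read-high, write-low and write-high, where "low" covers MSRs 0x00000000-0x00001fff and "high" covers 0xc0000000-0xc0001fff. A sketch of the lookup nested_vmx_exit_handled_msr() performs, operating on an already-mapped copy of the page (function and parameter names are illustrative):

#include <stdint.h>
#include <stdbool.h>

static bool msr_access_intercepted(const uint8_t *bitmap, uint32_t msr, bool is_write)
{
	uint32_t offset = is_write ? 2048 : 0;	/* write bitmaps start at 0x800 */

	if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		msr -= 0xc0000000;
		offset += 1024;			/* "high" MSR sub-bitmap */
	} else if (msr > 0x1fff) {
		return true;			/* outside both ranges: always exits */
	}

	return bitmap[offset + msr / 8] & (1u << (msr % 8));
}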
6249 if (vmcs12->cr0_guest_host_mask & in nested_vmx_exit_handled_cr()
6250 (val ^ vmcs12->cr0_read_shadow)) in nested_vmx_exit_handled_cr()
6258 if (vmcs12->cr4_guest_host_mask & in nested_vmx_exit_handled_cr()
6259 (vmcs12->cr4_read_shadow ^ val)) in nested_vmx_exit_handled_cr()
6269 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && in nested_vmx_exit_handled_cr()
6270 (vmcs12->cr0_read_shadow & X86_CR0_TS)) in nested_vmx_exit_handled_cr()
6276 if (vmcs12->cpu_based_vm_exec_control & in nested_vmx_exit_handled_cr()
6281 if (vmcs12->cpu_based_vm_exec_control & in nested_vmx_exit_handled_cr()
6293 if (vmcs12->cr0_guest_host_mask & 0xe & in nested_vmx_exit_handled_cr()
6294 (val ^ vmcs12->cr0_read_shadow)) in nested_vmx_exit_handled_cr()
6296 if ((vmcs12->cr0_guest_host_mask & 0x1) && in nested_vmx_exit_handled_cr()
6297 !(vmcs12->cr0_read_shadow & 0x1) && in nested_vmx_exit_handled_cr()
6317 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); in nested_vmx_exit_handled_encls()
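The CR-access cases above all reduce to the same predicate: a MOV (or LMSW/CLTS) is L1's to handle only when the new value differs from L1's read shadow in a bit position that L1 owns through the guest/host mask, which is why the LMSW case masks with 0xe and handles the PE bit transition separately. A compact sketch of that predicate with the vmcs12 fields passed in directly (names are illustrative):

#include <stdint.h>
#include <stdbool.h>

static bool l1_wants_cr_write_exit(uint64_t guest_host_mask,
				   uint64_t read_shadow, uint64_t new_val)
{
	/* Exit to L1 iff an L1-owned bit would change relative to the shadow. */
	return (guest_host_mask & (new_val ^ read_shadow)) != 0;
}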
6334 /* Out-of-range fields always cause a VM exit from L2 to L1 */ in nested_vmx_exit_handled_vmcs_access()
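For shadowed VMREAD/VMWRITE, the decision is a straight bitmap probe keyed by the field number: any field with a bit set above bit 14 is out of range and, per the comment above, always exits; otherwise bit (field % 8) of byte (field / 8) in the 4KiB vmread or vmwrite bitmap decides. A sketch against an already-mapped bitmap page (names are illustrative):

#include <stdint.h>
#include <stdbool.h>

static bool vmcs_access_intercepted(const uint8_t *bitmap, uint64_t field)
{
	if (field >> 15)
		return true;	/* out-of-range field number: always exits to L1 */

	return bitmap[field / 8] & (1u << (field % 8));
}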
6346 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; in nested_vmx_exit_handled_mtf()
6352 * An MTF VM-exit may be injected into the guest by setting the in nested_vmx_exit_handled_mtf()
6353 * interruption-type to 7 (other event) and the vector field to 0. Such in nested_vmx_exit_handled_mtf()
6354 * is the case regardless of the 'monitor trap flag' VM-execution in nested_vmx_exit_handled_mtf()
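The interruption-information encoding referenced above packs the vector into bits 7:0, the event type into bits 10:8 and a valid flag into bit 31, so a synthesized MTF exit is simply a valid entry with type 7 (other event), vector 0 and no other bits set. A sketch of that exact-match test (macro and function names are illustrative):

#include <stdint.h>
#include <stdbool.h>

#define INTR_INFO_VALID		(1u << 31)
#define INTR_TYPE_OTHER_EVENT	(7u << 8)	/* type 7, vector 0 */

static bool vm_entry_injects_mtf(uint32_t entry_intr_info)
{
	return entry_intr_info == (INTR_INFO_VALID | INTR_TYPE_OTHER_EVENT);
}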
6376 return vcpu->arch.apf.host_apf_flags || in nested_vmx_l0_wants_exit()
6379 vcpu->guest_debug & in nested_vmx_l0_wants_exit()
6383 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in nested_vmx_l0_wants_exit()
6405 * L2 never uses directly L1's EPT, but rather L0's own EPT in nested_vmx_l0_wants_exit()
6415 * PML is emulated for an L1 VMM and should never be enabled in in nested_vmx_l0_wants_exit()
6420 /* VM functions are emulated through L2->L0 vmexits. */ in nested_vmx_l0_wants_exit()
6424 * At present, bus lock VM exit is never exposed to L1. in nested_vmx_l0_wants_exit()
6430 /* Hyper-V L2 TLB flush hypercall is handled by L0 */ in nested_vmx_l0_wants_exit()
6458 return vmcs12->exception_bitmap & in nested_vmx_l1_wants_exit()
6488 vmcs12->vmread_bitmap); in nested_vmx_l1_wants_exit()
6491 vmcs12->vmwrite_bitmap); in nested_vmx_l1_wants_exit()
6499 * emulate them for its L2 guest, i.e., allows 3-level nesting! in nested_vmx_l1_wants_exit()
6533 * The controls for "virtualize APIC accesses," "APIC- in nested_vmx_l1_wants_exit()
6534 * register virtualization," and "virtual-interrupt in nested_vmx_l1_wants_exit()
6548 * This should never happen, since it is not possible to in nested_vmx_l1_wants_exit()
6549 * set XSS to a non-zero value---neither in L1 nor in L2. in nested_vmx_l1_wants_exit()
6569 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6575 union vmx_exit_reason exit_reason = vmx->exit_reason; in nested_vmx_reflect_vmexit()
6579 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_reflect_vmexit()
6582 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM in nested_vmx_reflect_vmexit()
6585 if (unlikely(vmx->fail)) { in nested_vmx_reflect_vmexit()
6587 "hardware VM-instruction error: ", in nested_vmx_reflect_vmexit()
6606 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would in nested_vmx_reflect_vmexit()
6607 * need to be synthesized by querying the in-kernel LAPIC, but external in nested_vmx_reflect_vmexit()
6608 * interrupts are never reflected to L1 so it's a non-issue. in nested_vmx_reflect_vmexit()
6614 vmcs12->vm_exit_intr_error_code = in nested_vmx_reflect_vmexit()
6640 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6649 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
6650 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
6651 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
6654 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); in vmx_get_nested_state()
6662 vmcs12->vmcs_link_pointer != INVALID_GPA) in vmx_get_nested_state()
6663 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); in vmx_get_nested_state()
6666 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
6669 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
6675 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
6678 if (vmx->nested.mtf_pending) in vmx_get_nested_state()
6682 vmx->nested.has_preemption_timer_deadline) { in vmx_get_nested_state()
6686 vmx->nested.preemption_timer_deadline; in vmx_get_nested_state()
6695 return -EFAULT; in vmx_get_nested_state()
6712 if (!vmx->nested.need_vmcs12_to_shadow_sync) { in vmx_get_nested_state()
6716 * clean fields data always up-to-date while in vmx_get_nested_state()
6727 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); in vmx_get_nested_state()
6728 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); in vmx_get_nested_state()
6734 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) in vmx_get_nested_state()
6735 return -EFAULT; in vmx_get_nested_state()
6738 vmcs12->vmcs_link_pointer != INVALID_GPA) { in vmx_get_nested_state()
6739 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, in vmx_get_nested_state()
6741 return -EFAULT; in vmx_get_nested_state()
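From userspace, the state assembled above is consumed through the KVM_GET_NESTED_STATE vcpu ioctl: the caller reserves the size reported by KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE), records it in the size field, and gets back the header plus the vmcs12 (and, when in use, shadow vmcs12) blobs, which can later be fed to KVM_SET_NESTED_STATE. A minimal sketch for an x86 host with recent kernel headers; fd setup and error reporting are elided:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static struct kvm_nested_state *get_nested_state(int vm_fd, int vcpu_fd)
{
	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	struct kvm_nested_state *state;

	if (size <= 0)
		return NULL;		/* nested state not supported */

	state = calloc(1, size);
	if (!state)
		return NULL;

	state->size = size;		/* tell KVM how much room the buffer has */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;			/* restore later via KVM_SET_NESTED_STATE */
}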
6750 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6751 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
6764 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6767 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) in vmx_set_nested_state()
6768 return -EINVAL; in vmx_set_nested_state()
6770 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) { in vmx_set_nested_state()
6771 if (kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6772 return -EINVAL; in vmx_set_nested_state()
6774 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) in vmx_set_nested_state()
6775 return -EINVAL; in vmx_set_nested_state()
6786 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) in vmx_set_nested_state()
6787 return -EINVAL; in vmx_set_nested_state()
6790 return -EINVAL; in vmx_set_nested_state()
6792 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) in vmx_set_nested_state()
6793 return -EINVAL; in vmx_set_nested_state()
6796 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6797 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) in vmx_set_nested_state()
6798 return -EINVAL; in vmx_set_nested_state()
6800 if (kvm_state->hdr.vmx.smm.flags & in vmx_set_nested_state()
6802 return -EINVAL; in vmx_set_nested_state()
6804 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) in vmx_set_nested_state()
6805 return -EINVAL; in vmx_set_nested_state()
6813 (kvm_state->flags & in vmx_set_nested_state()
6815 : kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6816 return -EINVAL; in vmx_set_nested_state()
6818 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6819 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) in vmx_set_nested_state()
6820 return -EINVAL; in vmx_set_nested_state()
6822 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && in vmx_set_nested_state()
6824 !vmx->nested.enlightened_vmcs_enabled)) in vmx_set_nested_state()
6825 return -EINVAL; in vmx_set_nested_state()
6829 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) in vmx_set_nested_state()
6832 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; in vmx_set_nested_state()
6838 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { in vmx_set_nested_state()
6840 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || in vmx_set_nested_state()
6841 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || in vmx_set_nested_state()
6842 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)) in vmx_set_nested_state()
6843 return -EINVAL; in vmx_set_nested_state()
6848 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) { in vmx_set_nested_state()
6849 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || in vmx_set_nested_state()
6850 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) in vmx_set_nested_state()
6851 return -EINVAL; in vmx_set_nested_state()
6853 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); in vmx_set_nested_state()
6855 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { in vmx_set_nested_state()
6862 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; in vmx_set_nested_state()
6866 return -EINVAL; in vmx_set_nested_state()
6869 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { in vmx_set_nested_state()
6870 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
6871 vmx->nested.vmxon = false; in vmx_set_nested_state()
6873 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) in vmx_set_nested_state()
6874 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
6878 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) in vmx_set_nested_state()
6879 return -EFAULT; in vmx_set_nested_state()
6881 if (vmcs12->hdr.revision_id != VMCS12_REVISION) in vmx_set_nested_state()
6882 return -EINVAL; in vmx_set_nested_state()
6884 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) in vmx_set_nested_state()
6887 vmx->nested.nested_run_pending = in vmx_set_nested_state()
6888 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); in vmx_set_nested_state()
6890 vmx->nested.mtf_pending = in vmx_set_nested_state()
6891 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); in vmx_set_nested_state()
6893 ret = -EINVAL; in vmx_set_nested_state()
6895 vmcs12->vmcs_link_pointer != INVALID_GPA) { in vmx_set_nested_state()
6898 if (kvm_state->size < in vmx_set_nested_state()
6900 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) in vmx_set_nested_state()
6904 user_vmx_nested_state->shadow_vmcs12, in vmx_set_nested_state()
6906 ret = -EFAULT; in vmx_set_nested_state()
6910 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || in vmx_set_nested_state()
6911 !shadow_vmcs12->hdr.shadow_vmcs) in vmx_set_nested_state()
6915 vmx->nested.has_preemption_timer_deadline = false; in vmx_set_nested_state()
6916 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { in vmx_set_nested_state()
6917 vmx->nested.has_preemption_timer_deadline = true; in vmx_set_nested_state()
6918 vmx->nested.preemption_timer_deadline = in vmx_set_nested_state()
6919 kvm_state->hdr.vmx.preemption_timer_deadline; in vmx_set_nested_state()
6927 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()
6928 vmx->nested.force_msr_bitmap_recalc = true; in vmx_set_nested_state()
6933 if (vmx->nested.mtf_pending) in vmx_set_nested_state()
6939 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
6988 msrs->pinbased_ctls_low = in nested_vmx_setup_pinbased_ctls()
6991 msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl; in nested_vmx_setup_pinbased_ctls()
6992 msrs->pinbased_ctls_high &= in nested_vmx_setup_pinbased_ctls()
6997 msrs->pinbased_ctls_high |= in nested_vmx_setup_pinbased_ctls()
7005 msrs->exit_ctls_low = in nested_vmx_setup_exit_ctls()
7008 msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl; in nested_vmx_setup_exit_ctls()
7009 msrs->exit_ctls_high &= in nested_vmx_setup_exit_ctls()
7015 msrs->exit_ctls_high |= in nested_vmx_setup_exit_ctls()
7022 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; in nested_vmx_setup_exit_ctls()
7028 msrs->entry_ctls_low = in nested_vmx_setup_entry_ctls()
7031 msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl; in nested_vmx_setup_entry_ctls()
7032 msrs->entry_ctls_high &= in nested_vmx_setup_entry_ctls()
7037 msrs->entry_ctls_high |= in nested_vmx_setup_entry_ctls()
7042 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; in nested_vmx_setup_entry_ctls()
7048 msrs->procbased_ctls_low = in nested_vmx_setup_cpubased_ctls()
7051 msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl; in nested_vmx_setup_cpubased_ctls()
7052 msrs->procbased_ctls_high &= in nested_vmx_setup_cpubased_ctls()
7068 * hardware. For example, L1 can specify an MSR bitmap - and we in nested_vmx_setup_cpubased_ctls()
7069 * can use it to avoid exits to L1 - even when L0 runs L2 in nested_vmx_setup_cpubased_ctls()
7072 msrs->procbased_ctls_high |= in nested_vmx_setup_cpubased_ctls()
7077 msrs->procbased_ctls_low &= in nested_vmx_setup_cpubased_ctls()
7085 msrs->secondary_ctls_low = 0; in nested_vmx_setup_secondary_ctls()
7087 msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl; in nested_vmx_setup_secondary_ctls()
7088 msrs->secondary_ctls_high &= in nested_vmx_setup_secondary_ctls()
7107 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7112 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7114 msrs->ept_caps = in nested_vmx_setup_secondary_ctls()
7121 msrs->ept_caps &= ept_caps; in nested_vmx_setup_secondary_ctls()
7122 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | in nested_vmx_setup_secondary_ctls()
7126 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7128 msrs->ept_caps |= VMX_EPT_AD_BIT; in nested_vmx_setup_secondary_ctls()
7136 msrs->vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING; in nested_vmx_setup_secondary_ctls()
7140 * Old versions of KVM use the single-context version without in nested_vmx_setup_secondary_ctls()
7143 * not failing the single-context invvpid, and it is worse. in nested_vmx_setup_secondary_ctls()
7146 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7148 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | in nested_vmx_setup_secondary_ctls()
7153 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7157 msrs->secondary_ctls_high |= in nested_vmx_setup_secondary_ctls()
7161 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; in nested_vmx_setup_secondary_ctls()
7167 msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA; in nested_vmx_setup_misc_data()
7168 msrs->misc_low |= in nested_vmx_setup_misc_data()
7173 msrs->misc_high = 0; in nested_vmx_setup_misc_data()
7181 * guest, and the VMCS structure we give it - not about the in nested_vmx_setup_basic()
7184 msrs->basic = vmx_basic_encode_vmcs_info(VMCS12_REVISION, VMCS12_SIZE, in nested_vmx_setup_basic()
7187 msrs->basic |= VMX_BASIC_TRUE_CTLS; in nested_vmx_setup_basic()
7189 msrs->basic |= VMX_BASIC_INOUT; in nested_vmx_setup_basic()
7201 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; in nested_vmx_setup_cr_fixed()
7202 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; in nested_vmx_setup_cr_fixed()
7205 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); in nested_vmx_setup_cr_fixed()
7206 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); in nested_vmx_setup_cr_fixed()
7209 msrs->cr4_fixed1 |= X86_CR4_UMIP; in nested_vmx_setup_cr_fixed()
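The fixed CR MSRs initialized above encode the legal CR0/CR4 values for VMX operation: a bit set in FIXED0 must be 1 in the register, and a bit clear in FIXED1 must be 0. A sketch of the resulting validity test (helper name is illustrative):

#include <stdint.h>
#include <stdbool.h>

static bool cr_value_valid(uint64_t val, uint64_t fixed0, uint64_t fixed1)
{
	return (val & fixed0) == fixed0 &&	/* every must-be-one bit is set */
	       (val & ~fixed1) == 0;		/* no must-be-zero bit is set   */
}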
7217 * Each of these control msrs has a low and high 32-bit half: A low bit is on
7218 * if the corresponding bit in the (32-bit) control field *must* be on, and a
7224 struct nested_vmx_msrs *msrs = &vmcs_conf->nested; in nested_vmx_setup_ctls_msrs()
7230 * can be supported) and the list of features we want to expose - in nested_vmx_setup_ctls_msrs()
7235 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control in nested_vmx_setup_ctls_msrs()
7236 * fields of vmcs01 and vmcs02, will turn these bits off - and in nested_vmx_setup_ctls_msrs()
7238 * These rules have exceptions below. in nested_vmx_setup_ctls_msrs()
7256 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr(); in nested_vmx_setup_ctls_msrs()
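The low/high halves described in the comment above behave as allowed-0 and allowed-1 settings: a control value is legal only if it has every "low" bit set and no bit outside "high", and the same halves give the usual adjustment when constructing a value. A sketch of both operations (names are illustrative):

#include <stdint.h>
#include <stdbool.h>

static bool ctl_value_valid(uint32_t ctl, uint32_t low, uint32_t high)
{
	return (ctl & low) == low && (ctl & ~high) == 0;
}

static uint32_t ctl_adjust(uint32_t wanted, uint32_t low, uint32_t high)
{
	return (wanted | low) & high;	/* force required bits on, drop unsupported ones */
}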
7285 return -ENOMEM; in nested_vmx_hardware_setup()