Lines Matching full:nested

80 * If nested=1, nested virtualization is supported, i.e., guests may use
81 * VMX and be hypervisors for their own guests. If nested=0, guests may not
84 static bool __read_mostly nested = 0; variable
85 module_param(nested, bool, S_IRUGO);
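The two lines at 84-85 are the global switch for nested VMX. A minimal sketch of the same pattern, with comments on what each piece does (only the declaration is taken from the listing; everything else here is generic module_param() behaviour):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Off by default. S_IRUGO makes the value readable, but not writable,
 * under /sys/module/<module>/parameters/nested once the module is loaded. */
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

In practice this is set at load time, e.g. "modprobe kvm_intel nested=1", and later code simply tests the flag (see 2643 and 1882 below).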
152 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
335 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
378 * non-nested (L1) guest, it always points to vmcs01. For a nested
420 /* Support for a guest hypervisor (nested VMX) */
421 struct nested_vmx nested; member
583 return to_vmx(vcpu)->nested.current_vmcs12; in get_vmcs12()
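Line 583 is the accessor the rest of the file uses to reach the currently loaded vmcs12. A minimal sketch of what it looks like, assuming the struct layout implied by lines 420-421:

/* Sketch: return the software VMCS (vmcs12) that L1 last VMPTRLDed for
 * this vcpu; callers must only use it while one is actually loaded. */
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}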
1186 /* When we are running a nested L2 guest and L1 specified for it a in update_exception_bitmap()
1546 * Return the cr0 value that a nested guest would read. This is a combination
1578 * only update vmcs12->guest_cr0 on nested exit). in vmx_fpu_deactivate()
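Line 1546 describes how the CR0 value an L2 guest reads is put together. A sketch of that combination, assuming vmcs12 carries guest_cr0, cr0_read_shadow and cr0_guest_host_mask fields (only guest_cr0 appears in the listing; the other names are assumptions):

/* Sketch: bits the (L1) hypervisor owns are read from its CR0 read shadow,
 * guest-owned bits come from the real guest_cr0 field. */
static inline unsigned long guest_readable_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
	       (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}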
1671 * checks whether in a nested guest, we need to inject them to L1 or L2.
1806 * counter, even if a nested guest (L2) is currently running.
1814 to_vmx(vcpu)->nested.vmcs01_tsc_offset : in vmx_read_l1_tsc()
1842 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; in vmx_write_tsc_offset()
1859 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment; in vmx_adjust_tsc_offset()
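Lines 1806-1859 deal with reporting L1's TSC while an L2 guest may be running: the TSC_OFFSET field in the active VMCS then belongs to L2, so L1's offset is kept in nested.vmcs01_tsc_offset. A simplified sketch of the read side, under those assumptions:

/* Sketch: L1's guest TSC = host TSC + L1's offset. While L2 runs, the
 * hardware TSC_OFFSET holds L2's value, so use the saved vmcs01 offset. */
static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = is_guest_mode(vcpu) ?
		to_vmx(vcpu)->nested.vmcs01_tsc_offset :
		vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}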
1876 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
1877 * all guests if the "nested" module option is off, and can also be disabled
1882 return nested && guest_cpuid_has_vmx(vcpu); in nested_vmx_allowed()
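The check described at 1876-1882 is small enough to show whole; per the listing it combines the module option with the guest's CPUID, so userspace can also hide VMX from an individual guest:

/* Sketch: nested VMX is usable only if the module option is on AND the
 * VMX bit was exposed in this guest's CPUID. */
static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
	return nested && guest_cpuid_has_vmx(vcpu);
}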
1887 * returned for the various VMX controls MSRs when nested VMX is enabled.
1889 * valid during nested entry from L1 to L2.
1991 * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
2074 /* Currently, no nested ept or nested vpid */ in vmx_get_vmx_msr()
2643 if (nested) in hardware_setup()
3040 * So basically the check on whether to allow nested VMX in vmx_set_cr4()
3045 } else if (to_vmx(vcpu)->nested.vmxon) in vmx_set_cr4()
3936 * In nested virtualization, check if L1 asked to exit on external interrupts.
4084 if (to_vmx(vcpu)->nested.nested_run_pending || in vmx_interrupt_allowed()
4337 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
4932 * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
4942 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) in nested_get_current_vmcs02()
4943 if (item->vmptr == vmx->nested.current_vmptr) { in nested_get_current_vmcs02()
4944 list_move(&item->list, &vmx->nested.vmcs02_pool); in nested_get_current_vmcs02()
4948 if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { in nested_get_current_vmcs02()
4950 item = list_entry(vmx->nested.vmcs02_pool.prev, in nested_get_current_vmcs02()
4952 item->vmptr = vmx->nested.current_vmptr; in nested_get_current_vmcs02()
4953 list_move(&item->list, &vmx->nested.vmcs02_pool); in nested_get_current_vmcs02()
4968 item->vmptr = vmx->nested.current_vmptr; in nested_get_current_vmcs02()
4969 list_add(&(item->list), &(vmx->nested.vmcs02_pool)); in nested_get_current_vmcs02()
4970 vmx->nested.vmcs02_num++; in nested_get_current_vmcs02()
4978 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) in nested_free_vmcs02()
4983 vmx->nested.vmcs02_num--; in nested_free_vmcs02()
4996 list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { in nested_free_all_saved_vmcss()
5002 vmx->nested.vmcs02_num = 0; in nested_free_all_saved_vmcss()
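Lines 4932-5002 describe the vmcs02 pool: a small, most-recently-used cache of hardware VMCSs backing whichever vmcs12 L1 currently has loaded. A simplified sketch of the lookup/recycle/allocate logic, assuming a struct vmcs02_list entry type and a struct loaded_vmcs payload (the real allocation path also has to allocate and initialise the hardware VMCS itself):

/* Sketch of the vmcs02 cache: most-recently-used entries sit at the head. */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;			/* which vmcs12 this entry backs */
	struct loaded_vmcs vmcs02;
};

static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item;

	/* 1. Reuse an entry already backing the current vmcs12. */
	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmx->nested.current_vmptr) {
			list_move(&item->list, &vmx->nested.vmcs02_pool);
			return &item->vmcs02;
		}

	/* 2. Pool full: recycle the least-recently-used entry. */
	if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
		item = list_entry(vmx->nested.vmcs02_pool.prev,
				  struct vmcs02_list, list);
		item->vmptr = vmx->nested.current_vmptr;
		list_move(&item->list, &vmx->nested.vmcs02_pool);
		return &item->vmcs02;
	}

	/* 3. Otherwise allocate a fresh entry and add it to the pool. */
	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return NULL;
	item->vmptr = vmx->nested.current_vmptr;
	list_add(&item->list, &vmx->nested.vmcs02_pool);
	vmx->nested.vmcs02_num++;
	return &item->vmcs02;
}

The free side (4978-5002) walks the same list, with nested_free_vmcs02() dropping a single entry by vmptr and nested_free_all_saved_vmcss() emptying the pool.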
5044 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); in handle_vmon()
5045 vmx->nested.vmcs02_num = 0; in handle_vmon()
5047 vmx->nested.vmxon = true; in handle_vmon()
5063 if (!vmx->nested.vmxon) { in nested_vmx_check_permission()
5084 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
5089 if (!vmx->nested.vmxon) in free_nested()
5091 vmx->nested.vmxon = false; in free_nested()
5092 if (vmx->nested.current_vmptr != -1ull) { in free_nested()
5093 kunmap(vmx->nested.current_vmcs12_page); in free_nested()
5094 nested_release_page(vmx->nested.current_vmcs12_page); in free_nested()
5095 vmx->nested.current_vmptr = -1ull; in free_nested()
5096 vmx->nested.current_vmcs12 = NULL; in free_nested()
5099 if (vmx->nested.apic_access_page) { in free_nested()
5100 nested_release_page(vmx->nested.apic_access_page); in free_nested()
5101 vmx->nested.apic_access_page = 0; in free_nested()
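free_nested() (5084-5101) is the teardown path when L1 goes down or leaves VMX operation: drop the kmap of the currently loaded vmcs12 page, release that page, and release the cached APIC-access page. A condensed sketch assembled from the lines above (nested_release_page is taken from the listing; its definition is not shown there):

/* Sketch: undo what VMXON/VMPTRLD set up for this vcpu. */
static void free_nested(struct vcpu_vmx *vmx)
{
	if (!vmx->nested.vmxon)
		return;
	vmx->nested.vmxon = false;

	if (vmx->nested.current_vmptr != -1ull) {
		kunmap(vmx->nested.current_vmcs12_page);
		nested_release_page(vmx->nested.current_vmcs12_page);
		vmx->nested.current_vmptr = -1ull;
		vmx->nested.current_vmcs12 = NULL;
	}
	/* Unpin the APIC-access page mapped on behalf of L2, if any. */
	if (vmx->nested.apic_access_page) {
		nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = 0;
	}
	/* The vmcs02 pool is emptied separately (see 4996-5002). */
}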
5193 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { in nested_vmx_failValid()
5237 if (vmptr == vmx->nested.current_vmptr) { in handle_vmclear()
5238 kunmap(vmx->nested.current_vmcs12_page); in handle_vmclear()
5239 nested_release_page(vmx->nested.current_vmcs12_page); in handle_vmclear()
5240 vmx->nested.current_vmptr = -1ull; in handle_vmclear()
5241 vmx->nested.current_vmcs12 = NULL; in handle_vmclear()
5249 * possible that a nested vmx bug, not a guest hypervisor bug, in handle_vmclear()
5345 if (vmx->nested.current_vmptr == -1ull) { in nested_vmx_check_vmcs12()
5499 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5517 if (vmx->nested.current_vmptr != -1ull) { in handle_vmptrld()
5518 kunmap(vmx->nested.current_vmcs12_page); in handle_vmptrld()
5519 nested_release_page(vmx->nested.current_vmcs12_page); in handle_vmptrld()
5522 vmx->nested.current_vmptr = vmptr; in handle_vmptrld()
5523 vmx->nested.current_vmcs12 = new_vmcs12; in handle_vmptrld()
5524 vmx->nested.current_vmcs12_page = page; in handle_vmptrld()
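Lines 5499-5524 are the tail of handle_vmptrld(), where a new vmcs12 becomes current. A simplified sketch of that switch; nested_get_page() and the fail label are assumptions, and error handling plus the VMCS revision check are only indicated:

/* Sketch of the switch inside handle_vmptrld(); error paths abbreviated. */
if (vmx->nested.current_vmptr != vmptr) {
	struct page *page = nested_get_page(vcpu, vmptr); /* pins the guest page */
	struct vmcs12 *new_vmcs12;

	if (page == NULL)
		goto fail;		/* vmptr is not a valid, mapped gpa */
	new_vmcs12 = kmap(page);
	/* (the real code also rejects the page if its revision id is wrong) */

	/* Drop the previously loaded vmcs12, if there was one. */
	if (vmx->nested.current_vmptr != -1ull) {
		kunmap(vmx->nested.current_vmcs12_page);
		nested_release_page(vmx->nested.current_vmcs12_page);
	}
	vmx->nested.current_vmptr = vmptr;
	vmx->nested.current_vmcs12 = new_vmcs12;
	vmx->nested.current_vmcs12_page = page;
}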
5548 (void *)&to_vmx(vcpu)->nested.current_vmptr, in handle_vmptrst()
5735 if (vmx->nested.nested_run_pending) in nested_vmx_exit_handled()
5853 if (vmx->nested.nested_run_pending) in vmx_handle_exit()
5858 vmx->nested.nested_run_pending = 1; in vmx_handle_exit()
5860 vmx->nested.nested_run_pending = 0; in vmx_handle_exit()
6103 if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) { in vmx_vcpu_run()
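nested_run_pending (5735-6103) marks the window between L1 issuing VMLAUNCH/VMRESUME and the first real entry into L2; during that window an exit must not be reflected to L1, which has not yet seen its entry complete. A sketch of how the flag is driven in vmx_handle_exit(), based on 5853-5860 (the exit-reason test is an assumption, only the assignments appear in the listing):

/* Sketch: remember whether the exit being handled is L1's request to
 * launch/resume L2, so that exits are not forwarded to L1 too early. */
if (exit_reason == EXIT_REASON_VMLAUNCH ||
    exit_reason == EXIT_REASON_VMRESUME)
	vmx->nested.nested_run_pending = 1;
else
	vmx->nested.nested_run_pending = 0;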
6344 vmx->nested.current_vmptr = -1ull;
6345 vmx->nested.current_vmcs12 = NULL;
6441 if (func == 1 && nested)
6446 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
6562 if (vmx->nested.apic_access_page) /* shouldn't happen */
6563 nested_release_page(vmx->nested.apic_access_page);
6564 vmx->nested.apic_access_page =
6572 if (!vmx->nested.apic_access_page)
6577 page_to_phys(vmx->nested.apic_access_page));
6640 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
6642 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
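Lines 6640-6642 are the TSC programming in prepare_vmcs02(): offsets stack additively, so while L2 runs the hardware offset is L0's offset for L1 (saved earlier in vmcs01_tsc_offset, see 1842 and 6788) plus L1's offset for L2, when L1 uses TSC offsetting at all. A sketch, with the control-bit name taken as an assumption:

/* Sketch: the TSC offset hardware applies while L2 runs. */
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
	vmcs_write64(TSC_OFFSET,
		     vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
else
	vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);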
6686 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
6687 * for running an L2 nested guest.
6704 * The nested entry process starts with enforcing various prerequisites
6779 * the nested entry.
6788 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
6811 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
6848 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
6941 * A part of what we need to do when the nested L2 guest exits and we want to
6944 * This function is to be called not only on normal nested exit, but also on
6945 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
7022 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
7044 nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
7049 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
7055 if (vmx->nested.apic_access_page) {
7056 nested_release_page(vmx->nested.apic_access_page);
7057 vmx->nested.apic_access_page = 0;
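Lines 7022-7057 are the L2-to-L1 exit path: after prepare_vmcs12() has copied L2 state back into vmcs12, the vmcs02 backing it goes back to the pool, L1's TSC offset is restored into the hardware VMCS, and the pinned APIC-access page is dropped. Condensed from the lines above:

/* Sketch: cleanup after emulating an exit from L2 to L1. */
nested_free_vmcs02(vmx, vmx->nested.current_vmptr);

/* vmcs01 is active again: give L1 back its own TSC offset. */
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);

/* Unpin the APIC-access page mapped on behalf of L2, if any. */
if (vmx->nested.apic_access_page) {
	nested_release_page(vmx->nested.apic_access_page);
	vmx->nested.apic_access_page = 0;
}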