Lines Matching full:nested
62 #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
107 /* Nested Paging related state */
137 struct nested_state nested; member
184 static int nested = 1; variable
185 module_param(nested, int, S_IRUGO);
248 h = &svm->nested.hsave->control; in recalc_intercepts()
249 g = &svm->nested; in recalc_intercepts()
260 return svm->nested.hsave; in get_host_vmcb()
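    The matches at 248-260 are the heart of intercept merging: while a nested guest (L2) runs, the live VMCB must trap everything that either KVM itself (the host state parked in hsave) or the L1 hypervisor (cached in svm->nested) wants intercepted, and get_host_vmcb() at 260 redirects "host" intercept updates into hsave while guest mode is active. A condensed sketch of the union, following the upstream recalc_intercepts(); is_guest_mode() and the vmcb_control_area layout are assumed from the surrounding svm.c:

        static void recalc_intercepts(struct vcpu_svm *svm)
        {
        	struct vmcb_control_area *c, *h;
        	struct nested_state *g;

        	if (!is_guest_mode(&svm->vcpu))
        		return;

        	c = &svm->vmcb->control;          /* VMCB the CPU actually runs */
        	h = &svm->nested.hsave->control;  /* L0 (KVM) intercepts */
        	g = &svm->nested;                 /* L1 intercepts, cached at VMRUN */

        	/* Effective intercepts are the union: either level may trap */
        	c->intercept_cr         = h->intercept_cr         | g->intercept_cr;
        	c->intercept_dr         = h->intercept_dr         | g->intercept_dr;
        	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
        	c->intercept            = h->intercept            | g->intercept;
        }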
506 * If we are within a nested VM we'd better #VMEXIT and let the guest in svm_queue_exception()
545 /* Use _safe variants to not break nested virtualization */ in svm_init_erratum_383()
826 if (nested) { in svm_hardware_setup()
827 printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); in svm_hardware_setup()
841 printk(KERN_INFO "kvm: Nested Paging disabled\n"); in svm_hardware_setup()
846 printk(KERN_INFO "kvm: Nested Paging enabled\n"); in svm_hardware_setup()
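    The nested flag at 184-185 is a module parameter (readable under /sys/module/kvm_amd/parameters/), and svm_hardware_setup() consults it at 826-827. A minimal sketch of the gate; the kvm_enable_efer_bits() call is an assumption taken from the same era of svm.c:

        static int nested = 1;
        module_param(nested, int, S_IRUGO);	/* read-only via sysfs */

        /* in svm_hardware_setup() */
        if (nested) {
        	printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
        	/* let guests set EFER.SVME so they can run guests of their own */
        	kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }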
952 svm->nested.hsave->control.tsc_offset; in svm_write_tsc_offset()
953 svm->nested.hsave->control.tsc_offset = offset; in svm_write_tsc_offset()
967 svm->nested.hsave->control.tsc_offset += adjustment; in svm_adjust_tsc_offset()
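    Lines 952-967 keep two TSC offsets in play: while L2 runs, the delta L1 programmed for it (live VMCB offset minus the hsave offset) must survive when L0 retargets L1's offset, and adjustments must land in both places. A sketch of the write path under that assumption, following the upstream shape:

        static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        {
        	struct vcpu_svm *svm = to_svm(vcpu);
        	u64 g_tsc_offset = 0;

        	if (is_guest_mode(vcpu)) {
        		/* preserve L1's extra offset for L2 on top of the new L0 offset */
        		g_tsc_offset = svm->vmcb->control.tsc_offset -
        			       svm->nested.hsave->control.tsc_offset;
        		svm->nested.hsave->control.tsc_offset = offset;
        	}

        	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
        }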
1091 /* Setup VMCB for Nested Paging */ in init_vmcb()
1103 svm->nested.vmcb = 0; in init_vmcb()
1171 svm->nested.hsave = page_address(hsave_page); in svm_create_vcpu()
1176 svm->nested.msrpm = page_address(nested_msrpm_pages); in svm_create_vcpu()
1177 svm_vcpu_init_msrpm(svm->nested.msrpm); in svm_create_vcpu()
1218 __free_page(virt_to_page(svm->nested.hsave)); in svm_free_vcpu()
1219 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
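    Lines 1171-1219 pair the two nested-specific allocations with their release: one page for hsave (KVM's state across VMRUN) and MSRPM_ALLOC_ORDER pages for the merged MSR permission bitmap. A trimmed sketch of the pairing; the error labels are illustrative, not the file's actual ones:

        /* in svm_create_vcpu(), earlier allocations elided */
        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
        	goto err_hsave;
        svm->nested.hsave = page_address(hsave_page);

        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
        	goto err_msrpm;
        svm->nested.msrpm = page_address(nested_msrpm_pages);
        svm_vcpu_init_msrpm(svm->nested.msrpm);

        /* in svm_free_vcpu() */
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);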
1849 return svm->nested.nested_cr3; in nested_svm_get_tdp_cr3()
1855 u64 cr3 = svm->nested.nested_cr3; in nested_svm_get_tdp_pdptr()
1941 svm->nested.exit_required = true; in nested_svm_check_exception()
1963 if (svm->nested.exit_required) in nested_svm_intr()
1970 if (svm->nested.intercept & 1ULL) { in nested_svm_intr()
1977 svm->nested.exit_required = true; in nested_svm_intr()
1991 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) in nested_svm_nmi()
1995 svm->nested.exit_required = true; in nested_svm_nmi()
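    Lines 1941-1995 all use the same deferred-exit pattern: when an exception, interrupt, or NMI arrives while L2 runs and L1 intercepts it, KVM must not inject it into L2; instead it flags exit_required and emulates a #VMEXIT to L1 later (consumed at 3362-3364, and checked again at 3708 before guest entry). A hedged sketch of the NMI case:

        static int nested_svm_nmi(struct vcpu_svm *svm)
        {
        	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
        		return 0;	/* L1 does not care: inject into L2 */

        	/* arrange an emulated #VMEXIT(NMI) to L1 instead of injecting */
        	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
        	svm->nested.exit_required = true;

        	return 1;	/* caller must not deliver the NMI to L2 */
        }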
2033 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
2037 gpa = svm->nested.vmcb_iopm + (port / 8); in nested_svm_intercept_ioio()
2052 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
2066 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4)) in nested_svm_exit_handled_msr()
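    Lines 2052-2066 decide whether an MSR access in L2 should #VMEXIT to L1: the answer lives in L1's MSR permission bitmap, read straight out of guest memory at vmcb_msrpm. A sketch following the upstream helper; svm_msrpm_offset() maps an MSR number to a 32-bit word of the bitmap, with two bits (read/write) per MSR:

        static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
        {
        	u32 offset, msr, value;
        	int write, mask;

        	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
        		return NESTED_EXIT_HOST;	/* L1 traps no MSRs at all */

        	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        	offset = svm_msrpm_offset(msr);
        	write  = svm->vmcb->control.exit_info_1 & 1;	/* 1 = WRMSR */
        	mask   = 1 << ((2 * (msr & 0xf)) + write);

        	if (offset == MSR_INVALID)
        		return NESTED_EXIT_DONE;

        	offset *= 4;	/* 32-bit words -> bytes */

        	if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset,
        			   &value, 4))
        		return NESTED_EXIT_DONE;

        	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
        }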
2118 if (svm->nested.intercept_cr & bit) in nested_svm_intercept()
2124 if (svm->nested.intercept_dr & bit) in nested_svm_intercept()
2130 if (svm->nested.intercept_exceptions & excp_bits) in nested_svm_intercept()
2144 if (svm->nested.intercept & exit_bits) in nested_svm_intercept()
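    Lines 2118-2144 share one shape: translate the raw exit code into a bit and test it against the intercept masks L1 handed over at VMRUN (cached at 2474-2477), so no guest memory is touched on the exit path. A condensed sketch of the dispatch, with the DR, MSR, and IOIO cases elided:

        static int nested_svm_intercept(struct vcpu_svm *svm)
        {
        	u32 exit_code = svm->vmcb->control.exit_code;
        	int vmexit = NESTED_EXIT_HOST;

        	switch (exit_code) {
        	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
        		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
        		if (svm->nested.intercept_cr & bit)
        			vmexit = NESTED_EXIT_DONE;
        		break;
        	}
        	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
        		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
        		if (svm->nested.intercept_exceptions & excp_bits)
        			vmexit = NESTED_EXIT_DONE;
        		break;
        	}
        	default: {
        		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
        		if (svm->nested.intercept & exit_bits)
        			vmexit = NESTED_EXIT_DONE;
        	}
        	}

        	return vmexit;
        }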
2197 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmexit()
2208 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); in nested_svm_vmexit()
2214 svm->nested.vmcb = 0; in nested_svm_vmexit()
2278 svm->nested.nested_cr3 = 0; in nested_svm_vmexit()
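    Lines 2197-2278 are the unwind path: map L1's VMCB by the GPA remembered at VMRUN, copy L2's final state into it, clear the guest-mode bookkeeping, and reload KVM's own state from hsave. A heavily trimmed sketch of the control flow; the real function copies many more save/control fields:

        static int nested_svm_vmexit(struct vcpu_svm *svm)
        {
        	struct vmcb *nested_vmcb;
        	struct vmcb *hsave = svm->nested.hsave;
        	struct vmcb *vmcb = svm->vmcb;
        	struct page *page;

        	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
        	if (!nested_vmcb)
        		return 1;

        	/* leave guest mode; no nested VMCB is active any more */
        	leave_guest_mode(&svm->vcpu);
        	svm->nested.vmcb = 0;

        	/* write back L2's exit state into L1's VMCB (trimmed) */
        	nested_vmcb->save.rip          = vmcb->save.rip;
        	nested_vmcb->control.exit_code = vmcb->control.exit_code;
        	/* ... */

        	/* restore KVM's state from hsave and drop the nested CR3 */
        	svm->nested.nested_cr3 = 0;

        	nested_svm_unmap(page);
        	return 0;
        }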
2319 * nested vmcb. It is optimized in that it only merges the parts where in nested_svm_vmrun_msrpm()
2324 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
2335 offset = svm->nested.vmcb_msrpm + (p * 4); in nested_svm_vmrun_msrpm()
2340 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
2343 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); in nested_svm_vmrun_msrpm()
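    Lines 2319-2343 build the bitmap the CPU actually consults while L2 runs: KVM's own msrpm OR'd with L1's, so an MSR access traps if either level wants it. Only the offsets KVM itself ever marks (the msrpm_offsets[] table) need merging, which is the optimization the comment at 2319 describes. Sketch following the upstream loop:

        static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
        {
        	int i;

        	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
        		return true;	/* L1 traps no MSRs: KVM's bitmap suffices */

        	for (i = 0; i < MSRPM_OFFSETS; i++) {
        		u32 value, p;
        		u64 offset;

        		if (msrpm_offsets[i] == 0xffffffff)
        			break;	/* end of the interesting offsets */

        		p      = msrpm_offsets[i];
        		offset = svm->nested.vmcb_msrpm + (p * 4);

        		if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
        			return false;

        		/* trap if either L0 or L1 wants the trap */
        		svm->nested.msrpm[p] = svm->msrpm[p] | value;
        	}

        	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);

        	return true;
        }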
2365 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmrun()
2433 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; in nested_svm_vmrun()
2437 /* Load the nested guest state */ in nested_svm_vmrun()
2470 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL; in nested_svm_vmrun()
2471 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; in nested_svm_vmrun()
2474 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; in nested_svm_vmrun()
2475 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; in nested_svm_vmrun()
2476 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; in nested_svm_vmrun()
2477 svm->nested.intercept = nested_vmcb->control.intercept; in nested_svm_vmrun()
2492 /* We don't want to see VMMCALLs from a nested guest */ in nested_svm_vmrun()
2513 svm->nested.vmcb = vmcb_gpa; in nested_svm_vmrun()
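    Lines 2365-2513 summarize VMRUN emulation: save KVM's state into hsave, load L1's guest state, cache every intercept mask so the exit-path checks above never re-read guest memory, and remember the guest VMCB's GPA for the eventual #VMEXIT. A condensed sketch of the caching step inside nested_svm_vmrun(), after the nested VMCB has been mapped:

        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
        svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;

        /* cache L1's intercepts for the nested_svm_intercept() fast path */
        svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
        svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept            = nested_vmcb->control.intercept;

        /* VMMCALL from L2 must reach L1, never be handled by KVM directly */
        clr_intercept(svm, INTERCEPT_VMMCALL);

        /* remember which guest VMCB is running; #VMEXIT maps it back */
        svm->nested.vmcb = vmcb_gpa;
        enter_guest_mode(&svm->vcpu);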
2798 intercept = svm->nested.intercept; in check_selective_cr0_intercepted()
2996 *data = svm->nested.hsave_msr; in svm_get_msr()
2999 *data = svm->nested.vm_cr_msr; in svm_get_msr()
3039 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
3042 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
3043 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
3045 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
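    Lines 3039-3045 implement the VM_CR MSR's lock semantics: once SVM_DIS is set under the lock, neither the lock nor the disable bit may change again, and disabling SVM while EFER.SVME is on must fault. A sketch following the upstream handler:

        static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
        {
        	struct vcpu_svm *svm = to_svm(vcpu);
        	int svm_dis, chg_mask;

        	if (data & ~SVM_VM_CR_VALID_MASK)
        		return 1;	/* reserved bits set: fault */

        	chg_mask = SVM_VM_CR_VALID_MASK;

        	/* once locked and disabled, those two bits become read-only */
        	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
        		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

        	svm->nested.vm_cr_msr &= ~chg_mask;
        	svm->nested.vm_cr_msr |= (data & chg_mask);

        	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

        	/* disabling SVM while EFER.SVME is set is not allowed */
        	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
        		return 1;

        	return 0;
        }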
3107 svm->nested.hsave_msr = data; in svm_set_msr()
3362 if (unlikely(svm->nested.exit_required)) { in handle_exit()
3364 svm->nested.exit_required = false; in handle_exit()
3708 if (unlikely(svm->nested.exit_required)) in svm_vcpu_run()
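    Lines 3362-3364 and 3708 consume the exit_required flag set by nested_svm_check_exception(), nested_svm_intr(), and nested_svm_nmi(): the emulated #VMEXIT happens on the next handle_exit(), and svm_vcpu_run() refuses to enter the guest while one is pending. Sketch of the handle_exit() side:

        /* in handle_exit(), before the ordinary exit-reason dispatch */
        if (unlikely(svm->nested.exit_required)) {
        	nested_svm_vmexit(svm);	/* emulate the #VMEXIT into L1 now */
        	svm->nested.exit_required = false;

        	return 1;	/* back to the vcpu loop; L1 handles the exit */
        }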
3918 if (nested)
3924 ASID emulation to nested SVM */
4059 intercept = svm->nested.intercept;