Lines Matching: is - decoded - cs
18 #include <linux/amd-iommu.h>
24 #include <linux/psp-sev.h>
39 #include <asm/spec-ctrl.h>
85 bool always; /* True if intercept is initially cleared */
130 * AMD does not virtualize APIC TSC-deadline timer mode, but it is
147 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
150 * count value. On VMRUN this value is loaded into an internal counter.
151 * Each time a pause instruction is executed, this counter is decremented
152 * until it reaches zero at which time a #VMEXIT is generated if pause
153 * intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
159 * the amount of time a guest is allowed to execute in a pause loop.
160 * In this mode, a 16-bit pause filter threshold field is added in the
161 * VMCB. The threshold value is a cycle count that is used to reset the
166 * If the elapsed cycle count is greater than the pause filter threshold,
167 * then the internal pause count is reloaded from the VMCB and execution
168 * continues. If the elapsed cycle count is less than the pause filter
169 * threshold, then the internal pause count is decremented. If the count
170 * value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
171 * triggered. If advanced pause filtering is supported and pause filter
172 * threshold field is set to zero, the filter will operate in the simpler,
182 /* Default doubles per-vcpu window every exit. */
186 /* Default resets per-vcpu window every exit to pause_filter_count. */
190 /* Default is to compute the maximum so we can never overflow. */
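The comment block above describes the hardware side of Pause-Loop Exiting: VMRUN loads an internal counter from the VMCB pause filter count, each PAUSE either decrements it or (with advanced filtering) reloads it when the elapsed cycle count exceeds the threshold, and a #VMEXIT fires when the counter goes below zero. The user-space model below is a sketch of that behavior only, not kernel code; the field names, constants, and cycle values are invented for illustration.

    /*
     * Toy model of the advanced pause filter described above (APM Vol 2,
     * 15.14.4).  Illustrative user-space sketch; numbers are arbitrary.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct ple_state {
        int count;            /* internal counter, loaded on VMRUN        */
        int filter_count;     /* VMCB pause_filter_count                  */
        unsigned long thresh; /* VMCB pause_filter_thresh (cycles), 0=off */
        unsigned long last;   /* cycle count at the most recent PAUSE     */
    };

    /* Returns true when the modeled CPU would raise a PAUSE #VMEXIT. */
    static bool model_pause(struct ple_state *s, unsigned long now)
    {
        if (s->thresh && now - s->last > s->thresh)
            s->count = s->filter_count;  /* spin restarted: reload counter */
        else
            s->count--;                  /* still inside the same spin loop */
        s->last = now;
        return s->count < 0;             /* intercept fires below zero */
    }

    int main(void)
    {
        struct ple_state s = { .count = 3, .filter_count = 3, .thresh = 100 };
        unsigned long tsc = 0;

        for (int i = 0; i < 6; i++) {
            tsc += 10;                            /* tight spin, 10 cycles apart */
            if (model_pause(&s, tsc)) {
                printf("PAUSE #%d -> #VMEXIT\n", i);
                s.count = s.filter_count;         /* reloaded on the next VMRUN */
            }
        }
        return 0;
    }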
250 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
256 static int tsc_aux_uret_slot __read_mostly = -1;
274 offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */ in svm_msrpm_offset()
277 /* Now we have the u8 offset - but need the u32 offset */ in svm_msrpm_offset()
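The two comments above come from svm_msrpm_offset(): the MSR permission map holds two intercept bits per MSR (read, then write), so four MSRs share a byte, and the byte offset is then converted to a u32 word offset. The sketch below works through that arithmetic, assuming the three architectural MSR ranges from the APM layout (0x0, 0xC0000000, 0xC0010000, 8192 MSR numbers each); the helper names are mine, not KVM's.

    /*
     * Sketch of the MSR permission map index math: 2 bits per MSR
     * (low bit = read intercept, high bit = write intercept), so
     * 4 MSRs per byte.  Range bases per the APM; helpers illustrative.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MSRS_PER_RANGE   0x2000u                 /* 8192 MSRs per range */
    #define BYTES_PER_RANGE  (MSRS_PER_RANGE / 4)

    static const uint32_t range_base[] = { 0x00000000, 0xc0000000, 0xc0010000 };

    /* Return the byte offset into the MSRPM, or -1 if the MSR is not covered. */
    static int msrpm_byte_offset(uint32_t msr)
    {
        for (unsigned int i = 0; i < 3; i++) {
            if (msr < range_base[i] || msr >= range_base[i] + MSRS_PER_RANGE)
                continue;
            return i * BYTES_PER_RANGE + (msr - range_base[i]) / 4;
        }
        return -1;
    }

    int main(void)
    {
        uint32_t msr = 0xc0000080;                /* EFER, second range */
        int byte = msrpm_byte_offset(msr);
        int bit  = (msr & 3) * 2;                 /* read bit in that byte; write bit is +1 */

        printf("byte %d (u32 word %d), read bit %d, write bit %d\n",
               byte, byte / 4, bit, bit + 1);
        return 0;
    }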
299 u64 old_efer = vcpu->arch.efer; in svm_set_efer()
300 vcpu->arch.efer = efer; in svm_set_efer()
314 /* #GP intercept is still needed for vmware backdoor */ in svm_set_efer()
330 vcpu->arch.efer = old_efer; in svm_set_efer()
338 if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm)) in svm_set_efer()
343 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
344 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_efer()
353 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
363 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
365 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
376 * SEV-ES does not expose the next RIP. The RIP update is controlled by in __svm_skip_emulated_instruction()
379 if (sev_es_guest(vcpu->kvm)) in __svm_skip_emulated_instruction()
382 if (nrips && svm->vmcb->control.next_rip != 0) { in __svm_skip_emulated_instruction()
384 svm->next_rip = svm->vmcb->control.next_rip; in __svm_skip_emulated_instruction()
387 if (!svm->next_rip) { in __svm_skip_emulated_instruction()
389 old_rflags = svm->vmcb->save.rflags; in __svm_skip_emulated_instruction()
395 svm->vmcb->save.rflags = old_rflags; in __svm_skip_emulated_instruction()
397 kvm_rip_write(vcpu, svm->next_rip); in __svm_skip_emulated_instruction()
421 * the instruction even if NextRIP is supported to acquire the next in svm_update_soft_interrupt_rip()
425 * retry the instruction, it's the least awful option. If NRIPS is in svm_update_soft_interrupt_rip()
430 return -EIO; in svm_update_soft_interrupt_rip()
436 * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection in svm_update_soft_interrupt_rip()
437 * doesn't complete due to a VM-Exit occurring while the CPU is in svm_update_soft_interrupt_rip()
439 * work as there may be no backing instruction, e.g. if the event is in svm_update_soft_interrupt_rip()
440 * being injected by L1 for L2, or if the guest is patching INT3 into in svm_update_soft_interrupt_rip()
443 svm->soft_int_injected = true; in svm_update_soft_interrupt_rip()
444 svm->soft_int_csbase = svm->vmcb->save.cs.base; in svm_update_soft_interrupt_rip()
445 svm->soft_int_old_rip = old_rip; in svm_update_soft_interrupt_rip()
446 svm->soft_int_next_rip = rip; in svm_update_soft_interrupt_rip()
452 svm->vmcb->control.next_rip = rip; in svm_update_soft_interrupt_rip()
459 struct kvm_queued_exception *ex = &vcpu->arch.exception; in svm_inject_exception()
464 if (kvm_exception_is_soft(ex->vector) && in svm_inject_exception()
468 svm->vmcb->control.event_inj = ex->vector in svm_inject_exception()
470 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0) in svm_inject_exception()
472 svm->vmcb->control.event_inj_err = ex->error_code; in svm_inject_exception()
505 vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; in svm_init_osvw()
506 vcpu->arch.osvw.status = osvw_status & ~(6ULL); in svm_init_osvw()
510 * all osvw.status bits inside that length, including bit 0 (which is in svm_init_osvw()
512 * osvw_len is 0 then osvw_status[0] carries no information. We need to in svm_init_osvw()
514 * is present (because we really don't know). in svm_init_osvw()
517 vcpu->arch.osvw.status |= 1; in svm_init_osvw()
525 if (c->x86_vendor != X86_VENDOR_AMD && in __kvm_is_svm_supported()
526 c->x86_vendor != X86_VENDOR_HYGON) { in __kvm_is_svm_supported()
537 pr_info("KVM is unsupported when running as an SEV guest\n"); in __kvm_is_svm_supported()
558 return -EIO; in svm_check_processor_compat()
615 return -EBUSY; in svm_hardware_enable()
618 sd->asid_generation = 1; in svm_hardware_enable()
619 sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; in svm_hardware_enable()
620 sd->next_asid = sd->max_asid + 1; in svm_hardware_enable()
621 sd->min_asid = max_sev_asid + 1; in svm_hardware_enable()
625 wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa); in svm_hardware_enable()
639 * Note that it is possible to have a system with mixed processor in svm_hardware_enable()
642 * is present on one processor and not on another then assume that the in svm_hardware_enable()
643 * erratum is present everywhere). in svm_hardware_enable()
660 osvw_status &= (1ULL << osvw_len) - 1; in svm_hardware_enable()
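The fragments above are the OSVW (OS Visible Workaround) handling in svm_hardware_enable(): each CPU's reported length and status are folded into conservative global values, and bits past the shortest valid length are discarded, exactly as the "worst case" comment says. Below is a small sketch of that aggregation idea only; the sample lengths and status bits are invented, and the starting length is an arbitrary cap for the sketch.

    /*
     * Sketch of the conservative OSVW aggregation: keep the smallest valid
     * length seen on any CPU, OR the status bits together, and mask off
     * bits beyond that length.  Illustrative; values are invented.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t osvw_len = 4;          /* arbitrary initial cap for the sketch */
    static uint64_t osvw_status;

    static void fold_cpu_osvw(uint64_t len, uint64_t status)
    {
        if (len < osvw_len)
            osvw_len = len;                     /* worst case: shortest length */
        osvw_status |= status;                  /* erratum present anywhere    */
        osvw_status &= (1ULL << osvw_len) - 1;  /* bits past len carry no info */
    }

    int main(void)
    {
        fold_cpu_osvw(4, 0x5);   /* CPU0 reports bits 0 and 2                 */
        fold_cpu_osvw(3, 0x8);   /* CPU1: shorter length, so bit 3 is dropped */
        printf("len=%llu status=%#llx\n",
               (unsigned long long)osvw_len, (unsigned long long)osvw_status);
        return 0;
    }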
670 * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type in svm_hardware_enable()
671 * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests. in svm_hardware_enable()
679 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400); in svm_hardware_enable()
681 rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi); in svm_hardware_enable()
691 if (!sd->save_area) in svm_cpu_uninit()
694 kfree(sd->sev_vmcbs); in svm_cpu_uninit()
695 __free_page(sd->save_area); in svm_cpu_uninit()
696 sd->save_area_pa = 0; in svm_cpu_uninit()
697 sd->save_area = NULL; in svm_cpu_uninit()
703 int ret = -ENOMEM; in svm_cpu_init()
706 sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO); in svm_cpu_init()
707 if (!sd->save_area) in svm_cpu_init()
714 sd->save_area_pa = __sme_page_pa(sd->save_area); in svm_cpu_init()
718 __free_page(sd->save_area); in svm_cpu_init()
719 sd->save_area = NULL; in svm_cpu_init()
726 struct vmcb *vmcb = svm->vmcb01.ptr; in set_dr_intercepts()
728 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ); in set_dr_intercepts()
729 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ); in set_dr_intercepts()
730 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ); in set_dr_intercepts()
731 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ); in set_dr_intercepts()
732 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ); in set_dr_intercepts()
733 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ); in set_dr_intercepts()
734 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ); in set_dr_intercepts()
735 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE); in set_dr_intercepts()
736 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE); in set_dr_intercepts()
737 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE); in set_dr_intercepts()
738 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE); in set_dr_intercepts()
739 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE); in set_dr_intercepts()
740 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE); in set_dr_intercepts()
741 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE); in set_dr_intercepts()
742 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ); in set_dr_intercepts()
743 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE); in set_dr_intercepts()
750 struct vmcb *vmcb = svm->vmcb01.ptr; in clr_dr_intercepts()
752 vmcb->control.intercepts[INTERCEPT_DR] = 0; in clr_dr_intercepts()
765 return -ENOENT; in direct_access_msr_slot()
774 if (slot == -ENOENT) in set_shadow_msr_intercept()
779 set_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
781 clear_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
784 set_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
786 clear_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
791 return direct_access_msr_slot(index) != -ENOENT; in valid_msr_intercept()
802 * For non-nested case: in msr_write_intercepted()
810 msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm: in msr_write_intercepted()
811 to_svm(vcpu)->msrpm; in msr_write_intercepted()
856 svm->nested.force_msr_bitmap_recalc = true; in set_msr_interception_bitmap()
896 if (intercept == svm->x2avic_msrs_intercepted) in svm_set_x2apic_msr_interception()
908 set_msr_interception(&svm->vcpu, svm->msrpm, index, in svm_set_x2apic_msr_interception()
912 svm->x2avic_msrs_intercepted = intercept; in svm_set_x2apic_msr_interception()
932 u32 read = test_bit(i, svm->shadow_msr_intercept.read); in svm_msr_filter_changed()
933 u32 write = test_bit(i, svm->shadow_msr_intercept.write); in svm_msr_filter_changed()
935 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write); in svm_msr_filter_changed()
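svm_msr_filter_changed() above replays KVM's desired pass-through state from the shadow_msr_intercept bitmaps whenever userspace changes its MSR filter, and set_msr_interception_bitmap() forces interception for any access the filter does not allow. The fragment below is only a sketch of that merge decision; the struct and helper names are illustrative stand-ins for the real bitmap plumbing.

    /*
     * Sketch of the decision replayed on MSR filter changes: an MSR access
     * stays un-intercepted only if KVM wants it passed through (shadow bit)
     * AND the userspace MSR filter allows it.  Names are illustrative.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct msr_policy {
        bool kvm_wants_passthrough;   /* shadow_msr_intercept bit           */
        bool userspace_allows;        /* result of the userspace MSR filter */
    };

    /* true = leave un-intercepted, false = intercept (exit to KVM) */
    static bool msr_passthrough(const struct msr_policy *p)
    {
        return p->kvm_wants_passthrough && p->userspace_allows;
    }

    int main(void)
    {
        struct msr_policy p = { .kvm_wants_passthrough = true,
                                .userspace_allows = false };
        printf("passthrough=%d\n", msr_passthrough(&p));   /* 0: intercepted */
        return 0;
    }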
984 to_vmcb->save.dbgctl = from_vmcb->save.dbgctl; in svm_copy_lbrs()
985 to_vmcb->save.br_from = from_vmcb->save.br_from; in svm_copy_lbrs()
986 to_vmcb->save.br_to = from_vmcb->save.br_to; in svm_copy_lbrs()
987 to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from; in svm_copy_lbrs()
988 to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to; in svm_copy_lbrs()
997 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in svm_enable_lbrv()
998 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); in svm_enable_lbrv()
999 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); in svm_enable_lbrv()
1000 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); in svm_enable_lbrv()
1001 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); in svm_enable_lbrv()
1005 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr); in svm_enable_lbrv()
1012 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; in svm_disable_lbrv()
1013 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
1014 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
1015 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
1016 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
1023 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb); in svm_disable_lbrv()
1029 * If LBR virtualization is disabled, the LBR MSRs are always kept in in svm_get_lbr_vmcb()
1030 * vmcb01. If LBR virtualization is enabled and L1 is running VMs of in svm_get_lbr_vmcb()
1033 return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb : in svm_get_lbr_vmcb()
1034 svm->vmcb01.ptr; in svm_get_lbr_vmcb()
1040 bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK; in svm_update_lbrv()
1041 bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) || in svm_update_lbrv()
1043 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)); in svm_update_lbrv()
1056 svm->nmi_singlestep = false; in disable_nmi_singlestep()
1058 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { in disable_nmi_singlestep()
1060 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in disable_nmi_singlestep()
1061 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
1062 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in disable_nmi_singlestep()
1063 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
1070 struct vmcb_control_area *control = &svm->vmcb->control; in grow_ple_window()
1071 int old = control->pause_filter_count; in grow_ple_window()
1073 if (kvm_pause_in_guest(vcpu->kvm)) in grow_ple_window()
1076 control->pause_filter_count = __grow_ple_window(old, in grow_ple_window()
1081 if (control->pause_filter_count != old) { in grow_ple_window()
1082 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in grow_ple_window()
1083 trace_kvm_ple_window_update(vcpu->vcpu_id, in grow_ple_window()
1084 control->pause_filter_count, old); in grow_ple_window()
1091 struct vmcb_control_area *control = &svm->vmcb->control; in shrink_ple_window()
1092 int old = control->pause_filter_count; in shrink_ple_window()
1094 if (kvm_pause_in_guest(vcpu->kvm)) in shrink_ple_window()
1097 control->pause_filter_count = in shrink_ple_window()
1102 if (control->pause_filter_count != old) { in shrink_ple_window()
1103 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in shrink_ple_window()
1104 trace_kvm_ple_window_update(vcpu->vcpu_id, in shrink_ple_window()
1105 control->pause_filter_count, old); in shrink_ple_window()
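grow_ple_window() and shrink_ple_window() above adapt the per-vCPU pause_filter_count at run time using the module parameters described near the top of the listing (default grow doubles the window, default shrink resets it to pause_filter_count, clamped to a maximum). The sketch below models one plausible policy under that description; it is a simplification, not the kernel's __grow_ple_window()/__shrink_ple_window() helpers, and the sample values are invented.

    /*
     * Simplified model of the PLE window adaptation: grow multiplies and
     * clamps to the maximum, shrink divides (or, when the modifier is 0,
     * resets to the base value).  Illustrative only.
     */
    #include <stdio.h>

    static unsigned int ple_grow(unsigned int val, unsigned int grow,
                                 unsigned int max)
    {
        unsigned long long next = grow ? (unsigned long long)val * grow : val;
        return next > max ? max : (unsigned int)next;
    }

    static unsigned int ple_shrink(unsigned int val, unsigned int shrink,
                                   unsigned int base)
    {
        unsigned int next = shrink ? val / shrink : base;
        return next < base ? base : next;
    }

    int main(void)
    {
        unsigned int win = 3000;                /* sample pause_filter_count */
        win = ple_grow(win, 2, 65535);          /* guest kept spinning: 6000 */
        win = ple_shrink(win, 0, 3000);         /* shrink modifier 0: reset  */
        printf("window=%u\n", win);
        return 0;
    }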
1125 seg->selector = 0; in init_seg()
1126 seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | in init_seg()
1128 seg->limit = 0xffff; in init_seg()
1129 seg->base = 0; in init_seg()
1134 seg->selector = 0; in init_sys_seg()
1135 seg->attrib = SVM_SELECTOR_P_MASK | type; in init_sys_seg()
1136 seg->limit = 0xffff; in init_sys_seg()
1137 seg->base = 0; in init_sys_seg()
1144 return svm->nested.ctl.tsc_offset; in svm_get_l2_tsc_offset()
1151 return svm->tsc_ratio_msr; in svm_get_l2_tsc_multiplier()
1158 svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; in svm_write_tsc_offset()
1159 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset; in svm_write_tsc_offset()
1160 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_tsc_offset()
1166 if (to_svm(vcpu)->guest_state_loaded) in svm_write_tsc_multiplier()
1167 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); in svm_write_tsc_multiplier()
1176 * Intercept INVPCID if shadow paging is enabled to sync/free shadow in svm_recalc_instruction_intercepts()
1177 * roots, or if INVPCID is disabled in the guest to inject #UD. in svm_recalc_instruction_intercepts()
1181 !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) in svm_recalc_instruction_intercepts()
1207 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb_after_set_cpuid()
1209 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); in init_vmcb_after_set_cpuid()
1210 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); in init_vmcb_after_set_cpuid()
1219 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb_after_set_cpuid()
1222 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); in init_vmcb_after_set_cpuid()
1223 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); in init_vmcb_after_set_cpuid()
1230 struct vmcb *vmcb = svm->vmcb01.ptr; in init_vmcb()
1231 struct vmcb_control_area *control = &vmcb->control; in init_vmcb()
1232 struct vmcb_save_area *save = &vmcb->save; in init_vmcb()
1287 if (!kvm_mwait_in_guest(vcpu->kvm)) { in init_vmcb()
1292 if (!kvm_hlt_in_guest(vcpu->kvm)) in init_vmcb()
1295 control->iopm_base_pa = __sme_set(iopm_base); in init_vmcb()
1296 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); in init_vmcb()
1297 control->int_ctl = V_INTR_MASKING_MASK; in init_vmcb()
1299 init_seg(&save->es); in init_vmcb()
1300 init_seg(&save->ss); in init_vmcb()
1301 init_seg(&save->ds); in init_vmcb()
1302 init_seg(&save->fs); in init_vmcb()
1303 init_seg(&save->gs); in init_vmcb()
1305 save->cs.selector = 0xf000; in init_vmcb()
1306 save->cs.base = 0xffff0000; in init_vmcb()
1308 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | in init_vmcb()
1310 save->cs.limit = 0xffff; in init_vmcb()
1312 save->gdtr.base = 0; in init_vmcb()
1313 save->gdtr.limit = 0xffff; in init_vmcb()
1314 save->idtr.base = 0; in init_vmcb()
1315 save->idtr.limit = 0xffff; in init_vmcb()
1317 init_sys_seg(&save->ldtr, SEG_TYPE_LDT); in init_vmcb()
1318 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); in init_vmcb()
1322 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; in init_vmcb()
1327 save->g_pat = vcpu->arch.pat; in init_vmcb()
1328 save->cr3 = 0; in init_vmcb()
1330 svm->current_vmcb->asid_generation = 0; in init_vmcb()
1331 svm->asid = 0; in init_vmcb()
1333 svm->nested.vmcb12_gpa = INVALID_GPA; in init_vmcb()
1334 svm->nested.last_vmcb12_gpa = INVALID_GPA; in init_vmcb()
1336 if (!kvm_pause_in_guest(vcpu->kvm)) { in init_vmcb()
1337 control->pause_filter_count = pause_filter_count; in init_vmcb()
1339 control->pause_filter_thresh = pause_filter_thresh; in init_vmcb()
1352 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in init_vmcb()
1358 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK; in init_vmcb()
1363 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; in init_vmcb()
1366 if (sev_guest(vcpu->kvm)) in init_vmcb()
1381 svm_vcpu_init_msrpm(vcpu, svm->msrpm); in __svm_vcpu_reset()
1384 vcpu->arch.microcode_version = 0x01000065; in __svm_vcpu_reset()
1385 svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio; in __svm_vcpu_reset()
1387 svm->nmi_masked = false; in __svm_vcpu_reset()
1388 svm->awaiting_iret_completion = false; in __svm_vcpu_reset()
1390 if (sev_es_guest(vcpu->kvm)) in __svm_vcpu_reset()
1398 svm->spec_ctrl = 0; in svm_vcpu_reset()
1399 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1409 svm->current_vmcb = target_vmcb; in svm_switch_vmcb()
1410 svm->vmcb = target_vmcb->ptr; in svm_switch_vmcb()
1423 err = -ENOMEM; in svm_vcpu_create()
1428 if (sev_es_guest(vcpu->kvm)) { in svm_vcpu_create()
1430 * SEV-ES guests require a separate VMSA page used to contain in svm_vcpu_create()
1438 * SEV-ES guests maintain an encrypted version of their FPU in svm_vcpu_create()
1439 * state which is restored and saved on VMRUN and VMEXIT. in svm_vcpu_create()
1440 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't in svm_vcpu_create()
1443 fpstate_set_confidential(&vcpu->arch.guest_fpu); in svm_vcpu_create()
1450 svm->msrpm = svm_vcpu_alloc_msrpm(); in svm_vcpu_create()
1451 if (!svm->msrpm) { in svm_vcpu_create()
1452 err = -ENOMEM; in svm_vcpu_create()
1456 svm->x2avic_msrs_intercepted = true; in svm_vcpu_create()
1458 svm->vmcb01.ptr = page_address(vmcb01_page); in svm_vcpu_create()
1459 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); in svm_vcpu_create()
1460 svm_switch_vmcb(svm, &svm->vmcb01); in svm_vcpu_create()
1463 svm->sev_es.vmsa = page_address(vmsa_page); in svm_vcpu_create()
1465 svm->guest_state_loaded = false; in svm_vcpu_create()
1495 svm_clear_current_vmcb(svm->vmcb); in svm_vcpu_free()
1502 __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); in svm_vcpu_free()
1503 __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); in svm_vcpu_free()
1509 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); in svm_prepare_switch_to_guest()
1511 if (sev_es_guest(vcpu->kvm)) in svm_prepare_switch_to_guest()
1514 if (svm->guest_state_loaded) in svm_prepare_switch_to_guest()
1518 * Save additional host state that will be restored on VMEXIT (sev-es) in svm_prepare_switch_to_guest()
1521 vmsave(sd->save_area_pa); in svm_prepare_switch_to_guest()
1522 if (sev_es_guest(vcpu->kvm)) { in svm_prepare_switch_to_guest()
1524 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400); in svm_prepare_switch_to_guest()
1530 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); in svm_prepare_switch_to_guest()
1533 * TSC_AUX is always virtualized for SEV-ES guests when the feature is in svm_prepare_switch_to_guest()
1534 * available. The user return MSR support is not required in this case in svm_prepare_switch_to_guest()
1535 * because TSC_AUX is restored on #VMEXIT from the host save area in svm_prepare_switch_to_guest()
1539 (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm))) in svm_prepare_switch_to_guest()
1540 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); in svm_prepare_switch_to_guest()
1542 svm->guest_state_loaded = true; in svm_prepare_switch_to_guest()
1547 to_svm(vcpu)->guest_state_loaded = false; in svm_prepare_host_switch()
1555 if (sd->current_vmcb != svm->vmcb) { in svm_vcpu_load()
1556 sd->current_vmcb = svm->vmcb; in svm_vcpu_load()
1572 ++vcpu->stat.host_state_reload; in svm_vcpu_put()
1578 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
1580 if (svm->nmi_singlestep) { in svm_get_rflags()
1582 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in svm_get_rflags()
1584 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in svm_get_rflags()
1592 if (to_svm(vcpu)->nmi_singlestep) in svm_set_rflags()
1596 * Any change of EFLAGS.VM is accompanied by a reload of SS in svm_set_rflags()
1597 * (caused by either a task switch or an inter-privilege IRET), in svm_set_rflags()
1600 to_svm(vcpu)->vmcb->save.rflags = rflags; in svm_set_rflags()
1605 struct vmcb *vmcb = to_svm(vcpu)->vmcb; in svm_get_if_flag()
1607 return sev_es_guest(vcpu->kvm) in svm_get_if_flag()
1608 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK in svm_get_if_flag()
1619 * When !npt_enabled, mmu->pdptrs[] is already available since in svm_cache_reg()
1620 * it is always updated per SDM when moving to CRs. in svm_cache_reg()
1626 KVM_BUG_ON(1, vcpu->kvm); in svm_cache_reg()
1635 * The following fields are ignored when AVIC is enabled in svm_set_vintr()
1637 WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu)); in svm_set_vintr()
1643 * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF in svm_set_vintr()
1644 * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN. in svm_set_vintr()
1645 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as in svm_set_vintr()
1646 * interrupts will never be unblocked while L2 is running. in svm_set_vintr()
1652 * This is just a dummy VINTR to actually cause a vmexit to happen. in svm_set_vintr()
1655 control = &svm->vmcb->control; in svm_set_vintr()
1656 control->int_vector = 0x0; in svm_set_vintr()
1657 control->int_ctl &= ~V_INTR_PRIO_MASK; in svm_set_vintr()
1658 control->int_ctl |= V_IRQ_MASK | in svm_set_vintr()
1659 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); in svm_set_vintr()
1660 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vintr()
1668 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1669 if (is_guest_mode(&svm->vcpu)) { in svm_clear_vintr()
1670 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1672 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != in svm_clear_vintr()
1673 (svm->nested.ctl.int_ctl & V_TPR_MASK)); in svm_clear_vintr()
1675 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & in svm_clear_vintr()
1678 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in svm_clear_vintr()
1681 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_clear_vintr()
1686 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_seg()
1687 struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; in svm_seg()
1690 case VCPU_SREG_CS: return &save->cs; in svm_seg()
1691 case VCPU_SREG_DS: return &save->ds; in svm_seg()
1692 case VCPU_SREG_ES: return &save->es; in svm_seg()
1693 case VCPU_SREG_FS: return &save01->fs; in svm_seg()
1694 case VCPU_SREG_GS: return &save01->gs; in svm_seg()
1695 case VCPU_SREG_SS: return &save->ss; in svm_seg()
1696 case VCPU_SREG_TR: return &save01->tr; in svm_seg()
1697 case VCPU_SREG_LDTR: return &save01->ldtr; in svm_seg()
1707 return s->base; in svm_get_segment_base()
1715 var->base = s->base; in svm_get_segment()
1716 var->limit = s->limit; in svm_get_segment()
1717 var->selector = s->selector; in svm_get_segment()
1718 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; in svm_get_segment()
1719 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; in svm_get_segment()
1720 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; in svm_get_segment()
1721 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; in svm_get_segment()
1722 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; in svm_get_segment()
1723 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; in svm_get_segment()
1724 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; in svm_get_segment()
1727 * AMD CPUs circa 2014 track the G bit for all segments except CS. in svm_get_segment()
1728 * However, the SVM spec states that the G bit is not observed by the in svm_get_segment()
1731 * running KVM nested. It also helps cross-vendor migration, because in svm_get_segment()
1734 var->g = s->limit > 0xfffff; in svm_get_segment()
1740 var->unusable = !var->present; in svm_get_segment()
1748 var->type |= 0x2; in svm_get_segment()
1759 * cross-vendor migration. in svm_get_segment()
1761 if (!var->unusable) in svm_get_segment()
1762 var->type |= 0x1; in svm_get_segment()
1767 * descriptor is left as 1, although the whole segment has in svm_get_segment()
1771 if (var->unusable) in svm_get_segment()
1772 var->db = 0; in svm_get_segment()
1773 /* This is symmetric with svm_set_segment() */ in svm_get_segment()
1774 var->dpl = to_svm(vcpu)->vmcb->save.cpl; in svm_get_segment()
1781 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_get_cpl()
1783 return save->cpl; in svm_get_cpl()
1788 struct kvm_segment cs; in svm_get_cs_db_l_bits() local
1790 svm_get_segment(vcpu, &cs, VCPU_SREG_CS); in svm_get_cs_db_l_bits()
1791 *db = cs.db; in svm_get_cs_db_l_bits()
1792 *l = cs.l; in svm_get_cs_db_l_bits()
1799 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1800 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1807 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1808 svm->vmcb->save.idtr.base = dt->address ; in svm_set_idt()
1809 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1816 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1817 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1824 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1825 svm->vmcb->save.gdtr.base = dt->address ; in svm_set_gdt()
1826 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1834 * For guests that don't set guest_state_protected, the cr3 update is in sev_post_set_cr3()
1836 * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to in sev_post_set_cr3()
1841 if (sev_es_guest(vcpu->kvm)) { in sev_post_set_cr3()
1842 svm->vmcb->save.cr3 = cr3; in sev_post_set_cr3()
1843 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in sev_post_set_cr3()
1859 if (vcpu->arch.efer & EFER_LME) { in svm_set_cr0()
1861 vcpu->arch.efer |= EFER_LMA; in svm_set_cr0()
1862 if (!vcpu->arch.guest_state_protected) in svm_set_cr0()
1863 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1867 vcpu->arch.efer &= ~EFER_LMA; in svm_set_cr0()
1868 if (!vcpu->arch.guest_state_protected) in svm_set_cr0()
1869 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1873 vcpu->arch.cr0 = cr0; in svm_set_cr0()
1882 * re-enable caching here because the QEMU bios in svm_set_cr0()
1883 * does not do it - this results in some delay at in svm_set_cr0()
1886 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in svm_set_cr0()
1889 svm->vmcb->save.cr0 = hcr0; in svm_set_cr0()
1890 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1893 * SEV-ES guests must always keep the CR intercepts cleared. CR in svm_set_cr0()
1894 * tracking is done using the CR write traps. in svm_set_cr0()
1896 if (sev_es_guest(vcpu->kvm)) in svm_set_cr0()
1917 unsigned long old_cr4 = vcpu->arch.cr4; in svm_set_cr4()
1922 vcpu->arch.cr4 = cr4; in svm_set_cr4()
1930 to_svm(vcpu)->vmcb->save.cr4 = cr4; in svm_set_cr4()
1931 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); in svm_set_cr4()
1943 s->base = var->base; in svm_set_segment()
1944 s->limit = var->limit; in svm_set_segment()
1945 s->selector = var->selector; in svm_set_segment()
1946 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); in svm_set_segment()
1947 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; in svm_set_segment()
1948 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; in svm_set_segment()
1949 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; in svm_set_segment()
1950 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; in svm_set_segment()
1951 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; in svm_set_segment()
1952 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; in svm_set_segment()
1953 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; in svm_set_segment()
1956 * This is always accurate, except if SYSRET returned to a segment in svm_set_segment()
1962 /* This is symmetric with svm_get_segment() */ in svm_set_segment()
1963 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
1965 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
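svm_get_segment() and svm_set_segment() above translate between struct kvm_segment and the packed VMCB "attrib" field, using the bit positions implied by the SVM_SELECTOR_* shifts (type 3:0, S 4, DPL 6:5, P 7, AVL 8, L 9, D/B 10, G 11). The helpers below are an illustrative re-statement of that packing, not the kernel's code.

    /*
     * Packing of segment attributes into the 12-bit VMCB attrib field,
     * following the SVM_SELECTOR_* shifts used above.  Illustrative.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct seg {
        unsigned type:4, s:1, dpl:2, present:1, avl:1, l:1, db:1, g:1;
    };

    static uint16_t seg_pack(const struct seg *v)
    {
        return  v->type          |
               (v->s       << 4) |
               (v->dpl     << 5) |
               (v->present << 7) |
               (v->avl     << 8) |
               (v->l       << 9) |
               (v->db     << 10) |
               (v->g      << 11);
    }

    int main(void)
    {
        /* 64-bit code segment: type 0xb (execute/read, accessed), S=1,
         * DPL=0, present, long mode, 4K granularity. */
        struct seg cs = { .type = 0xb, .s = 1, .present = 1, .l = 1, .g = 1 };

        printf("attrib = %#x\n", seg_pack(&cs));   /* prints 0xa9b */
        return 0;
    }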
1974 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { in svm_update_exception_bitmap()
1975 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in svm_update_exception_bitmap()
1982 if (sd->next_asid > sd->max_asid) { in new_asid()
1983 ++sd->asid_generation; in new_asid()
1984 sd->next_asid = sd->min_asid; in new_asid()
1985 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
1986 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
1989 svm->current_vmcb->asid_generation = sd->asid_generation; in new_asid()
1990 svm->asid = sd->next_asid++; in new_asid()
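new_asid() above hands out a fresh ASID from the per-CPU pool and, when the pool is exhausted, bumps the generation and requests a flush of all ASIDs; a vCPU's cached ASID stays valid only while its generation matches the CPU's. The code below is a compact model of that scheme with field names mirroring the snippet; it is a sketch, not kernel code.

    /*
     * Sketch of the per-CPU ASID generation scheme: running past max_asid
     * bumps the generation (lazily invalidating every cached ASID), wraps
     * back to min_asid, and requests TLB_CONTROL_FLUSH_ALL_ASID.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cpu_asids {
        uint32_t min_asid, max_asid, next_asid;
        uint64_t generation;
    };

    struct vcpu_asid {
        uint32_t asid;
        uint64_t generation;    /* must match the CPU's to stay valid */
    };

    static void new_asid(struct vcpu_asid *v, struct cpu_asids *cpu,
                         bool *flush_all)
    {
        *flush_all = false;
        if (cpu->next_asid > cpu->max_asid) {
            cpu->generation++;
            cpu->next_asid = cpu->min_asid;
            *flush_all = true;              /* flush every ASID on this CPU */
        }
        v->generation = cpu->generation;
        v->asid = cpu->next_asid++;
    }

    int main(void)
    {
        struct cpu_asids cpu = { .min_asid = 1, .max_asid = 2,
                                 .next_asid = 1, .generation = 1 };
        struct vcpu_asid v;
        bool flush;

        for (int i = 0; i < 3; i++) {
            new_asid(&v, &cpu, &flush);
            printf("asid=%u gen=%llu flush=%d\n", v.asid,
                   (unsigned long long)v.generation, flush);
        }
        return 0;
    }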
1995 struct vmcb *vmcb = svm->vmcb; in svm_set_dr6()
1997 if (svm->vcpu.arch.guest_state_protected) in svm_set_dr6()
2000 if (unlikely(value != vmcb->save.dr6)) { in svm_set_dr6()
2001 vmcb->save.dr6 = value; in svm_set_dr6()
2010 if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm))) in svm_sync_dirty_debug_regs()
2013 get_debugreg(vcpu->arch.db[0], 0); in svm_sync_dirty_debug_regs()
2014 get_debugreg(vcpu->arch.db[1], 1); in svm_sync_dirty_debug_regs()
2015 get_debugreg(vcpu->arch.db[2], 2); in svm_sync_dirty_debug_regs()
2016 get_debugreg(vcpu->arch.db[3], 3); in svm_sync_dirty_debug_regs()
2018 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, in svm_sync_dirty_debug_regs()
2021 vcpu->arch.dr6 = svm->vmcb->save.dr6; in svm_sync_dirty_debug_regs()
2022 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
2023 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in svm_sync_dirty_debug_regs()
2031 if (vcpu->arch.guest_state_protected) in svm_set_dr7()
2034 svm->vmcb->save.dr7 = value; in svm_set_dr7()
2035 vmcb_mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
2042 u64 fault_address = svm->vmcb->control.exit_info_2; in pf_interception()
2043 u64 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
2047 svm->vmcb->control.insn_bytes : NULL, in pf_interception()
2048 svm->vmcb->control.insn_len); in pf_interception()
2055 u64 fault_address = svm->vmcb->control.exit_info_2; in npf_interception()
2056 u64 error_code = svm->vmcb->control.exit_info_1; in npf_interception()
2061 svm->vmcb->control.insn_bytes : NULL, in npf_interception()
2062 svm->vmcb->control.insn_len); in npf_interception()
2067 struct kvm_run *kvm_run = vcpu->run; in db_interception()
2070 if (!(vcpu->guest_debug & in db_interception()
2072 !svm->nmi_singlestep) { in db_interception()
2073 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; in db_interception()
2078 if (svm->nmi_singlestep) { in db_interception()
2084 if (vcpu->guest_debug & in db_interception()
2086 kvm_run->exit_reason = KVM_EXIT_DEBUG; in db_interception()
2087 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; in db_interception()
2088 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; in db_interception()
2089 kvm_run->debug.arch.pc = in db_interception()
2090 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
2091 kvm_run->debug.arch.exception = DB_VECTOR; in db_interception()
2101 struct kvm_run *kvm_run = vcpu->run; in bp_interception()
2103 kvm_run->exit_reason = KVM_EXIT_DEBUG; in bp_interception()
2104 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
2105 kvm_run->debug.arch.exception = BP_VECTOR; in bp_interception()
2153 /* Flush tlb to evict multi-match entries */ in is_erratum_383()
2163 * Erratum 383 triggered. Guest state is corrupt so kill the in svm_handle_mce()
2174 * On an #MC intercept the MCE handler is not called automatically in in svm_handle_mce()
2187 struct kvm_run *kvm_run = vcpu->run; in shutdown_interception()
2192 * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put in shutdown_interception()
2195 * userspace. At a platform view, INIT is acceptable behavior as in shutdown_interception()
2199 * The VM save area for SEV-ES guests has already been encrypted so it in shutdown_interception()
2200 * cannot be reinitialized, i.e. synthesizing INIT is futile. in shutdown_interception()
2202 if (!sev_es_guest(vcpu->kvm)) { in shutdown_interception()
2203 clear_page(svm->vmcb); in shutdown_interception()
2207 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; in shutdown_interception()
2214 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
2218 ++vcpu->stat.io_exits; in io_interception()
2225 if (sev_es_guest(vcpu->kvm)) in io_interception()
2231 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
2248 ++vcpu->stat.irq_exits; in intr_interception()
2262 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_vmsave_interception()
2264 if (ret == -EINVAL) in vmload_vmsave_interception()
2274 svm_copy_vmloadsave_state(svm->vmcb, vmcb12); in vmload_vmsave_interception()
2275 svm->sysenter_eip_hi = 0; in vmload_vmsave_interception()
2276 svm->sysenter_esp_hi = 0; in vmload_vmsave_interception()
2278 svm_copy_vmloadsave_state(vmcb12, svm->vmcb); in vmload_vmsave_interception()
2314 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in svm_instr_opcode()
2316 if (ctxt->b != 0x1 || ctxt->opcode_len != 2) in svm_instr_opcode()
2319 switch (ctxt->modrm) { in svm_instr_opcode()
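svm_instr_opcode() above looks at the already-decoded instruction (two-byte opcode 0F 01) and keys off the ModRM byte to tell VMRUN, VMLOAD and VMSAVE apart when they surface as a #GP rather than the dedicated intercept. The sketch below uses the standard 0F 01 encodings (D8 = VMRUN, DA = VMLOAD, DB = VMSAVE) as an assumption from the APM opcode map; names and return values are illustrative.

    /*
     * Classification behind svm_instr_opcode(): the SVM instructions share
     * opcode 0F 01 and differ only in the ModRM byte.  Illustrative sketch.
     */
    #include <stdint.h>
    #include <stdio.h>

    enum svm_instr { NOT_SVM_INSTR, INSTR_VMRUN, INSTR_VMLOAD, INSTR_VMSAVE };

    static enum svm_instr classify_0f01(uint8_t modrm)
    {
        switch (modrm) {
        case 0xd8: return INSTR_VMRUN;
        case 0xda: return INSTR_VMLOAD;
        case 0xdb: return INSTR_VMSAVE;
        default:   return NOT_SVM_INSTR;    /* e.g. 0xd9 VMMCALL: not handled here */
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", classify_0f01(0xd8), classify_0f01(0xdb),
               classify_0f01(0xd9));
        return 0;
    }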
2349 /* Returns '1' or -errno on failure, '0' on success. */ in emulate_svm_instr()
2361 * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2369 u32 error_code = svm->vmcb->control.exit_info_1; in gp_interception()
2395 if (svm->vmcb->save.rax & ~PAGE_MASK) in gp_interception()
2410 * If VGIF is enabled, the STGI intercept is only added to in svm_set_gif()
2421 if (svm->vcpu.arch.smi_pending || in svm_set_gif()
2422 svm->vcpu.arch.nmi_pending || in svm_set_gif()
2423 kvm_cpu_has_injectable_intr(&svm->vcpu) || in svm_set_gif()
2424 kvm_apic_has_pending_init_or_sipi(&svm->vcpu)) in svm_set_gif()
2425 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_set_gif()
2430 * After a CLGI no interrupts should come. But if vGIF is in svm_set_gif()
2472 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); in invlpga_interception()
2482 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); in skinit_interception()
2493 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2495 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2497 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2499 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2503 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2505 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2508 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2519 vcpu->arch.nmi_injected = false; in task_switch_interception()
2522 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2526 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2548 int_vec = -1; in task_switch_interception()
2556 if (!sev_es_guest(svm->vcpu.kvm)) in svm_clr_iret_intercept()
2562 if (!sev_es_guest(svm->vcpu.kvm)) in svm_set_iret_intercept()
2570 WARN_ON_ONCE(sev_es_guest(vcpu->kvm)); in iret_interception()
2572 ++vcpu->stat.nmi_window_exits; in iret_interception()
2573 svm->awaiting_iret_completion = true; in iret_interception()
2576 svm->nmi_iret_rip = kvm_rip_read(vcpu); in iret_interception()
2587 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); in invlpg_interception()
2605 unsigned long cr0 = vcpu->arch.cr0; in check_selective_cr0_intercepted()
2609 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) in check_selective_cr0_intercepted()
2616 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2635 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2638 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2639 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2640 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; in cr_interception()
2642 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2646 cr -= 16; in cr_interception()
2677 val = vcpu->arch.cr2; in cr_interception()
2706 new_value = (unsigned long)svm->vmcb->control.exit_info_1; in cr_trap()
2708 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; in cr_trap()
2742 * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT in dr_interception()
2745 if (sev_es_guest(vcpu->kvm)) in dr_interception()
2748 if (vcpu->guest_debug == 0) { in dr_interception()
2755 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
2762 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
2763 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
2765 dr -= 16; in dr_interception()
2787 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; in cr8_write_interception()
2799 * whether the guest has X86_FEATURE_SVM - this avoids a failure if in efer_trap()
2804 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; in efer_trap()
2812 msr->data = 0; in svm_get_msr_feature()
2814 switch (msr->index) { in svm_get_msr_feature()
2817 msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; in svm_get_msr_feature()
2830 switch (msr_info->index) { in svm_get_msr()
2832 if (!msr_info->host_initiated && in svm_get_msr()
2835 msr_info->data = svm->tsc_ratio_msr; in svm_get_msr()
2838 msr_info->data = svm->vmcb01.ptr->save.star; in svm_get_msr()
2842 msr_info->data = svm->vmcb01.ptr->save.lstar; in svm_get_msr()
2845 msr_info->data = svm->vmcb01.ptr->save.cstar; in svm_get_msr()
2848 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; in svm_get_msr()
2851 msr_info->data = svm->vmcb01.ptr->save.sfmask; in svm_get_msr()
2855 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; in svm_get_msr()
2858 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; in svm_get_msr()
2860 msr_info->data |= (u64)svm->sysenter_eip_hi << 32; in svm_get_msr()
2863 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; in svm_get_msr()
2865 msr_info->data |= (u64)svm->sysenter_esp_hi << 32; in svm_get_msr()
2868 msr_info->data = svm->tsc_aux; in svm_get_msr()
2871 msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl; in svm_get_msr()
2874 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from; in svm_get_msr()
2877 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to; in svm_get_msr()
2880 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from; in svm_get_msr()
2883 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to; in svm_get_msr()
2886 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
2889 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
2892 if (!msr_info->host_initiated && in svm_get_msr()
2897 msr_info->data = svm->vmcb->save.spec_ctrl; in svm_get_msr()
2899 msr_info->data = svm->spec_ctrl; in svm_get_msr()
2902 if (!msr_info->host_initiated && in svm_get_msr()
2906 msr_info->data = svm->virt_spec_ctrl; in svm_get_msr()
2918 msr_info->data = 0; in svm_get_msr()
2922 msr_info->data = 0x1E; in svm_get_msr()
2926 msr_info->data = svm->msr_decfg; in svm_get_msr()
2937 if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) in svm_complete_emulated_msr()
2940 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1); in svm_complete_emulated_msr()
2941 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, in svm_complete_emulated_msr()
2958 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
2961 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
2962 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
2964 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
2966 /* check for svm_disable while efer.svme is set */ in svm_set_vm_cr()
2967 if (svm_dis && (vcpu->arch.efer & EFER_SVME)) in svm_set_vm_cr()
2978 u32 ecx = msr->index; in svm_set_msr()
2979 u64 data = msr->data; in svm_set_msr()
2985 if (!msr->host_initiated) in svm_set_msr()
2988 * In case TSC scaling is not enabled, always in svm_set_msr()
2992 * this msr to 0 if tsc scaling is not enabled. in svm_set_msr()
2995 if (data != 0 && data != svm->tsc_ratio_msr) in svm_set_msr()
3003 svm->tsc_ratio_msr = data; in svm_set_msr()
3015 svm->vmcb01.ptr->save.g_pat = data; in svm_set_msr()
3018 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_set_msr()
3021 if (!msr->host_initiated && in svm_set_msr()
3029 svm->vmcb->save.spec_ctrl = data; in svm_set_msr()
3031 svm->spec_ctrl = data; in svm_set_msr()
3036 * For non-nested: in svm_set_msr()
3037 * When it's written (to non-zero) for the first time, pass in svm_set_msr()
3041 * The handling of the MSR bitmap for L2 guests is done in in svm_set_msr()
3046 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in svm_set_msr()
3049 if (!msr->host_initiated && in svm_set_msr()
3056 svm->virt_spec_ctrl = data; in svm_set_msr()
3059 svm->vmcb01.ptr->save.star = data; in svm_set_msr()
3063 svm->vmcb01.ptr->save.lstar = data; in svm_set_msr()
3066 svm->vmcb01.ptr->save.cstar = data; in svm_set_msr()
3069 svm->vmcb01.ptr->save.kernel_gs_base = data; in svm_set_msr()
3072 svm->vmcb01.ptr->save.sfmask = data; in svm_set_msr()
3076 svm->vmcb01.ptr->save.sysenter_cs = data; in svm_set_msr()
3079 svm->vmcb01.ptr->save.sysenter_eip = (u32)data; in svm_set_msr()
3087 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3090 svm->vmcb01.ptr->save.sysenter_esp = (u32)data; in svm_set_msr()
3091 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3095 * TSC_AUX is always virtualized for SEV-ES guests when the in svm_set_msr()
3096 * feature is available. The user return MSR support is not in svm_set_msr()
3097 * required in this case because TSC_AUX is restored on #VMEXIT in svm_set_msr()
3101 if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm)) in svm_set_msr()
3105 * TSC_AUX is usually changed only during boot and never read in svm_set_msr()
3110 ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); in svm_set_msr()
3115 svm->tsc_aux = data; in svm_set_msr()
3125 svm_get_lbr_vmcb(svm)->save.dbgctl = data; in svm_set_msr()
3135 if (!msr->host_initiated && !page_address_valid(vcpu, data)) in svm_set_msr()
3138 svm->nested.hsave_msr = data & PAGE_MASK; in svm_set_msr()
3148 msr_entry.index = msr->index; in svm_set_msr()
3157 if (!msr->host_initiated && (data ^ msr_entry.data)) in svm_set_msr()
3160 svm->msr_decfg = data; in svm_set_msr()
3171 if (to_svm(vcpu)->vmcb->control.exit_info_1) in msr_interception()
3183 * If not running nested, for AVIC, the only reason to end up here is ExtINTs. in interrupt_window_interception()
3185 * requesting the IRQ window and we have to re-enable it. in interrupt_window_interception()
3192 * AVIC still inhibited due to per-cpu AVIC inhibition. in interrupt_window_interception()
3194 kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); in interrupt_window_interception()
3196 ++vcpu->stat.irq_window_exits; in interrupt_window_interception()
3204 * CPL is not made available for an SEV-ES guest, therefore in pause_interception()
3205 * vcpu->arch.preempted_in_kernel can never be true. Just in pause_interception()
3208 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; in pause_interception()
3232 type = svm->vmcb->control.exit_info_2; in invpcid_interception()
3233 gva = svm->vmcb->control.exit_info_1; in invpcid_interception()
3315 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
3316 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3317 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; in dump_vmcb()
3325 svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); in dump_vmcb()
3327 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); in dump_vmcb()
3328 pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16); in dump_vmcb()
3329 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); in dump_vmcb()
3330 pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16); in dump_vmcb()
3331 pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]); in dump_vmcb()
3332 pr_err("%-20s%08x %08x\n", "intercepts:", in dump_vmcb()
3333 control->intercepts[INTERCEPT_WORD3], in dump_vmcb()
3334 control->intercepts[INTERCEPT_WORD4]); in dump_vmcb()
3335 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); in dump_vmcb()
3336 pr_err("%-20s%d\n", "pause filter threshold:", in dump_vmcb()
3337 control->pause_filter_thresh); in dump_vmcb()
3338 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); in dump_vmcb()
3339 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); in dump_vmcb()
3340 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); in dump_vmcb()
3341 pr_err("%-20s%d\n", "asid:", control->asid); in dump_vmcb()
3342 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); in dump_vmcb()
3343 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); in dump_vmcb()
3344 pr_err("%-20s%08x\n", "int_vector:", control->int_vector); in dump_vmcb()
3345 pr_err("%-20s%08x\n", "int_state:", control->int_state); in dump_vmcb()
3346 pr_err("%-20s%08x\n", "exit_code:", control->exit_code); in dump_vmcb()
3347 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); in dump_vmcb()
3348 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); in dump_vmcb()
3349 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); in dump_vmcb()
3350 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); in dump_vmcb()
3351 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); in dump_vmcb()
3352 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); in dump_vmcb()
3353 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar); in dump_vmcb()
3354 pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa); in dump_vmcb()
3355 pr_err("%-20s%08x\n", "event_inj:", control->event_inj); in dump_vmcb()
3356 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); in dump_vmcb()
3357 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext); in dump_vmcb()
3358 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); in dump_vmcb()
3359 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page); in dump_vmcb()
3360 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id); in dump_vmcb()
3361 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id); in dump_vmcb()
3362 pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa); in dump_vmcb()
3364 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3366 save->es.selector, save->es.attrib, in dump_vmcb()
3367 save->es.limit, save->es.base); in dump_vmcb()
3368 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3369 "cs:", in dump_vmcb()
3370 save->cs.selector, save->cs.attrib, in dump_vmcb()
3371 save->cs.limit, save->cs.base); in dump_vmcb()
3372 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3374 save->ss.selector, save->ss.attrib, in dump_vmcb()
3375 save->ss.limit, save->ss.base); in dump_vmcb()
3376 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3378 save->ds.selector, save->ds.attrib, in dump_vmcb()
3379 save->ds.limit, save->ds.base); in dump_vmcb()
3380 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3382 save01->fs.selector, save01->fs.attrib, in dump_vmcb()
3383 save01->fs.limit, save01->fs.base); in dump_vmcb()
3384 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3386 save01->gs.selector, save01->gs.attrib, in dump_vmcb()
3387 save01->gs.limit, save01->gs.base); in dump_vmcb()
3388 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3390 save->gdtr.selector, save->gdtr.attrib, in dump_vmcb()
3391 save->gdtr.limit, save->gdtr.base); in dump_vmcb()
3392 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3394 save01->ldtr.selector, save01->ldtr.attrib, in dump_vmcb()
3395 save01->ldtr.limit, save01->ldtr.base); in dump_vmcb()
3396 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3398 save->idtr.selector, save->idtr.attrib, in dump_vmcb()
3399 save->idtr.limit, save->idtr.base); in dump_vmcb()
3400 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", in dump_vmcb()
3402 save01->tr.selector, save01->tr.attrib, in dump_vmcb()
3403 save01->tr.limit, save01->tr.base); in dump_vmcb()
3405 save->vmpl, save->cpl, save->efer); in dump_vmcb()
3406 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3407 "cr0:", save->cr0, "cr2:", save->cr2); in dump_vmcb()
3408 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3409 "cr3:", save->cr3, "cr4:", save->cr4); in dump_vmcb()
3410 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3411 "dr6:", save->dr6, "dr7:", save->dr7); in dump_vmcb()
3412 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3413 "rip:", save->rip, "rflags:", save->rflags); in dump_vmcb()
3414 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3415 "rsp:", save->rsp, "rax:", save->rax); in dump_vmcb()
3416 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3417 "star:", save01->star, "lstar:", save01->lstar); in dump_vmcb()
3418 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3419 "cstar:", save01->cstar, "sfmask:", save01->sfmask); in dump_vmcb()
3420 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3421 "kernel_gs_base:", save01->kernel_gs_base, in dump_vmcb()
3422 "sysenter_cs:", save01->sysenter_cs); in dump_vmcb()
3423 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3424 "sysenter_esp:", save01->sysenter_esp, in dump_vmcb()
3425 "sysenter_eip:", save01->sysenter_eip); in dump_vmcb()
3426 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3427 "gpat:", save->g_pat, "dbgctl:", save->dbgctl); in dump_vmcb()
3428 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3429 "br_from:", save->br_from, "br_to:", save->br_to); in dump_vmcb()
3430 pr_err("%-15s %016llx %-13s %016llx\n", in dump_vmcb()
3431 "excp_from:", save->last_excp_from, in dump_vmcb()
3432 "excp_to:", save->last_excp_to); in dump_vmcb()
3445 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in svm_handle_invalid_exit()
3446 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; in svm_handle_invalid_exit()
3447 vcpu->run->internal.ndata = 2; in svm_handle_invalid_exit()
3448 vcpu->run->internal.data[0] = exit_code; in svm_handle_invalid_exit()
3449 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; in svm_handle_invalid_exit()
3477 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; in svm_get_exit_info()
3479 *reason = control->exit_code; in svm_get_exit_info()
3480 *info1 = control->exit_info_1; in svm_get_exit_info()
3481 *info2 = control->exit_info_2; in svm_get_exit_info()
3482 *intr_info = control->exit_int_info; in svm_get_exit_info()
3485 *error_code = control->exit_int_info_err; in svm_get_exit_info()
3493 struct kvm_run *kvm_run = vcpu->run; in svm_handle_exit()
3494 u32 exit_code = svm->vmcb->control.exit_code; in svm_handle_exit()
3496 /* SEV-ES guests must use the CR write traps to track CR registers. */ in svm_handle_exit()
3497 if (!sev_es_guest(vcpu->kvm)) { in svm_handle_exit()
3499 vcpu->arch.cr0 = svm->vmcb->save.cr0; in svm_handle_exit()
3501 vcpu->arch.cr3 = svm->vmcb->save.cr3; in svm_handle_exit()
3518 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in svm_handle_exit()
3519 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; in svm_handle_exit()
3520 kvm_run->fail_entry.hardware_entry_failure_reason in svm_handle_exit()
3521 = svm->vmcb->control.exit_code; in svm_handle_exit()
3522 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; in svm_handle_exit()
3535 struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu); in pre_svm_run()
3543 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { in pre_svm_run()
3544 svm->current_vmcb->asid_generation = 0; in pre_svm_run()
3545 vmcb_mark_all_dirty(svm->vmcb); in pre_svm_run()
3546 svm->current_vmcb->cpu = vcpu->cpu; in pre_svm_run()
3549 if (sev_guest(vcpu->kvm)) in pre_svm_run()
3550 return pre_sev_run(svm, vcpu->cpu); in pre_svm_run()
3553 if (svm->current_vmcb->asid_generation != sd->asid_generation) in pre_svm_run()
3561 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3563 if (svm->nmi_l1_to_l2) in svm_inject_nmi()
3567 * No need to manually track NMI masking when vNMI is enabled, hardware in svm_inject_nmi()
3572 svm->nmi_masked = true; in svm_inject_nmi()
3575 ++vcpu->stat.nmi_injections; in svm_inject_nmi()
3585 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); in svm_is_vnmi_pending()
3595 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK) in svm_set_vnmi_pending()
3598 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK; in svm_set_vnmi_pending()
3599 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vnmi_pending()
3602 * Because the pending NMI is serviced by hardware, KVM can't know when in svm_set_vnmi_pending()
3603 * the NMI is "injected", but for all intents and purposes, passing the in svm_set_vnmi_pending()
3606 ++vcpu->stat.nmi_injections; in svm_set_vnmi_pending()
3616 if (vcpu->arch.interrupt.soft) { in svm_inject_irq()
3625 trace_kvm_inj_virq(vcpu->arch.interrupt.nr, in svm_inject_irq()
3626 vcpu->arch.interrupt.soft, reinjected); in svm_inject_irq()
3627 ++vcpu->stat.irq_injections; in svm_inject_irq()
3629 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_inject_irq()
3637 * apic->apicv_active must be read after vcpu->mode. in svm_complete_interrupt_delivery()
3640 bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE); in svm_complete_interrupt_delivery()
3642 /* Note, this is called iff the local APIC is in-kernel. */ in svm_complete_interrupt_delivery()
3643 if (!READ_ONCE(vcpu->arch.apic->apicv_active)) { in svm_complete_interrupt_delivery()
3650 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector); in svm_complete_interrupt_delivery()
3673 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in in svm_deliver_interrupt()
3674 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before in svm_deliver_interrupt()
3680 svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector); in svm_deliver_interrupt()
3688 * SEV-ES guests must always keep the CR intercepts cleared. CR in svm_update_cr8_intercept()
3689 * tracking is done using the CR write traps. in svm_update_cr8_intercept()
3691 if (sev_es_guest(vcpu->kvm)) in svm_update_cr8_intercept()
3699 if (irr == -1) in svm_update_cr8_intercept()
3711 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK; in svm_get_nmi_mask()
3713 return svm->nmi_masked; in svm_get_nmi_mask()
3722 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK; in svm_set_nmi_mask()
3724 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK; in svm_set_nmi_mask()
3727 svm->nmi_masked = masked; in svm_set_nmi_mask()
3738 struct vmcb *vmcb = svm->vmcb; in svm_nmi_blocked()
3749 return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK; in svm_nmi_blocked()
3755 if (svm->nested.nested_run_pending) in svm_nmi_allowed()
3756 return -EBUSY; in svm_nmi_allowed()
3761 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ in svm_nmi_allowed()
3763 return -EBUSY; in svm_nmi_allowed()
3770 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_blocked()
3777 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) in svm_interrupt_blocked()
3778 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) in svm_interrupt_blocked()
3790 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); in svm_interrupt_blocked()
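The V_INTR_MASKING check above decides whose RFLAGS.IF gates physical interrupts: when L1 virtualizes interrupt masking, L1's IF (kept in vmcb01) is what matters, otherwise L2's own flags are used, and an interrupt shadow blocks delivery either way. A simplified standalone sketch of that decision; it deliberately leaves out the GIF and nested-intercept checks the real function also performs:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_EFLAGS_IF          (1u << 9)       /* x86 RFLAGS.IF */
#define DEMO_INT_SHADOW         (1u << 0)       /* models SVM_INTERRUPT_SHADOW_MASK */

struct demo_irq_state {
        bool     is_guest_mode;         /* running L2? */
        bool     v_intr_masking;        /* L1 set V_INTR_MASKING in vmcb12 */
        uint64_t l1_rflags;             /* vmcb01 save.rflags */
        uint64_t l2_rflags;             /* current save.rflags */
        uint32_t int_state;             /* interrupt shadow bits */
};

bool demo_interrupt_blocked(const struct demo_irq_state *s)
{
        uint64_t rflags;

        /* With V_INTR_MASKING, physical IRQs honor L1's IF, not L2's. */
        if (s->is_guest_mode && s->v_intr_masking)
                rflags = s->l1_rflags;
        else
                rflags = s->l2_rflags;

        if (!(rflags & DEMO_EFLAGS_IF))
                return true;

        /* An interrupt shadow (e.g. after STI or MOV SS) also blocks delivery. */
        return s->int_state & DEMO_INT_SHADOW;
}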
3797 if (svm->nested.nested_run_pending) in svm_interrupt_allowed()
3798 return -EBUSY; in svm_interrupt_allowed()
3804 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, in svm_interrupt_allowed()
3808 return -EBUSY; in svm_interrupt_allowed()
3821 * we'll get the vintr intercept. However, if the vGIF feature is in svm_enable_irq_window()
3827 * IRQ window is not needed when AVIC is enabled, in svm_enable_irq_window()
3832 * If running nested, AVIC is already locally inhibited in svm_enable_irq_window()
3833 * on this vCPU, therefore there is no need to request in svm_enable_irq_window()
3837 kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN); in svm_enable_irq_window()
3848 * KVM should never request an NMI window when vNMI is enabled, as KVM in svm_enable_nmi_window()
3849 * allows at most one to-be-injected NMI and one pending NMI, i.e. if in svm_enable_nmi_window()
3852 * single-step approach to try and salvage the pending NMI. in svm_enable_nmi_window()
3856 if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion) in svm_enable_nmi_window()
3860 * SEV-ES guests are responsible for signaling when a vCPU is ready to in svm_enable_nmi_window()
3861 * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e. in svm_enable_nmi_window()
3862 * KVM can't intercept and single-step IRET to detect when NMIs are in svm_enable_nmi_window()
3865 * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware in svm_enable_nmi_window()
3866 * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not in svm_enable_nmi_window()
3869 if (sev_es_guest(vcpu->kvm)) in svm_enable_nmi_window()
3882 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); in svm_enable_nmi_window()
3883 svm->nmi_singlestep = true; in svm_enable_nmi_window()
3884 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in svm_enable_nmi_window()
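Without vNMI, the fallback above opens an NMI window by single-stepping the guest: the current RFLAGS are stashed and TF is set so the very next instruction traps (RF is set too so an instruction breakpoint at the same RIP does not refire). A small sketch of that save-and-arm step, with stand-in names for the nmi_singlestep bookkeeping:

#include <stdint.h>

#define DEMO_EFLAGS_TF  (1u << 8)       /* x86 trap flag */
#define DEMO_EFLAGS_RF  (1u << 16)      /* x86 resume flag */

struct demo_nmi_window {
        uint64_t saved_rflags;  /* restored once the resulting #DB fires */
        int      singlestep;
};

/* Returns the RFLAGS value to run the guest with while the window is armed. */
uint64_t demo_arm_nmi_window(struct demo_nmi_window *w, uint64_t guest_rflags)
{
        w->saved_rflags = guest_rflags;         /* models nmi_singlestep_guest_rflags */
        w->singlestep = 1;
        /* TF makes the very next guest instruction trap; RF suppresses a
         * spurious instruction-breakpoint re-trigger at that same RIP. */
        return guest_rflags | DEMO_EFLAGS_TF | DEMO_EFLAGS_RF;
}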
3894 * entries, and thus is a superset of Hyper-V's fine-grained flushing. in svm_flush_tlb_asid()
3902 * unconditionally does a TLB flush on both nested VM-Enter and nested in svm_flush_tlb_asid()
3903 * VM-Exit (via kvm_mmu_reset_context()). in svm_flush_tlb_asid()
3906 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb_asid()
3908 svm->current_vmcb->asid_generation--; in svm_flush_tlb_asid()
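The else branch above deliberately desynchronizes the VMCB's ASID generation from the per-CPU one, so the next pre_svm_run() hands out a brand-new ASID; with FLUSH_BY_ASID support the hardware can instead flush the current ASID directly via tlb_ctl. A tiny standalone sketch of that choice (DEMO_TLB_FLUSH_ASID is a placeholder value, not the architectural encoding):

#include <stdint.h>

struct demo_tlb_state {
        uint32_t asid_generation;       /* compared against the per-CPU generation */
        uint32_t tlb_ctl;               /* models vmcb->control.tlb_ctl */
};

#define DEMO_TLB_FLUSH_ASID     3u      /* placeholder for TLB_CONTROL_FLUSH_ASID */

void demo_flush_guest_asid(struct demo_tlb_state *t, int has_flush_by_asid)
{
        if (has_flush_by_asid)
                t->tlb_ctl = DEMO_TLB_FLUSH_ASID;       /* hardware flushes just this ASID */
        else
                t->asid_generation--;   /* mismatch forces a brand-new ASID pre-VMRUN */
}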
3913 hpa_t root_tdp = vcpu->arch.mmu->root.hpa; in svm_flush_tlb_current()
3916 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly in svm_flush_tlb_current()
3930 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB in svm_flush_tlb_all()
3937 hv_flush_remote_tlbs(vcpu->kvm); in svm_flush_tlb_all()
3946 invlpga(gva, svm->vmcb->control.asid); in svm_flush_tlb_gva()
3957 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
3972 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
3973 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
3984 * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's in svm_complete_soft_interrupt()
3985 * associated with the original soft exception/interrupt. next_rip is in svm_complete_soft_interrupt()
3987 * needs to manually set next_rip for re-injection. Unlike the !nrips in svm_complete_soft_interrupt()
3988 * case below, this needs to be done if and only if KVM is re-injecting in svm_complete_soft_interrupt()
3989 * the same event, i.e. if the event is a soft exception/interrupt, in svm_complete_soft_interrupt()
3990 * otherwise next_rip is unused on VMRUN. in svm_complete_soft_interrupt()
3993 kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
3994 svm->vmcb->control.next_rip = svm->soft_int_next_rip; in svm_complete_soft_interrupt()
4004 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
4005 kvm_rip_write(vcpu, svm->soft_int_old_rip); in svm_complete_soft_interrupt()
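Both branches above hinge on the same question: adjusted by the CS base, does the relevant RIP still point at the instruction that raised the soft interrupt? A small standalone sketch of that linear-RIP test and how it gates reuse of the saved next_rip; the demo_soft_int fields are stand-ins for svm->soft_int_old_rip/next_rip/csbase:

#include <stdbool.h>
#include <stdint.h>

struct demo_soft_int {
        uint64_t old_rip;       /* RIP of the INTn/INT3/INTO instruction */
        uint64_t next_rip;      /* RIP of the following instruction */
        uint64_t csbase;        /* CS base captured at injection time */
};

bool demo_is_linear_rip(uint64_t cur_rip, uint64_t cur_csbase, uint64_t linear)
{
        return cur_rip + cur_csbase == linear;
}

/*
 * On re-injection of the same soft event, restore next_rip so the CPU pushes
 * the correct return address; next_rip is clobbered on every VM-Exit.
 * Returns 0 when the event no longer matches and next_rip must not be reused.
 */
uint64_t demo_reinject_next_rip(const struct demo_soft_int *s,
                                uint64_t cur_rip, uint64_t cur_csbase)
{
        if (demo_is_linear_rip(cur_rip, cur_csbase, s->old_rip + s->csbase))
                return s->next_rip;
        return 0;
}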
4013 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
4014 bool nmi_l1_to_l2 = svm->nmi_l1_to_l2; in svm_complete_interrupts()
4015 bool soft_int_injected = svm->soft_int_injected; in svm_complete_interrupts()
4017 svm->nmi_l1_to_l2 = false; in svm_complete_interrupts()
4018 svm->soft_int_injected = false; in svm_complete_interrupts()
4024 if (svm->awaiting_iret_completion && in svm_complete_interrupts()
4025 kvm_rip_read(vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
4026 svm->awaiting_iret_completion = false; in svm_complete_interrupts()
4027 svm->nmi_masked = false; in svm_complete_interrupts()
4031 vcpu->arch.nmi_injected = false; in svm_complete_interrupts()
4048 vcpu->arch.nmi_injected = true; in svm_complete_interrupts()
4049 svm->nmi_l1_to_l2 = nmi_l1_to_l2; in svm_complete_interrupts()
4053 * Never re-inject a #VC exception. in svm_complete_interrupts()
4059 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
4080 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
4082 control->exit_int_info = control->event_inj; in svm_cancel_injection()
4083 control->exit_int_info_err = control->event_inj_err; in svm_cancel_injection()
4084 control->event_inj = 0; in svm_cancel_injection()
4095 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && in svm_exit_handlers_fastpath()
4096 to_svm(vcpu)->vmcb->control.exit_info_1) in svm_exit_handlers_fastpath()
4110 if (sev_es_guest(vcpu->kvm)) in svm_vcpu_enter_exit()
4125 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
4126 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
4127 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
4135 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { in svm_vcpu_run()
4139 * is enough to force an immediate vmexit. in svm_vcpu_run()
4142 smp_send_reschedule(vcpu->cpu); in svm_vcpu_run()
4149 if (unlikely(svm->asid != svm->vmcb->control.asid)) { in svm_vcpu_run()
4150 svm->vmcb->control.asid = svm->asid; in svm_vcpu_run()
4151 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in svm_vcpu_run()
4153 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
4155 svm_hv_update_vp_id(svm->vmcb, vcpu); in svm_vcpu_run()
4158 * Run with all-zero DR6 unless needed, so that we can get the exact cause in svm_vcpu_run()
4161 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) in svm_vcpu_run()
4162 svm_set_dr6(svm, vcpu->arch.dr6); in svm_vcpu_run()
4173 * it's non-zero. Since vmentry is serialising on affected CPUs, there in svm_vcpu_run()
4174 * is no need to worry about the conditional branch over the wrmsr in svm_vcpu_run()
4178 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl); in svm_vcpu_run()
4183 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl); in svm_vcpu_run()
4185 if (!sev_es_guest(vcpu->kvm)) { in svm_vcpu_run()
4186 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
4187 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
4188 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
4189 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
4191 vcpu->arch.regs_dirty = 0; in svm_vcpu_run()
4193 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
4201 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
4206 svm->next_rip = 0; in svm_vcpu_run()
4211 if (svm->nested.nested_run_pending && in svm_vcpu_run()
4212 svm->vmcb->control.exit_code != SVM_EXIT_ERR) in svm_vcpu_run()
4213 ++vcpu->stat.nested_run; in svm_vcpu_run()
4215 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
4218 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
4219 vmcb_mark_all_clean(svm->vmcb); in svm_vcpu_run()
4222 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
4223 vcpu->arch.apf.host_apf_flags = in svm_vcpu_run()
4226 vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; in svm_vcpu_run()
4232 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
4253 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); in svm_load_mmu_pgd()
4254 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_load_mmu_pgd()
4258 cr3 = vcpu->arch.cr3; in svm_load_mmu_pgd()
4262 /* PCID in the guest should be impossible with a 32-bit MMU. */ in svm_load_mmu_pgd()
4267 svm->vmcb->save.cr3 = cr3; in svm_load_mmu_pgd()
4268 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_load_mmu_pgd()
4295 /* SEV-ES guests do not support SMM, so report false */ in svm_has_emulated_msr()
4317 * XSS on VM-Enter/VM-Exit. Failure to do so would effectively give in svm_vcpu_after_set_cpuid()
4330 * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that in svm_vcpu_after_set_cpuid()
4332 * SVM on Intel is bonkers and extremely unlikely to work). in svm_vcpu_after_set_cpuid()
4345 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, in svm_vcpu_after_set_cpuid()
4349 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0, in svm_vcpu_after_set_cpuid()
4352 if (sev_guest(vcpu->kvm)) in svm_vcpu_after_set_cpuid()
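The set_msr_interception() calls above flip per-MSR read/write bits in the MSR permission map once the guest's CPUID is known. A self-contained sketch of that kind of two-bits-per-MSR bitmap; the layout and the example MSR index are simplified stand-ins, not the real SVM MSRPM geometry:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_NR_MSRS    256             /* toy bitmap covering MSRs 0..255 */

struct demo_msrpm {
        /* two bits per MSR: even bit = intercept reads, odd bit = intercept writes */
        uint8_t bits[DEMO_NR_MSRS * 2 / 8];
};

void demo_set_msr_interception(struct demo_msrpm *m, uint32_t msr,
                               bool allow_read, bool allow_write)
{
        uint32_t bit = msr * 2;

        if (allow_read)
                m->bits[bit / 8] &= ~(1u << (bit % 8));
        else
                m->bits[bit / 8] |=  (1u << (bit % 8));

        bit++;
        if (allow_write)
                m->bits[bit / 8] &= ~(1u << (bit % 8));
        else
                m->bits[bit / 8] |=  (1u << (bit % 8));
}

/* e.g. pass a hypothetical MSR 0x48 through only if the guest CPUID advertises
 * the matching feature, mirroring the post-CPUID-update calls above. */
void demo_after_set_cpuid(struct demo_msrpm *m, bool guest_has_feature)
{
        demo_set_msr_interception(m, 0x48, guest_has_feature, guest_has_feature);
}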
4435 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
4437 if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) in svm_check_intercept()
4440 icpt_info = x86_intercept_map[info->intercept]; in svm_check_intercept()
4447 if (info->intercept == x86_intercept_cr_read) in svm_check_intercept()
4448 icpt_info.exit_code += info->modrm_reg; in svm_check_intercept()
4453 if (info->intercept == x86_intercept_cr_write) in svm_check_intercept()
4454 icpt_info.exit_code += info->modrm_reg; in svm_check_intercept()
4457 info->intercept == x86_intercept_clts) in svm_check_intercept()
4460 if (!(vmcb12_is_intercept(&svm->nested.ctl, in svm_check_intercept()
4464 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; in svm_check_intercept()
4465 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; in svm_check_intercept()
4467 if (info->intercept == x86_intercept_lmsw) { in svm_check_intercept()
4470 /* lmsw can't clear PE - catch this here */ in svm_check_intercept()
4482 icpt_info.exit_code += info->modrm_reg; in svm_check_intercept()
4485 if (info->intercept == x86_intercept_wrmsr) in svm_check_intercept()
4486 vmcb->control.exit_info_1 = 1; in svm_check_intercept()
4488 vmcb->control.exit_info_1 = 0; in svm_check_intercept()
4493 * is a REP NOP, so check for the REP prefix here in svm_check_intercept()
4495 if (info->rep_prefix != REPE_PREFIX) in svm_check_intercept()
4502 if (info->intercept == x86_intercept_in || in svm_check_intercept()
4503 info->intercept == x86_intercept_ins) { in svm_check_intercept()
4504 exit_info = ((info->src_val & 0xffff) << 16) | in svm_check_intercept()
4506 bytes = info->dst_bytes; in svm_check_intercept()
4508 exit_info = (info->dst_val & 0xffff) << 16; in svm_check_intercept()
4509 bytes = info->src_bytes; in svm_check_intercept()
4512 if (info->intercept == x86_intercept_outs || in svm_check_intercept()
4513 info->intercept == x86_intercept_ins) in svm_check_intercept()
4516 if (info->rep_prefix) in svm_check_intercept()
4523 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); in svm_check_intercept()
4525 vmcb->control.exit_info_1 = exit_info; in svm_check_intercept()
4526 vmcb->control.exit_info_2 = info->next_rip; in svm_check_intercept()
4536 vmcb->control.next_rip = info->next_rip; in svm_check_intercept()
4537 vmcb->control.exit_code = icpt_info.exit_code; in svm_check_intercept()
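For the IN/OUT intercept check above, exit_info_1 is rebuilt by hand: port number in the upper 16 bits, then type/string/REP bits and one-hot operand- and address-size fields. A standalone sketch of that encoding; the DEMO_IOIO_* positions are placeholders standing in for the SVM_IOIO_* constants:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder field positions; the real ones are the SVM_IOIO_* defines. */
#define DEMO_IOIO_TYPE_IN       (1u << 0)       /* 1 = IN/INS, 0 = OUT/OUTS */
#define DEMO_IOIO_STR           (1u << 2)       /* string instruction (INS/OUTS) */
#define DEMO_IOIO_REP           (1u << 3)       /* REP prefix present */
#define DEMO_IOIO_SIZE_SHIFT    4               /* one-hot operand-size field */
#define DEMO_IOIO_ASIZE_SHIFT   7               /* one-hot address-size field */

uint32_t demo_ioio_exit_info(uint16_t port, bool in, bool string,
                             bool rep, uint8_t op_bytes, uint8_t ad_bytes)
{
        uint32_t info = (uint32_t)port << 16;

        if (in)
                info |= DEMO_IOIO_TYPE_IN;
        if (string)
                info |= DEMO_IOIO_STR;
        if (rep)
                info |= DEMO_IOIO_REP;

        /* op_bytes is 1, 2 or 4, so it lands one-hot in the size field. */
        info |= (uint32_t)op_bytes << DEMO_IOIO_SIZE_SHIFT;
        /* ad_bytes is 2, 4 or 8, hence the -1 used in the code above. */
        info |= (uint32_t)ad_bytes << (DEMO_IOIO_ASIZE_SHIFT - 1);
        return info;
}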
4549 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR) in svm_handle_exit_irqoff()
4550 vcpu->arch.at_instruction_boundary = true; in svm_handle_exit_irqoff()
4555 if (!kvm_pause_in_guest(vcpu->kvm)) in svm_sched_in()
4562 vcpu->arch.mcg_cap &= 0x1ff; in svm_setup_mce()
4580 if (svm->nested.nested_run_pending) in svm_smi_allowed()
4581 return -EBUSY; in svm_smi_allowed()
4586 /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */ in svm_smi_allowed()
4588 return -EBUSY; in svm_smi_allowed()
4603 * 32-bit SMRAM format doesn't preserve EFER and SVM state. Userspace is in svm_enter_smm()
4610 smram->smram64.svm_guest_flag = 1; in svm_enter_smm()
4611 smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; in svm_enter_smm()
4613 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_enter_smm()
4614 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_enter_smm()
4615 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_enter_smm()
4623 * VMCB01 is going to be used during SMM and thus the state will in svm_enter_smm()
4624 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save in svm_enter_smm()
4626 * format of the area is identical to the guest save area, offset in svm_enter_smm()
4633 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) in svm_enter_smm()
4639 &svm->vmcb01.ptr->save); in svm_enter_smm()
4652 const struct kvm_smram_state_64 *smram64 = &smram->smram64; in svm_leave_smm()
4657 /* Non-zero if SMI arrived while vCPU was in guest mode. */ in svm_leave_smm()
4658 if (!smram64->svm_guest_flag) in svm_leave_smm()
4664 if (!(smram64->efer & EFER_SVME)) in svm_leave_smm()
4667 if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map)) in svm_leave_smm()
4671 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) in svm_leave_smm()
4682 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); in svm_leave_smm()
4688 vmcb_mark_all_dirty(svm->vmcb01.ptr); in svm_leave_smm()
4691 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); in svm_leave_smm()
4692 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); in svm_leave_smm()
4693 ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false); in svm_leave_smm()
4698 svm->nested.nested_run_pending = 1; in svm_leave_smm()
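The SMM round trip above keys off two fields KVM stores in the 64-bit SMRAM image: a flag recording that the SMI arrived while L2 was running, and the VMCB12 GPA needed to re-enter it on RSM (plus a sanity check that EFER.SVME survived). A minimal sketch of that handshake; the struct below is a stand-in for the smram64 fields used here, not the full SMRAM layout:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the parts of the 64-bit SMRAM image used by this path. */
struct demo_smram64 {
        uint8_t  svm_guest_flag;        /* non-zero: SMI arrived while in L2 */
        uint64_t svm_guest_vmcb_gpa;    /* where to find vmcb12 on RSM */
        uint64_t efer;                  /* must still have SVME set to resume L2 */
};

#define DEMO_EFER_SVME  (1ull << 12)

void demo_enter_smm(struct demo_smram64 *s, bool in_guest_mode, uint64_t vmcb12_gpa)
{
        s->svm_guest_flag = in_guest_mode;
        s->svm_guest_vmcb_gpa = vmcb12_gpa;
}

/* Returns the vmcb12 GPA to re-enter, or 0 if RSM should stay in L1. */
uint64_t demo_leave_smm(const struct demo_smram64 *s)
{
        if (!s->svm_guest_flag)
                return 0;
        if (!(s->efer & DEMO_EFER_SVME))
                return 0;       /* the real code treats this as malformed SMRAM */
        return s->svm_guest_vmcb_gpa;
}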
4727 /* Emulation is always possible when KVM has access to all guest state. */ in svm_check_emulate_instruction()
4728 if (!sev_guest(vcpu->kvm)) in svm_check_emulate_instruction()
4737 * Emulation is impossible for SEV-ES guests as KVM doesn't have access in svm_check_emulate_instruction()
4740 if (sev_es_guest(vcpu->kvm)) in svm_check_emulate_instruction()
4744 * Emulation is possible if the instruction is already decoded, e.g. in svm_check_emulate_instruction()
4751 * Emulation is possible for SEV guests if and only if a prefilled in svm_check_emulate_instruction()
4752 * buffer containing the bytes of the intercepted instruction is in svm_check_emulate_instruction()
4753 * available. SEV guest memory is encrypted with a guest-specific key in svm_check_emulate_instruction()
4757 * If KVM is NOT trying to simply skip an instruction, inject #UD if in svm_check_emulate_instruction()
4759 * this path should never be hit by a well-behaved guest, e.g. KVM in svm_check_emulate_instruction()
4760 * doesn't intercept #UD or #GP for SEV guests, but this path is still in svm_check_emulate_instruction()
4761 * theoretically reachable, e.g. via unaccelerated fault-like AVIC in svm_check_emulate_instruction()
4763 * into an infinite loop. Injecting #UD is somewhat arbitrary, but in svm_check_emulate_instruction()
4766 * If KVM is trying to skip an instruction, simply resume the guest. in svm_check_emulate_instruction()
4767 * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM in svm_check_emulate_instruction()
4768 * will attempt to re-inject the INT3/INTO and skip the instruction. in svm_check_emulate_instruction()
4770 * make forward progress is the only option that has a chance of in svm_check_emulate_instruction()
4782 * Emulate for SEV guests if the insn buffer is not empty. The buffer in svm_check_emulate_instruction()
4786 * table used to translate CS:RIP resides in emulated MMIO. in svm_check_emulate_instruction()
4795 * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is in svm_check_emulate_instruction()
4797 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly in svm_check_emulate_instruction()
4798 * be '0'. This happens because microcode reads CS:RIP using a _data_ in svm_check_emulate_instruction()
4802 * As above, KVM reaches this point iff the VM is an SEV guest, the CPU in svm_check_emulate_instruction()
4808 * DecodeAssist will also fail if the load for CS:RIP hits a legitimate in svm_check_emulate_instruction()
4810 * encountered a reserved/not-present #PF. in svm_check_emulate_instruction()
4817 * 3. The #NPF is not due to a code fetch, in which case failure to in svm_check_emulate_instruction()
4818 * retrieve the instruction bytes is legitimate (see above). in svm_check_emulate_instruction()
4823 error_code = to_svm(vcpu)->vmcb->control.exit_info_1; in svm_check_emulate_instruction()
4840 * In practice, the triple fault is moot as no sane SEV kernel in svm_check_emulate_instruction()
4854 * if the fault is at CPL=0, it's the lesser of all evils. Exiting to in svm_check_emulate_instruction()
4858 * Simply resuming the guest is technically not a violation of the SEV in svm_check_emulate_instruction()
4860 * accesses for SEV guests are encrypted, regardless of the C-Bit. The in svm_check_emulate_instruction()
4863 * the guest spin is technically "ignoring" the access. in svm_check_emulate_instruction()
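Pulling the long comment block above together, the SEV emulation check reduces to a short decision tree: non-SEV guests can always be emulated, SEV-ES guests never, plain SEV guests only when instruction bytes are available; an empty buffer then splits on whether KVM was merely skipping an instruction. A heavily simplified sketch of that flow, not the exact set of checks (it omits the CPL and code-fetch heuristics for the DecodeAssist erratum):

#include <stdbool.h>
#include <stddef.h>

enum demo_emul { DEMO_EMULATE, DEMO_DONT_EMULATE, DEMO_RESUME_GUEST, DEMO_INJECT_UD };

struct demo_emul_ctx {
        bool   sev;             /* SEV guest? */
        bool   sev_es;          /* SEV-ES/SNP guest? */
        bool   insn_decoded;    /* caller already decoded the instruction */
        size_t insn_len;        /* bytes the CPU captured via DecodeAssist */
        bool   skip_only;       /* KVM only wants to skip the instruction */
};

enum demo_emul demo_check_emulate(const struct demo_emul_ctx *c)
{
        if (!c->sev)
                return DEMO_EMULATE;            /* full register/memory access */
        if (c->sev_es)
                return DEMO_DONT_EMULATE;       /* no guest state visible at all */
        if (c->insn_decoded || c->insn_len)
                return DEMO_EMULATE;            /* bytes available => emulate */

        /*
         * Empty buffer: either the guest did something KVM cannot decrypt or
         * the DecodeAssist/SMAP erratum ate the bytes.  Resuming lets a
         * well-behaved guest make progress; otherwise inject #UD.
         */
        return c->skip_only ? DEMO_RESUME_GUEST : DEMO_INJECT_UD;
}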
4877 if (!sev_es_guest(vcpu->kvm)) in svm_vcpu_deliver_sipi_vector()
4892 kvm->arch.pause_in_guest = true; in svm_vm_init()
5037 * The default MMIO mask is a single bit (excluding the present bit),
5040 * memory encryption is enabled.
5047 /* If there is no memory encryption support, use existing mask */ in svm_adjust_mmio_mask()
5051 /* If memory encryption is not enabled, use existing mask */ in svm_adjust_mmio_mask()
5059 /* Increment the mask bit if it is the same as the encryption bit */ in svm_adjust_mmio_mask()
5064 * If the mask bit location is below 52, then some bits above the in svm_adjust_mmio_mask()
5070 * If the mask bit location is 52 (or above), then clear the mask. in svm_adjust_mmio_mask()
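The comments above describe a concrete computation: start from the physical-address width, step over the encryption bit if the two collide, and only build a reserved-bit MMIO mask if the result still lands below bit 52. A standalone sketch of that arithmetic; the helper and the present-bit value are simplified stand-ins:

#include <stdint.h>

#define DEMO_PT_PRESENT (1ull << 0)     /* page-table present bit */

/* Bits lo..hi set, i.e. the reserved physical-address bits. */
uint64_t demo_rsvd_bits(int lo, int hi)
{
        return ((1ull << (hi - lo + 1)) - 1) << lo;
}

/*
 * Derive the MMIO SPTE mask: pick a reserved physical-address bit so that a
 * guest access faults with PFERR.RSVD set, but never reuse the encryption
 * bit and never use bit 52 or above.
 */
uint64_t demo_mmio_mask(int phys_bits, int enc_bit)
{
        int mask_bit = phys_bits;

        if (mask_bit == enc_bit)
                mask_bit++;             /* don't collide with the encryption bit */

        if (mask_bit >= 52)
                return 0;               /* no usable reserved bit: clear the mask */

        return demo_rsvd_bits(mask_bit, 51) | DEMO_PT_PRESENT;
}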
5092 * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush. in svm_set_cpu_caps()
5161 * NX is required for shadow paging and for NPT if the NX huge pages in svm_hardware_setup()
5162 * mitigation is enabled. in svm_hardware_setup()
5166 return -EOPNOTSUPP; in svm_hardware_setup()
5173 return -ENOMEM; in svm_hardware_setup()
5217 * KVM's MMU doesn't support using 2-level paging for itself, and thus in svm_hardware_setup()
5218 * NPT isn't supported if the host is using 2-level paging since host in svm_hardware_setup()
5219 * CR4 is unchanged on VMRUN. in svm_hardware_setup()
5301 pr_info("PMU virtualization is disabled\n"); in svm_hardware_setup()
5306 * It seems that on AMD processors PTE's accessed bit is in svm_hardware_setup()
5308 * This is not expected behaviour and our tests fail because in svm_hardware_setup()
5310 * A workaround here is to disable support for in svm_hardware_setup()
5311 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled. in svm_hardware_setup()
5312 * In this case userspace can know if there is support using in svm_hardware_setup()
5349 return -EOPNOTSUPP; in svm_init()
5358 * Common KVM initialization _must_ come last, after this, /dev/kvm is in svm_init()