Lines Matching full:vcpu

109 	((struct kvm_vcpu *)(ctxt)->vcpu)
129 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
130 static void process_nmi(struct kvm_vcpu *vcpu);
131 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
132 static void store_regs(struct kvm_vcpu *vcpu);
133 static int sync_regs(struct kvm_vcpu *vcpu);
134 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
136 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
137 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
258 STATS_DESC_COUNTER(VCPU, pf_taken),
259 STATS_DESC_COUNTER(VCPU, pf_fixed),
260 STATS_DESC_COUNTER(VCPU, pf_emulate),
261 STATS_DESC_COUNTER(VCPU, pf_spurious),
262 STATS_DESC_COUNTER(VCPU, pf_fast),
263 STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
264 STATS_DESC_COUNTER(VCPU, pf_guest),
265 STATS_DESC_COUNTER(VCPU, tlb_flush),
266 STATS_DESC_COUNTER(VCPU, invlpg),
267 STATS_DESC_COUNTER(VCPU, exits),
268 STATS_DESC_COUNTER(VCPU, io_exits),
269 STATS_DESC_COUNTER(VCPU, mmio_exits),
270 STATS_DESC_COUNTER(VCPU, signal_exits),
271 STATS_DESC_COUNTER(VCPU, irq_window_exits),
272 STATS_DESC_COUNTER(VCPU, nmi_window_exits),
273 STATS_DESC_COUNTER(VCPU, l1d_flush),
274 STATS_DESC_COUNTER(VCPU, halt_exits),
275 STATS_DESC_COUNTER(VCPU, request_irq_exits),
276 STATS_DESC_COUNTER(VCPU, irq_exits),
277 STATS_DESC_COUNTER(VCPU, host_state_reload),
278 STATS_DESC_COUNTER(VCPU, fpu_reload),
279 STATS_DESC_COUNTER(VCPU, insn_emulation),
280 STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
281 STATS_DESC_COUNTER(VCPU, hypercalls),
282 STATS_DESC_COUNTER(VCPU, irq_injections),
283 STATS_DESC_COUNTER(VCPU, nmi_injections),
284 STATS_DESC_COUNTER(VCPU, req_event),
285 STATS_DESC_COUNTER(VCPU, nested_run),
286 STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
287 STATS_DESC_COUNTER(VCPU, directed_yield_successful),
288 STATS_DESC_COUNTER(VCPU, preemption_reported),
289 STATS_DESC_COUNTER(VCPU, preemption_other),
290 STATS_DESC_IBOOLEAN(VCPU, guest_mode),
291 STATS_DESC_COUNTER(VCPU, notify_window_exits),
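The STATS_DESC_COUNTER(VCPU, ...) entries above declare per-vCPU counters that live in vcpu->stat and are exported through KVM's binary stats interface; as later matches in this listing show (++vcpu->stat.pf_guest, ++vcpu->stat.tlb_flush), handlers bump them in place. A minimal sketch of that pattern, using a hypothetical exit handler and assuming the usual KVM-internal headers:

static int example_handle_io_exit(struct kvm_vcpu *vcpu)
{
	/* Each counter named in the descriptor table is a plain u64 in vcpu->stat. */
	++vcpu->stat.io_exits;
	return 1;	/* 1 = keep running the guest, 0 = exit to userspace */
}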
461 * patch, are immutable once the vCPU model is defined.
495 typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
498 static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr, in kvm_do_msr_access() argument
512 ret = msr_access_fn(vcpu, msr, data, host_initiated); in kvm_do_msr_access()
553 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
557 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
736 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu, in kvm_deliver_exception_payload() argument
749 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
766 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
767 vcpu->arch.dr6 |= ex->payload; in kvm_deliver_exception_payload()
768 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
776 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
779 vcpu->arch.cr2 = ex->payload; in kvm_deliver_exception_payload()
788 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector, in kvm_queue_exception_vmexit() argument
792 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; in kvm_queue_exception_vmexit()
803 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned int nr, in kvm_multiple_exception() argument
810 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_multiple_exception()
816 if (is_guest_mode(vcpu) && in kvm_multiple_exception()
817 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) { in kvm_multiple_exception()
818 kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code, in kvm_multiple_exception()
823 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
825 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
826 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
828 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
829 vcpu->arch.exception.vector = nr; in kvm_multiple_exception()
830 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
831 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
832 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
833 if (!is_guest_mode(vcpu)) in kvm_multiple_exception()
834 kvm_deliver_exception_payload(vcpu, in kvm_multiple_exception()
835 &vcpu->arch.exception); in kvm_multiple_exception()
840 prev_nr = vcpu->arch.exception.vector; in kvm_multiple_exception()
843 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_multiple_exception()
854 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
855 vcpu->arch.exception.pending = false; in kvm_multiple_exception()
857 kvm_queue_exception_e(vcpu, DF_VECTOR, 0); in kvm_multiple_exception()
866 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_queue_exception() argument
868 kvm_multiple_exception(vcpu, nr, false, 0, false, 0); in kvm_queue_exception()
873 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_p() argument
876 kvm_multiple_exception(vcpu, nr, false, 0, true, payload); in kvm_queue_exception_p()
880 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_e_p() argument
883 kvm_multiple_exception(vcpu, nr, true, error_code, true, payload); in kvm_queue_exception_e_p()
886 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr, in kvm_requeue_exception() argument
896 WARN_ON_ONCE(kvm_is_exception_pending(vcpu)); in kvm_requeue_exception()
904 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_requeue_exception()
906 vcpu->arch.exception.injected = true; in kvm_requeue_exception()
907 vcpu->arch.exception.has_error_code = has_error_code; in kvm_requeue_exception()
908 vcpu->arch.exception.vector = nr; in kvm_requeue_exception()
909 vcpu->arch.exception.error_code = error_code; in kvm_requeue_exception()
910 vcpu->arch.exception.has_payload = false; in kvm_requeue_exception()
911 vcpu->arch.exception.payload = 0; in kvm_requeue_exception()
915 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) in kvm_complete_insn_gp() argument
918 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
920 return kvm_skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
926 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err) in complete_emulated_insn_gp() argument
929 kvm_inject_gp(vcpu, 0); in complete_emulated_insn_gp()
933 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP | in complete_emulated_insn_gp()
937 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_inject_page_fault() argument
939 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
945 if (is_guest_mode(vcpu) && fault->async_page_fault) in kvm_inject_page_fault()
946 kvm_queue_exception_vmexit(vcpu, PF_VECTOR, in kvm_inject_page_fault()
950 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
954 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, in kvm_inject_emulated_page_fault() argument
960 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
961 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
969 kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
972 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
976 void kvm_inject_nmi(struct kvm_vcpu *vcpu) in kvm_inject_nmi() argument
978 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
979 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_inject_nmi()
982 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_queue_exception_e() argument
984 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0); in kvm_queue_exception_e()
992 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) in kvm_require_cpl() argument
994 if (kvm_x86_call(get_cpl)(vcpu) <= required_cpl) in kvm_require_cpl()
996 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
1000 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) in kvm_require_dr() argument
1002 if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE)) in kvm_require_dr()
1005 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_require_dr()
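The exception helpers above share one pattern: queue the architectural fault via kvm_queue_exception{,_e,_p}() and report failure so the caller can bail out without completing the instruction. A hedged sketch of a caller, with a made-up handler name:

static int example_emulate_privileged_op(struct kvm_vcpu *vcpu)
{
	/* kvm_require_cpl() queues #GP(0) itself and returns false if CPL is too high. */
	if (!kvm_require_cpl(vcpu, 0))
		return 1;	/* exception already queued; resume the guest */

	/* ... perform the privileged operation here ... */

	return kvm_skip_emulated_instruction(vcpu);
}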
1010 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) in pdptr_rsvd_bits() argument
1012 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); in pdptr_rsvd_bits()
1018 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) in load_pdptrs() argument
1020 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
1031 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn), in load_pdptrs()
1037 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte, in load_pdptrs()
1044 (pdpte[i] & pdptr_rsvd_bits(vcpu))) { in load_pdptrs()
1054 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT); in load_pdptrs()
1057 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); in load_pdptrs()
1058 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); in load_pdptrs()
1059 vcpu->arch.pdptrs_from_userspace = false; in load_pdptrs()
1065 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_is_valid_cr0() argument
1078 return kvm_x86_call(is_valid_cr0)(vcpu, cr0); in kvm_is_valid_cr0()
1081 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) in kvm_post_set_cr0() argument
1096 kvm_init_mmu(vcpu); in kvm_post_set_cr0()
1102 kvm_clear_async_pf_completion_queue(vcpu); in kvm_post_set_cr0()
1103 kvm_async_pf_hash_reset(vcpu); in kvm_post_set_cr0()
1110 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_post_set_cr0()
1114 kvm_mmu_reset_context(vcpu); in kvm_post_set_cr0()
1118 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_set_cr0() argument
1120 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_set_cr0()
1122 if (!kvm_is_valid_cr0(vcpu, cr0)) in kvm_set_cr0()
1131 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
1135 if (!is_pae(vcpu)) in kvm_set_cr0()
1137 kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
1142 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
1143 is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) && in kvm_set_cr0()
1144 !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
1148 (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))) in kvm_set_cr0()
1151 kvm_x86_call(set_cr0)(vcpu, cr0); in kvm_set_cr0()
1153 kvm_post_set_cr0(vcpu, old_cr0, cr0); in kvm_set_cr0()
1159 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) in kvm_lmsw() argument
1161 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
1165 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_guest_xsave_state() argument
1167 if (vcpu->arch.guest_state_protected) in kvm_load_guest_xsave_state()
1170 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_guest_xsave_state()
1172 if (vcpu->arch.xcr0 != kvm_host.xcr0) in kvm_load_guest_xsave_state()
1173 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
1175 if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) && in kvm_load_guest_xsave_state()
1176 vcpu->arch.ia32_xss != kvm_host.xss) in kvm_load_guest_xsave_state()
1177 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
1181 vcpu->arch.pkru != vcpu->arch.host_pkru && in kvm_load_guest_xsave_state()
1182 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_guest_xsave_state()
1183 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) in kvm_load_guest_xsave_state()
1184 wrpkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
1188 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_host_xsave_state() argument
1190 if (vcpu->arch.guest_state_protected) in kvm_load_host_xsave_state()
1194 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_host_xsave_state()
1195 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) { in kvm_load_host_xsave_state()
1196 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
1197 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
1198 wrpkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
1201 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_host_xsave_state()
1203 if (vcpu->arch.xcr0 != kvm_host.xcr0) in kvm_load_host_xsave_state()
1206 if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) && in kvm_load_host_xsave_state()
1207 vcpu->arch.ia32_xss != kvm_host.xss) in kvm_load_host_xsave_state()
1215 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu) in kvm_guest_supported_xfd() argument
1217 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC; in kvm_guest_supported_xfd()
1221 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in __kvm_set_xcr() argument
1224 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
1240 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
1259 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
1262 vcpu->arch.cpuid_dynamic_bits_dirty = true; in __kvm_set_xcr()
1266 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) in kvm_emulate_xsetbv() argument
1269 if (kvm_x86_call(get_cpl)(vcpu) != 0 || in kvm_emulate_xsetbv()
1270 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { in kvm_emulate_xsetbv()
1271 kvm_inject_gp(vcpu, 0); in kvm_emulate_xsetbv()
1275 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_xsetbv()
1279 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_is_valid_cr4() argument
1281 return __kvm_is_valid_cr4(vcpu, cr4) && in kvm_is_valid_cr4()
1282 kvm_x86_call(is_valid_cr4)(vcpu, cr4); in kvm_is_valid_cr4()
1285 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) in kvm_post_set_cr4() argument
1288 kvm_mmu_reset_context(vcpu); in kvm_post_set_cr4()
1300 kvm_mmu_unload(vcpu); in kvm_post_set_cr4()
1312 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_post_set_cr4()
1322 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_post_set_cr4()
1327 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_set_cr4() argument
1329 unsigned long old_cr4 = kvm_read_cr4(vcpu); in kvm_set_cr4()
1331 if (!kvm_is_valid_cr4(vcpu, cr4)) in kvm_set_cr4()
1334 if (is_long_mode(vcpu)) { in kvm_set_cr4()
1339 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) in kvm_set_cr4()
1341 && !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) in kvm_set_cr4()
1346 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) in kvm_set_cr4()
1350 kvm_x86_call(set_cr4)(vcpu, cr4); in kvm_set_cr4()
1352 kvm_post_set_cr4(vcpu, old_cr4, cr4); in kvm_set_cr4()
1358 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) in kvm_invalidate_pcid() argument
1360 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_invalidate_pcid()
1372 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_invalidate_pcid()
1381 if (kvm_get_active_pcid(vcpu) == pcid) { in kvm_invalidate_pcid()
1382 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); in kvm_invalidate_pcid()
1383 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_invalidate_pcid()
1391 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) in kvm_invalidate_pcid()
1395 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) in kvm_invalidate_pcid()
1398 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); in kvm_invalidate_pcid()
1401 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in kvm_set_cr3() argument
1406 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) { in kvm_set_cr3()
1414 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu)) in kvm_set_cr3()
1420 * the current vCPU mode is accurate. in kvm_set_cr3()
1422 if (!kvm_vcpu_is_legal_cr3(vcpu, cr3)) in kvm_set_cr3()
1425 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) in kvm_set_cr3()
1428 if (cr3 != kvm_read_cr3(vcpu)) in kvm_set_cr3()
1429 kvm_mmu_new_pgd(vcpu, cr3); in kvm_set_cr3()
1431 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1432 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); in kvm_set_cr3()
1444 kvm_invalidate_pcid(vcpu, pcid); in kvm_set_cr3()
1450 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_set_cr8() argument
1454 if (lapic_in_kernel(vcpu)) in kvm_set_cr8()
1455 kvm_lapic_set_tpr(vcpu, cr8); in kvm_set_cr8()
1457 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1462 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) in kvm_get_cr8() argument
1464 if (lapic_in_kernel(vcpu)) in kvm_get_cr8()
1465 return kvm_lapic_get_cr8(vcpu); in kvm_get_cr8()
1467 return vcpu->arch.cr8; in kvm_get_cr8()
1471 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) in kvm_update_dr0123() argument
1475 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1477 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1481 void kvm_update_dr7(struct kvm_vcpu *vcpu) in kvm_update_dr7() argument
1485 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1486 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1488 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1489 kvm_x86_call(set_dr7)(vcpu, dr7); in kvm_update_dr7()
1490 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1492 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1496 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) in kvm_dr6_fixed() argument
1500 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_RTM)) in kvm_dr6_fixed()
1503 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) in kvm_dr6_fixed()
1508 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in kvm_set_dr() argument
1510 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_set_dr()
1514 vcpu->arch.db[array_index_nospec(dr, size)] = val; in kvm_set_dr()
1515 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_set_dr()
1516 vcpu->arch.eff_db[dr] = val; in kvm_set_dr()
1522 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in kvm_set_dr()
1528 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in kvm_set_dr()
1529 kvm_update_dr7(vcpu); in kvm_set_dr()
1537 unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr) in kvm_get_dr() argument
1539 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1543 return vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1546 return vcpu->arch.dr6; in kvm_get_dr()
1549 return vcpu->arch.dr7; in kvm_get_dr()
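kvm_set_dr()/kvm_get_dr() above index DR0-DR3 through array_index_nospec() and special-case DR6/DR7; callers treat them as simple accessors. A small, hypothetical usage sketch:

static void example_snapshot_debug_regs(struct kvm_vcpu *vcpu, unsigned long *db,
					unsigned long *dr6, unsigned long *dr7)
{
	int i;

	for (i = 0; i < 4; i++)
		db[i] = kvm_get_dr(vcpu, i);	/* DR0..DR3 from vcpu->arch.db[] */
	*dr6 = kvm_get_dr(vcpu, 6);
	*dr7 = kvm_get_dr(vcpu, 7);
}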
1554 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) in kvm_emulate_rdpmc() argument
1556 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdpmc()
1559 if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { in kvm_emulate_rdpmc()
1560 kvm_inject_gp(vcpu, 0); in kvm_emulate_rdpmc()
1564 kvm_rax_write(vcpu, (u32)data); in kvm_emulate_rdpmc()
1565 kvm_rdx_write(vcpu, data >> 32); in kvm_emulate_rdpmc()
1566 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_rdpmc()
1649 static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, in kvm_get_feature_msr() argument
1673 static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_feature_msr() argument
1675 return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R, in do_get_feature_msr()
1679 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in __kvm_valid_efer() argument
1681 if (efer & EFER_AUTOIBRS && !guest_cpu_cap_has(vcpu, X86_FEATURE_AUTOIBRS)) in __kvm_valid_efer()
1684 if (efer & EFER_FFXSR && !guest_cpu_cap_has(vcpu, X86_FEATURE_FXSR_OPT)) in __kvm_valid_efer()
1687 if (efer & EFER_SVME && !guest_cpu_cap_has(vcpu, X86_FEATURE_SVM)) in __kvm_valid_efer()
1691 !guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) in __kvm_valid_efer()
1694 if (efer & EFER_NX && !guest_cpu_cap_has(vcpu, X86_FEATURE_NX)) in __kvm_valid_efer()
1700 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in kvm_valid_efer() argument
1705 return __kvm_valid_efer(vcpu, efer); in kvm_valid_efer()
1709 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_efer() argument
1711 u64 old_efer = vcpu->arch.efer; in set_efer()
1719 if (!__kvm_valid_efer(vcpu, efer)) in set_efer()
1722 if (is_paging(vcpu) && in set_efer()
1723 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1728 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1730 r = kvm_x86_call(set_efer)(vcpu, efer); in set_efer()
1737 kvm_mmu_reset_context(vcpu); in set_efer()
1741 kvm_hv_xsaves_xsavec_maybe_warn(vcpu); in set_efer()
1752 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) in kvm_msr_allowed() argument
1756 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1801 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, in __kvm_set_msr() argument
1812 if (is_noncanonical_msr_address(data, vcpu)) in __kvm_set_msr()
1836 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) && in __kvm_set_msr()
1837 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID)) in __kvm_set_msr()
1849 if (guest_cpuid_is_intel_compatible(vcpu) && (data >> 32) != 0) in __kvm_set_msr()
1860 return kvm_x86_call(set_msr)(vcpu, &msr); in __kvm_set_msr()
1863 static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, in _kvm_set_msr() argument
1866 return __kvm_set_msr(vcpu, index, *data, host_initiated); in _kvm_set_msr()
1869 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_set_msr_ignored_check() argument
1872 return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W, in kvm_set_msr_ignored_check()
1882 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, in __kvm_get_msr() argument
1894 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) && in __kvm_get_msr()
1895 !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID)) in __kvm_get_msr()
1903 ret = kvm_x86_call(get_msr)(vcpu, &msr); in __kvm_get_msr()
1909 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_get_msr_ignored_check() argument
1912 return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R, in kvm_get_msr_ignored_check()
1916 int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) in kvm_get_msr_with_filter() argument
1918 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) in kvm_get_msr_with_filter()
1920 return kvm_get_msr_ignored_check(vcpu, index, data, false); in kvm_get_msr_with_filter()
1924 int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) in kvm_set_msr_with_filter() argument
1926 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) in kvm_set_msr_with_filter()
1928 return kvm_set_msr_ignored_check(vcpu, index, data, false); in kvm_set_msr_with_filter()
1932 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) in kvm_get_msr() argument
1934 return kvm_get_msr_ignored_check(vcpu, index, data, false); in kvm_get_msr()
1938 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) in kvm_set_msr() argument
1940 return kvm_set_msr_ignored_check(vcpu, index, data, false); in kvm_set_msr()
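kvm_get_msr()/kvm_set_msr() above are the in-kernel accessors that skip the userspace MSR filter (both still pass host_initiated = false); the *_with_filter() variants run kvm_msr_allowed() first and are used when emulating guest RDMSR/WRMSR. A small, hypothetical usage sketch:

static void example_clear_tsc_aux(struct kvm_vcpu *vcpu)
{
	u64 data;

	/* Unfiltered access; both calls return 0 on success. */
	if (!kvm_get_msr(vcpu, MSR_TSC_AUX, &data) && data)
		kvm_set_msr(vcpu, MSR_TSC_AUX, 0);
}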
1944 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) in complete_userspace_rdmsr() argument
1946 if (!vcpu->run->msr.error) { in complete_userspace_rdmsr()
1947 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_userspace_rdmsr()
1948 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_userspace_rdmsr()
1952 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) in complete_emulated_msr_access() argument
1954 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); in complete_emulated_msr_access()
1957 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) in complete_emulated_rdmsr() argument
1959 complete_userspace_rdmsr(vcpu); in complete_emulated_rdmsr()
1960 return complete_emulated_msr_access(vcpu); in complete_emulated_rdmsr()
1963 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) in complete_fast_msr_access() argument
1965 return kvm_x86_call(complete_emulated_msr)(vcpu, vcpu->run->msr.error); in complete_fast_msr_access()
1968 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) in complete_fast_rdmsr() argument
1970 complete_userspace_rdmsr(vcpu); in complete_fast_rdmsr()
1971 return complete_fast_msr_access(vcpu); in complete_fast_rdmsr()
1986 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, in kvm_msr_user_space() argument
1988 int (*completion)(struct kvm_vcpu *vcpu), in kvm_msr_user_space() argument
1994 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
1997 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
1998 vcpu->run->msr.error = 0; in kvm_msr_user_space()
1999 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
2000 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
2001 vcpu->run->msr.index = index; in kvm_msr_user_space()
2002 vcpu->run->msr.data = data; in kvm_msr_user_space()
2003 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
2008 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) in kvm_emulate_rdmsr() argument
2010 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdmsr()
2014 r = kvm_get_msr_with_filter(vcpu, ecx, &data); in kvm_emulate_rdmsr()
2019 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
2020 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
2023 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, in kvm_emulate_rdmsr()
2029 return kvm_x86_call(complete_emulated_msr)(vcpu, r); in kvm_emulate_rdmsr()
2033 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) in kvm_emulate_wrmsr() argument
2035 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_wrmsr()
2036 u64 data = kvm_read_edx_eax(vcpu); in kvm_emulate_wrmsr()
2039 r = kvm_set_msr_with_filter(vcpu, ecx, data); in kvm_emulate_wrmsr()
2045 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, in kvm_emulate_wrmsr()
2054 return kvm_x86_call(complete_emulated_msr)(vcpu, r); in kvm_emulate_wrmsr()
2058 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) in kvm_emulate_as_nop() argument
2060 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_as_nop()
2063 int kvm_emulate_invd(struct kvm_vcpu *vcpu) in kvm_emulate_invd() argument
2066 return kvm_emulate_as_nop(vcpu); in kvm_emulate_invd()
2070 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) in kvm_handle_invalid_op() argument
2072 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_handle_invalid_op()
2078 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) in kvm_emulate_monitor_mwait() argument
2082 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)) in kvm_emulate_monitor_mwait()
2085 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) in kvm_emulate_monitor_mwait()
2086 enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT); in kvm_emulate_monitor_mwait()
2088 enabled = vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT; in kvm_emulate_monitor_mwait()
2091 return kvm_handle_invalid_op(vcpu); in kvm_emulate_monitor_mwait()
2095 return kvm_emulate_as_nop(vcpu); in kvm_emulate_monitor_mwait()
2097 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) in kvm_emulate_mwait() argument
2099 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); in kvm_emulate_mwait()
2103 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) in kvm_emulate_monitor() argument
2105 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); in kvm_emulate_monitor()
2109 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) in kvm_vcpu_exit_request() argument
2113 return READ_ONCE(vcpu->mode) == EXITING_GUEST_MODE || in kvm_vcpu_exit_request()
2114 kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending(); in kvm_vcpu_exit_request()
2124 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_x2apic_icr_irqoff() argument
2126 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
2133 return kvm_x2apic_icr_write(vcpu->arch.apic, data); in handle_fastpath_set_x2apic_icr_irqoff()
2138 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_tscdeadline() argument
2140 if (!kvm_can_use_hv_timer(vcpu)) in handle_fastpath_set_tscdeadline()
2143 kvm_set_lapic_tscdeadline_msr(vcpu, data); in handle_fastpath_set_tscdeadline()
2147 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) in handle_fastpath_set_msr_irqoff() argument
2149 u32 msr = kvm_rcx_read(vcpu); in handle_fastpath_set_msr_irqoff()
2154 kvm_vcpu_srcu_read_lock(vcpu); in handle_fastpath_set_msr_irqoff()
2158 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
2159 handled = !handle_fastpath_set_x2apic_icr_irqoff(vcpu, data); in handle_fastpath_set_msr_irqoff()
2162 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
2163 handled = !handle_fastpath_set_tscdeadline(vcpu, data); in handle_fastpath_set_msr_irqoff()
2171 if (!kvm_skip_emulated_instruction(vcpu)) in handle_fastpath_set_msr_irqoff()
2180 kvm_vcpu_srcu_read_unlock(vcpu); in handle_fastpath_set_msr_irqoff()
2189 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr() argument
2191 return kvm_get_msr_ignored_check(vcpu, index, data, true); in do_get_msr()
2194 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_set_msr() argument
2200 * not support modifying the guest vCPU model on the fly, e.g. changing in do_set_msr()
2205 if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index) && in do_set_msr()
2206 (do_get_msr(vcpu, index, &val) || *data != val)) in do_set_msr()
2209 return kvm_set_msr_ignored_check(vcpu, index, *data, true); in do_set_msr()
2319 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, in kvm_write_system_time() argument
2322 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2324 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2326 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_write_system_time()
2331 vcpu->arch.time = system_time; in kvm_write_system_time()
2332 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_write_system_time()
2336 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2339 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvm_write_system_time()
2392 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2394 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in set_tsc_khz() argument
2400 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); in set_tsc_khz()
2407 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2408 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2426 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); in set_tsc_khz()
2430 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) in kvm_set_tsc_khz() argument
2438 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); in kvm_set_tsc_khz()
2444 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2445 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2446 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2461 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); in kvm_set_tsc_khz()
2464 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) in compute_guest_tsc() argument
2466 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2467 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2468 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2469 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
2480 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu, bool new_generation) in kvm_track_tsc_matching() argument
2483 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2489 * vCPUs doesn't include the reference vCPU, hence "+1". in kvm_track_tsc_matching()
2492 atomic_read(&vcpu->kvm->online_vcpus)) && in kvm_track_tsc_matching()
2503 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_track_tsc_matching()
2505 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2506 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2536 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in kvm_compute_l1_tsc_offset() argument
2540 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); in kvm_compute_l1_tsc_offset()
2545 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in kvm_read_l1_tsc() argument
2547 return vcpu->arch.l1_tsc_offset + in kvm_read_l1_tsc()
2548 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); in kvm_read_l1_tsc()
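kvm_read_l1_tsc() above composes the two per-vCPU TSC parameters: kvm_scale_tsc() applies the fixed-point multiplier (the fractional-bit count comes from the vendor module via kvm_caps.tsc_scaling_ratio_frac_bits) and the L1 offset is added on top, i.e. roughly:

	guest_l1_tsc = ((host_tsc * l1_tsc_scaling_ratio)
			>> kvm_caps.tsc_scaling_ratio_frac_bits) + l1_tsc_offset

kvm_compute_l1_tsc_offset() is essentially the inverse, returning target_tsc minus the scaled host TSC.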
2577 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) in kvm_vcpu_write_tsc_offset() argument
2579 if (vcpu->arch.guest_tsc_protected) in kvm_vcpu_write_tsc_offset()
2582 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in kvm_vcpu_write_tsc_offset()
2583 vcpu->arch.l1_tsc_offset, in kvm_vcpu_write_tsc_offset()
2586 vcpu->arch.l1_tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2593 if (is_guest_mode(vcpu)) in kvm_vcpu_write_tsc_offset()
2594 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in kvm_vcpu_write_tsc_offset()
2596 kvm_x86_call(get_l2_tsc_offset)(vcpu), in kvm_vcpu_write_tsc_offset()
2597 kvm_x86_call(get_l2_tsc_multiplier)(vcpu)); in kvm_vcpu_write_tsc_offset()
2599 vcpu->arch.tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2601 kvm_x86_call(write_tsc_offset)(vcpu); in kvm_vcpu_write_tsc_offset()
2604 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) in kvm_vcpu_write_tsc_multiplier() argument
2606 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2609 if (is_guest_mode(vcpu)) in kvm_vcpu_write_tsc_multiplier()
2610 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in kvm_vcpu_write_tsc_multiplier()
2612 kvm_x86_call(get_l2_tsc_multiplier)(vcpu)); in kvm_vcpu_write_tsc_multiplier()
2614 vcpu->arch.tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2617 kvm_x86_call(write_tsc_multiplier)(vcpu); in kvm_vcpu_write_tsc_multiplier()
2635 * offset for the vcpu and tracks the TSC matching generation that the vcpu
2638 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, in __kvm_synchronize_tsc() argument
2641 struct kvm *kvm = vcpu->kvm; in __kvm_synchronize_tsc()
2645 if (vcpu->arch.guest_tsc_protected) in __kvm_synchronize_tsc()
2649 vcpu->kvm->arch.user_set_tsc = true; in __kvm_synchronize_tsc()
2657 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in __kvm_synchronize_tsc()
2660 vcpu->arch.last_guest_tsc = tsc; in __kvm_synchronize_tsc()
2662 kvm_vcpu_write_tsc_offset(vcpu, offset); in __kvm_synchronize_tsc()
2679 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { in __kvm_synchronize_tsc()
2683 /* Keep track of which generation this VCPU has synchronized to */ in __kvm_synchronize_tsc()
2684 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in __kvm_synchronize_tsc()
2685 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in __kvm_synchronize_tsc()
2686 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in __kvm_synchronize_tsc()
2688 kvm_track_tsc_matching(vcpu, !matched); in __kvm_synchronize_tsc()
2691 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value) in kvm_synchronize_tsc() argument
2694 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2701 offset = kvm_compute_l1_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2705 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2708 * Force synchronization when creating a vCPU, or when in kvm_synchronize_tsc()
2714 nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2715 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2719 * previously set vCPU, we assume that they were intended to be in kvm_synchronize_tsc()
2726 * value written by userspace (on any vCPU) should not be subject in kvm_synchronize_tsc()
2728 * come from the kernel's default vCPU creation. Make the 1-second in kvm_synchronize_tsc()
2744 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2748 u64 delta = nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2750 offset = kvm_compute_l1_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2755 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value); in kvm_synchronize_tsc()
2759 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, in adjust_tsc_offset_guest() argument
2762 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2763 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); in adjust_tsc_offset_guest()
2766 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) in adjust_tsc_offset_host() argument
2768 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2771 vcpu->arch.l1_tsc_scaling_ratio); in adjust_tsc_offset_host()
2772 adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_host()
3041 struct kvm_vcpu *vcpu; in kvm_end_pvclock_update() local
3046 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_end_pvclock_update()
3047 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_end_pvclock_update()
3050 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_end_pvclock_update()
3051 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); in kvm_end_pvclock_update()
3134 struct kvm_vcpu *vcpu, in kvm_setup_guest_pvclock() argument
3157 * This VCPU is paused, but it's legal for a guest to read another in kvm_setup_guest_pvclock()
3158 * VCPU's kvmclock, so we really have to follow the specification where in kvm_setup_guest_pvclock()
3178 trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock); in kvm_setup_guest_pvclock()
3186 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update() local
3227 * 2) Broken TSC compensation resets the base at each VCPU in kvm_guest_time_update()
3233 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
3249 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
3251 &vcpu->pvclock_tsc_shift, in kvm_guest_time_update()
3252 &vcpu->pvclock_tsc_mul); in kvm_guest_time_update()
3253 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
3256 hv_clock.tsc_shift = vcpu->pvclock_tsc_shift; in kvm_guest_time_update()
3257 hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul; in kvm_guest_time_update()
3260 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
3267 if (vcpu->pv_time.active) { in kvm_guest_time_update()
3273 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_guest_time_update()
3275 vcpu->pvclock_set_guest_stopped_request = false; in kvm_guest_time_update()
3277 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0); in kvm_guest_time_update()
3296 if (vcpu->xen.vcpu_info_cache.active) in kvm_guest_time_update()
3297 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache, in kvm_guest_time_update()
3299 if (vcpu->xen.vcpu_time_info_cache.active) in kvm_guest_time_update()
3300 kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0); in kvm_guest_time_update()
3382 * kvmclock updates which are isolated to a given vcpu, such as
3383 * vcpu->cpu migration, should not allow system_timestamp from
3385 * correction applies to one vcpu's system_timestamp but not
3391 * The time for a remote vcpu to update its kvmclock is bound
3404 struct kvm_vcpu *vcpu; in kvmclock_update_fn() local
3406 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
3407 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_update_fn()
3408 kvm_vcpu_kick(vcpu); in kvmclock_update_fn()
3448 static bool can_set_mci_status(struct kvm_vcpu *vcpu) in can_set_mci_status() argument
3451 if (guest_cpuid_is_amd_compatible(vcpu)) in can_set_mci_status()
3452 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
3457 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_msr_mce() argument
3459 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
3467 vcpu->arch.mcg_status = data; in set_msr_mce()
3475 vcpu->arch.mcg_ctl = data; in set_msr_mce()
3489 vcpu->arch.mci_ctl2_banks[offset] = data; in set_msr_mce()
3516 data != 0 && !can_set_mci_status(vcpu)) in set_msr_mce()
3521 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
3529 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) in kvm_pv_async_pf_enabled() argument
3533 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
3536 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf() argument
3544 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && in kvm_pv_enable_async_pf()
3548 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && in kvm_pv_enable_async_pf()
3552 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf()
3555 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
3557 if (!kvm_pv_async_pf_enabled(vcpu)) { in kvm_pv_enable_async_pf()
3558 kvm_clear_async_pf_completion_queue(vcpu); in kvm_pv_enable_async_pf()
3559 kvm_async_pf_hash_reset(vcpu); in kvm_pv_enable_async_pf()
3563 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
3567 vcpu->arch.apf.send_always = (data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
3568 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
3570 kvm_async_pf_wakeup_all(vcpu); in kvm_pv_enable_async_pf()
3575 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf_int() argument
3581 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf_int()
3584 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3586 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3591 static void kvmclock_reset(struct kvm_vcpu *vcpu) in kvmclock_reset() argument
3593 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvmclock_reset()
3594 vcpu->arch.time = 0; in kvmclock_reset()
3597 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_all() argument
3599 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3600 kvm_x86_call(flush_tlb_all)(vcpu); in kvm_vcpu_flush_tlb_all()
3603 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_vcpu_flush_tlb_all()
3606 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_guest() argument
3608 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3617 kvm_mmu_sync_roots(vcpu); in kvm_vcpu_flush_tlb_guest()
3618 kvm_mmu_sync_prev_roots(vcpu); in kvm_vcpu_flush_tlb_guest()
3621 kvm_x86_call(flush_tlb_guest)(vcpu); in kvm_vcpu_flush_tlb_guest()
3627 kvm_hv_vcpu_purge_flush_tlb(vcpu); in kvm_vcpu_flush_tlb_guest()
3631 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_current() argument
3633 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_current()
3634 kvm_x86_call(flush_tlb_current)(vcpu); in kvm_vcpu_flush_tlb_current()
3643 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) in kvm_service_local_tlb_flush_requests() argument
3645 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) in kvm_service_local_tlb_flush_requests()
3646 kvm_vcpu_flush_tlb_current(vcpu); in kvm_service_local_tlb_flush_requests()
3648 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) in kvm_service_local_tlb_flush_requests()
3649 kvm_vcpu_flush_tlb_guest(vcpu); in kvm_service_local_tlb_flush_requests()
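The flush helpers above are request-driven: producers raise KVM_REQ_TLB_FLUSH_GUEST or KVM_REQ_TLB_FLUSH_CURRENT (as kvm_post_set_cr0()/kvm_post_set_cr4() and kvm_invalidate_pcid() do earlier in this listing) and kvm_service_local_tlb_flush_requests() consumes them before re-entering the guest. A minimal sketch of the producer side, with a hypothetical trigger:

static void example_on_guest_visible_tlb_invalidation(struct kvm_vcpu *vcpu)
{
	/*
	 * Defer the work: set the request bit and let
	 * kvm_service_local_tlb_flush_requests() perform the flush on
	 * the next entry into the guest.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
}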
3653 static void record_steal_time(struct kvm_vcpu *vcpu) in record_steal_time() argument
3655 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in record_steal_time()
3658 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in record_steal_time()
3662 if (kvm_xen_msr_enabled(vcpu->kvm)) { in record_steal_time()
3663 kvm_xen_runstate_set_running(vcpu); in record_steal_time()
3667 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3670 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) in record_steal_time()
3673 slots = kvm_memslots(vcpu->kvm); in record_steal_time()
3681 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || in record_steal_time()
3691 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { in record_steal_time()
3710 vcpu->arch.st.preempted = 0; in record_steal_time()
3712 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3715 kvm_vcpu_flush_tlb_guest(vcpu); in record_steal_time()
3724 vcpu->arch.st.preempted = 0; in record_steal_time()
3738 vcpu->arch.st.last_steal; in record_steal_time()
3739 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3748 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in record_steal_time()
3751 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_msr_common() argument
3761 if (kvm_xen_is_hypercall_page_msr(vcpu->kvm, msr) && in kvm_set_msr_common()
3763 return kvm_xen_write_hypercall_page(vcpu, data); in kvm_set_msr_common()
3778 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3782 !guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) in kvm_set_msr_common()
3784 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3788 !guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM)) in kvm_set_msr_common()
3796 * disallows changing feature MSRs after the vCPU has run; PMU in kvm_set_msr_common()
3797 * refresh will bug the VM if called after the vCPU has run. in kvm_set_msr_common()
3799 if (vcpu->arch.perf_capabilities == data) in kvm_set_msr_common()
3802 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3803 kvm_pmu_refresh(vcpu); in kvm_set_msr_common()
3809 if ((!guest_has_pred_cmd_msr(vcpu))) in kvm_set_msr_common()
3812 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) && in kvm_set_msr_common()
3813 !guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB)) in kvm_set_msr_common()
3816 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB)) in kvm_set_msr_common()
3837 !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D)) in kvm_set_msr_common()
3848 return set_efer(vcpu, msr_info); in kvm_set_msr_common()
3860 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
3863 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3867 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
3875 vcpu->arch.pat = data; in kvm_set_msr_common()
3879 return kvm_mtrr_set_msr(vcpu, msr, data); in kvm_set_msr_common()
3881 return kvm_apic_set_base(vcpu, data, msr_info->host_initiated); in kvm_set_msr_common()
3883 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
3885 kvm_set_lapic_tscdeadline_msr(vcpu, data); in kvm_set_msr_common()
3888 if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSC_ADJUST)) { in kvm_set_msr_common()
3890 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3891 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3895 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_msr_common()
3897 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3901 u64 old_val = vcpu->arch.ia32_misc_enable_msr; in kvm_set_msr_common()
3913 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3915 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3)) in kvm_set_msr_common()
3917 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3918 vcpu->arch.cpuid_dynamic_bits_dirty = true; in kvm_set_msr_common()
3920 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3927 vcpu->arch.smbase = data; in kvm_set_msr_common()
3930 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3934 kvm_synchronize_tsc(vcpu, &data); in kvm_set_msr_common()
3935 } else if (!vcpu->arch.guest_tsc_protected) { in kvm_set_msr_common()
3936 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3937 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3938 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3943 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_set_msr_common()
3952 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3953 vcpu->arch.cpuid_dynamic_bits_dirty = true; in kvm_set_msr_common()
3958 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3961 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3964 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3965 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3968 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3971 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3972 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3975 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3978 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3981 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3984 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3987 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_set_msr_common()
3990 if (kvm_pv_enable_async_pf(vcpu, data)) in kvm_set_msr_common()
3994 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
3997 if (kvm_pv_enable_async_pf_int(vcpu, data)) in kvm_set_msr_common()
4001 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
4004 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
4005 kvm_check_async_pf_completion(vcpu); in kvm_set_msr_common()
4009 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_set_msr_common()
4018 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
4023 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_set_msr_common()
4027 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_set_msr_common()
4030 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) in kvm_set_msr_common()
4035 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_set_msr_common()
4042 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
4049 return set_msr_mce(vcpu, msr_info); in kvm_set_msr_common()
4055 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
4056 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
4059 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
4082 return kvm_hv_set_msr_common(vcpu, msr, data, in kvm_set_msr_common()
4089 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
4092 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
4094 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
4097 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
4099 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
4104 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
4109 !supports_cpuid_fault(vcpu))) in kvm_set_msr_common()
4111 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
4116 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) in kvm_set_msr_common()
4119 if (data & ~kvm_guest_supported_xfd(vcpu)) in kvm_set_msr_common()
4122 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); in kvm_set_msr_common()
4126 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) in kvm_set_msr_common()
4129 if (data & ~kvm_guest_supported_xfd(vcpu)) in kvm_set_msr_common()
4132 vcpu->arch.guest_fpu.xfd_err = data; in kvm_set_msr_common()
4136 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
4137 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
4145 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) in get_msr_mce() argument
4148 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
4158 data = vcpu->arch.mcg_cap; in get_msr_mce()
4163 data = vcpu->arch.mcg_ctl; in get_msr_mce()
4166 data = vcpu->arch.mcg_status; in get_msr_mce()
4177 data = vcpu->arch.mci_ctl2_banks[offset]; in get_msr_mce()
4186 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
4195 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_get_msr_common() argument
4233 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
4234 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
4238 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
4241 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) in kvm_get_msr_common()
4243 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
4246 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM)) in kvm_get_msr_common()
4248 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
4251 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
4266 offset = vcpu->arch.l1_tsc_offset; in kvm_get_msr_common()
4267 ratio = vcpu->arch.l1_tsc_scaling_ratio; in kvm_get_msr_common()
4269 offset = vcpu->arch.tsc_offset; in kvm_get_msr_common()
4270 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_get_msr_common()
4277 msr_info->data = vcpu->arch.pat; in kvm_get_msr_common()
4282 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4301 msr_info->data = vcpu->arch.apic_base; in kvm_get_msr_common()
4304 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4306 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
4309 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
4312 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
4317 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
4320 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
4329 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
4332 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
4335 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4338 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
4341 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4344 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
4347 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4350 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
4353 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4356 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_get_msr_common()
4359 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
4362 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
4365 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
4368 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
4374 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_get_msr_common()
4377 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
4380 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_get_msr_common()
4383 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
4386 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_get_msr_common()
4389 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
4398 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
4402 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_get_msr_common()
4404 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
4429 return kvm_hv_get_msr_common(vcpu, in kvm_get_msr_common()
4447 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
4449 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
4452 if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
4454 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
4458 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
4460 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
4463 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
4466 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
4471 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) in kvm_get_msr_common()
4474 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; in kvm_get_msr_common()
4478 !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) in kvm_get_msr_common()
4481 msr_info->data = vcpu->arch.guest_fpu.xfd_err; in kvm_get_msr_common()
4485 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
4486 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
4499 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, in __msr_io() argument
4501 int (*do_msr)(struct kvm_vcpu *vcpu, in __msr_io() argument
4507 if (do_msr(vcpu, entries[i].index, &entries[i].data)) in __msr_io()
4518 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, in msr_io() argument
4519 int (*do_msr)(struct kvm_vcpu *vcpu, in msr_io() argument
4543 r = __msr_io(vcpu, &msrs, entries, do_msr); in msr_io()
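
The __msr_io()/msr_io() pair above is the kernel side of the KVM_GET_MSRS and KVM_SET_MSRS vCPU ioctls (dispatched further down in the kvm_arch_vcpu_ioctl() listing via do_get_msr/do_set_msr). As a rough illustration only, a userspace caller reads a single MSR like this; the helper name and error handling are mine, not KVM's:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Minimal sketch: read one MSR through KVM_GET_MSRS.  "vcpu_fd" is a file
 * descriptor returned by KVM_CREATE_VCPU. */
static int read_one_msr(int vcpu_fd, uint32_t index, uint64_t *value)
{
	struct kvm_msrs *msrs;
	int r;

	msrs = calloc(1, sizeof(*msrs) + sizeof(msrs->entries[0]));
	if (!msrs)
		return -1;

	msrs->nmsrs = 1;
	msrs->entries[0].index = index;

	/* The ioctl returns the number of MSRs actually processed. */
	r = ioctl(vcpu_fd, KVM_GET_MSRS, msrs);
	if (r == 1)
		*value = msrs->entries[0].data;

	free(msrs);
	return r == 1 ? 0 : -1;
}
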
4575 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, in kvm_ioctl_get_supported_hv_cpuid() argument
4585 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_ioctl_get_supported_hv_cpuid()
4973 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) in need_emulate_wbinvd() argument
4975 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
4978 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
4980 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_arch_vcpu_load()
4982 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_vcpu_load()
4984 if (vcpu->scheduled_out && pmu->version && pmu->event_count) { in kvm_arch_vcpu_load()
4986 kvm_make_request(KVM_REQ_PMU, vcpu); in kvm_arch_vcpu_load()
4990 if (need_emulate_wbinvd(vcpu)) { in kvm_arch_vcpu_load()
4992 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4993 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4994 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
4998 kvm_x86_call(vcpu_load)(vcpu, cpu); in kvm_arch_vcpu_load()
5001 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
5004 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
5005 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
5006 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
5007 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
5010 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
5011 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
5012 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
5017 u64 offset = kvm_compute_l1_tsc_offset(vcpu, in kvm_arch_vcpu_load()
5018 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
5019 kvm_vcpu_write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
5020 if (!vcpu->arch.guest_tsc_protected) in kvm_arch_vcpu_load()
5021 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
5024 if (kvm_lapic_hv_timer_in_use(vcpu)) in kvm_arch_vcpu_load()
5025 kvm_lapic_restart_hv_timer(vcpu); in kvm_arch_vcpu_load()
5029 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
5031 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
5032 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
5033 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
5034 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); in kvm_arch_vcpu_load()
5035 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
5038 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
5041 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) in kvm_steal_time_set_preempted() argument
5043 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in kvm_steal_time_set_preempted()
5047 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in kvm_steal_time_set_preempted()
5050 * The vCPU can be marked preempted if and only if the VM-Exit was on in kvm_steal_time_set_preempted()
5053 * when this is true, for example allowing the vCPU to be marked in kvm_steal_time_set_preempted()
5056 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
5057 vcpu->stat.preemption_other++; in kvm_steal_time_set_preempted()
5061 vcpu->stat.preemption_reported++; in kvm_steal_time_set_preempted()
5062 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
5065 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
5069 if (unlikely(current->mm != vcpu->kvm->mm)) in kvm_steal_time_set_preempted()
5072 slots = kvm_memslots(vcpu->kvm); in kvm_steal_time_set_preempted()
5083 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
5085 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_steal_time_set_preempted()
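
kvm_steal_time_set_preempted() publishes KVM_VCPU_PREEMPTED in the preempted byte of the steal-time area the guest registered through MSR_KVM_STEAL_TIME. A minimal guest-side sketch of consuming that flag, assuming "st" points at the registered per-vCPU structure from the x86 kvm_para.h UAPI header:

#include <stdbool.h>
#include <asm/kvm_para.h>	/* struct kvm_steal_time, KVM_VCPU_PREEMPTED */

/* Returns true if the host has marked the vCPU owning "st" as scheduled
 * out, which is what paravirtual spinlock/yield heuristics key off of. */
static bool vcpu_marked_preempted(const volatile struct kvm_steal_time *st)
{
	return st->preempted & KVM_VCPU_PREEMPTED;
}
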
5088 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
5092 if (vcpu->preempted) { in kvm_arch_vcpu_put()
5098 vcpu->arch.preempted_in_kernel = vcpu->arch.guest_state_protected || in kvm_arch_vcpu_put()
5099 !kvm_x86_call(get_cpl_no_cache)(vcpu); in kvm_arch_vcpu_put()
5105 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
5106 if (kvm_xen_msr_enabled(vcpu->kvm)) in kvm_arch_vcpu_put()
5107 kvm_xen_runstate_set_preempted(vcpu); in kvm_arch_vcpu_put()
5109 kvm_steal_time_set_preempted(vcpu); in kvm_arch_vcpu_put()
5110 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
5113 kvm_x86_call(vcpu_put)(vcpu); in kvm_arch_vcpu_put()
5114 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
5117 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_lapic() argument
5120 kvm_x86_call(sync_pir_to_irr)(vcpu); in kvm_vcpu_ioctl_get_lapic()
5122 return kvm_apic_get_state(vcpu, s); in kvm_vcpu_ioctl_get_lapic()
5125 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_lapic() argument
5130 r = kvm_apic_set_state(vcpu, s); in kvm_vcpu_ioctl_set_lapic()
5133 update_cr8_intercept(vcpu); in kvm_vcpu_ioctl_set_lapic()
5138 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) in kvm_cpu_accept_dm_intr() argument
5146 if (kvm_cpu_has_extint(vcpu)) in kvm_cpu_accept_dm_intr()
5150 return (!lapic_in_kernel(vcpu) || in kvm_cpu_accept_dm_intr()
5151 kvm_apic_accept_pic_intr(vcpu)); in kvm_cpu_accept_dm_intr()
5154 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) in kvm_vcpu_ready_for_interrupt_injection() argument
5163 return (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
5164 kvm_cpu_accept_dm_intr(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
5165 !kvm_event_needs_reinjection(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
5166 !kvm_is_exception_pending(vcpu)); in kvm_vcpu_ready_for_interrupt_injection()
5169 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
5175 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
5176 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
5177 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
5185 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
5188 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
5191 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
5192 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
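
kvm_vcpu_ioctl_interrupt() backs the KVM_INTERRUPT vCPU ioctl, which is only meaningful when the interrupt controller lives in userspace; kvm_vcpu_ready_for_interrupt_injection() above is what ends up in kvm_run->ready_for_interrupt_injection. A hedged userspace sketch (the helper name and retry policy are illustrative, not prescribed by KVM):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* "run" is the mmap'ed kvm_run area for this vCPU; "vector" is the vector
 * to deliver.  If the vCPU cannot accept the interrupt yet, request an
 * interrupt-window exit and try again on a later exit. */
static int maybe_inject_irq(int vcpu_fd, struct kvm_run *run, unsigned int vector)
{
	struct kvm_interrupt irq = { .irq = vector };

	if (!run->ready_for_interrupt_injection || !run->if_flag) {
		run->request_interrupt_window = 1;
		return 0;
	}

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
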
5196 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_nmi() argument
5198 kvm_inject_nmi(vcpu); in kvm_vcpu_ioctl_nmi()
5203 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, in vcpu_ioctl_tpr_access_reporting() argument
5208 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
5212 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_setup_mce() argument
5224 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
5227 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5230 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5232 vcpu->arch.mci_ctl2_banks[bank] = 0; in kvm_vcpu_ioctl_x86_setup_mce()
5235 kvm_apic_after_set_mcg_cap(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
5237 kvm_x86_call(setup_mce)(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
5259 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks) in kvm_vcpu_x86_set_ucna() argument
5261 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_x86_set_ucna()
5266 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_x86_set_ucna()
5269 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) in kvm_vcpu_x86_set_ucna()
5272 if (lapic_in_kernel(vcpu)) in kvm_vcpu_x86_set_ucna()
5273 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); in kvm_vcpu_x86_set_ucna()
5278 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_mce() argument
5281 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
5283 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
5291 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks); in kvm_vcpu_ioctl_x86_set_mce()
5298 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5307 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
5308 !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) { in kvm_vcpu_ioctl_x86_set_mce()
5309 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_mce()
5316 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
5318 kvm_queue_exception(vcpu, MC_VECTOR); in kvm_vcpu_ioctl_x86_set_mce()
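
kvm_vcpu_ioctl_x86_setup_mce() and kvm_vcpu_ioctl_x86_set_mce() back the KVM_X86_SETUP_MCE and KVM_X86_SET_MCE ioctls: userspace first declares mcg_cap (bank count plus capability bits) and may later inject machine checks. A rough sketch; the MCG_* bit values below are copied by hand since they are not UAPI, and whatever is chosen must stay within what KVM_X86_GET_MCE_CAP_SUPPORTED reports:

#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MCG_CTL_P	(1ULL << 8)	/* MCG_CTL register present */
#define MCG_SER_P	(1ULL << 24)	/* software error recovery supported */

static int vcpu_setup_mce(int vcpu_fd, unsigned int nr_banks)
{
	__u64 mcg_cap = nr_banks | MCG_CTL_P | MCG_SER_P;

	return ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
}
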
5331 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_vcpu_events() argument
5336 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5339 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5340 process_smi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5350 if (vcpu->arch.exception_vmexit.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5351 !vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5352 !vcpu->arch.exception.injected) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5353 ex = &vcpu->arch.exception_vmexit; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5355 ex = &vcpu->arch.exception; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5365 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5367 kvm_deliver_exception_payload(vcpu, ex); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5385 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5395 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5396 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5397 events->interrupt.shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5399 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5400 events->nmi.pending = kvm_get_nr_pending_nmis(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5401 events->nmi.masked = kvm_x86_call(get_nmi_mask)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5406 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5407 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5409 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5411 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5416 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5418 if (vcpu->kvm->arch.triple_fault_event) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
5419 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5424 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events() argument
5436 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5454 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5457 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5467 vcpu->arch.exception_from_userspace = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5469 vcpu->arch.exception_vmexit.pending = false; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5471 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5472 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5473 vcpu->arch.exception.vector = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5474 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5475 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5476 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5477 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5479 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5480 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5481 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5483 kvm_x86_call(set_interrupt_shadow)(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
5486 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5488 vcpu->arch.nmi_pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5489 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5491 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5493 kvm_x86_call(set_nmi_mask)(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5496 lapic_in_kernel(vcpu)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5497 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5501 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5502 kvm_leave_nested(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5503 kvm_smm_changed(vcpu, events->smi.smm); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5506 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5510 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5512 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5521 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5523 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5525 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5530 if (!vcpu->kvm->arch.triple_fault_event) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5533 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5535 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5538 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
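
The get/set pair above implements KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS, which is how a VMM saves and restores pending exception, NMI and SMI state across migration. A small, read-only illustration (the helper name is mine):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_pending_events(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events))
		return -1;

	printf("nmi: injected=%d pending=%d masked=%d, in SMM=%d\n",
	       events.nmi.injected, events.nmi.pending, events.nmi.masked,
	       events.smi.smm);
	return 0;
}
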
5543 static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_debugregs() argument
5548 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_get_debugregs()
5549 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_get_debugregs()
5554 BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db)); in kvm_vcpu_ioctl_x86_get_debugregs()
5555 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) in kvm_vcpu_ioctl_x86_get_debugregs()
5556 dbgregs->db[i] = vcpu->arch.db[i]; in kvm_vcpu_ioctl_x86_get_debugregs()
5558 dbgregs->dr6 = vcpu->arch.dr6; in kvm_vcpu_ioctl_x86_get_debugregs()
5559 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
5563 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_debugregs() argument
5568 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_set_debugregs()
5569 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_set_debugregs()
5580 for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++) in kvm_vcpu_ioctl_x86_set_debugregs()
5581 vcpu->arch.db[i] = dbgregs->db[i]; in kvm_vcpu_ioctl_x86_set_debugregs()
5583 kvm_update_dr0123(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
5584 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
5585 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
5586 kvm_update_dr7(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
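
kvm_vcpu_ioctl_x86_get_debugregs() / _set_debugregs() back KVM_GET_DEBUGREGS and KVM_SET_DEBUGREGS, typically used to transplant the guest's DR0-DR3/DR6/DR7 during save/restore. A minimal sketch (the two-fd copy is purely illustrative):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int copy_debugregs(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_debugregs regs;

	if (ioctl(src_vcpu_fd, KVM_GET_DEBUGREGS, &regs))
		return -1;
	return ioctl(dst_vcpu_fd, KVM_SET_DEBUGREGS, &regs);
}
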
5592 static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave2() argument
5607 u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 | in kvm_vcpu_ioctl_x86_get_xsave2()
5610 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_get_xsave2()
5611 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_vcpu_ioctl_x86_get_xsave2()
5613 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size, in kvm_vcpu_ioctl_x86_get_xsave2()
5614 supported_xcr0, vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_get_xsave2()
5618 static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave() argument
5621 return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
5625 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xsave() argument
5628 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_set_xsave()
5629 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_vcpu_ioctl_x86_set_xsave()
5631 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_set_xsave()
5634 &vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_set_xsave()
5637 static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xcrs() argument
5640 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_get_xcrs()
5641 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_get_xcrs()
5652 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
5656 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xcrs() argument
5661 if (vcpu->kvm->arch.has_protected_state && in kvm_vcpu_ioctl_x86_set_xcrs()
5662 vcpu->arch.guest_state_protected) in kvm_vcpu_ioctl_x86_set_xcrs()
5674 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, in kvm_vcpu_ioctl_x86_set_xcrs()
5689 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) in kvm_set_guest_paused() argument
5691 if (!vcpu->arch.pv_time.active) in kvm_set_guest_paused()
5693 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
5694 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_guest_paused()
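
kvm_set_guest_paused() is reached from the KVM_KVMCLOCK_CTRL vCPU ioctl (and from the suspend notifier listed later); it sets the pvclock "guest stopped" request so guest watchdogs can discount host-side pauses. Illustrative usage, assuming the VMM keeps its vCPU fds in an array:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* KVM_KVMCLOCK_CTRL takes no argument; it fails with EINVAL if the vCPU
 * has not set up a paravirtual clock. */
static void notify_guest_paused(const int *vcpu_fds, int nr_vcpus)
{
	for (int i = 0; i < nr_vcpus; i++)
		ioctl(vcpu_fds[i], KVM_KVMCLOCK_CTRL, 0);
}
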
5698 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_has_attr() argument
5714 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_get_attr() argument
5723 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) in kvm_arch_tsc_get_attr()
5734 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_set_attr() argument
5738 struct kvm *kvm = vcpu->kvm; in kvm_arch_tsc_set_attr()
5753 matched = (vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5754 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5757 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; in kvm_arch_tsc_set_attr()
5760 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true); in kvm_arch_tsc_set_attr()
5773 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_device_attr() argument
5788 r = kvm_arch_tsc_has_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5791 r = kvm_arch_tsc_get_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5794 r = kvm_arch_tsc_set_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
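
kvm_vcpu_ioctl_device_attr() is the vCPU-level KVM_HAS/GET/SET_DEVICE_ATTR entry point; the only group routed through kvm_arch_tsc_*_attr() here is the TSC control group. A tentative sketch using the attribute names from Documentation/virt/kvm/devices/vcpu.rst (treat the exact constants as an assumption to verify against the headers in use):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_l1_tsc_offset(int vcpu_fd, uint64_t *offset)
{
	struct kvm_device_attr attr = {
		.group = KVM_VCPU_TSC_CTRL,
		.attr  = KVM_VCPU_TSC_OFFSET,
		.addr  = (__u64)(uintptr_t)offset,
	};

	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;
	return ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);
}
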
5801 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
5815 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
5817 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
5827 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
5840 return kvm_x86_call(enable_l2_tlb_flush)(vcpu); in kvm_vcpu_ioctl_enable_cap()
5843 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
5847 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
5857 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
5868 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
5874 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5881 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
5892 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5900 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
5909 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
5913 r = kvm_vcpu_ioctl_nmi(vcpu); in kvm_arch_vcpu_ioctl()
5917 r = kvm_inject_smi(vcpu); in kvm_arch_vcpu_ioctl()
5927 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5937 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
5948 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
5959 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5960 r = msr_io(vcpu, argp, do_get_msr, 1); in kvm_arch_vcpu_ioctl()
5961 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5965 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5966 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
5967 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5976 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); in kvm_arch_vcpu_ioctl()
5990 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5995 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5996 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); in kvm_arch_vcpu_ioctl()
5997 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
6006 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); in kvm_arch_vcpu_ioctl()
6015 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); in kvm_arch_vcpu_ioctl()
6021 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
6036 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl()
6037 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
6038 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl()
6044 r = kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
6063 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
6068 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) in kvm_arch_vcpu_ioctl()
6076 r = kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
6087 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
6095 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
6100 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
6107 r = kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); in kvm_arch_vcpu_ioctl()
6125 r = kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
6143 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
6159 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) in kvm_arch_vcpu_ioctl()
6165 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
6169 r = kvm_set_guest_paused(vcpu); in kvm_arch_vcpu_ioctl()
6178 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
6194 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
6238 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
6239 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
6240 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
6245 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); in kvm_arch_vcpu_ioctl()
6255 r = kvm_xen_vcpu_get_attr(vcpu, &xva); in kvm_arch_vcpu_ioctl()
6266 r = kvm_xen_vcpu_set_attr(vcpu, &xva); in kvm_arch_vcpu_ioctl()
6272 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl()
6273 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl()
6280 __get_sregs2(vcpu, u.sregs2); in kvm_arch_vcpu_ioctl()
6289 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl()
6290 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl()
6299 r = __set_sregs2(vcpu, u.sregs2); in kvm_arch_vcpu_ioctl()
6305 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); in kvm_arch_vcpu_ioctl()
6313 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
6317 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
6491 struct kvm_vcpu *vcpu; in kvm_arch_sync_dirty_log() local
6497 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_sync_dirty_log()
6498 kvm_vcpu_kick(vcpu); in kvm_arch_sync_dirty_log()
6937 struct kvm_vcpu *vcpu; in kvm_arch_suspend_notifier() local
6941 * Ignore the return, marking the guest paused only "fails" if the vCPU in kvm_arch_suspend_notifier()
6944 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_suspend_notifier()
6945 (void)kvm_set_guest_paused(vcpu); in kvm_arch_suspend_notifier()
7501 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, in vcpu_mmio_write() argument
7509 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_write()
7510 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
7511 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_write()
7522 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) in vcpu_mmio_read() argument
7529 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_read()
7530 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
7532 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_read()
7544 void kvm_set_segment(struct kvm_vcpu *vcpu, in kvm_set_segment() argument
7547 kvm_x86_call(set_segment)(vcpu, var, seg); in kvm_set_segment()
7550 void kvm_get_segment(struct kvm_vcpu *vcpu, in kvm_get_segment() argument
7553 kvm_x86_call(get_segment)(vcpu, var, seg); in kvm_get_segment()
7556 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, in translate_nested_gpa() argument
7559 struct kvm_mmu *mmu = vcpu->arch.mmu; in translate_nested_gpa()
7562 BUG_ON(!mmu_is_nested(vcpu)); in translate_nested_gpa()
7566 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); in translate_nested_gpa()
7571 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_read() argument
7574 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7576 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
7577 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
7581 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_write() argument
7584 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7586 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
7588 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
7593 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_system() argument
7596 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7598 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
7602 struct kvm_vcpu *vcpu, u64 access, in kvm_read_guest_virt_helper() argument
7605 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7610 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_read_guest_virt_helper()
7617 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
7637 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() local
7638 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7639 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
7644 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
7652 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
7660 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, in kvm_read_guest_virt() argument
7664 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
7673 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, in kvm_read_guest_virt()
7682 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_std() local
7687 else if (kvm_x86_call(get_cpl)(vcpu) == 3) in emulator_read_std()
7690 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); in emulator_read_std()
7694 struct kvm_vcpu *vcpu, u64 access, in kvm_write_guest_virt_helper() argument
7697 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7702 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_write_guest_virt_helper()
7709 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); in kvm_write_guest_virt_helper()
7727 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_write_std() local
7732 else if (kvm_x86_call(get_cpl)(vcpu) == 3) in emulator_write_std()
7735 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in emulator_write_std()
7739 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, in kvm_write_guest_virt_system() argument
7743 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
7745 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in kvm_write_guest_virt_system()
7750 static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, in kvm_check_emulate_insn() argument
7753 return kvm_x86_call(check_emulate_instruction)(vcpu, emul_type, in kvm_check_emulate_insn()
7757 int handle_ud(struct kvm_vcpu *vcpu) in handle_ud() argument
7766 r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0); in handle_ud()
7771 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), in handle_ud()
7775 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF); in handle_ud()
7776 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); in handle_ud()
7780 return kvm_emulate_instruction(vcpu, emul_type); in handle_ud()
7784 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_is_mmio_gpa() argument
7791 if (vcpu_match_mmio_gpa(vcpu, gpa)) { in vcpu_is_mmio_gpa()
7799 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_mmio_gva_to_gpa() argument
7803 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
7804 u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
7812 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || in vcpu_mmio_gva_to_gpa()
7813 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
7814 vcpu->arch.mmio_access, 0, access))) { in vcpu_mmio_gva_to_gpa()
7815 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
7821 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
7826 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); in vcpu_mmio_gva_to_gpa()
7829 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, in emulator_write_phys() argument
7834 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); in emulator_write_phys()
7837 kvm_page_track_write(vcpu, gpa, val, bytes); in emulator_write_phys()
7842 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
7844 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
7846 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7848 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7853 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) in read_prepare() argument
7855 if (vcpu->mmio_read_completed) { in read_prepare()
7857 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
7858 vcpu->mmio_read_completed = 0; in read_prepare()
7865 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in read_emulate() argument
7868 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); in read_emulate()
7871 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in write_emulate() argument
7874 return emulator_write_phys(vcpu, gpa, val, bytes); in write_emulate()
7877 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) in write_mmio() argument
7880 return vcpu_mmio_write(vcpu, gpa, bytes, val); in write_mmio()
7883 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in read_exit_mmio() argument
7890 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in write_exit_mmio() argument
7893 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
7895 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
7916 struct kvm_vcpu *vcpu, in emulator_read_write_onepage() argument
7923 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
7935 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); in emulator_read_write_onepage()
7937 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); in emulator_read_write_onepage()
7942 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
7948 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
7956 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
7957 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
7970 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_write() local
7975 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
7978 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
7986 vcpu, ops); in emulator_read_write()
7998 vcpu, ops); in emulator_read_write()
8002 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
8005 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
8007 vcpu->mmio_needed = 1; in emulator_read_write()
8008 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
8010 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
8011 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
8012 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
8013 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
8015 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
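
When emulator_read_write() cannot complete an access against an in-kernel device it fills vcpu->run->mmio and exits to userspace with KVM_EXIT_MMIO; complete_emulated_mmio later resumes the instruction. A minimal userspace handler sketch (the reads-as-zero behaviour stands in for a real device model):

#include <stdio.h>
#include <string.h>
#include <linux/kvm.h>

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->mmio.is_write)
		printf("mmio write 0x%llx, %u bytes\n",
		       (unsigned long long)run->mmio.phys_addr, run->mmio.len);
	else
		memset(run->mmio.data, 0, sizeof(run->mmio.data));

	/* The next KVM_RUN consumes run->mmio.data for reads and resumes
	 * the interrupted instruction. */
}
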
8048 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_cmpxchg_emulated() local
8058 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); in emulator_cmpxchg_emulated()
8076 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); in emulator_cmpxchg_emulated()
8109 kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa)); in emulator_cmpxchg_emulated()
8114 kvm_page_track_write(vcpu, gpa, new, bytes); in emulator_cmpxchg_emulated()
8124 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_in_out() argument
8131 WARN_ON_ONCE(vcpu->arch.pio.count); in emulator_pio_in_out()
8134 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data); in emulator_pio_in_out()
8136 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data); in emulator_pio_in_out()
8156 vcpu->arch.pio.port = port; in emulator_pio_in_out()
8157 vcpu->arch.pio.in = in; in emulator_pio_in_out()
8158 vcpu->arch.pio.count = count; in emulator_pio_in_out()
8159 vcpu->arch.pio.size = size; in emulator_pio_in_out()
8162 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in_out()
8164 memcpy(vcpu->arch.pio_data, data, size * count); in emulator_pio_in_out()
8166 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
8167 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
8168 vcpu->run->io.size = size; in emulator_pio_in_out()
8169 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
8170 vcpu->run->io.count = count; in emulator_pio_in_out()
8171 vcpu->run->io.port = port; in emulator_pio_in_out()
8175 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, in emulator_pio_in() argument
8178 int r = emulator_pio_in_out(vcpu, size, port, val, count, true); in emulator_pio_in()
8185 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) in complete_emulator_pio_in() argument
8187 int size = vcpu->arch.pio.size; in complete_emulator_pio_in()
8188 unsigned int count = vcpu->arch.pio.count; in complete_emulator_pio_in()
8189 memcpy(val, vcpu->arch.pio_data, size * count); in complete_emulator_pio_in()
8190 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); in complete_emulator_pio_in()
8191 vcpu->arch.pio.count = 0; in complete_emulator_pio_in()
8198 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_in_emulated() local
8199 if (vcpu->arch.pio.count) { in emulator_pio_in_emulated()
8203 * can modify ECX before rerunning the vCPU. Ignore any such in emulator_pio_in_emulated()
8207 complete_emulator_pio_in(vcpu, val); in emulator_pio_in_emulated()
8211 return emulator_pio_in(vcpu, size, port, val, count); in emulator_pio_in_emulated()
8214 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_out() argument
8219 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); in emulator_pio_out()
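
emulator_pio_in_out() stores the request in vcpu->run->io and the payload in the pio_data page, then exits with KVM_EXIT_IO; the payload is visible to userspace inside the same kvm_run mapping at run->io.data_offset. A hedged sketch of the matching run-loop leg (only the OUT direction is handled):

#include <stdio.h>
#include <stdint.h>
#include <linux/kvm.h>

static void handle_io_exit(struct kvm_run *run)
{
	uint8_t *data = (uint8_t *)run + run->io.data_offset;

	if (run->io.direction == KVM_EXIT_IO_OUT && run->io.size == 1) {
		for (uint32_t i = 0; i < run->io.count; i++)
			printf("port 0x%x <- 0x%02x\n", run->io.port, data[i]);
	}
	/* For KVM_EXIT_IO_IN, fill "data" before the next KVM_RUN; the
	 * completion path above copies it back into guest registers. */
}
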
8229 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) in get_segment_base() argument
8231 return kvm_x86_call(get_segment_base)(vcpu, seg); in get_segment_base()
8239 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd_noskip() argument
8241 if (!need_emulate_wbinvd(vcpu)) in kvm_emulate_wbinvd_noskip()
8247 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
8248 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
8251 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
8257 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd() argument
8259 kvm_emulate_wbinvd_noskip(vcpu); in kvm_emulate_wbinvd()
8260 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
8290 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_cr() local
8295 value = kvm_read_cr0(vcpu); in emulator_get_cr()
8298 value = vcpu->arch.cr2; in emulator_get_cr()
8301 value = kvm_read_cr3(vcpu); in emulator_get_cr()
8304 value = kvm_read_cr4(vcpu); in emulator_get_cr()
8307 value = kvm_get_cr8(vcpu); in emulator_get_cr()
8319 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_cr() local
8324 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); in emulator_set_cr()
8327 vcpu->arch.cr2 = val; in emulator_set_cr()
8330 res = kvm_set_cr3(vcpu, val); in emulator_set_cr()
8333 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); in emulator_set_cr()
8336 res = kvm_set_cr8(vcpu, val); in emulator_set_cr()
8417 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_segment() local
8439 kvm_set_segment(vcpu, &var, seg); in emulator_set_segment()
8446 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_msr_with_filter() local
8449 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); in emulator_get_msr_with_filter()
8454 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, in emulator_get_msr_with_filter()
8469 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_msr_with_filter() local
8472 r = kvm_set_msr_with_filter(vcpu, msr_index, data); in emulator_set_msr_with_filter()
8477 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, in emulator_set_msr_with_filter()
8662 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) in toggle_interruptibility() argument
8664 u32 int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in toggle_interruptibility()
8675 kvm_x86_call(set_interrupt_shadow)(vcpu, mask); in toggle_interruptibility()
8677 kvm_make_request(KVM_REQ_EVENT, vcpu); in toggle_interruptibility()
8681 static void inject_emulated_exception(struct kvm_vcpu *vcpu) in inject_emulated_exception() argument
8683 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
8686 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
8688 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
8691 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
8694 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) in alloc_emulate_ctxt() argument
8700 pr_err("failed to allocate vcpu's emulator\n"); in alloc_emulate_ctxt()
8704 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
8706 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
8711 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) in init_emulate_ctxt() argument
8713 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
8716 kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
8719 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
8722 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
8723 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
8725 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : in init_emulate_ctxt()
8734 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
8737 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) in kvm_inject_realmode_interrupt() argument
8739 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
8742 init_emulate_ctxt(vcpu); in kvm_inject_realmode_interrupt()
8750 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_inject_realmode_interrupt()
8753 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
8754 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
8759 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, in prepare_emulation_failure_exit() argument
8762 struct kvm_run *run = vcpu->run; in prepare_emulation_failure_exit()
8772 kvm_x86_call(get_exit_info)(vcpu, (u32 *)&info[0], &info[1], &info[2], in prepare_emulation_failure_exit()
8809 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) in prepare_emulation_ctxt_failure_exit() argument
8811 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in prepare_emulation_ctxt_failure_exit()
8813 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, in prepare_emulation_ctxt_failure_exit()
8817 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, in __kvm_prepare_emulation_failure_exit() argument
8820 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); in __kvm_prepare_emulation_failure_exit()
8824 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) in kvm_prepare_emulation_failure_exit() argument
8826 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); in kvm_prepare_emulation_failure_exit()
8830 void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa) in kvm_prepare_event_vectoring_exit() argument
8833 struct kvm_run *run = vcpu->run; in kvm_prepare_event_vectoring_exit()
8837 kvm_x86_call(get_exit_info)(vcpu, &reason, &info1, &info2, in kvm_prepare_event_vectoring_exit()
8844 run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu; in kvm_prepare_event_vectoring_exit()
8852 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) in handle_emulation_failure() argument
8854 struct kvm *kvm = vcpu->kvm; in handle_emulation_failure()
8856 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
8857 trace_kvm_emulate_insn_failed(vcpu); in handle_emulation_failure()
8860 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in handle_emulation_failure()
8866 prepare_emulation_ctxt_failure_exit(vcpu); in handle_emulation_failure()
8870 kvm_queue_exception(vcpu, UD_VECTOR); in handle_emulation_failure()
8872 if (!is_guest_mode(vcpu) && kvm_x86_call(get_cpl)(vcpu) == 0) { in handle_emulation_failure()
8873 prepare_emulation_ctxt_failure_exit(vcpu); in handle_emulation_failure()
8880 static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu, in kvm_unprotect_and_retry_on_failure() argument
8892 * the vCPU into an infinite loop of page faults. E.g. KVM will create in kvm_unprotect_and_retry_on_failure()
8906 __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true); in kvm_unprotect_and_retry_on_failure()
8909 * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible in kvm_unprotect_and_retry_on_failure()
8918 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
8919 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
8936 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) in kvm_vcpu_do_singlestep() argument
8938 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
8940 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
8942 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
8947 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); in kvm_vcpu_do_singlestep()
8951 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) in kvm_skip_emulated_instruction() argument
8953 unsigned long rflags = kvm_x86_call(get_rflags)(vcpu); in kvm_skip_emulated_instruction()
8956 r = kvm_x86_call(skip_emulated_instruction)(vcpu); in kvm_skip_emulated_instruction()
8960 kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED); in kvm_skip_emulated_instruction()
8971 r = kvm_vcpu_do_singlestep(vcpu); in kvm_skip_emulated_instruction()
8976 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu) in kvm_is_code_breakpoint_inhibited() argument
8978 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF) in kvm_is_code_breakpoint_inhibited()
8985 if (!guest_cpuid_is_intel_compatible(vcpu)) in kvm_is_code_breakpoint_inhibited()
8988 return kvm_x86_call(get_interrupt_shadow)(vcpu) & KVM_X86_SHADOW_INT_MOV_SS; in kvm_is_code_breakpoint_inhibited()
8991 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, in kvm_vcpu_check_code_breakpoint() argument
9014 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_code_breakpoint()
9015 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
9016 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_code_breakpoint()
9017 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_code_breakpoint()
9019 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
9020 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
9032 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
9033 !kvm_is_code_breakpoint_inhibited(vcpu)) { in kvm_vcpu_check_code_breakpoint()
9034 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_code_breakpoint()
9036 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
9037 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
9040 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); in kvm_vcpu_check_code_breakpoint()
9090 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, in x86_decode_emulated_instruction() argument
9093 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
9096 init_emulate_ctxt(vcpu); in x86_decode_emulated_instruction()
9100 trace_kvm_emulate_insn_start(vcpu); in x86_decode_emulated_instruction()
9101 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
9107 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in x86_emulate_instruction() argument
9111 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
9115 (WARN_ON_ONCE(is_guest_mode(vcpu)) || in x86_emulate_instruction()
9119 r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len); in x86_emulate_instruction()
9124 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa, in x86_emulate_instruction()
9129 kvm_prepare_event_vectoring_exit(vcpu, cr2_or_gpa); in x86_emulate_instruction()
9134 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
9137 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
9140 kvm_clear_exception_queue(vcpu); in x86_emulate_instruction()
9147 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r)) in x86_emulate_instruction()
9150 r = x86_decode_emulated_instruction(vcpu, emulation_type, in x86_emulate_instruction()
9155 kvm_queue_exception(vcpu, UD_VECTOR); in x86_emulate_instruction()
9158 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa, in x86_emulate_instruction()
9170 inject_emulated_exception(vcpu); in x86_emulate_instruction()
9173 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
9179 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in x86_emulate_instruction()
9200 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
9202 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
9209 * and retry the instruction, as the vCPU is likely no longer using the in x86_emulate_instruction()
9214 kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa)) in x86_emulate_instruction()
9219 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
9220 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
9230 if (vcpu->arch.mmu->root_role.direct) { in x86_emulate_instruction()
9245 if (kvm_unprotect_and_retry_on_failure(vcpu, cr2_or_gpa, in x86_emulate_instruction()
9249 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
9253 WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write); in x86_emulate_instruction()
9254 vcpu->mmio_needed = false; in x86_emulate_instruction()
9256 inject_emulated_exception(vcpu); in x86_emulate_instruction()
9257 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
9258 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
9260 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
9263 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
9266 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
9267 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
9269 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
9272 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
9273 } else if (vcpu->arch.complete_userspace_io) { in x86_emulate_instruction()
9283 unsigned long rflags = kvm_x86_call(get_rflags)(vcpu); in x86_emulate_instruction()
9284 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
9285 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
9294 kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED); in x86_emulate_instruction()
9296 kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED); in x86_emulate_instruction()
9297 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
9298 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
9299 r = kvm_vcpu_do_singlestep(vcpu); in x86_emulate_instruction()
9300 kvm_x86_call(update_emulated_instruction)(vcpu); in x86_emulate_instruction()
9301 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
9311 kvm_make_request(KVM_REQ_EVENT, vcpu); in x86_emulate_instruction()
9313 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
9318 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) in kvm_emulate_instruction() argument
9320 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); in kvm_emulate_instruction()
9324 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, in kvm_emulate_instruction_from_buffer() argument
9327 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); in kvm_emulate_instruction_from_buffer()
9331 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) in complete_fast_pio_out_port_0x7e() argument
9333 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
9337 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) in complete_fast_pio_out() argument
9339 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
9341 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
9344 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_out()
9347 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_out() argument
9350 unsigned long val = kvm_rax_read(vcpu); in kvm_fast_pio_out()
9351 int ret = emulator_pio_out(vcpu, size, port, &val, 1); in kvm_fast_pio_out()
9361 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
9362 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
9364 kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio_out()
9366 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
9367 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
9372 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) in complete_fast_pio_in() argument
9377 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
9379 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
9380 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
9385 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
9387 complete_emulator_pio_in(vcpu, &val); in complete_fast_pio_in()
9388 kvm_rax_write(vcpu, val); in complete_fast_pio_in()
9390 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_in()
9393 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_in() argument
9400 val = (size < 4) ? kvm_rax_read(vcpu) : 0; in kvm_fast_pio_in()
9402 ret = emulator_pio_in(vcpu, size, port, &val, 1); in kvm_fast_pio_in()
9404 kvm_rax_write(vcpu, val); in kvm_fast_pio_in()
9408 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
9409 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
9414 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) in kvm_fast_pio() argument
9419 ret = kvm_fast_pio_in(vcpu, size, port); in kvm_fast_pio()
9421 ret = kvm_fast_pio_out(vcpu, size, port); in kvm_fast_pio()
9422 return ret && kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio()
9481 struct kvm_vcpu *vcpu; in __kvmclock_cpufreq_notifier() local
9496 * the TSC for each VCPU. We must flag these local variables in __kvmclock_cpufreq_notifier()
9515 * anytime after the setting of the VCPU's request bit, the in __kvmclock_cpufreq_notifier()
9528 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvmclock_cpufreq_notifier()
9529 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
9531 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in __kvmclock_cpufreq_notifier()
9532 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
9612 struct kvm_vcpu *vcpu; in pvclock_gtod_update_fn() local
9617 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
9618 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in pvclock_gtod_update_fn()
9722 * vCPU's FPU state as a fxregs_state struct. in kvm_x86_vendor_init()
9884 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, in kvm_pv_clock_pairing() argument
9899 if (vcpu->arch.tsc_always_catchup) in kvm_pv_clock_pairing()
9907 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); in kvm_pv_clock_pairing()
9912 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
9921 * kvm_pv_kick_cpu_op: Kick a vcpu.
9923 * @apicid - apicid of vcpu to be kicked.
9947 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) in kvm_vcpu_apicv_activated() argument
9949 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); in kvm_vcpu_apicv_activated()
9951 kvm_x86_call(vcpu_get_apicv_inhibit_reasons)(vcpu); in kvm_vcpu_apicv_activated()
9982 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) in kvm_sched_yield() argument
9987 vcpu->stat.directed_yield_attempted++; in kvm_sched_yield()
9993 map = rcu_dereference(vcpu->kvm->arch.apic_map); in kvm_sched_yield()
9996 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
10004 if (vcpu == target) in kvm_sched_yield()
10010 vcpu->stat.directed_yield_successful++; in kvm_sched_yield()
10016 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) in complete_hypercall_exit() argument
10018 u64 ret = vcpu->run->hypercall.ret; in complete_hypercall_exit()
10020 if (!is_64_bit_hypercall(vcpu)) in complete_hypercall_exit()
10022 kvm_rax_write(vcpu, ret); in complete_hypercall_exit()
10023 return kvm_skip_emulated_instruction(vcpu); in complete_hypercall_exit()
10026 int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr, in ____kvm_emulate_hypercall() argument
10034 ++vcpu->stat.hypercalls; in ____kvm_emulate_hypercall()
10058 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT)) in ____kvm_emulate_hypercall()
10061 kvm_pv_kick_cpu_op(vcpu->kvm, a1); in ____kvm_emulate_hypercall()
10062 kvm_sched_yield(vcpu, a1); in ____kvm_emulate_hypercall()
10067 ret = kvm_pv_clock_pairing(vcpu, a0, a1); in ____kvm_emulate_hypercall()
10071 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) in ____kvm_emulate_hypercall()
10074 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in ____kvm_emulate_hypercall()
10077 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) in ____kvm_emulate_hypercall()
10080 kvm_sched_yield(vcpu, a0); in ____kvm_emulate_hypercall()
10087 if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) in ____kvm_emulate_hypercall()
10096 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in ____kvm_emulate_hypercall()
10097 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in ____kvm_emulate_hypercall()
10100 * assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that in ____kvm_emulate_hypercall()
10102 * vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU. in ____kvm_emulate_hypercall()
10104 vcpu->run->hypercall.ret = 0; in ____kvm_emulate_hypercall()
10105 vcpu->run->hypercall.args[0] = gpa; in ____kvm_emulate_hypercall()
10106 vcpu->run->hypercall.args[1] = npages; in ____kvm_emulate_hypercall()
10107 vcpu->run->hypercall.args[2] = attrs; in ____kvm_emulate_hypercall()
10108 vcpu->run->hypercall.flags = 0; in ____kvm_emulate_hypercall()
10110 vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE; in ____kvm_emulate_hypercall()
10112 WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ); in ____kvm_emulate_hypercall()
10113 vcpu->arch.complete_userspace_io = complete_hypercall; in ____kvm_emulate_hypercall()
10122 vcpu->run->hypercall.ret = ret; in ____kvm_emulate_hypercall()
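
____kvm_emulate_hypercall() punts KVM_HC_MAP_GPA_RANGE to userspace by filling in run->hypercall and arranging for complete_hypercall to run on the next entry; the VMM is expected to leave its result in run->hypercall.ret, which KVM pre-zeroes to keep legacy userspace working. A hedged sketch of such a handler; do_map_gpa_range() stands in for whatever attribute update the VMM actually performs:

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <stdint.h>

/* Hypothetical VMM hook: apply shared/private attributes to a GPA range. */
extern int do_map_gpa_range(uint64_t gpa, uint64_t npages, uint64_t attrs);

static void handle_hypercall_exit(struct kvm_run *run)
{
	if (run->hypercall.nr != KVM_HC_MAP_GPA_RANGE) {
		/* Error value; the exact convention is VMM/guest policy. */
		run->hypercall.ret = (uint64_t)-1;
		return;
	}

	if (do_map_gpa_range(run->hypercall.args[0],	/* gpa    */
			     run->hypercall.args[1],	/* npages */
			     run->hypercall.args[2]))	/* attrs  */
		run->hypercall.ret = (uint64_t)-1;
	else
		run->hypercall.ret = 0;	/* guest sees this in RAX */
}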
10127 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) in kvm_emulate_hypercall() argument
10129 if (kvm_xen_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
10130 return kvm_xen_hypercall(vcpu); in kvm_emulate_hypercall()
10132 if (kvm_hv_hypercall_enabled(vcpu)) in kvm_emulate_hypercall()
10133 return kvm_hv_hypercall(vcpu); in kvm_emulate_hypercall()
10135 return __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi, in kvm_emulate_hypercall()
10136 is_64_bit_hypercall(vcpu), in kvm_emulate_hypercall()
10137 kvm_x86_call(get_cpl)(vcpu), in kvm_emulate_hypercall()
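
kvm_emulate_hypercall() is the landing point for the VMCALL/VMMCALL ABI: the hypercall number arrives in RAX, arguments in RBX/RCX/RDX/RSI, and the result is returned in RAX. A guest-side sketch of KVM_HC_SCHED_YIELD, the hypercall consumed by kvm_sched_yield() above; real guests go through the kvm_hypercall helpers in asm/kvm_para.h, which patch VMCALL to VMMCALL on AMD:

/* From include/uapi/linux/kvm_para.h. */
#define KVM_HC_SCHED_YIELD	11

/* Ask the host to boost the pCPU backing the vCPU with APIC ID @dest_apicid. */
static inline long kvm_sched_yield_hypercall(unsigned long dest_apicid)
{
	long ret;

	asm volatile("vmcall"		/* VMMCALL on AMD hardware */
		     : "=a"(ret)
		     : "a"(KVM_HC_SCHED_YIELD), "b"(dest_apicid)
		     : "memory");
	return ret;
}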
10144 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_fix_hypercall() local
10146 unsigned long rip = kvm_rip_read(vcpu); in emulator_fix_hypercall()
10152 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { in emulator_fix_hypercall()
10159 kvm_x86_call(patch_hypercall)(vcpu, instruction); in emulator_fix_hypercall()
10165 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) in dm_request_for_irq_injection() argument
10167 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
10168 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
10172 static void post_kvm_run_save(struct kvm_vcpu *vcpu) in post_kvm_run_save() argument
10174 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
10176 kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu); in post_kvm_run_save()
10177 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
10178 kvm_run->apic_base = vcpu->arch.apic_base; in post_kvm_run_save()
10181 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
10182 kvm_vcpu_ready_for_interrupt_injection(vcpu); in post_kvm_run_save()
10184 if (is_smm(vcpu)) in post_kvm_run_save()
10186 if (is_guest_mode(vcpu)) in post_kvm_run_save()
10190 static void update_cr8_intercept(struct kvm_vcpu *vcpu) in update_cr8_intercept() argument
10197 if (!lapic_in_kernel(vcpu)) in update_cr8_intercept()
10200 if (vcpu->arch.apic->apicv_active) in update_cr8_intercept()
10203 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
10204 max_irr = kvm_lapic_find_highest_irr(vcpu); in update_cr8_intercept()
10211 tpr = kvm_lapic_get_cr8(vcpu); in update_cr8_intercept()
10213 kvm_x86_call(update_cr8_intercept)(vcpu, tpr, max_irr); in update_cr8_intercept()
10217 int kvm_check_nested_events(struct kvm_vcpu *vcpu) in kvm_check_nested_events() argument
10219 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in kvm_check_nested_events()
10220 kvm_x86_ops.nested_ops->triple_fault(vcpu); in kvm_check_nested_events()
10224 return kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_check_nested_events()
10227 static void kvm_inject_exception(struct kvm_vcpu *vcpu) in kvm_inject_exception() argument
10230 * Suppress the error code if the vCPU is in Real Mode, as Real Mode in kvm_inject_exception()
10236 vcpu->arch.exception.has_error_code &= is_protmode(vcpu); in kvm_inject_exception()
10238 trace_kvm_inj_exception(vcpu->arch.exception.vector, in kvm_inject_exception()
10239 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
10240 vcpu->arch.exception.error_code, in kvm_inject_exception()
10241 vcpu->arch.exception.injected); in kvm_inject_exception()
10243 kvm_x86_call(inject_exception)(vcpu); in kvm_inject_exception()
10285 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, in kvm_check_and_inject_events() argument
10296 if (is_guest_mode(vcpu)) in kvm_check_and_inject_events()
10297 r = kvm_check_nested_events(vcpu); in kvm_check_and_inject_events()
10324 if (vcpu->arch.exception.injected) in kvm_check_and_inject_events()
10325 kvm_inject_exception(vcpu); in kvm_check_and_inject_events()
10326 else if (kvm_is_exception_pending(vcpu)) in kvm_check_and_inject_events()
10328 else if (vcpu->arch.nmi_injected) in kvm_check_and_inject_events()
10329 kvm_x86_call(inject_nmi)(vcpu); in kvm_check_and_inject_events()
10330 else if (vcpu->arch.interrupt.injected) in kvm_check_and_inject_events()
10331 kvm_x86_call(inject_irq)(vcpu, true); in kvm_check_and_inject_events()
10338 WARN_ON_ONCE(vcpu->arch.exception.injected && in kvm_check_and_inject_events()
10339 vcpu->arch.exception.pending); in kvm_check_and_inject_events()
10358 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || in kvm_check_and_inject_events()
10359 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10366 can_inject = !kvm_event_needs_reinjection(vcpu); in kvm_check_and_inject_events()
10368 if (vcpu->arch.exception.pending) { in kvm_check_and_inject_events()
10379 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) in kvm_check_and_inject_events()
10380 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | in kvm_check_and_inject_events()
10383 if (vcpu->arch.exception.vector == DB_VECTOR) { in kvm_check_and_inject_events()
10384 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); in kvm_check_and_inject_events()
10385 if (vcpu->arch.dr7 & DR7_GD) { in kvm_check_and_inject_events()
10386 vcpu->arch.dr7 &= ~DR7_GD; in kvm_check_and_inject_events()
10387 kvm_update_dr7(vcpu); in kvm_check_and_inject_events()
10391 kvm_inject_exception(vcpu); in kvm_check_and_inject_events()
10393 vcpu->arch.exception.pending = false; in kvm_check_and_inject_events()
10394 vcpu->arch.exception.injected = true; in kvm_check_and_inject_events()
10400 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) in kvm_check_and_inject_events()
10415 if (vcpu->arch.smi_pending) { in kvm_check_and_inject_events()
10416 r = can_inject ? kvm_x86_call(smi_allowed)(vcpu, true) : in kvm_check_and_inject_events()
10421 vcpu->arch.smi_pending = false; in kvm_check_and_inject_events()
10422 ++vcpu->arch.smi_count; in kvm_check_and_inject_events()
10423 enter_smm(vcpu); in kvm_check_and_inject_events()
10426 kvm_x86_call(enable_smi_window)(vcpu); in kvm_check_and_inject_events()
10430 if (vcpu->arch.nmi_pending) { in kvm_check_and_inject_events()
10431 r = can_inject ? kvm_x86_call(nmi_allowed)(vcpu, true) : in kvm_check_and_inject_events()
10436 --vcpu->arch.nmi_pending; in kvm_check_and_inject_events()
10437 vcpu->arch.nmi_injected = true; in kvm_check_and_inject_events()
10438 kvm_x86_call(inject_nmi)(vcpu); in kvm_check_and_inject_events()
10440 WARN_ON(kvm_x86_call(nmi_allowed)(vcpu, true) < 0); in kvm_check_and_inject_events()
10442 if (vcpu->arch.nmi_pending) in kvm_check_and_inject_events()
10443 kvm_x86_call(enable_nmi_window)(vcpu); in kvm_check_and_inject_events()
10446 if (kvm_cpu_has_injectable_intr(vcpu)) { in kvm_check_and_inject_events()
10447 r = can_inject ? kvm_x86_call(interrupt_allowed)(vcpu, true) : in kvm_check_and_inject_events()
10452 int irq = kvm_cpu_get_interrupt(vcpu); in kvm_check_and_inject_events()
10455 kvm_queue_interrupt(vcpu, irq, false); in kvm_check_and_inject_events()
10456 kvm_x86_call(inject_irq)(vcpu, false); in kvm_check_and_inject_events()
10457 WARN_ON(kvm_x86_call(interrupt_allowed)(vcpu, true) < 0); in kvm_check_and_inject_events()
10460 if (kvm_cpu_has_injectable_intr(vcpu)) in kvm_check_and_inject_events()
10461 kvm_x86_call(enable_irq_window)(vcpu); in kvm_check_and_inject_events()
10464 if (is_guest_mode(vcpu) && in kvm_check_and_inject_events()
10466 kvm_x86_ops.nested_ops->has_events(vcpu, true)) in kvm_check_and_inject_events()
10472 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an in kvm_check_and_inject_events()
10477 * vCPU into an infinite loop. Triple fault can be queued when running in kvm_check_and_inject_events()
10481 WARN_ON_ONCE(vcpu->arch.exception.pending || in kvm_check_and_inject_events()
10482 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10493 static void process_nmi(struct kvm_vcpu *vcpu) in process_nmi() argument
10499 * incoming NMIs as quickly as bare metal, e.g. if the vCPU is in process_nmi()
10507 if (kvm_x86_call(get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
10514 * tracked in vcpu->arch.nmi_pending. in process_nmi()
10516 if (kvm_x86_call(is_vnmi_pending)(vcpu)) in process_nmi()
10519 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
10520 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
10522 if (vcpu->arch.nmi_pending && in process_nmi()
10523 (kvm_x86_call(set_vnmi_pending)(vcpu))) in process_nmi()
10524 vcpu->arch.nmi_pending--; in process_nmi()
10526 if (vcpu->arch.nmi_pending) in process_nmi()
10527 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_nmi()
10531 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu) in kvm_get_nr_pending_nmis() argument
10533 return vcpu->arch.nmi_pending + in kvm_get_nr_pending_nmis()
10534 kvm_x86_call(is_vnmi_pending)(vcpu); in kvm_get_nr_pending_nmis()
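
process_nmi() collapses however many NMIs were queued concurrently into a bounded pending count (normally two: one being injected plus one latched), and kvm_get_nr_pending_nmis() adds back an NMI that vNMI-capable hardware may be holding. A simplified, standalone model of that collapsing, not KVM code, assuming the usual limit of two:

#include <stdatomic.h>
#include <stdio.h>

#define NMI_LIMIT 2	/* assumption: one NMI in flight + one latched */

struct toy_vcpu {
	atomic_uint nmi_queued;		/* bumped by other contexts */
	unsigned int nmi_pending;	/* consumed at injection time */
};

static void toy_process_nmi(struct toy_vcpu *v)
{
	/* Fold all queued NMIs into the bounded pending count. */
	v->nmi_pending += atomic_exchange(&v->nmi_queued, 0);
	if (v->nmi_pending > NMI_LIMIT)
		v->nmi_pending = NMI_LIMIT;
}

int main(void)
{
	struct toy_vcpu v = { 0 };

	atomic_store(&v.nmi_queued, 5);		/* five NMIs raced in */
	toy_process_nmi(&v);
	printf("pending NMIs: %u\n", v.nmi_pending);	/* prints 2, not 5 */
	return 0;
}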
10548 void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) in __kvm_vcpu_update_apicv() argument
10550 struct kvm_lapic *apic = vcpu->arch.apic; in __kvm_vcpu_update_apicv()
10553 if (!lapic_in_kernel(vcpu)) in __kvm_vcpu_update_apicv()
10556 down_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10560 activate = kvm_vcpu_apicv_activated(vcpu) && in __kvm_vcpu_update_apicv()
10561 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED); in __kvm_vcpu_update_apicv()
10567 kvm_apic_update_apicv(vcpu); in __kvm_vcpu_update_apicv()
10568 kvm_x86_call(refresh_apicv_exec_ctrl)(vcpu); in __kvm_vcpu_update_apicv()
10577 kvm_make_request(KVM_REQ_EVENT, vcpu); in __kvm_vcpu_update_apicv()
10581 up_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10585 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) in kvm_vcpu_update_apicv() argument
10587 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_update_apicv()
10592 * deleted if any vCPU has xAPIC virtualization and x2APIC enabled, but in kvm_vcpu_update_apicv()
10597 * the vCPU would incorrectly be able to access the vAPIC page via MMIO in kvm_vcpu_update_apicv()
10601 if (apic_x2apic_mode(vcpu->arch.apic) && in kvm_vcpu_update_apicv()
10603 kvm_inhibit_apic_access_page(vcpu); in kvm_vcpu_update_apicv()
10605 __kvm_vcpu_update_apicv(vcpu); in kvm_vcpu_update_apicv()
10661 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) in vcpu_scan_ioapic() argument
10663 if (!kvm_apic_present(vcpu)) in vcpu_scan_ioapic()
10666 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
10668 kvm_x86_call(sync_pir_to_irr)(vcpu); in vcpu_scan_ioapic()
10670 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
10671 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10672 else if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
10673 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10675 if (is_guest_mode(vcpu)) in vcpu_scan_ioapic()
10676 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
10678 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); in vcpu_scan_ioapic()
10681 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) in vcpu_load_eoi_exitmap() argument
10683 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
10687 if (to_hv_vcpu(vcpu)) { in vcpu_load_eoi_exitmap()
10691 vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
10692 to_hv_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
10693 kvm_x86_call(load_eoi_exitmap)(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
10698 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); in vcpu_load_eoi_exitmap()
10706 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_apic_access_page() argument
10708 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_reload_apic_access_page()
10711 kvm_x86_call(set_apic_access_page_addr)(vcpu); in kvm_vcpu_reload_apic_access_page()
10720 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) in vcpu_enter_guest() argument
10724 dm_request_for_irq_injection(vcpu) && in vcpu_enter_guest()
10725 kvm_cpu_accept_dm_intr(vcpu); in vcpu_enter_guest()
10730 if (kvm_request_pending(vcpu)) { in vcpu_enter_guest()
10731 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { in vcpu_enter_guest()
10736 if (kvm_dirty_ring_check_request(vcpu)) { in vcpu_enter_guest()
10741 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { in vcpu_enter_guest()
10742 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
10747 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) in vcpu_enter_guest()
10748 kvm_mmu_free_obsolete_roots(vcpu); in vcpu_enter_guest()
10749 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) in vcpu_enter_guest()
10750 __kvm_migrate_timers(vcpu); in vcpu_enter_guest()
10751 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
10752 kvm_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
10753 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
10754 kvm_gen_kvmclock_update(vcpu); in vcpu_enter_guest()
10755 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { in vcpu_enter_guest()
10756 r = kvm_guest_time_update(vcpu); in vcpu_enter_guest()
10760 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) in vcpu_enter_guest()
10761 kvm_mmu_sync_roots(vcpu); in vcpu_enter_guest()
10762 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) in vcpu_enter_guest()
10763 kvm_mmu_load_pgd(vcpu); in vcpu_enter_guest()
10770 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in vcpu_enter_guest()
10771 kvm_vcpu_flush_tlb_all(vcpu); in vcpu_enter_guest()
10773 kvm_service_local_tlb_flush_requests(vcpu); in vcpu_enter_guest()
10777 * flushing fails. Note, Hyper-V's flushing is per-vCPU, but in vcpu_enter_guest()
10782 if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) && in vcpu_enter_guest()
10783 kvm_hv_vcpu_flush_tlb(vcpu)) in vcpu_enter_guest()
10784 kvm_vcpu_flush_tlb_guest(vcpu); in vcpu_enter_guest()
10787 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { in vcpu_enter_guest()
10788 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
10792 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
10793 if (is_guest_mode(vcpu)) in vcpu_enter_guest()
10794 kvm_x86_ops.nested_ops->triple_fault(vcpu); in vcpu_enter_guest()
10796 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
10797 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
10798 vcpu->mmio_needed = 0; in vcpu_enter_guest()
10803 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { in vcpu_enter_guest()
10805 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
10809 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in vcpu_enter_guest()
10810 record_steal_time(vcpu); in vcpu_enter_guest()
10811 if (kvm_check_request(KVM_REQ_PMU, vcpu)) in vcpu_enter_guest()
10812 kvm_pmu_handle_event(vcpu); in vcpu_enter_guest()
10813 if (kvm_check_request(KVM_REQ_PMI, vcpu)) in vcpu_enter_guest()
10814 kvm_pmu_deliver_pmi(vcpu); in vcpu_enter_guest()
10816 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in vcpu_enter_guest()
10817 process_smi(vcpu); in vcpu_enter_guest()
10819 if (kvm_check_request(KVM_REQ_NMI, vcpu)) in vcpu_enter_guest()
10820 process_nmi(vcpu); in vcpu_enter_guest()
10821 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { in vcpu_enter_guest()
10822 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
10823 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
10824 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
10825 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
10826 vcpu->run->eoi.vector = in vcpu_enter_guest()
10827 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
10832 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) in vcpu_enter_guest()
10833 vcpu_scan_ioapic(vcpu); in vcpu_enter_guest()
10834 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) in vcpu_enter_guest()
10835 vcpu_load_eoi_exitmap(vcpu); in vcpu_enter_guest()
10836 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) in vcpu_enter_guest()
10837 kvm_vcpu_reload_apic_access_page(vcpu); in vcpu_enter_guest()
10839 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { in vcpu_enter_guest()
10840 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
10841 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
10842 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
10846 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { in vcpu_enter_guest()
10847 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
10848 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
10849 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
10853 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { in vcpu_enter_guest()
10854 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in vcpu_enter_guest()
10856 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
10857 vcpu->run->hyperv = hv_vcpu->exit; in vcpu_enter_guest()
10867 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) in vcpu_enter_guest()
10868 kvm_hv_process_stimers(vcpu); in vcpu_enter_guest()
10870 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) in vcpu_enter_guest()
10871 kvm_vcpu_update_apicv(vcpu); in vcpu_enter_guest()
10872 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) in vcpu_enter_guest()
10873 kvm_check_async_pf_completion(vcpu); in vcpu_enter_guest()
10874 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) in vcpu_enter_guest()
10875 kvm_x86_call(msr_filter_changed)(vcpu); in vcpu_enter_guest()
10877 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) in vcpu_enter_guest()
10878 kvm_x86_call(update_cpu_dirty_logging)(vcpu); in vcpu_enter_guest()
10880 if (kvm_check_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) { in vcpu_enter_guest()
10881 kvm_vcpu_reset(vcpu, true); in vcpu_enter_guest()
10882 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) { in vcpu_enter_guest()
10889 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || in vcpu_enter_guest()
10890 kvm_xen_has_interrupt(vcpu)) { in vcpu_enter_guest()
10891 ++vcpu->stat.req_event; in vcpu_enter_guest()
10892 r = kvm_apic_accept_events(vcpu); in vcpu_enter_guest()
10897 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
10902 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit); in vcpu_enter_guest()
10908 kvm_x86_call(enable_irq_window)(vcpu); in vcpu_enter_guest()
10910 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
10911 update_cr8_intercept(vcpu); in vcpu_enter_guest()
10912 kvm_lapic_sync_to_vapic(vcpu); in vcpu_enter_guest()
10916 r = kvm_mmu_reload(vcpu); in vcpu_enter_guest()
10923 kvm_x86_call(prepare_switch_to_guest)(vcpu); in vcpu_enter_guest()
10932 /* Store vcpu->apicv_active before vcpu->mode. */ in vcpu_enter_guest()
10933 smp_store_release(&vcpu->mode, IN_GUEST_MODE); in vcpu_enter_guest()
10935 kvm_vcpu_srcu_read_unlock(vcpu); in vcpu_enter_guest()
10946 * tables done while the VCPU is running. Please see the comment in vcpu_enter_guest()
10954 * target vCPU wasn't running). Do this regardless of the vCPU's APICv in vcpu_enter_guest()
10958 if (kvm_lapic_enabled(vcpu)) in vcpu_enter_guest()
10959 kvm_x86_call(sync_pir_to_irr)(vcpu); in vcpu_enter_guest()
10961 if (kvm_vcpu_exit_request(vcpu)) { in vcpu_enter_guest()
10962 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
10966 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_enter_guest()
10972 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
10978 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
10979 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); in vcpu_enter_guest()
10981 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
10983 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
10984 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
10985 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
10986 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
10988 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) in vcpu_enter_guest()
10989 kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6); in vcpu_enter_guest()
10994 vcpu->arch.host_debugctl = get_debugctlmsr(); in vcpu_enter_guest()
11000 * Assert that vCPU vs. VM APICv state is consistent. An APICv in vcpu_enter_guest()
11005 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && in vcpu_enter_guest()
11006 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); in vcpu_enter_guest()
11008 exit_fastpath = kvm_x86_call(vcpu_run)(vcpu, in vcpu_enter_guest()
11013 if (kvm_lapic_enabled(vcpu)) in vcpu_enter_guest()
11014 kvm_x86_call(sync_pir_to_irr)(vcpu); in vcpu_enter_guest()
11016 if (unlikely(kvm_vcpu_exit_request(vcpu))) { in vcpu_enter_guest()
11022 ++vcpu->stat.exits; in vcpu_enter_guest()
11031 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
11032 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
11033 kvm_x86_call(sync_dirty_debug_regs)(vcpu); in vcpu_enter_guest()
11034 kvm_update_dr0123(vcpu); in vcpu_enter_guest()
11035 kvm_update_dr7(vcpu); in vcpu_enter_guest()
11048 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
11049 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
11051 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
11059 if (vcpu->arch.xfd_no_write_intercept) in vcpu_enter_guest()
11062 kvm_x86_call(handle_exit_irqoff)(vcpu); in vcpu_enter_guest()
11064 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
11074 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ); in vcpu_enter_guest()
11076 ++vcpu->stat.exits; in vcpu_enter_guest()
11078 kvm_after_interrupt(vcpu); in vcpu_enter_guest()
11092 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_enter_guest()
11104 !vcpu->arch.guest_state_protected)) { in vcpu_enter_guest()
11105 unsigned long rip = kvm_rip_read(vcpu); in vcpu_enter_guest()
11109 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
11110 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in vcpu_enter_guest()
11112 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
11113 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
11118 r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath); in vcpu_enter_guest()
11123 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
11124 kvm_x86_call(cancel_injection)(vcpu); in vcpu_enter_guest()
11125 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
11126 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
11131 static bool kvm_vcpu_running(struct kvm_vcpu *vcpu) in kvm_vcpu_running() argument
11133 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
11134 !vcpu->arch.apf.halted); in kvm_vcpu_running()
11137 static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) in kvm_vcpu_has_events() argument
11139 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
11142 if (kvm_apic_has_pending_init_or_sipi(vcpu) && in kvm_vcpu_has_events()
11143 kvm_apic_init_sipi_allowed(vcpu)) in kvm_vcpu_has_events()
11146 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
11149 if (kvm_is_exception_pending(vcpu)) in kvm_vcpu_has_events()
11152 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_vcpu_has_events()
11153 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
11154 kvm_x86_call(nmi_allowed)(vcpu, false))) in kvm_vcpu_has_events()
11158 if (kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_vcpu_has_events()
11159 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
11160 kvm_x86_call(smi_allowed)(vcpu, false))) in kvm_vcpu_has_events()
11164 if (kvm_test_request(KVM_REQ_PMI, vcpu)) in kvm_vcpu_has_events()
11167 if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) in kvm_vcpu_has_events()
11170 if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)) in kvm_vcpu_has_events()
11173 if (kvm_hv_has_stimer_pending(vcpu)) in kvm_vcpu_has_events()
11176 if (is_guest_mode(vcpu) && in kvm_vcpu_has_events()
11178 kvm_x86_ops.nested_ops->has_events(vcpu, false)) in kvm_vcpu_has_events()
11181 if (kvm_xen_has_pending_events(vcpu)) in kvm_vcpu_has_events()
11187 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
11189 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); in kvm_arch_vcpu_runnable()
11193 static inline int vcpu_block(struct kvm_vcpu *vcpu) in vcpu_block() argument
11197 if (!kvm_arch_vcpu_runnable(vcpu)) { in vcpu_block()
11200 * the guest's timer may be a break event for the vCPU, and the in vcpu_block()
11205 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); in vcpu_block()
11207 kvm_lapic_switch_to_sw_timer(vcpu); in vcpu_block()
11209 kvm_vcpu_srcu_read_unlock(vcpu); in vcpu_block()
11210 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in vcpu_block()
11211 kvm_vcpu_halt(vcpu); in vcpu_block()
11213 kvm_vcpu_block(vcpu); in vcpu_block()
11214 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_block()
11217 kvm_lapic_switch_to_hv_timer(vcpu); in vcpu_block()
11220 * If the vCPU is not runnable, a signal or another host event in vcpu_block()
11222 * vCPU's activity state. in vcpu_block()
11224 if (!kvm_arch_vcpu_runnable(vcpu)) in vcpu_block()
11234 if (is_guest_mode(vcpu)) { in vcpu_block()
11235 int r = kvm_check_nested_events(vcpu); in vcpu_block()
11242 if (kvm_apic_accept_events(vcpu) < 0) in vcpu_block()
11244 switch(vcpu->arch.mp_state) { in vcpu_block()
11247 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); in vcpu_block()
11250 vcpu->arch.apf.halted = false; in vcpu_block()
11262 static int vcpu_run(struct kvm_vcpu *vcpu) in vcpu_run() argument
11266 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; in vcpu_run()
11270 * If another guest vCPU requests a PV TLB flush in the middle in vcpu_run()
11275 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
11276 if (kvm_vcpu_running(vcpu)) { in vcpu_run()
11277 r = vcpu_enter_guest(vcpu); in vcpu_run()
11279 r = vcpu_block(vcpu); in vcpu_run()
11285 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); in vcpu_run()
11286 if (kvm_xen_has_pending_events(vcpu)) in vcpu_run()
11287 kvm_xen_inject_pending_events(vcpu); in vcpu_run()
11289 if (kvm_cpu_has_pending_timer(vcpu)) in vcpu_run()
11290 kvm_inject_pending_timer_irqs(vcpu); in vcpu_run()
11292 if (dm_request_for_irq_injection(vcpu) && in vcpu_run()
11293 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { in vcpu_run()
11295 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
11296 ++vcpu->stat.request_irq_exits; in vcpu_run()
11301 kvm_vcpu_srcu_read_unlock(vcpu); in vcpu_run()
11302 r = xfer_to_guest_mode_handle_work(vcpu); in vcpu_run()
11303 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_run()
11312 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) in __kvm_emulate_halt() argument
11315 * The vCPU has halted, e.g. executed HLT. Update the run state if the in __kvm_emulate_halt()
11317 * state and halt the vCPU. Exit to userspace if the local APIC is in __kvm_emulate_halt()
11321 ++vcpu->stat.halt_exits; in __kvm_emulate_halt()
11322 if (lapic_in_kernel(vcpu)) { in __kvm_emulate_halt()
11323 if (kvm_vcpu_has_events(vcpu)) in __kvm_emulate_halt()
11325 kvm_set_mp_state(vcpu, state); in __kvm_emulate_halt()
11328 vcpu->run->exit_reason = reason; in __kvm_emulate_halt()
11333 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_halt_noskip() argument
11335 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); in kvm_emulate_halt_noskip()
11339 int kvm_emulate_halt(struct kvm_vcpu *vcpu) in kvm_emulate_halt() argument
11341 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_halt()
11346 return kvm_emulate_halt_noskip(vcpu) && ret; in kvm_emulate_halt()
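
__kvm_emulate_halt() only bounces out to userspace with KVM_EXIT_HLT when the local APIC is emulated there; with an in-kernel APIC the vCPU simply parks in KVM_MP_STATE_HALTED and vcpu_block() waits for a wakeup. A hedged sketch of the userspace-APIC case, where the VMM itself must wait for an interrupt source before resuming; wait_for_pending_interrupt() is a hypothetical VMM helper:

#include <linux/kvm.h>
#include <sys/ioctl.h>

extern void wait_for_pending_interrupt(void);	/* hypothetical */

static void handle_hlt_exit(int vcpu_fd)
{
	/*
	 * Only reached when the local APIC lives in userspace: block until
	 * the device model has an interrupt to deliver (e.g. via
	 * KVM_INTERRUPT), then resume the vCPU past the HLT.
	 */
	wait_for_pending_interrupt();
	ioctl(vcpu_fd, KVM_RUN, 0);
}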
11350 fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu) in handle_fastpath_hlt() argument
11354 kvm_vcpu_srcu_read_lock(vcpu); in handle_fastpath_hlt()
11355 ret = kvm_emulate_halt(vcpu); in handle_fastpath_hlt()
11356 kvm_vcpu_srcu_read_unlock(vcpu); in handle_fastpath_hlt()
11361 if (kvm_vcpu_running(vcpu)) in handle_fastpath_hlt()
11368 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) in kvm_emulate_ap_reset_hold() argument
11370 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_ap_reset_hold()
11372 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, in kvm_emulate_ap_reset_hold()
11377 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) in kvm_arch_dy_has_pending_interrupt() argument
11379 return kvm_vcpu_apicv_active(vcpu) && in kvm_arch_dy_has_pending_interrupt()
11380 kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu); in kvm_arch_dy_has_pending_interrupt()
11383 bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_preempted_in_kernel() argument
11385 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_preempted_in_kernel()
11388 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
11390 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
11393 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_arch_dy_runnable()
11395 kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_arch_dy_runnable()
11397 kvm_test_request(KVM_REQ_EVENT, vcpu)) in kvm_arch_dy_runnable()
11400 return kvm_arch_dy_has_pending_interrupt(vcpu); in kvm_arch_dy_runnable()
11403 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) in complete_emulated_io() argument
11405 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); in complete_emulated_io()
11408 static int complete_emulated_pio(struct kvm_vcpu *vcpu) in complete_emulated_pio() argument
11410 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
11412 return complete_emulated_io(vcpu); in complete_emulated_pio()
11433 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) in complete_emulated_mmio() argument
11435 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
11439 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
11442 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
11444 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
11450 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
11458 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
11459 vcpu->mmio_needed = 0; in complete_emulated_mmio()
11462 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11464 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
11465 return complete_emulated_io(vcpu); in complete_emulated_mmio()
11470 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11473 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
11474 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
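
complete_emulated_mmio() walks mmio_fragments[] so that a single guest access straddling a boundary becomes several KVM_EXIT_MMIO round trips; from userspace every round trip looks identical. A minimal sketch of the handler, with mmio_read()/mmio_write() standing in for the device model:

#include <linux/kvm.h>
#include <stdint.h>

/* Hypothetical device-model callbacks. */
extern void mmio_read(uint64_t gpa, void *data, uint32_t len);
extern void mmio_write(uint64_t gpa, const void *data, uint32_t len);

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->mmio.is_write)
		mmio_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	else
		/* KVM copies this back into the emulator on re-entry. */
		mmio_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);

	/*
	 * Nothing else to do: the next KVM_RUN invokes
	 * complete_emulated_mmio(), which either advances to the next
	 * fragment (another KVM_EXIT_MMIO) or resumes emulation.
	 */
}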
11479 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) in kvm_load_guest_fpu() argument
11482 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); in kvm_load_guest_fpu()
11487 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) in kvm_put_guest_fpu() argument
11489 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); in kvm_put_guest_fpu()
11490 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
11494 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
11496 struct kvm_queued_exception *ex = &vcpu->arch.exception; in kvm_arch_vcpu_ioctl_run()
11497 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
11501 r = kvm_mmu_post_init_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_run()
11505 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
11506 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
11508 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
11510 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
11511 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
11512 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
11520 * APIC timer to be active is if userspace stuffed vCPU state, in kvm_arch_vcpu_ioctl_run()
11521 * i.e. put the vCPU into a nonsensical state. Only an INIT in kvm_arch_vcpu_ioctl_run()
11522 * will transition the vCPU out of UNINITIALIZED (without more in kvm_arch_vcpu_ioctl_run()
11527 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
11528 kvm_vcpu_block(vcpu); in kvm_arch_vcpu_ioctl_run()
11529 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
11531 if (kvm_apic_accept_events(vcpu) < 0) { in kvm_arch_vcpu_ioctl_run()
11539 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
11544 sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm); in kvm_arch_vcpu_ioctl_run()
11552 r = sync_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
11558 if (!lapic_in_kernel(vcpu)) { in kvm_arch_vcpu_ioctl_run()
11559 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
11569 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && in kvm_arch_vcpu_ioctl_run()
11570 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11572 kvm_queue_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11578 vcpu->arch.exception_from_userspace = false; in kvm_arch_vcpu_ioctl_run()
11580 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
11581 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
11582 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
11583 r = cui(vcpu); in kvm_arch_vcpu_ioctl_run()
11587 WARN_ON_ONCE(vcpu->arch.pio.count); in kvm_arch_vcpu_ioctl_run()
11588 WARN_ON_ONCE(vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
11591 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
11596 r = kvm_x86_call(vcpu_pre_run)(vcpu); in kvm_arch_vcpu_ioctl_run()
11600 r = vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
11603 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
11604 if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected)) in kvm_arch_vcpu_ioctl_run()
11605 store_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
11606 post_kvm_run_save(vcpu); in kvm_arch_vcpu_ioctl_run()
11607 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
11609 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
11610 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
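
kvm_arch_vcpu_ioctl_run() is the kernel half of the KVM_RUN contract: userspace mmaps the kvm_run area, loops on the ioctl, and services each exit before re-entering. A self-contained sketch of the userspace half (all error handling trimmed); the guest is a few bytes of real-mode code that writes AL to port 0x3f8 and halts:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int main(void)
{
	/* mov $0x3f8, %dx ; out %al, (%dx) ; hlt */
	const uint8_t guest_code[] = { 0xba, 0xf8, 0x03, 0xee, 0xf4 };
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* One page of guest memory at GPA 0x1000, holding the code. */
	void *mem = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	memcpy(mem, guest_code, sizeof(guest_code));
	struct kvm_userspace_memory_region region = {
		.slot = 0, .guest_phys_addr = 0x1000,
		.memory_size = 0x1000, .userspace_addr = (uintptr_t)mem,
	};
	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);

	/* The shared kvm_run area the kernel fills on every exit. */
	int run_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu, 0);

	/* Real mode, CS base 0, start executing at GPA 0x1000. */
	struct kvm_sregs sregs;
	ioctl(vcpu, KVM_GET_SREGS, &sregs);
	sregs.cs.base = 0;
	sregs.cs.selector = 0;
	ioctl(vcpu, KVM_SET_SREGS, &sregs);
	struct kvm_regs regs = { .rip = 0x1000, .rax = 'A', .rflags = 0x2 };
	ioctl(vcpu, KVM_SET_REGS, &regs);

	for (;;) {
		ioctl(vcpu, KVM_RUN, 0);
		switch (run->exit_reason) {
		case KVM_EXIT_IO:
			if (run->io.direction == KVM_EXIT_IO_OUT)
				putchar(*((char *)run + run->io.data_offset));
			break;
		case KVM_EXIT_HLT:
			return 0;	/* guest executed HLT */
		default:
			fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
			return 1;
		}
	}
}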
11614 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __get_regs() argument
11616 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
11620 * back from emulation context to vcpu. Userspace shouldn't do in __get_regs()
11624 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
11625 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
11627 regs->rax = kvm_rax_read(vcpu); in __get_regs()
11628 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
11629 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
11630 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
11631 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
11632 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
11633 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
11634 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
11636 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
11637 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
11638 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
11639 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
11640 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
11641 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
11642 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
11643 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
11646 regs->rip = kvm_rip_read(vcpu); in __get_regs()
11647 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
11650 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
11652 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_get_regs()
11653 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_get_regs()
11656 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
11657 __get_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_get_regs()
11658 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
11662 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __set_regs() argument
11664 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
11665 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
11667 kvm_rax_write(vcpu, regs->rax); in __set_regs()
11668 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
11669 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
11670 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
11671 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
11672 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
11673 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
11674 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
11676 kvm_r8_write(vcpu, regs->r8); in __set_regs()
11677 kvm_r9_write(vcpu, regs->r9); in __set_regs()
11678 kvm_r10_write(vcpu, regs->r10); in __set_regs()
11679 kvm_r11_write(vcpu, regs->r11); in __set_regs()
11680 kvm_r12_write(vcpu, regs->r12); in __set_regs()
11681 kvm_r13_write(vcpu, regs->r13); in __set_regs()
11682 kvm_r14_write(vcpu, regs->r14); in __set_regs()
11683 kvm_r15_write(vcpu, regs->r15); in __set_regs()
11686 kvm_rip_write(vcpu, regs->rip); in __set_regs()
11687 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
11689 vcpu->arch.exception.pending = false; in __set_regs()
11690 vcpu->arch.exception_vmexit.pending = false; in __set_regs()
11692 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_regs()
11695 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
11697 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_set_regs()
11698 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_regs()
11701 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
11702 __set_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_set_regs()
11703 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
11707 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs_common() argument
11711 if (vcpu->arch.guest_state_protected) in __get_sregs_common()
11714 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs_common()
11715 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs_common()
11716 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs_common()
11717 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs_common()
11718 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs_common()
11719 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs_common()
11721 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs_common()
11722 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs_common()
11724 kvm_x86_call(get_idt)(vcpu, &dt); in __get_sregs_common()
11727 kvm_x86_call(get_gdt)(vcpu, &dt); in __get_sregs_common()
11731 sregs->cr2 = vcpu->arch.cr2; in __get_sregs_common()
11732 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs_common()
11735 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs_common()
11736 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs_common()
11737 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs_common()
11738 sregs->efer = vcpu->arch.efer; in __get_sregs_common()
11739 sregs->apic_base = vcpu->arch.apic_base; in __get_sregs_common()
11742 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs() argument
11744 __get_sregs_common(vcpu, sregs); in __get_sregs()
11746 if (vcpu->arch.guest_state_protected) in __get_sregs()
11749 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
11750 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
11754 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) in __get_sregs2() argument
11758 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); in __get_sregs2()
11760 if (vcpu->arch.guest_state_protected) in __get_sregs2()
11763 if (is_pae_paging(vcpu)) { in __get_sregs2()
11765 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); in __get_sregs2()
11770 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
11773 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_get_sregs()
11774 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_get_sregs()
11777 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
11778 __get_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_get_sregs()
11779 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
11783 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
11788 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11790 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11792 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11794 r = kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11799 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || in kvm_arch_vcpu_ioctl_get_mpstate()
11800 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && in kvm_arch_vcpu_ioctl_get_mpstate()
11801 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
11804 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
11807 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11810 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11811 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11815 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
11820 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
11828 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl_set_mpstate()
11845 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
11851 kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED); in kvm_arch_vcpu_ioctl_set_mpstate()
11852 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
11854 kvm_set_mp_state(vcpu, mp_state->mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
11855 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
11859 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
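
The mp_state ioctls let the VMM capture and restore the vCPU's activity state; note that kvm_arch_vcpu_ioctl_get_mpstate() reports RUNNABLE for a halted vCPU whose pv_unhalted flag is set, so a kicked vCPU is not re-halted across migration. A sketch of the round trip:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int save_restore_mp_state(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(src_vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;
	/* e.g. mp.mp_state == KVM_MP_STATE_RUNNABLE or KVM_MP_STATE_HALTED */
	return ioctl(dst_vcpu_fd, KVM_SET_MP_STATE, &mp);
}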
11863 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, in kvm_task_switch() argument
11866 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
11869 init_emulate_ctxt(vcpu); in kvm_task_switch()
11878 if (ret || vcpu->mmio_needed) { in kvm_task_switch()
11879 vcpu->mmio_needed = false; in kvm_task_switch()
11880 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
11881 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
11882 vcpu->run->internal.ndata = 0; in kvm_task_switch()
11886 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
11887 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
11892 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_is_valid_sregs() argument
11902 if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3)) in kvm_is_valid_sregs()
11913 return kvm_is_valid_cr4(vcpu, sregs->cr4) && in kvm_is_valid_sregs()
11914 kvm_is_valid_cr0(vcpu, sregs->cr0); in kvm_is_valid_sregs()
11917 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, in __set_sregs_common() argument
11923 if (!kvm_is_valid_sregs(vcpu, sregs)) in __set_sregs_common()
11926 if (kvm_apic_set_base(vcpu, sregs->apic_base, true)) in __set_sregs_common()
11929 if (vcpu->arch.guest_state_protected) in __set_sregs_common()
11934 kvm_x86_call(set_idt)(vcpu, &dt); in __set_sregs_common()
11937 kvm_x86_call(set_gdt)(vcpu, &dt); in __set_sregs_common()
11939 vcpu->arch.cr2 = sregs->cr2; in __set_sregs_common()
11940 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs_common()
11941 vcpu->arch.cr3 = sregs->cr3; in __set_sregs_common()
11942 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); in __set_sregs_common()
11943 kvm_x86_call(post_set_cr3)(vcpu, sregs->cr3); in __set_sregs_common()
11945 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs_common()
11947 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs_common()
11948 kvm_x86_call(set_efer)(vcpu, sregs->efer); in __set_sregs_common()
11950 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs_common()
11951 kvm_x86_call(set_cr0)(vcpu, sregs->cr0); in __set_sregs_common()
11953 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs_common()
11954 kvm_x86_call(set_cr4)(vcpu, sregs->cr4); in __set_sregs_common()
11957 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs_common()
11958 if (is_pae_paging(vcpu)) { in __set_sregs_common()
11959 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); in __set_sregs_common()
11962 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs_common()
11965 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs_common()
11966 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs_common()
11967 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs_common()
11968 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs_common()
11969 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs_common()
11970 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs_common()
11972 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs_common()
11973 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs_common()
11975 update_cr8_intercept(vcpu); in __set_sregs_common()
11977 /* Older userspace won't unhalt the vcpu on reset. */ in __set_sregs_common()
11978 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in __set_sregs_common()
11980 !is_protmode(vcpu)) in __set_sregs_common()
11981 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); in __set_sregs_common()
11986 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __set_sregs() argument
11990 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); in __set_sregs()
11996 kvm_mmu_reset_context(vcpu); in __set_sregs()
11997 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in __set_sregs()
12005 kvm_queue_interrupt(vcpu, pending_vec, false); in __set_sregs()
12007 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_sregs()
12012 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) in __set_sregs2() argument
12023 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) in __set_sregs2()
12026 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, in __set_sregs2()
12033 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); in __set_sregs2()
12035 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); in __set_sregs2()
12037 vcpu->arch.pdptrs_from_userspace = true; in __set_sregs2()
12040 kvm_mmu_reset_context(vcpu); in __set_sregs2()
12041 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in __set_sregs2()
12046 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
12051 if (vcpu->kvm->arch.has_protected_state && in kvm_arch_vcpu_ioctl_set_sregs()
12052 vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_sregs()
12055 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
12056 ret = __set_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_set_sregs()
12057 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
12064 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_guestdbg_update_apicv_inhibit() local
12072 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
12073 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
12082 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
12088 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_guest_debug()
12091 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
12095 if (kvm_is_exception_pending(vcpu)) in kvm_arch_vcpu_ioctl_set_guest_debug()
12098 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
12100 kvm_queue_exception(vcpu, BP_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
12107 rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
12109 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
12110 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
12111 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
12113 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
12115 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
12116 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
12119 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
12121 kvm_update_dr7(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
12123 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
12124 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
12130 kvm_set_rflags(vcpu, rflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
12132 kvm_x86_call(update_exception_bitmap)(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
12134 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_guest_debug()
12139 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
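
kvm_arch_vcpu_ioctl_set_guest_debug() is driven by KVM_SET_GUEST_DEBUG: the control word selects single-stepping and software/hardware breakpoints, and dbg->arch.debugreg[] supplies the DR values copied into eff_db[] and guest_debug_dr7. A minimal single-step sketch from the VMM side:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_single_step(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	/* Trap back to userspace after every guest instruction. */
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

With this in place, each subsequent KVM_RUN returns with exit_reason KVM_EXIT_DEBUG and the trapping RIP reported in run->debug.arch.pc.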
12146 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
12153 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_translate()
12155 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
12156 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); in kvm_arch_vcpu_ioctl_translate()
12157 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
12163 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_translate()
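
kvm_arch_vcpu_ioctl_translate() backs the KVM_TRANSLATE ioctl, walking the guest page tables for a linear address via kvm_mmu_gva_to_gpa_system(). A sketch of querying it from userspace:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void translate_gva(int vcpu_fd, uint64_t gva)
{
	struct kvm_translation tr = { .linear_address = gva };

	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
		printf("gva 0x%llx -> gpa 0x%llx (%s)\n",
		       (unsigned long long)gva,
		       (unsigned long long)tr.physical_address,
		       tr.writeable ? "rw" : "ro");
	else
		printf("gva 0x%llx not mapped\n", (unsigned long long)gva);
}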
12167 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
12171 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_get_fpu()
12172 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_arch_vcpu_ioctl_get_fpu()
12174 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
12176 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
12186 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
12190 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
12194 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_set_fpu()
12195 return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; in kvm_arch_vcpu_ioctl_set_fpu()
12197 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
12199 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
12210 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
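
KVM_GET_FPU/KVM_SET_FPU expose the guest FPU as a legacy FXSAVE image; when the guest FPU state is confidential the ioctls either fail with -EINVAL or silently do nothing, as the fpstate_is_confidential() checks above show. A sketch that masks the SSE exception bits in MXCSR, assuming an ordinary (non-protected) guest:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int mask_sse_exceptions(int vcpu_fd)
{
	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0)
		return -1;
	fpu.mxcsr |= 0x1f80;	/* set all SSE exception mask bits */
	return ioctl(vcpu_fd, KVM_SET_FPU, &fpu);
}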
12214 static void store_regs(struct kvm_vcpu *vcpu) in store_regs() argument
12218 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
12219 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
12221 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
12222 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
12224 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
12226 vcpu, &vcpu->run->s.regs.events); in store_regs()
12229 static int sync_regs(struct kvm_vcpu *vcpu) in sync_regs() argument
12231 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
12232 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
12233 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
12236 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
12237 struct kvm_sregs sregs = vcpu->run->s.regs.sregs; in sync_regs()
12239 if (__set_sregs(vcpu, &sregs)) in sync_regs()
12242 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
12245 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
12246 struct kvm_vcpu_events events = vcpu->run->s.regs.events; in sync_regs()
12248 if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events)) in sync_regs()
12251 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
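
store_regs() and sync_regs() implement the KVM_CAP_SYNC_REGS fast path: register state rides in the mmapped run->s.regs area instead of separate GET/SET ioctls, gated by kvm_valid_regs (kernel fills the area on exit) and kvm_dirty_regs (kernel applies the area on the next entry, then clears the bits as shown above). A small sketch, assuming the capability has been confirmed with KVM_CHECK_EXTENSION:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static void poke_rbx_via_sync_regs(int vcpu_fd, struct kvm_run *run)
{
	/* Ask KVM to publish GPRs and sregs in run->s.regs on every exit. */
	run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;

	ioctl(vcpu_fd, KVM_RUN, 0);

	/* Modify the snapshot and mark it dirty; sync_regs() applies it. */
	run->s.regs.regs.rbx = 0x1234;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;

	ioctl(vcpu_fd, KVM_RUN, 0);
}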
12272 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
12277 vcpu->arch.last_vmentry_cpu = -1; in kvm_arch_vcpu_create()
12278 vcpu->arch.regs_avail = ~0; in kvm_arch_vcpu_create()
12279 vcpu->arch.regs_dirty = ~0; in kvm_arch_vcpu_create()
12281 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm); in kvm_arch_vcpu_create()
12283 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
12284 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); in kvm_arch_vcpu_create()
12286 kvm_set_mp_state(vcpu, KVM_MP_STATE_UNINITIALIZED); in kvm_arch_vcpu_create()
12288 r = kvm_mmu_create(vcpu); in kvm_arch_vcpu_create()
12292 r = kvm_create_lapic(vcpu); in kvm_arch_vcpu_create()
12301 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
12303 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), in kvm_arch_vcpu_create()
12305 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), in kvm_arch_vcpu_create()
12307 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) in kvm_arch_vcpu_create()
12309 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
12311 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
12315 if (!alloc_emulate_ctxt(vcpu)) in kvm_arch_vcpu_create()
12318 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { in kvm_arch_vcpu_create()
12319 pr_err("failed to allocate vcpu's fpu\n"); in kvm_arch_vcpu_create()
12323 kvm_async_pf_hash_reset(vcpu); in kvm_arch_vcpu_create()
12325 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) { in kvm_arch_vcpu_create()
12326 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
12327 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
12328 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; in kvm_arch_vcpu_create()
12330 kvm_pmu_init(vcpu); in kvm_arch_vcpu_create()
12332 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
12333 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
12336 vcpu->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_vcpu_create()
12339 r = kvm_x86_call(vcpu_create)(vcpu); in kvm_arch_vcpu_create()
12343 kvm_xen_init_vcpu(vcpu); in kvm_arch_vcpu_create()
12344 vcpu_load(vcpu); in kvm_arch_vcpu_create()
12345 kvm_vcpu_after_set_cpuid(vcpu); in kvm_arch_vcpu_create()
12346 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); in kvm_arch_vcpu_create()
12347 kvm_vcpu_reset(vcpu, false); in kvm_arch_vcpu_create()
12348 kvm_init_mmu(vcpu); in kvm_arch_vcpu_create()
12349 vcpu_put(vcpu); in kvm_arch_vcpu_create()
12353 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
12355 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
12357 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
12359 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
12360 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_create()
12361 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
12363 kvm_free_lapic(vcpu); in kvm_arch_vcpu_create()
12365 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_create()
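
The tail of kvm_arch_vcpu_create() above is the usual goto-unwind ladder: each successful allocation gains one more teardown step, released in reverse order when a later allocation fails. The labels themselves do not match "vcpu" and so are missing from this listing; the sketch below shows the idiom with hypothetical names, not the real ones.

#include <stdlib.h>

struct widget { void *banks; void *mask; void *ctxt; };

/* Hypothetical illustration of the unwind ladder: free only what was
 * already allocated, in reverse order of allocation. */
static struct widget *widget_create(void)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->banks = calloc(32, 8);
	if (!w->banks)
		goto fail_free_widget;
	w->mask = calloc(1, 128);
	if (!w->mask)
		goto fail_free_banks;
	w->ctxt = calloc(1, 4096);
	if (!w->ctxt)
		goto fail_free_mask;
	return w;

fail_free_mask:
	free(w->mask);
fail_free_banks:
	free(w->banks);
fail_free_widget:
	free(w);
	return NULL;
}

int main(void)
{
	struct widget *w = widget_create();

	if (w) {
		free(w->ctxt);
		free(w->mask);
		free(w->banks);
		free(w);
	}
	return 0;
}
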
12369 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
12371 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
12373 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
12375 vcpu_load(vcpu); in kvm_arch_vcpu_postcreate()
12376 kvm_synchronize_tsc(vcpu, NULL); in kvm_arch_vcpu_postcreate()
12377 vcpu_put(vcpu); in kvm_arch_vcpu_postcreate()
12380 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
12382 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
12384 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
12389 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
12393 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_destroy()
12394 kvm_mmu_unload(vcpu); in kvm_arch_vcpu_destroy()
12396 kvmclock_reset(vcpu); in kvm_arch_vcpu_destroy()
12398 kvm_x86_call(vcpu_free)(vcpu); in kvm_arch_vcpu_destroy()
12400 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
12401 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
12402 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
12404 kvm_xen_destroy_vcpu(vcpu); in kvm_arch_vcpu_destroy()
12405 kvm_hv_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
12406 kvm_pmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
12407 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
12408 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_destroy()
12409 kvm_free_lapic(vcpu); in kvm_arch_vcpu_destroy()
12410 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
12411 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
12412 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
12413 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
12414 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
12417 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in kvm_vcpu_reset() argument
12420 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_vcpu_reset()
12427 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel in kvm_vcpu_reset()
12431 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); in kvm_vcpu_reset()
12435 * possible to INIT the vCPU while L2 is active. Force the vCPU back in kvm_vcpu_reset()
12439 if (is_guest_mode(vcpu)) in kvm_vcpu_reset()
12440 kvm_leave_nested(vcpu); in kvm_vcpu_reset()
12442 kvm_lapic_reset(vcpu, init_event); in kvm_vcpu_reset()
12444 WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu)); in kvm_vcpu_reset()
12445 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
12447 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
12448 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
12449 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
12450 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
12451 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
12452 kvm_clear_interrupt_queue(vcpu); in kvm_vcpu_reset()
12453 kvm_clear_exception_queue(vcpu); in kvm_vcpu_reset()
12455 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
12456 kvm_update_dr0123(vcpu); in kvm_vcpu_reset()
12457 vcpu->arch.dr6 = DR6_ACTIVE_LOW; in kvm_vcpu_reset()
12458 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
12459 kvm_update_dr7(vcpu); in kvm_vcpu_reset()
12461 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
12463 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_reset()
12464 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
12465 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
12466 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
12468 kvmclock_reset(vcpu); in kvm_vcpu_reset()
12470 kvm_clear_async_pf_completion_queue(vcpu); in kvm_vcpu_reset()
12471 kvm_async_pf_hash_reset(vcpu); in kvm_vcpu_reset()
12472 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
12474 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { in kvm_vcpu_reset()
12475 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; in kvm_vcpu_reset()
12482 kvm_put_guest_fpu(vcpu); in kvm_vcpu_reset()
12488 kvm_load_guest_fpu(vcpu); in kvm_vcpu_reset()
12492 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
12494 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_vcpu_reset()
12496 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
12497 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | in kvm_vcpu_reset()
12500 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); in kvm_vcpu_reset()
12501 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); in kvm_vcpu_reset()
12505 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
12506 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); in kvm_vcpu_reset()
12511 * RESET since KVM emulates RESET before exposing the vCPU to userspace, in kvm_vcpu_reset()
12515 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1); in kvm_vcpu_reset()
12516 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); in kvm_vcpu_reset()
12518 kvm_x86_call(vcpu_reset)(vcpu, init_event); in kvm_vcpu_reset()
12520 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in kvm_vcpu_reset()
12521 kvm_rip_write(vcpu, 0xfff0); in kvm_vcpu_reset()
12523 vcpu->arch.cr3 = 0; in kvm_vcpu_reset()
12524 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); in kvm_vcpu_reset()
12537 kvm_x86_call(set_cr0)(vcpu, new_cr0); in kvm_vcpu_reset()
12538 kvm_x86_call(set_cr4)(vcpu, 0); in kvm_vcpu_reset()
12539 kvm_x86_call(set_efer)(vcpu, 0); in kvm_vcpu_reset()
12540 kvm_x86_call(update_exception_bitmap)(vcpu); in kvm_vcpu_reset()
12551 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_vcpu_reset()
12552 kvm_mmu_reset_context(vcpu); in kvm_vcpu_reset()
12565 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_vcpu_reset()
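
The values written above during RESET/INIT mirror the architectural reset state. The sketch below collects the ones visible in this listing with the constants spelled out numerically; treat the numbers as my reading of the SDM and the KVM headers, not a quotation from this file.

#include <stdint.h>
#include <stdio.h>

/* Reset state mirrored by kvm_vcpu_reset(), as I read it. */
struct x86_reset_state {
	uint64_t dr6;		/* 0xffff0ff0: DR6_ACTIVE_LOW, active-low bits set */
	uint64_t dr7;		/* 0x00000400: DR7_FIXED_1, only fixed bit 10      */
	uint64_t rip;		/* 0xfff0: offset of the reset vector              */
	uint64_t rflags;	/* 0x2: X86_EFLAGS_FIXED, the always-one bit       */
	uint64_t rdx;		/* CPUID.01H:EAX (family/model/stepping), or 0x600
				 * when no CPUID entries have been set yet         */
	uint64_t pat;		/* 0x0007040600070406: MSR_IA32_CR_PAT default     */
	uint64_t smbase;	/* 0x30000: default SMRAM base                     */
};

static const struct x86_reset_state reset_state = {
	.dr6	= 0xffff0ff0,
	.dr7	= 0x00000400,
	.rip	= 0xfff0,
	.rflags	= 0x2,
	.rdx	= 0x600,
	.pat	= 0x0007040600070406ULL,
	.smbase	= 0x30000,
};

int main(void)
{
	printf("RIP after RESET: 0x%llx\n", (unsigned long long)reset_state.rip);
	return 0;
}
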
12569 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) in kvm_vcpu_deliver_sipi_vector() argument
12573 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
12576 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
12577 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
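
kvm_vcpu_deliver_sipi_vector() programs the classic AP start state: under the usual reading, CS.selector = vector << 8, CS.base = vector << 12 and RIP = 0, so the AP begins at physical address vector * 4 KiB. A tiny sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Start address implied by a SIPI vector (CS.base = vector << 12, RIP = 0). */
static uint32_t sipi_start_address(uint8_t vector)
{
	uint32_t cs_base = (uint32_t)vector << 12;
	uint32_t ip = 0;

	return cs_base + ip;
}

int main(void)
{
	/* e.g. a SIPI vector of 0x9a starts the AP at 0x9a000 */
	printf("0x%x\n", sipi_start_address(0x9a));
	return 0;
}
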
12594 struct kvm_vcpu *vcpu; in kvm_arch_enable_virtualization_cpu() local
12614 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_enable_virtualization_cpu()
12615 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_enable_virtualization_cpu()
12616 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_enable_virtualization_cpu()
12617 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_enable_virtualization_cpu()
12619 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_enable_virtualization_cpu()
12620 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_enable_virtualization_cpu()
12637 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, in kvm_arch_enable_virtualization_cpu()
12639 * adjustments, in case multiple suspend cycles happen before some VCPU in kvm_arch_enable_virtualization_cpu()
12667 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_enable_virtualization_cpu()
12668 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_enable_virtualization_cpu()
12669 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_enable_virtualization_cpu()
12670 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_arch_enable_virtualization_cpu()
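
The two loops above implement the suspend/resume fixup for hosts whose TSC is otherwise stable but restarts across suspend: record the largest last_host_tsc any vCPU observed, subtract the current host TSC, and add that one delta to every vCPU's offset so no guest ever sees its TSC go backwards and all vCPUs stay mutually synchronized. A small numeric sketch of the adjustment:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the backwards-TSC fixup: delta is the worst case over all
 * vCPUs and the same delta is applied to every one of them. */
int main(void)
{
	uint64_t local_tsc = 1000;			/* host TSC after resume  */
	uint64_t last_host_tsc[3] = { 900000, 950000, 940000 };
	uint64_t max_tsc = 0, delta_cyc;
	uint64_t tsc_offset_adjustment[3] = { 0, 0, 0 };
	int i;

	for (i = 0; i < 3; i++)
		if (last_host_tsc[i] > local_tsc && last_host_tsc[i] > max_tsc)
			max_tsc = last_host_tsc[i];

	delta_cyc = max_tsc - local_tsc;		/* 949000 cycles here     */
	for (i = 0; i < 3; i++) {
		tsc_offset_adjustment[i] += delta_cyc;
		last_host_tsc[i] = local_tsc;
	}
	printf("delta_cyc = %llu\n", (unsigned long long)delta_cyc);
	return 0;
}
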
12693 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_reset_bsp() argument
12695 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
12698 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_bsp() argument
12700 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
13027 struct kvm_vcpu *vcpu; in kvm_arch_memslots_updated() local
13037 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_memslots_updated()
13038 kvm_vcpu_kick(vcpu); in kvm_arch_memslots_updated()
13225 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
13227 WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)); in kvm_arch_vcpu_in_kernel()
13229 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_in_kernel()
13232 return kvm_x86_call(get_cpl)(vcpu) == 0; in kvm_arch_vcpu_in_kernel()
13235 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_get_ip() argument
13237 WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)); in kvm_arch_vcpu_get_ip()
13239 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_get_ip()
13242 return kvm_rip_read(vcpu); in kvm_arch_vcpu_get_ip()
13245 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
13247 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
13250 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) in kvm_arch_interrupt_allowed() argument
13252 return kvm_x86_call(interrupt_allowed)(vcpu, false); in kvm_arch_interrupt_allowed()
13255 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) in kvm_get_linear_rip() argument
13258 if (vcpu->arch.guest_state_protected) in kvm_get_linear_rip()
13261 if (is_64_bit_mode(vcpu)) in kvm_get_linear_rip()
13262 return kvm_rip_read(vcpu); in kvm_get_linear_rip()
13263 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + in kvm_get_linear_rip()
13264 kvm_rip_read(vcpu)); in kvm_get_linear_rip()
13268 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) in kvm_is_linear_rip() argument
13270 return kvm_get_linear_rip(vcpu) == linear_rip; in kvm_is_linear_rip()
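
kvm_get_linear_rip() above reduces to "RIP as-is in 64-bit mode, otherwise the 32-bit truncation of CS.base + RIP", which is what kvm_is_linear_rip() compares against for single-step bookkeeping. A standalone sketch of that computation; the mode flag and base are stand-ins for is_64_bit_mode() and get_segment_base().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical standalone version of the linear-RIP computation. */
static uint64_t linear_rip(bool is_64_bit_mode, uint64_t cs_base, uint64_t rip)
{
	if (is_64_bit_mode)
		return rip;
	return (uint32_t)(cs_base + rip);
}

int main(void)
{
	/* reset-style example: CS.base 0xffff0000 + RIP 0xfff0 -> 0xfffffff0 */
	printf("0x%llx\n",
	       (unsigned long long)linear_rip(false, 0xffff0000u, 0xfff0u));
	return 0;
}
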
13274 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) in kvm_get_rflags() argument
13278 rflags = kvm_x86_call(get_rflags)(vcpu); in kvm_get_rflags()
13279 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
13285 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in __kvm_set_rflags() argument
13287 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
13288 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
13290 kvm_x86_call(set_rflags)(vcpu, rflags); in __kvm_set_rflags()
13293 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in kvm_set_rflags() argument
13295 __kvm_set_rflags(vcpu, rflags); in kvm_set_rflags()
13296 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_set_rflags()
13312 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
13316 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
13319 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
13322 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot() argument
13328 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
13329 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
13335 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn() argument
13337 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
13340 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn() argument
13344 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); in kvm_del_async_pf_gfn()
13346 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
13350 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
13353 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
13355 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
13362 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
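
The apf.gfns[] helpers above form an open-addressed, linear-probing hash whose delete does not leave tombstones: later members of the probe chain are shifted back into the hole, exactly the loop visible in kvm_del_async_pf_gfn(). A self-contained sketch of the same scheme; the hash function and table size are stand-ins, not KVM's.

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64U			/* power of two, like the per-vCPU table */
#define EMPTY  (~0ULL)

static uint64_t slots[NSLOTS];

static uint32_t hash_fn(uint64_t gfn)
{
	/* stand-in for kvm_async_pf_hash_fn(); any mixer works for the demo */
	return (uint32_t)((gfn * 0x9E3779B97F4A7C15ULL) >> 58) & (NSLOTS - 1);
}

static uint32_t next_probe(uint32_t key)
{
	return (key + 1) & (NSLOTS - 1);
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	while (slots[key] != EMPTY)	/* linear probing; table kept sparse */
		key = next_probe(key);
	slots[key] = gfn;
}

static uint32_t find_slot(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);
	uint32_t i;

	for (i = 0; i < NSLOTS && slots[key] != gfn && slots[key] != EMPTY; i++)
		key = next_probe(key);
	return key;
}

/* Delete without tombstones: pull later entries of the probe chain back
 * into the hole, the same backward-shift kvm_del_async_pf_gfn() uses. */
static void del_gfn(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = find_slot(gfn);
	if (slots[i] != gfn)
		return;

	while (1) {
		slots[i] = EMPTY;
		do {
			j = next_probe(j);
			if (slots[j] == EMPTY)
				return;
			k = hash_fn(slots[j]);
			/* slots[j] may stay while its home slot k lies
			 * cyclically in (i, j]; otherwise it must move */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		slots[i] = slots[j];
		i = j;
	}
}

int main(void)
{
	uint32_t n;

	for (n = 0; n < NSLOTS; n++)
		slots[n] = EMPTY;
	add_gfn(0x100); add_gfn(0x200); add_gfn(0x300);
	del_gfn(0x200);
	printf("0x100 present: %d\n", slots[find_slot(0x100)] == 0x100);
	printf("0x200 present: %d\n", slots[find_slot(0x200)] == 0x200);
	return 0;
}
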
13367 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) in apf_put_user_notpresent() argument
13371 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
13375 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) in apf_put_user_ready() argument
13379 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
13383 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) in apf_pageready_slot_free() argument
13388 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
13395 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) in kvm_can_deliver_async_pf() argument
13398 if (!kvm_pv_async_pf_enabled(vcpu)) in kvm_can_deliver_async_pf()
13401 if (!vcpu->arch.apf.send_always && in kvm_can_deliver_async_pf()
13402 (vcpu->arch.guest_state_protected || !kvm_x86_call(get_cpl)(vcpu))) in kvm_can_deliver_async_pf()
13405 if (is_guest_mode(vcpu)) { in kvm_can_deliver_async_pf()
13410 return vcpu->arch.apf.delivery_as_pf_vmexit; in kvm_can_deliver_async_pf()
13417 return is_paging(vcpu); in kvm_can_deliver_async_pf()
13421 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) in kvm_can_do_async_pf() argument
13423 if (unlikely(!lapic_in_kernel(vcpu) || in kvm_can_do_async_pf()
13424 kvm_event_needs_reinjection(vcpu) || in kvm_can_do_async_pf()
13425 kvm_is_exception_pending(vcpu))) in kvm_can_do_async_pf()
13428 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
13435 return kvm_arch_interrupt_allowed(vcpu); in kvm_can_do_async_pf()
13438 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
13444 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
13446 if (kvm_can_deliver_async_pf(vcpu) && in kvm_arch_async_page_not_present()
13447 !apf_put_user_notpresent(vcpu)) { in kvm_arch_async_page_not_present()
13454 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_not_present()
13465 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in kvm_arch_async_page_not_present()
13470 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
13475 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
13481 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
13485 kvm_pv_async_pf_enabled(vcpu) && in kvm_arch_async_page_present()
13486 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
13487 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
13488 kvm_apic_set_irq(vcpu, &irq, NULL); in kvm_arch_async_page_present()
13491 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
13492 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); in kvm_arch_async_page_present()
13495 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) in kvm_arch_async_page_present_queued() argument
13497 kvm_make_request(KVM_REQ_APF_READY, vcpu); in kvm_arch_async_page_present_queued()
13498 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
13499 kvm_vcpu_kick(vcpu); in kvm_arch_async_page_present_queued()
13502 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_dequeue_async_page_present() argument
13504 if (!kvm_pv_async_pf_enabled(vcpu)) in kvm_arch_can_dequeue_async_page_present()
13507 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); in kvm_arch_can_dequeue_async_page_present()
13638 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) in kvm_arch_no_poll() argument
13640 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
13684 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) in kvm_fixup_and_inject_pf_error() argument
13686 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fixup_and_inject_pf_error()
13692 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { in kvm_fixup_and_inject_pf_error()
13694 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
13705 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
13714 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, in kvm_handle_memory_failure() argument
13718 if (KVM_BUG_ON(!e, vcpu->kvm)) in kvm_handle_memory_failure()
13721 kvm_inject_emulated_page_fault(vcpu, e); in kvm_handle_memory_failure()
13732 kvm_prepare_emulation_failure_exit(vcpu); in kvm_handle_memory_failure()
13738 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) in kvm_handle_invpcid() argument
13748 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); in kvm_handle_invpcid()
13750 return kvm_handle_memory_failure(vcpu, r, &e); in kvm_handle_invpcid()
13753 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
13757 pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE); in kvm_handle_invpcid()
13766 is_noncanonical_invlpg_address(operand.gla, vcpu)) { in kvm_handle_invpcid()
13767 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
13770 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); in kvm_handle_invpcid()
13771 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
13775 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
13779 kvm_invalidate_pcid(vcpu, operand.pcid); in kvm_handle_invpcid()
13780 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
13792 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_handle_invpcid()
13793 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
13796 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
13802 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_mmio() argument
13804 struct kvm_run *run = vcpu->run; in complete_sev_es_emulated_mmio()
13808 BUG_ON(!vcpu->mmio_needed); in complete_sev_es_emulated_mmio()
13811 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_sev_es_emulated_mmio()
13813 if (!vcpu->mmio_is_write) in complete_sev_es_emulated_mmio()
13819 vcpu->mmio_cur_fragment++; in complete_sev_es_emulated_mmio()
13827 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_sev_es_emulated_mmio()
13828 vcpu->mmio_needed = 0; in complete_sev_es_emulated_mmio()
13838 run->mmio.is_write = vcpu->mmio_is_write; in complete_sev_es_emulated_mmio()
13843 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in complete_sev_es_emulated_mmio()
13848 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, in kvm_sev_es_mmio_write() argument
13857 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data); in kvm_sev_es_mmio_write()
13866 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_write()
13867 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_write()
13872 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_write()
13873 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_write()
13875 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_write()
13876 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_write()
13877 vcpu->run->mmio.is_write = 1; in kvm_sev_es_mmio_write()
13878 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in kvm_sev_es_mmio_write()
13879 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_write()
13881 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_write()
13887 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, in kvm_sev_es_mmio_read() argument
13896 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data); in kvm_sev_es_mmio_read()
13905 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_read()
13906 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_read()
13911 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_read()
13912 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_read()
13914 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_read()
13915 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_read()
13916 vcpu->run->mmio.is_write = 0; in kvm_sev_es_mmio_read()
13917 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_read()
13919 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_read()
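
For SEV-ES guests KVM cannot read or write guest memory itself, so the MMIO above is bounced to userspace in fragments of at most 8 bytes, the size of run->mmio.data in the UAPI, and complete_sev_es_emulated_mmio() advances one fragment per KVM_EXIT_MMIO round trip. A sketch of that chunking, using the sizes visible in the lines above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MMIO_DATA_MAX 8u	/* sizeof(run->mmio.data) in the KVM UAPI */

/* Sketch: split one guest MMIO write into the <= 8-byte chunks that each
 * KVM_EXIT_MMIO round trip to userspace can carry. */
static void emulate_mmio_write(uint64_t gpa, const uint8_t *data, unsigned int len)
{
	while (len) {
		unsigned int chunk = len < MMIO_DATA_MAX ? len : MMIO_DATA_MAX;
		uint8_t mmio_data[MMIO_DATA_MAX];

		memcpy(mmio_data, data, chunk);		/* run->mmio.data */
		printf("KVM_EXIT_MMIO: gpa=0x%llx len=%u is_write=1\n",
		       (unsigned long long)gpa, chunk);
		/* ...exit to userspace, resume on the next KVM_RUN... */

		gpa += chunk;
		data += chunk;
		len -= chunk;
	}
}

int main(void)
{
	uint8_t buf[12] = { 0 };

	emulate_mmio_write(0xd0000000ULL, buf, sizeof(buf));	/* two exits */
	return 0;
}
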
13925 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size) in advance_sev_es_emulated_pio() argument
13927 vcpu->arch.sev_pio_count -= count; in advance_sev_es_emulated_pio()
13928 vcpu->arch.sev_pio_data += count * size; in advance_sev_es_emulated_pio()
13931 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13934 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_outs() argument
13936 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_outs()
13937 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_outs()
13939 vcpu->arch.pio.count = 0; in complete_sev_es_emulated_outs()
13940 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_outs()
13941 return kvm_sev_es_outs(vcpu, size, port); in complete_sev_es_emulated_outs()
13945 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_outs() argument
13950 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_outs()
13951 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); in kvm_sev_es_outs()
13954 advance_sev_es_emulated_pio(vcpu, count, size); in kvm_sev_es_outs()
13959 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_outs()
13963 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; in kvm_sev_es_outs()
13967 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13970 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_ins() argument
13972 unsigned count = vcpu->arch.pio.count; in complete_sev_es_emulated_ins()
13973 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_ins()
13974 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_ins()
13976 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); in complete_sev_es_emulated_ins()
13977 advance_sev_es_emulated_pio(vcpu, count, size); in complete_sev_es_emulated_ins()
13978 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_ins()
13979 return kvm_sev_es_ins(vcpu, size, port); in complete_sev_es_emulated_ins()
13983 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_ins() argument
13988 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_ins()
13989 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count)) in kvm_sev_es_ins()
13993 advance_sev_es_emulated_pio(vcpu, count, size); in kvm_sev_es_ins()
13994 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_ins()
13998 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; in kvm_sev_es_ins()
14002 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_string_io() argument
14006 vcpu->arch.sev_pio_data = data; in kvm_sev_es_string_io()
14007 vcpu->arch.sev_pio_count = count; in kvm_sev_es_string_io()
14008 return in ? kvm_sev_es_ins(vcpu, size, port) in kvm_sev_es_string_io()
14009 : kvm_sev_es_outs(vcpu, size, port); in kvm_sev_es_string_io()
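
kvm_sev_es_string_io() drives the same continuation pattern for string port I/O: each round handles at most PAGE_SIZE/size elements, advance_sev_es_emulated_pio() moves the data pointer and remaining count, and the complete_userspace_io callback re-enters the loop after each userspace exit. A sketch of that batching; the state names echo the sev_pio_data/sev_pio_count fields above, and the userspace round trip is collapsed into a printf.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct pio_state {
	uint8_t *data;		/* plays the role of sev_pio_data  */
	unsigned int count;	/* plays the role of sev_pio_count */
};

/* One OUTS-style loop: emit up to PAGE_SIZE/size elements per round, then
 * advance the cursor; the real code returns to userspace between rounds
 * and resumes via complete_userspace_io. */
static void pio_out_rounds(struct pio_state *s, unsigned int size, uint16_t port)
{
	while (s->count) {
		unsigned int batch = PAGE_SIZE / size;

		if (batch > s->count)
			batch = s->count;
		printf("PIO out port 0x%x: %u x %u bytes\n", port, batch, size);

		s->data += (size_t)batch * size;	/* advance_sev_es_emulated_pio() */
		s->count -= batch;
	}
}

int main(void)
{
	static uint8_t buf[3 * PAGE_SIZE];
	struct pio_state s = { .data = buf, .count = 2 * PAGE_SIZE + 7 };

	pio_out_rounds(&s, 1, 0x3f8);	/* three rounds: 4096 + 4096 + 7 */
	return 0;
}
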