Lines Matching full:vcpu
112 void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) in kvm_smm_changed() argument
114 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); in kvm_smm_changed()
117 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_smm_changed()
119 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); in kvm_smm_changed()
122 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_smm_changed()
129 vcpu->arch.pdptrs_from_userspace = false; in kvm_smm_changed()
132 kvm_mmu_reset_context(vcpu); in kvm_smm_changed()
136 void process_smi(struct kvm_vcpu *vcpu) in process_smi() argument
138 vcpu->arch.smi_pending = true; in process_smi()
139 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_smi()
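
The two helpers above are pure bookkeeping: kvm_smm_changed() toggles the HF_SMM_* bits in vcpu->arch.hflags, requests KVM_REQ_EVENT and resets the MMU context, while process_smi() merely latches smi_pending and requests event injection. A minimal userspace sketch of that state machine (not kernel code; the bit values are illustrative, the real HF_* masks come from KVM's own headers):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative bit values; KVM defines the real HF_* masks itself. */
    #define HF_SMM_MASK             (1u << 1)
    #define HF_SMM_INSIDE_NMI_MASK  (1u << 2)

    struct toy_vcpu {
        unsigned int hflags;     /* models vcpu->arch.hflags       */
        bool smi_pending;        /* models vcpu->arch.smi_pending  */
        bool event_request;      /* models KVM_REQ_EVENT being set */
    };

    /* Models process_smi(): latch the SMI and ask for event injection. */
    static void toy_process_smi(struct toy_vcpu *v)
    {
        v->smi_pending = true;
        v->event_request = true;
    }

    /* Models kvm_smm_changed(): flip the SMM flag(s), re-request events. */
    static void toy_smm_changed(struct toy_vcpu *v, bool entering_smm)
    {
        if (entering_smm)
            v->hflags |= HF_SMM_MASK;
        else
            v->hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
        v->event_request = true;
        /* The real kvm_smm_changed() also resets the MMU context,
         * since the SMM bit is part of the MMU role. */
    }

    int main(void)
    {
        struct toy_vcpu v = { 0 };

        toy_process_smi(&v);          /* SMI arrives          */
        toy_smm_changed(&v, true);    /* vCPU enters SMM      */
        printf("in SMM: %d, SMI pending: %d\n",
               !!(v.hflags & HF_SMM_MASK), v.smi_pending);
        toy_smm_changed(&v, false);   /* RSM                  */
        printf("in SMM: %d\n", !!(v.hflags & HF_SMM_MASK));
        return 0;
    }
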
156 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, in enter_smm_save_seg_32() argument
162 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_32()
170 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, in enter_smm_save_seg_64() argument
176 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_64()
184 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, in enter_smm_save_state_32() argument
190 smram->cr0 = kvm_read_cr0(vcpu); in enter_smm_save_state_32()
191 smram->cr3 = kvm_read_cr3(vcpu); in enter_smm_save_state_32()
192 smram->eflags = kvm_get_rflags(vcpu); in enter_smm_save_state_32()
193 smram->eip = kvm_rip_read(vcpu); in enter_smm_save_state_32()
196 smram->gprs[i] = kvm_register_read_raw(vcpu, i); in enter_smm_save_state_32()
198 smram->dr6 = (u32)vcpu->arch.dr6; in enter_smm_save_state_32()
199 smram->dr7 = (u32)vcpu->arch.dr7; in enter_smm_save_state_32()
201 enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR); in enter_smm_save_state_32()
202 enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR); in enter_smm_save_state_32()
204 kvm_x86_call(get_gdt)(vcpu, &dt); in enter_smm_save_state_32()
208 kvm_x86_call(get_idt)(vcpu, &dt); in enter_smm_save_state_32()
212 enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES); in enter_smm_save_state_32()
213 enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS); in enter_smm_save_state_32()
214 enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS); in enter_smm_save_state_32()
216 enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS); in enter_smm_save_state_32()
217 enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS); in enter_smm_save_state_32()
218 enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS); in enter_smm_save_state_32()
220 smram->cr4 = kvm_read_cr4(vcpu); in enter_smm_save_state_32()
222 smram->smbase = vcpu->arch.smbase; in enter_smm_save_state_32()
224 smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in enter_smm_save_state_32()
228 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, in enter_smm_save_state_64() argument
235 smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i); in enter_smm_save_state_64()
237 smram->rip = kvm_rip_read(vcpu); in enter_smm_save_state_64()
238 smram->rflags = kvm_get_rflags(vcpu); in enter_smm_save_state_64()
240 smram->dr6 = vcpu->arch.dr6; in enter_smm_save_state_64()
241 smram->dr7 = vcpu->arch.dr7; in enter_smm_save_state_64()
243 smram->cr0 = kvm_read_cr0(vcpu); in enter_smm_save_state_64()
244 smram->cr3 = kvm_read_cr3(vcpu); in enter_smm_save_state_64()
245 smram->cr4 = kvm_read_cr4(vcpu); in enter_smm_save_state_64()
247 smram->smbase = vcpu->arch.smbase; in enter_smm_save_state_64()
250 smram->efer = vcpu->arch.efer; in enter_smm_save_state_64()
252 enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR); in enter_smm_save_state_64()
254 kvm_x86_call(get_idt)(vcpu, &dt); in enter_smm_save_state_64()
258 enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR); in enter_smm_save_state_64()
260 kvm_x86_call(get_gdt)(vcpu, &dt); in enter_smm_save_state_64()
264 enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES); in enter_smm_save_state_64()
265 enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS); in enter_smm_save_state_64()
266 enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS); in enter_smm_save_state_64()
267 enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS); in enter_smm_save_state_64()
268 enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS); in enter_smm_save_state_64()
269 enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS); in enter_smm_save_state_64()
271 smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in enter_smm_save_state_64()
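
One detail worth noting across the two save routines: the 32-bit image truncates DR6/DR7 to 32 bits and stores the GPRs in ascending encoding order, while the 64-bit image stores them reversed (the gprs[15 - i] assignment above), so R15 lands in slot 0 and RAX in slot 15. A self-contained sketch of that index mapping (plain userspace code; the arrays stand in for KVM's real SMRAM structs):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_GPRS 16

    int main(void)
    {
        /* Pretend register file in x86 encoding order:
         * reg[0] = RAX, reg[1] = RCX, ..., reg[15] = R15. */
        uint64_t reg[NR_GPRS];
        uint32_t smram32_gprs[8];        /* 32-bit image: ascending order */
        uint64_t smram64_gprs[NR_GPRS];  /* 64-bit image: stored reversed */

        for (int i = 0; i < NR_GPRS; i++)
            reg[i] = 0x1122334455667700ull + i;

        /* Mirrors enter_smm_save_state_32(): smram->gprs[i] = read(i),
         * truncated because the 32-bit image uses 32-bit fields. */
        for (int i = 0; i < 8; i++)
            smram32_gprs[i] = (uint32_t)reg[i];

        /* Mirrors enter_smm_save_state_64(): smram->gprs[15 - i] = read(i). */
        for (int i = 0; i < NR_GPRS; i++)
            smram64_gprs[15 - i] = reg[i];

        printf("32-bit image slot 0 (EAX, truncated): %#x\n",
               (unsigned)smram32_gprs[0]);
        printf("64-bit image slot 15 holds RAX: %d\n",
               smram64_gprs[15] == reg[0]);
        printf("64-bit image slot 0 holds R15:  %d\n",
               smram64_gprs[0] == reg[15]);
        return 0;
    }
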
275 void enter_smm(struct kvm_vcpu *vcpu) in enter_smm() argument
287 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) in enter_smm()
288 enter_smm_save_state_64(vcpu, &smram.smram64); in enter_smm()
291 enter_smm_save_state_32(vcpu, &smram.smram32); in enter_smm()
294 * Give enter_smm() a chance to make ISA-specific changes to the vCPU in enter_smm()
301 if (kvm_x86_call(enter_smm)(vcpu, &smram)) in enter_smm()
304 kvm_smm_changed(vcpu, true); in enter_smm()
306 if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram))) in enter_smm()
309 if (kvm_x86_call(get_nmi_mask)(vcpu)) in enter_smm()
310 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
312 kvm_x86_call(set_nmi_mask)(vcpu, true); in enter_smm()
314 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in enter_smm()
315 kvm_rip_write(vcpu, 0x8000); in enter_smm()
317 kvm_x86_call(set_interrupt_shadow)(vcpu, 0); in enter_smm()
319 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
320 kvm_x86_call(set_cr0)(vcpu, cr0); in enter_smm()
322 kvm_x86_call(set_cr4)(vcpu, 0); in enter_smm()
326 kvm_x86_call(set_idt)(vcpu, &dt); in enter_smm()
328 if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1))) in enter_smm()
331 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
332 cs.base = vcpu->arch.smbase; in enter_smm()
349 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in enter_smm()
350 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); in enter_smm()
351 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); in enter_smm()
352 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); in enter_smm()
353 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); in enter_smm()
354 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); in enter_smm()
357 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) in enter_smm()
358 if (kvm_x86_call(set_efer)(vcpu, 0)) in enter_smm()
362 vcpu->arch.cpuid_dynamic_bits_dirty = true; in enter_smm()
363 kvm_mmu_reset_context(vcpu); in enter_smm()
366 kvm_vm_dead(vcpu->kvm); in enter_smm()
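
Taken together, enter_smm() writes the saved image to the SMRAM state-save area at smbase + 0xfe00 and then forces the vCPU into the architectural SMM entry state: RIP = 0x8000, RFLAGS reduced to the fixed bit, DR7 reset to its fixed bits, CS selector derived from SMBASE with CS base = SMBASE, CR0 with PE/EM/TS/PG cleared, CR4 = 0 and, when long mode is supported, EFER = 0. A minimal sketch that computes this entry state for a given SMBASE (userspace code, constants spelled out for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants; the kernel takes these from its own headers. */
    #define X86_EFLAGS_FIXED 0x00000002u          /* bit 1 is always set */
    #define X86_CR0_PE (1u << 0)
    #define X86_CR0_EM (1u << 2)
    #define X86_CR0_TS (1u << 3)
    #define X86_CR0_PG (1u << 31)
    #define DR7_FIXED_1 0x00000400u

    struct smm_entry_state {
        uint64_t rip, rflags, cr0, cr4, efer, dr7;
        uint16_t cs_selector;
        uint64_t cs_base;
    };

    /* Models the tail of enter_smm(): the state the SMM handler starts in. */
    static struct smm_entry_state compute_entry_state(uint32_t smbase,
                                                      uint64_t old_cr0)
    {
        struct smm_entry_state s = {
            .rip         = 0x8000,                /* SMM entry point offset */
            .rflags      = X86_EFLAGS_FIXED,
            .dr7         = DR7_FIXED_1,
            .cr0         = old_cr0 & ~(uint64_t)(X86_CR0_PE | X86_CR0_EM |
                                                 X86_CR0_TS | X86_CR0_PG),
            .cr4         = 0,
            .efer        = 0,                     /* when LM is supported */
            .cs_selector = (uint16_t)((smbase >> 4) & 0xffff),
            .cs_base     = smbase,
        };
        return s;
    }

    int main(void)
    {
        /* 0x30000 is the architectural default SMBASE after reset. */
        struct smm_entry_state s = compute_entry_state(0x30000, 0x80010033);

        printf("RIP=%#llx CS.sel=%#x CS.base=%#llx CR0=%#llx\n",
               (unsigned long long)s.rip, (unsigned)s.cs_selector,
               (unsigned long long)s.cs_base, (unsigned long long)s.cr0);
        return 0;
    }

With the default SMBASE of 0x30000 this yields CS 0x3000 with base 0x30000 and RIP 0x8000, i.e. the handler starts executing at SMBASE + 0x8000.
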
384 static int rsm_load_seg_32(struct kvm_vcpu *vcpu, in rsm_load_seg_32() argument
394 kvm_set_segment(vcpu, &desc, n); in rsm_load_seg_32()
400 static int rsm_load_seg_64(struct kvm_vcpu *vcpu, in rsm_load_seg_64() argument
410 kvm_set_segment(vcpu, &desc, n); in rsm_load_seg_64()
415 static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu, in rsm_enter_protected_mode() argument
428 bad = kvm_set_cr3(vcpu, cr3); in rsm_enter_protected_mode()
437 bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE); in rsm_enter_protected_mode()
441 bad = kvm_set_cr0(vcpu, cr0); in rsm_enter_protected_mode()
446 bad = kvm_set_cr4(vcpu, cr4); in rsm_enter_protected_mode()
450 bad = kvm_set_cr3(vcpu, cr3 | pcid); in rsm_enter_protected_mode()
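
rsm_enter_protected_mode() has to feed the saved CR3/CR4/CR0 back in an order the architecture accepts: CR4.PCIDE may only be set while CR3[11:0] is zero and long mode is active again, so the PCID bits are split off, the control registers are loaded with PCIDE masked, and PCIDE plus the PCID are restored last. A small sketch of just that split-and-reassemble step (variable names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR4_PCIDE (1ull << 17)

    int main(void)
    {
        /* Saved values as they might appear in the SMRAM image. */
        uint64_t cr3 = 0x12345007;            /* PCID 0x007 in bits 11:0 */
        uint64_t cr4 = X86_CR4_PCIDE;
        uint64_t pcid = 0;
        uint64_t cr3_loaded, cr4_loaded;

        /* Step 1: if PCIDE was on, strip the PCID so CR3 can be loaded
         * before CR4.PCIDE is re-enabled (mirrors the listed code). */
        if (cr4 & X86_CR4_PCIDE) {
            pcid = cr3 & 0xfff;
            cr3 &= ~0xfffull;
        }

        /* Step 2: load CR3 (without PCID), CR4 (without PCIDE), then CR0. */
        cr3_loaded = cr3;
        cr4_loaded = cr4 & ~X86_CR4_PCIDE;

        /* Step 3: once the rest of the state is in place, re-enable PCIDE
         * and put the PCID back into CR3. */
        if (cr4 & X86_CR4_PCIDE) {
            cr4_loaded = cr4;
            if (pcid)
                cr3_loaded = cr3 | pcid;
        }

        printf("final CR3=%#llx CR4.PCIDE=%d\n",
               (unsigned long long)cr3_loaded,
               !!(cr4_loaded & X86_CR4_PCIDE));
        return 0;
    }
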
463 struct kvm_vcpu *vcpu = ctxt->vcpu; in rsm_load_state_32() local
473 if (kvm_set_dr(vcpu, 6, smstate->dr6)) in rsm_load_state_32()
475 if (kvm_set_dr(vcpu, 7, smstate->dr7)) in rsm_load_state_32()
478 rsm_load_seg_32(vcpu, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR); in rsm_load_state_32()
479 rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR); in rsm_load_state_32()
483 kvm_x86_call(set_gdt)(vcpu, &dt); in rsm_load_state_32()
487 kvm_x86_call(set_idt)(vcpu, &dt); in rsm_load_state_32()
489 rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES); in rsm_load_state_32()
490 rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS); in rsm_load_state_32()
491 rsm_load_seg_32(vcpu, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS); in rsm_load_state_32()
493 rsm_load_seg_32(vcpu, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS); in rsm_load_state_32()
494 rsm_load_seg_32(vcpu, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS); in rsm_load_state_32()
495 rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS); in rsm_load_state_32()
497 vcpu->arch.smbase = smstate->smbase; in rsm_load_state_32()
499 r = rsm_enter_protected_mode(vcpu, smstate->cr0, in rsm_load_state_32()
505 kvm_x86_call(set_interrupt_shadow)(vcpu, 0); in rsm_load_state_32()
515 struct kvm_vcpu *vcpu = ctxt->vcpu; in rsm_load_state_64() local
525 if (kvm_set_dr(vcpu, 6, smstate->dr6)) in rsm_load_state_64()
527 if (kvm_set_dr(vcpu, 7, smstate->dr7)) in rsm_load_state_64()
530 vcpu->arch.smbase = smstate->smbase; in rsm_load_state_64()
532 if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA)) in rsm_load_state_64()
535 rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR); in rsm_load_state_64()
539 kvm_x86_call(set_idt)(vcpu, &dt); in rsm_load_state_64()
541 rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR); in rsm_load_state_64()
545 kvm_x86_call(set_gdt)(vcpu, &dt); in rsm_load_state_64()
547 r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4); in rsm_load_state_64()
551 rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES); in rsm_load_state_64()
552 rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS); in rsm_load_state_64()
553 rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS); in rsm_load_state_64()
554 rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS); in rsm_load_state_64()
555 rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS); in rsm_load_state_64()
556 rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS); in rsm_load_state_64()
558 kvm_x86_call(set_interrupt_shadow)(vcpu, 0); in rsm_load_state_64()
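
Note that rsm_load_state_64() writes EFER with LMA masked off (smstate->efer & ~EFER_LMA): long-mode-active is derived state, and it becomes true again once LME, CR4.PAE and CR0.PG are all back in place via rsm_enter_protected_mode(). A tiny sketch of that masking and re-derivation (illustrative constants, not kernel definitions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LME (1ull << 8)     /* long mode enable           */
    #define EFER_LMA (1ull << 10)    /* long mode active (derived) */
    #define CR0_PG   (1ull << 31)
    #define CR4_PAE  (1ull << 5)

    /* LMA is effectively LME plus paging-on with PAE; it is not state the
     * SMM image needs to restore by hand. */
    static bool long_mode_active(uint64_t efer, uint64_t cr0, uint64_t cr4)
    {
        return (efer & EFER_LME) && (cr0 & CR0_PG) && (cr4 & CR4_PAE);
    }

    int main(void)
    {
        uint64_t saved_efer = EFER_LME | EFER_LMA;   /* as found in SMRAM */

        /* Mirrors rsm_load_state_64(): write EFER without LMA ... */
        uint64_t efer = saved_efer & ~EFER_LMA;

        /* ... then, after CR0/CR4 are restored, LMA is implied again. */
        uint64_t cr0 = CR0_PG, cr4 = CR4_PAE;
        if (long_mode_active(efer, cr0, cr4))
            efer |= EFER_LMA;

        printf("EFER.LMA restored: %d\n", !!(efer & EFER_LMA));
        return 0;
    }
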
567 struct kvm_vcpu *vcpu = ctxt->vcpu; in emulator_leave_smm() local
573 smbase = vcpu->arch.smbase; in emulator_leave_smm()
575 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram)); in emulator_leave_smm()
579 if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0) in emulator_leave_smm()
580 kvm_x86_call(set_nmi_mask)(vcpu, false); in emulator_leave_smm()
582 kvm_smm_changed(vcpu, false); in emulator_leave_smm()
586 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU in emulator_leave_smm()
590 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) { in emulator_leave_smm()
595 cr4 = kvm_read_cr4(vcpu); in emulator_leave_smm()
597 kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE); in emulator_leave_smm()
603 kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS); in emulator_leave_smm()
608 cr0 = kvm_read_cr0(vcpu); in emulator_leave_smm()
610 kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); in emulator_leave_smm()
613 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) { in emulator_leave_smm()
617 cr4 = kvm_read_cr4(vcpu); in emulator_leave_smm()
619 kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE); in emulator_leave_smm()
623 kvm_set_msr(vcpu, MSR_EFER, efer); in emulator_leave_smm()
634 if (kvm_x86_call(leave_smm)(vcpu, &smram)) in emulator_leave_smm()
638 if (guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) in emulator_leave_smm()
647 * flawed handling of RSM to L2 (see above), the vCPU may already be in emulator_leave_smm()
648 * in_guest_mode(). Force the vCPU out of guest mode before delivering in emulator_leave_smm()
652 if (ret != X86EMUL_CONTINUE && is_guest_mode(vcpu)) in emulator_leave_smm()
653 kvm_leave_nested(vcpu); in emulator_leave_smm()
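
Before emulator_leave_smm() can load the saved image, it has to walk a long-mode vCPU back down in an order the architecture permits: clear CR4.PCIDE while paging is still enabled, reload CS, clear CR0.PG and PE (which also drops EFER.LMA), then clear CR4.PAE and finally zero EFER. A compact sketch of that teardown order as plain bit manipulation (constants illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CR0_PE    (1ull << 0)
    #define CR0_PG    (1ull << 31)
    #define CR4_PAE   (1ull << 5)
    #define CR4_PCIDE (1ull << 17)
    #define EFER_LME  (1ull << 8)
    #define EFER_LMA  (1ull << 10)

    int main(void)
    {
        /* State of a vCPU that took the SMI while running in 64-bit mode. */
        uint64_t cr0  = CR0_PE | CR0_PG;
        uint64_t cr4  = CR4_PAE | CR4_PCIDE;
        uint64_t efer = EFER_LME | EFER_LMA;

        /* 1. CR4.PCIDE must be cleared before CR0.PG can be cleared. */
        cr4 &= ~CR4_PCIDE;

        /* 2. (CS is reloaded at this point in the real code, see the
         *    kvm_set_segment() call in the listing.) */

        /* 3. Clearing PG/PE leaves paged protected mode; for the 64-bit
         *    case this is also what clears EFER.LMA. */
        cr0 &= ~(CR0_PG | CR0_PE);
        efer &= ~EFER_LMA;

        /* 4. With long mode inactive, PAE can be cleared and EFER zeroed. */
        cr4 &= ~CR4_PAE;
        efer = 0;

        printf("CR0=%#llx CR4=%#llx EFER=%#llx\n",
               (unsigned long long)cr0, (unsigned long long)cr4,
               (unsigned long long)efer);
        return 0;
    }

The last lines of the listing cover the nested-virtualization corner case: if RSM emulation fails while the vCPU still appears to be in guest mode, kvm_leave_nested() forces it out of guest mode before the failure is delivered.
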