/*
 * x86 SVM helpers (system only)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         const SegmentCache *sc)
{
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                      sc->selector, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                      sc->base, mmu_idx, 0);
    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                      sc->limit, mmu_idx, 0);
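    /*
     * SegmentCache.flags keeps the descriptor attribute bits in their
     * native descriptor positions (bits 8..15 and 20..23); the VMCB
     * attrib field wants them packed into a 12-bit value, so fold
     * flags bits 8..15 into attrib[7:0] and bits 20..23 into attrib[11:8].
     */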
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                      ((sc->flags >> 8) & 0xff)
                      | ((sc->flags >> 12) & 0x0f00),
                      mmu_idx, 0);
}

/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
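    /*
     * Shift the base left so that the top implemented virtual-address
     * bit lands in bit 63, then arithmetic-shift it back: this replicates
     * that bit through the upper bits, which is exactly the canonical
     * sign extension.
     */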
    *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
}

static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         SegmentCache *sc)
{
    unsigned int flags;

    sc->selector =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                           mmu_idx, 0);
    sc->base =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                          mmu_idx, 0);
    sc->limit =
        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                          mmu_idx, 0);
    flags =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                           mmu_idx, 0);
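    /*
     * Undo the packing done by svm_save_seg(): attrib[7:0] goes back to
     * flags bits 8..15 and attrib[11:8] to flags bits 20..23.
     */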
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

    svm_canonicalization(env, &sc->base);
}

static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
                               hwaddr addr, int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, mmu_idx, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
                           sc.base, sc.limit, sc.flags);
}

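/*
 * VMRUN performs a set of consistency checks on the guest state taken
 * from the VMCB; an illegal combination makes the VMRUN fail with an
 * SVM_EXIT_ERR exit.  This helper covers the EFER-related subset of
 * those checks (see the canonicalization and consistency checks listed
 * in the AMD APM for VMRUN).
 */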
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
        !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && (env->cr[4] & CR4_PAE_MASK)
        && (env->segs[R_CS].flags & DESC_L_MASK)
        && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

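/*
 * With the VGIF extension the guest's global interrupt flag is tracked
 * in the V_GIF bit of int_ctl instead of the real GIF, so STGI/CLGI
 * executed by the guest only toggle that virtual bit.  This is honoured
 * only while running nested and only if the hypervisor that issued
 * VMRUN enabled V_GIF in the VMCB.
 */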
static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
                && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

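/*
 * Virtual VMLOAD/VMSAVE: while running nested with nested paging and
 * long mode active, a guest VMLOAD/VMSAVE that the hypervisor enabled
 * via lbr_ctl accesses its VMCB through the nested page tables
 * (MMU_NESTED_IDX) instead of host physical memory; without nested
 * paging or long mode the instruction simply exits to the host with
 * the given exit code.
 */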
static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
                                                  control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);

    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

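/*
 * VMRUN: save the host state into the hsave area, load the guest state
 * and intercept bitmaps from the VMCB pointed to by rAX, run the
 * consistency checks required by the architecture, and finally inject
 * any event pending in control.event_inj.
 */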
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;
    uint64_t new_dr6;
    uint64_t new_dr7;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    if (x86_ldl_phys(cs, env->vm_vmcb +
                     offsetof(struct vmcb, control.int_state)) &
        SVM_INTERRUPT_SHADOW_MASK) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    }

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                    control.asid));

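    /*
     * The MSR and I/O permission maps must fit entirely below the end
     * of the guest-physical address space; a map that would run past
     * 1 << phys_bits is an invalid VMCB and fails the VMRUN.
     */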
    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;

        tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
        (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
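    /*
     * With V_INTR_MASKING the guest EFLAGS.IF only controls virtual
     * interrupts, while physical interrupts remain gated by the host's
     * IF, which is still in env->eflags at this point and is remembered
     * in HF2_HIF.
     */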
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));

    new_dr7 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    new_dr6 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (new_dr7 & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (new_dr6 & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    cpu_x86_update_dr7(env, new_dr7);
    env->dr[6] = new_dr6;

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

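/*
 * VMLOAD copies the portion of guest state that VMRUN does not load
 * (FS, GS, TR, LDTR plus the SYSCALL/SYSENTER MSRs and KernelGSBase)
 * from the VMCB addressed by rAX into the processor.
 */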
void helper_vmload(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase =
        cpu_ldq_mmuidx_ra(env,
                          addr + offsetof(struct vmcb, save.kernel_gs_base),
                          mmu_idx, 0);
    env->lstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                          mmu_idx, 0);
    env->cstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                          mmu_idx, 0);
    env->fmask =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                          mmu_idx, 0);
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                          mmu_idx, 0);
    env->sysenter_cs =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                          mmu_idx, 0);
    env->sysenter_esp =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                          mmu_idx, 0);
    env->sysenter_eip =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                          mmu_idx, 0);
}

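/* VMSAVE is the converse of VMLOAD: it stores the same state back into the VMCB. */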
void helper_vmsave(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
                      env->kernelgsbase, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                      env->lstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                      env->cstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                      env->fmask, mmu_idx, 0);
#endif
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                      env->star, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                      env->sysenter_cs, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                      env->sysenter_esp, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                      env->sysenter_eip, mmu_idx, 0);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
        uint32_t t0, t1;

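        /*
         * The MSR permission map holds two bits per MSR (read then
         * write), split into 2K-byte regions for the 0x0000xxxx,
         * 0xC000xxxx and 0xC001xxxx MSR ranges.  t1 is the byte offset
         * into the map and t0 the bit offset within that byte; param
         * selects the read (0) or write (1) bit.
         */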
        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
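        /*
         * The I/O permission map has one intercept bit per port.  The
         * access size in bytes sits in bits 6:4 of param, so build a
         * mask of that many consecutive bits and test them with a
         * 16-bit load, which also covers accesses that straddle a
         * byte boundary.
         */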
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

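/*
 * Complete a #VMEXIT that cpu_vmexit() has queued: store the guest state
 * and exit interrupt information into the VMCB, then reload the host
 * state from the hsave area.
 */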
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;
    tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;

    /* Clears the TSC_OFFSET inside the processor. */
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));

    /*
     * Resets the current ASID register to zero (host ASID; TLB flush).
     *
     * If the host is in PAE mode, the processor reloads the host's PDPEs
     * from the page table indicated by the host's CR3.  FIXME: If the PDPEs
     * contain illegal state, the processor causes a shutdown (QEMU does
     * not implement PDPTRs).
     */
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));

    /* Completion of the VMRUN instruction clears the host EFLAGS.RF bit. */
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      RF_MASK | VM_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));

    /* Disables all breakpoints in the host DR7 register. */
    cpu_x86_update_dr7(env,
                       x86_ldq_phys(cs,
                                    env->vm_hsave + offsetof(struct vmcb, save.dr7)) & ~0xff);

    /* other setups */
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;

    /* FIXME: Checks the reloaded host state for consistency. */

    /*
     * EFLAGS.TF causes a #DB trap after the VMRUN completes on the host
     * side (i.e., after the #VMEXIT from the guest). Since we're running
     * in the main loop, call do_interrupt_all directly.
     */
    if ((env->eflags & TF_MASK) != 0) {
        env->dr[6] |= DR6_BS;
        do_interrupt_all(X86_CPU(cs), EXCP01_DB, 0, 0, env->eip, 0);
    }
}