1eaa728eeSbellard /* 210774999SBlue Swirl * x86 segmentation related helpers: 310774999SBlue Swirl * TSS, interrupts, system calls, jumps and call/task gates, descriptors 4eaa728eeSbellard * 5eaa728eeSbellard * Copyright (c) 2003 Fabrice Bellard 6eaa728eeSbellard * 7eaa728eeSbellard * This library is free software; you can redistribute it and/or 8eaa728eeSbellard * modify it under the terms of the GNU Lesser General Public 9eaa728eeSbellard * License as published by the Free Software Foundation; either 10d9ff33adSChetan Pant * version 2.1 of the License, or (at your option) any later version. 11eaa728eeSbellard * 12eaa728eeSbellard * This library is distributed in the hope that it will be useful, 13eaa728eeSbellard * but WITHOUT ANY WARRANTY; without even the implied warranty of 14eaa728eeSbellard * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15eaa728eeSbellard * Lesser General Public License for more details. 16eaa728eeSbellard * 17eaa728eeSbellard * You should have received a copy of the GNU Lesser General Public 188167ee88SBlue Swirl * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19eaa728eeSbellard */ 2083dae095SPaolo Bonzini 21b6a0aa05SPeter Maydell #include "qemu/osdep.h" 223e457172SBlue Swirl #include "cpu.h" 231de7afc9SPaolo Bonzini #include "qemu/log.h" 242ef6175aSRichard Henderson #include "exec/helper-proto.h" 2563c91552SPaolo Bonzini #include "exec/exec-all.h" 26f08b6170SPaolo Bonzini #include "exec/cpu_ldst.h" 27508127e2SPaolo Bonzini #include "exec/log.h" 28ed69e831SClaudio Fontana #include "helper-tcg.h" 293e457172SBlue Swirl 30eaa728eeSbellard //#define DEBUG_PCALL 31eaa728eeSbellard 32d12d51d5Saliguori #ifdef DEBUG_PCALL 3393fcfe39Saliguori # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) 348995b7a0SAndreas Färber # define LOG_PCALL_STATE(cpu) \ 358995b7a0SAndreas Färber log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP) 36d12d51d5Saliguori #else 37d12d51d5Saliguori # define LOG_PCALL(...) do { } while (0) 388995b7a0SAndreas Färber # define LOG_PCALL_STATE(cpu) do { } while (0) 39d12d51d5Saliguori #endif 40d12d51d5Saliguori 4121ffbdc9SRichard Henderson /* 4221ffbdc9SRichard Henderson * TODO: Convert callers to compute cpu_mmu_index_kernel once 4321ffbdc9SRichard Henderson * and use *_mmuidx_ra directly. 
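 * The cpu_ld*_kernel{,_ra} and cpu_st*_kernel{,_ra} wrappers below are thin
 * macros over the generic *_mmuidx_ra accessors, passing
 * cpu_mmu_index_kernel(env) so that descriptor-table and TSS accesses are
 * performed with the kernel MMU index rather than the one implied by the
 * current CPL.  The _ra variants carry the host return address used to
 * unwind to the faulting guest instruction; the plain variants pass 0.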
4421ffbdc9SRichard Henderson */ 4521ffbdc9SRichard Henderson #define cpu_ldub_kernel_ra(e, p, r) \ 4621ffbdc9SRichard Henderson cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) 4721ffbdc9SRichard Henderson #define cpu_lduw_kernel_ra(e, p, r) \ 4821ffbdc9SRichard Henderson cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) 4921ffbdc9SRichard Henderson #define cpu_ldl_kernel_ra(e, p, r) \ 5021ffbdc9SRichard Henderson cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) 5121ffbdc9SRichard Henderson #define cpu_ldq_kernel_ra(e, p, r) \ 5221ffbdc9SRichard Henderson cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) 539220fe54SPeter Maydell 5421ffbdc9SRichard Henderson #define cpu_stb_kernel_ra(e, p, v, r) \ 5521ffbdc9SRichard Henderson cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) 5621ffbdc9SRichard Henderson #define cpu_stw_kernel_ra(e, p, v, r) \ 5721ffbdc9SRichard Henderson cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) 5821ffbdc9SRichard Henderson #define cpu_stl_kernel_ra(e, p, v, r) \ 5921ffbdc9SRichard Henderson cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) 6021ffbdc9SRichard Henderson #define cpu_stq_kernel_ra(e, p, v, r) \ 6121ffbdc9SRichard Henderson cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) 629220fe54SPeter Maydell 6321ffbdc9SRichard Henderson #define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) 6421ffbdc9SRichard Henderson #define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) 6521ffbdc9SRichard Henderson #define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) 6621ffbdc9SRichard Henderson #define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) 679220fe54SPeter Maydell 6821ffbdc9SRichard Henderson #define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) 6921ffbdc9SRichard Henderson #define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) 7021ffbdc9SRichard Henderson #define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) 7121ffbdc9SRichard Henderson #define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) 728a201bd4SPaolo Bonzini 73eaa728eeSbellard /* return non zero if error */ 74100ec099SPavel Dovgalyuk static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, 75100ec099SPavel Dovgalyuk uint32_t *e2_ptr, int selector, 76100ec099SPavel Dovgalyuk uintptr_t retaddr) 77eaa728eeSbellard { 78eaa728eeSbellard SegmentCache *dt; 79eaa728eeSbellard int index; 80eaa728eeSbellard target_ulong ptr; 81eaa728eeSbellard 8220054ef0SBlue Swirl if (selector & 0x4) { 83eaa728eeSbellard dt = &env->ldt; 8420054ef0SBlue Swirl } else { 85eaa728eeSbellard dt = &env->gdt; 8620054ef0SBlue Swirl } 87eaa728eeSbellard index = selector & ~7; 8820054ef0SBlue Swirl if ((index + 7) > dt->limit) { 89eaa728eeSbellard return -1; 9020054ef0SBlue Swirl } 91eaa728eeSbellard ptr = dt->base + index; 92100ec099SPavel Dovgalyuk *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr); 93100ec099SPavel Dovgalyuk *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 94eaa728eeSbellard return 0; 95eaa728eeSbellard } 96eaa728eeSbellard 97100ec099SPavel Dovgalyuk static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, 98100ec099SPavel Dovgalyuk uint32_t *e2_ptr, int selector) 99100ec099SPavel Dovgalyuk { 100100ec099SPavel Dovgalyuk return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0); 101100ec099SPavel Dovgalyuk } 102100ec099SPavel Dovgalyuk 103eaa728eeSbellard static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) 104eaa728eeSbellard { 105eaa728eeSbellard unsigned int limit; 10620054ef0SBlue Swirl 107eaa728eeSbellard limit = (e1 
& 0xffff) | (e2 & 0x000f0000); 10820054ef0SBlue Swirl if (e2 & DESC_G_MASK) { 109eaa728eeSbellard limit = (limit << 12) | 0xfff; 11020054ef0SBlue Swirl } 111eaa728eeSbellard return limit; 112eaa728eeSbellard } 113eaa728eeSbellard 114eaa728eeSbellard static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) 115eaa728eeSbellard { 11620054ef0SBlue Swirl return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000); 117eaa728eeSbellard } 118eaa728eeSbellard 11920054ef0SBlue Swirl static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, 12020054ef0SBlue Swirl uint32_t e2) 121eaa728eeSbellard { 122eaa728eeSbellard sc->base = get_seg_base(e1, e2); 123eaa728eeSbellard sc->limit = get_seg_limit(e1, e2); 124eaa728eeSbellard sc->flags = e2; 125eaa728eeSbellard } 126eaa728eeSbellard 127eaa728eeSbellard /* init the segment cache in vm86 mode. */ 1282999a0b2SBlue Swirl static inline void load_seg_vm(CPUX86State *env, int seg, int selector) 129eaa728eeSbellard { 130eaa728eeSbellard selector &= 0xffff; 131b98dbc90SPaolo Bonzini 132b98dbc90SPaolo Bonzini cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, 133b98dbc90SPaolo Bonzini DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 134b98dbc90SPaolo Bonzini DESC_A_MASK | (3 << DESC_DPL_SHIFT)); 135eaa728eeSbellard } 136eaa728eeSbellard 1372999a0b2SBlue Swirl static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, 138100ec099SPavel Dovgalyuk uint32_t *esp_ptr, int dpl, 139100ec099SPavel Dovgalyuk uintptr_t retaddr) 140eaa728eeSbellard { 1416aa9e42fSRichard Henderson X86CPU *cpu = env_archcpu(env); 142eaa728eeSbellard int type, index, shift; 143eaa728eeSbellard 144eaa728eeSbellard #if 0 145eaa728eeSbellard { 146eaa728eeSbellard int i; 147eaa728eeSbellard printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); 148eaa728eeSbellard for (i = 0; i < env->tr.limit; i++) { 149eaa728eeSbellard printf("%02x ", env->tr.base[i]); 15020054ef0SBlue Swirl if ((i & 7) == 7) { 15120054ef0SBlue Swirl printf("\n"); 15220054ef0SBlue Swirl } 153eaa728eeSbellard } 154eaa728eeSbellard printf("\n"); 155eaa728eeSbellard } 156eaa728eeSbellard #endif 157eaa728eeSbellard 15820054ef0SBlue Swirl if (!(env->tr.flags & DESC_P_MASK)) { 159a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss"); 16020054ef0SBlue Swirl } 161eaa728eeSbellard type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 16220054ef0SBlue Swirl if ((type & 7) != 1) { 163a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss type"); 16420054ef0SBlue Swirl } 165eaa728eeSbellard shift = type >> 3; 166eaa728eeSbellard index = (dpl * 4 + 2) << shift; 16720054ef0SBlue Swirl if (index + (4 << shift) - 1 > env->tr.limit) { 168100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr); 16920054ef0SBlue Swirl } 170eaa728eeSbellard if (shift == 0) { 171100ec099SPavel Dovgalyuk *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr); 172100ec099SPavel Dovgalyuk *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr); 173eaa728eeSbellard } else { 174100ec099SPavel Dovgalyuk *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr); 175100ec099SPavel Dovgalyuk *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr); 176eaa728eeSbellard } 177eaa728eeSbellard } 178eaa728eeSbellard 179*c117e5b1SPhilippe Mathieu-Daudé static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector, 180*c117e5b1SPhilippe Mathieu-Daudé int cpl, uintptr_t retaddr) 181eaa728eeSbellard { 182eaa728eeSbellard uint32_t e1, 
e2; 183d3b54918SPaolo Bonzini int rpl, dpl; 184eaa728eeSbellard 185eaa728eeSbellard if ((selector & 0xfffc) != 0) { 186100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) { 187100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 18820054ef0SBlue Swirl } 18920054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 190100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 19120054ef0SBlue Swirl } 192eaa728eeSbellard rpl = selector & 3; 193eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 194eaa728eeSbellard if (seg_reg == R_CS) { 19520054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 196100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 19720054ef0SBlue Swirl } 19820054ef0SBlue Swirl if (dpl != rpl) { 199100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 20020054ef0SBlue Swirl } 201eaa728eeSbellard } else if (seg_reg == R_SS) { 202eaa728eeSbellard /* SS must be writable data */ 20320054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 204100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 20520054ef0SBlue Swirl } 20620054ef0SBlue Swirl if (dpl != cpl || dpl != rpl) { 207100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 20820054ef0SBlue Swirl } 209eaa728eeSbellard } else { 210eaa728eeSbellard /* not readable code */ 21120054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { 212100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 21320054ef0SBlue Swirl } 214eaa728eeSbellard /* if data or non conforming code, checks the rights */ 215eaa728eeSbellard if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { 21620054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 217100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 218eaa728eeSbellard } 219eaa728eeSbellard } 22020054ef0SBlue Swirl } 22120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 222100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr); 22320054ef0SBlue Swirl } 224eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 225eaa728eeSbellard get_seg_base(e1, e2), 226eaa728eeSbellard get_seg_limit(e1, e2), 227eaa728eeSbellard e2); 228eaa728eeSbellard } else { 22920054ef0SBlue Swirl if (seg_reg == R_SS || seg_reg == R_CS) { 230100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 231eaa728eeSbellard } 232eaa728eeSbellard } 23320054ef0SBlue Swirl } 234eaa728eeSbellard 235eaa728eeSbellard #define SWITCH_TSS_JMP 0 236eaa728eeSbellard #define SWITCH_TSS_IRET 1 237eaa728eeSbellard #define SWITCH_TSS_CALL 2 238eaa728eeSbellard 239eaa728eeSbellard /* XXX: restore CPU state in registers (PowerPC case) */ 240100ec099SPavel Dovgalyuk static void switch_tss_ra(CPUX86State *env, int tss_selector, 241eaa728eeSbellard uint32_t e1, uint32_t e2, int source, 242100ec099SPavel Dovgalyuk uint32_t next_eip, uintptr_t retaddr) 243eaa728eeSbellard { 244eaa728eeSbellard int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; 245eaa728eeSbellard target_ulong tss_base; 246eaa728eeSbellard uint32_t new_regs[8], new_segs[6]; 247eaa728eeSbellard uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; 248eaa728eeSbellard uint32_t old_eflags, eflags_mask; 249eaa728eeSbellard SegmentCache *dt; 
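    /*
     * Summary of the sequence implemented below: the new TSS descriptor is
     * validated (dereferencing a task gate first if needed), the complete
     * new context is read out of the new TSS, the outgoing register state
     * is written back into the current TSS, the busy bits of the old and
     * new TSS descriptors are adjusted according to 'source' (JMP, CALL or
     * IRET), and only then is the new context loaded: TR, CR3 (for a
     * 32-bit TSS with paging enabled), EFLAGS, the general registers, the
     * LDT and the segment registers.  'retaddr' is threaded through every
     * memory access so that faults unwind to the guest instruction that
     * triggered the task switch.
     */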
250eaa728eeSbellard int index; 251eaa728eeSbellard target_ulong ptr; 252eaa728eeSbellard 253eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 25420054ef0SBlue Swirl LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, 25520054ef0SBlue Swirl source); 256eaa728eeSbellard 257eaa728eeSbellard /* if task gate, we read the TSS segment and we load it */ 258eaa728eeSbellard if (type == 5) { 25920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 260100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 26120054ef0SBlue Swirl } 262eaa728eeSbellard tss_selector = e1 >> 16; 26320054ef0SBlue Swirl if (tss_selector & 4) { 264100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 26520054ef0SBlue Swirl } 266100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) { 267100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 268eaa728eeSbellard } 26920054ef0SBlue Swirl if (e2 & DESC_S_MASK) { 270100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 27120054ef0SBlue Swirl } 27220054ef0SBlue Swirl type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 27320054ef0SBlue Swirl if ((type & 7) != 1) { 274100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 27520054ef0SBlue Swirl } 27620054ef0SBlue Swirl } 277eaa728eeSbellard 27820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 279100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 28020054ef0SBlue Swirl } 281eaa728eeSbellard 28220054ef0SBlue Swirl if (type & 8) { 283eaa728eeSbellard tss_limit_max = 103; 28420054ef0SBlue Swirl } else { 285eaa728eeSbellard tss_limit_max = 43; 28620054ef0SBlue Swirl } 287eaa728eeSbellard tss_limit = get_seg_limit(e1, e2); 288eaa728eeSbellard tss_base = get_seg_base(e1, e2); 289eaa728eeSbellard if ((tss_selector & 4) != 0 || 29020054ef0SBlue Swirl tss_limit < tss_limit_max) { 291100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 29220054ef0SBlue Swirl } 293eaa728eeSbellard old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 29420054ef0SBlue Swirl if (old_type & 8) { 295eaa728eeSbellard old_tss_limit_max = 103; 29620054ef0SBlue Swirl } else { 297eaa728eeSbellard old_tss_limit_max = 43; 29820054ef0SBlue Swirl } 299eaa728eeSbellard 300eaa728eeSbellard /* read all the registers from the new TSS */ 301eaa728eeSbellard if (type & 8) { 302eaa728eeSbellard /* 32 bit */ 303100ec099SPavel Dovgalyuk new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr); 304100ec099SPavel Dovgalyuk new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr); 305100ec099SPavel Dovgalyuk new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr); 30620054ef0SBlue Swirl for (i = 0; i < 8; i++) { 307100ec099SPavel Dovgalyuk new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4), 308100ec099SPavel Dovgalyuk retaddr); 30920054ef0SBlue Swirl } 31020054ef0SBlue Swirl for (i = 0; i < 6; i++) { 311100ec099SPavel Dovgalyuk new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4), 312100ec099SPavel Dovgalyuk retaddr); 31320054ef0SBlue Swirl } 314100ec099SPavel Dovgalyuk new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr); 315100ec099SPavel Dovgalyuk new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr); 316eaa728eeSbellard } else { 317eaa728eeSbellard /* 16 bit */ 318eaa728eeSbellard new_cr3 = 0; 
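        /*
         * 16-bit (286-style) TSS: there is no CR3 field; IP is at offset
         * 0x0e, FLAGS at 0x10, the eight 16-bit general registers start at
         * 0x12 and the LDT selector sits at 0x2a.  Only the low words are
         * defined, so the register images are OR'ed with 0xffff0000 below,
         * and FS/GS, which do not exist in this format, are set to null
         * selectors.
         */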
319100ec099SPavel Dovgalyuk new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr); 320100ec099SPavel Dovgalyuk new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr); 32120054ef0SBlue Swirl for (i = 0; i < 8; i++) { 322100ec099SPavel Dovgalyuk new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), 323100ec099SPavel Dovgalyuk retaddr) | 0xffff0000; 32420054ef0SBlue Swirl } 32520054ef0SBlue Swirl for (i = 0; i < 4; i++) { 326100ec099SPavel Dovgalyuk new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4), 327100ec099SPavel Dovgalyuk retaddr); 32820054ef0SBlue Swirl } 329100ec099SPavel Dovgalyuk new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr); 330eaa728eeSbellard new_segs[R_FS] = 0; 331eaa728eeSbellard new_segs[R_GS] = 0; 332eaa728eeSbellard new_trap = 0; 333eaa728eeSbellard } 3344581cbcdSBlue Swirl /* XXX: avoid a compiler warning, see 3354581cbcdSBlue Swirl http://support.amd.com/us/Processor_TechDocs/24593.pdf 3364581cbcdSBlue Swirl chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ 3374581cbcdSBlue Swirl (void)new_trap; 338eaa728eeSbellard 339eaa728eeSbellard /* NOTE: we must avoid memory exceptions during the task switch, 340eaa728eeSbellard so we make dummy accesses before */ 341eaa728eeSbellard /* XXX: it can still fail in some cases, so a bigger hack is 342eaa728eeSbellard necessary to valid the TLB after having done the accesses */ 343eaa728eeSbellard 344100ec099SPavel Dovgalyuk v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr); 345100ec099SPavel Dovgalyuk v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr); 346100ec099SPavel Dovgalyuk cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr); 347100ec099SPavel Dovgalyuk cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr); 348eaa728eeSbellard 349eaa728eeSbellard /* clear busy bit (it is restartable) */ 350eaa728eeSbellard if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { 351eaa728eeSbellard target_ulong ptr; 352eaa728eeSbellard uint32_t e2; 35320054ef0SBlue Swirl 354eaa728eeSbellard ptr = env->gdt.base + (env->tr.selector & ~7); 355100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 356eaa728eeSbellard e2 &= ~DESC_TSS_BUSY_MASK; 357100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); 358eaa728eeSbellard } 359997ff0d9SBlue Swirl old_eflags = cpu_compute_eflags(env); 36020054ef0SBlue Swirl if (source == SWITCH_TSS_IRET) { 361eaa728eeSbellard old_eflags &= ~NT_MASK; 36220054ef0SBlue Swirl } 363eaa728eeSbellard 364eaa728eeSbellard /* save the current state in the old TSS */ 365eaa728eeSbellard if (type & 8) { 366eaa728eeSbellard /* 32 bit */ 367100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr); 368100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr); 369100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr); 370100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr); 371100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr); 372100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr); 373100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr); 374100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr); 375100ec099SPavel Dovgalyuk 
cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr); 376100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr); 37720054ef0SBlue Swirl for (i = 0; i < 6; i++) { 378100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4), 379100ec099SPavel Dovgalyuk env->segs[i].selector, retaddr); 38020054ef0SBlue Swirl } 381eaa728eeSbellard } else { 382eaa728eeSbellard /* 16 bit */ 383100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr); 384100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr); 385100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr); 386100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr); 387100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr); 388100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr); 389100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr); 390100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr); 391100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr); 392100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr); 39320054ef0SBlue Swirl for (i = 0; i < 4; i++) { 394100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4), 395100ec099SPavel Dovgalyuk env->segs[i].selector, retaddr); 396eaa728eeSbellard } 39720054ef0SBlue Swirl } 398eaa728eeSbellard 399eaa728eeSbellard /* now if an exception occurs, it will occurs in the next task 400eaa728eeSbellard context */ 401eaa728eeSbellard 402eaa728eeSbellard if (source == SWITCH_TSS_CALL) { 403100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr); 404eaa728eeSbellard new_eflags |= NT_MASK; 405eaa728eeSbellard } 406eaa728eeSbellard 407eaa728eeSbellard /* set busy bit */ 408eaa728eeSbellard if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { 409eaa728eeSbellard target_ulong ptr; 410eaa728eeSbellard uint32_t e2; 41120054ef0SBlue Swirl 412eaa728eeSbellard ptr = env->gdt.base + (tss_selector & ~7); 413100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 414eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 415100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); 416eaa728eeSbellard } 417eaa728eeSbellard 418eaa728eeSbellard /* set the new CPU state */ 419eaa728eeSbellard /* from this point, any exception which occurs can give problems */ 420eaa728eeSbellard env->cr[0] |= CR0_TS_MASK; 421eaa728eeSbellard env->hflags |= HF_TS_MASK; 422eaa728eeSbellard env->tr.selector = tss_selector; 423eaa728eeSbellard env->tr.base = tss_base; 424eaa728eeSbellard env->tr.limit = tss_limit; 425eaa728eeSbellard env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; 426eaa728eeSbellard 427eaa728eeSbellard if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { 428eaa728eeSbellard cpu_x86_update_cr3(env, new_cr3); 429eaa728eeSbellard } 430eaa728eeSbellard 431eaa728eeSbellard /* load all registers without an exception, then reload them with 432eaa728eeSbellard possible exception */ 433eaa728eeSbellard env->eip = new_eip; 434eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | 435eaa728eeSbellard IF_MASK | 
IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; 43620054ef0SBlue Swirl if (!(type & 8)) { 437eaa728eeSbellard eflags_mask &= 0xffff; 43820054ef0SBlue Swirl } 439997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 440eaa728eeSbellard /* XXX: what to do in 16 bit case? */ 4414b34e3adSliguang env->regs[R_EAX] = new_regs[0]; 442a4165610Sliguang env->regs[R_ECX] = new_regs[1]; 44300f5e6f2Sliguang env->regs[R_EDX] = new_regs[2]; 44470b51365Sliguang env->regs[R_EBX] = new_regs[3]; 44508b3ded6Sliguang env->regs[R_ESP] = new_regs[4]; 446c12dddd7Sliguang env->regs[R_EBP] = new_regs[5]; 44778c3c6d3Sliguang env->regs[R_ESI] = new_regs[6]; 448cf75c597Sliguang env->regs[R_EDI] = new_regs[7]; 449eaa728eeSbellard if (new_eflags & VM_MASK) { 45020054ef0SBlue Swirl for (i = 0; i < 6; i++) { 4512999a0b2SBlue Swirl load_seg_vm(env, i, new_segs[i]); 45220054ef0SBlue Swirl } 453eaa728eeSbellard } else { 454eaa728eeSbellard /* first just selectors as the rest may trigger exceptions */ 45520054ef0SBlue Swirl for (i = 0; i < 6; i++) { 456eaa728eeSbellard cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); 457eaa728eeSbellard } 45820054ef0SBlue Swirl } 459eaa728eeSbellard 460eaa728eeSbellard env->ldt.selector = new_ldt & ~4; 461eaa728eeSbellard env->ldt.base = 0; 462eaa728eeSbellard env->ldt.limit = 0; 463eaa728eeSbellard env->ldt.flags = 0; 464eaa728eeSbellard 465eaa728eeSbellard /* load the LDT */ 46620054ef0SBlue Swirl if (new_ldt & 4) { 467100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 46820054ef0SBlue Swirl } 469eaa728eeSbellard 470eaa728eeSbellard if ((new_ldt & 0xfffc) != 0) { 471eaa728eeSbellard dt = &env->gdt; 472eaa728eeSbellard index = new_ldt & ~7; 47320054ef0SBlue Swirl if ((index + 7) > dt->limit) { 474100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 47520054ef0SBlue Swirl } 476eaa728eeSbellard ptr = dt->base + index; 477100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, retaddr); 478100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 47920054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 480100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 48120054ef0SBlue Swirl } 48220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 483100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 48420054ef0SBlue Swirl } 485eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 486eaa728eeSbellard } 487eaa728eeSbellard 488eaa728eeSbellard /* load the segments */ 489eaa728eeSbellard if (!(new_eflags & VM_MASK)) { 490d3b54918SPaolo Bonzini int cpl = new_segs[R_CS] & 3; 491100ec099SPavel Dovgalyuk tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr); 492100ec099SPavel Dovgalyuk tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr); 493100ec099SPavel Dovgalyuk tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr); 494100ec099SPavel Dovgalyuk tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr); 495100ec099SPavel Dovgalyuk tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr); 496100ec099SPavel Dovgalyuk tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr); 497eaa728eeSbellard } 498eaa728eeSbellard 499a78d0eabSliguang /* check that env->eip is in the CS segment limits */ 500eaa728eeSbellard if (new_eip > env->segs[R_CS].limit) { 501eaa728eeSbellard /* XXX: different exception if CALL? 
*/ 502100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 503eaa728eeSbellard } 50401df040bSaliguori 50501df040bSaliguori #ifndef CONFIG_USER_ONLY 50601df040bSaliguori /* reset local breakpoints */ 507428065ceSliguang if (env->dr[7] & DR7_LOCAL_BP_MASK) { 50893d00d0fSRichard Henderson cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); 50901df040bSaliguori } 51001df040bSaliguori #endif 511eaa728eeSbellard } 512eaa728eeSbellard 513100ec099SPavel Dovgalyuk static void switch_tss(CPUX86State *env, int tss_selector, 514100ec099SPavel Dovgalyuk uint32_t e1, uint32_t e2, int source, 515100ec099SPavel Dovgalyuk uint32_t next_eip) 516100ec099SPavel Dovgalyuk { 517100ec099SPavel Dovgalyuk switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); 518100ec099SPavel Dovgalyuk } 519100ec099SPavel Dovgalyuk 520eaa728eeSbellard static inline unsigned int get_sp_mask(unsigned int e2) 521eaa728eeSbellard { 5220aca0605SAndrew Oates #ifdef TARGET_X86_64 5230aca0605SAndrew Oates if (e2 & DESC_L_MASK) { 5240aca0605SAndrew Oates return 0; 5250aca0605SAndrew Oates } else 5260aca0605SAndrew Oates #endif 52720054ef0SBlue Swirl if (e2 & DESC_B_MASK) { 528eaa728eeSbellard return 0xffffffff; 52920054ef0SBlue Swirl } else { 530eaa728eeSbellard return 0xffff; 531eaa728eeSbellard } 53220054ef0SBlue Swirl } 533eaa728eeSbellard 53420054ef0SBlue Swirl static int exception_has_error_code(int intno) 5352ed51f5bSaliguori { 5362ed51f5bSaliguori switch (intno) { 5372ed51f5bSaliguori case 8: 5382ed51f5bSaliguori case 10: 5392ed51f5bSaliguori case 11: 5402ed51f5bSaliguori case 12: 5412ed51f5bSaliguori case 13: 5422ed51f5bSaliguori case 14: 5432ed51f5bSaliguori case 17: 5442ed51f5bSaliguori return 1; 5452ed51f5bSaliguori } 5462ed51f5bSaliguori return 0; 5472ed51f5bSaliguori } 5482ed51f5bSaliguori 549eaa728eeSbellard #ifdef TARGET_X86_64 550eaa728eeSbellard #define SET_ESP(val, sp_mask) \ 551eaa728eeSbellard do { \ 55220054ef0SBlue Swirl if ((sp_mask) == 0xffff) { \ 55308b3ded6Sliguang env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ 55408b3ded6Sliguang ((val) & 0xffff); \ 55520054ef0SBlue Swirl } else if ((sp_mask) == 0xffffffffLL) { \ 55608b3ded6Sliguang env->regs[R_ESP] = (uint32_t)(val); \ 55720054ef0SBlue Swirl } else { \ 55808b3ded6Sliguang env->regs[R_ESP] = (val); \ 55920054ef0SBlue Swirl } \ 560eaa728eeSbellard } while (0) 561eaa728eeSbellard #else 56220054ef0SBlue Swirl #define SET_ESP(val, sp_mask) \ 56320054ef0SBlue Swirl do { \ 56408b3ded6Sliguang env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ 56508b3ded6Sliguang ((val) & (sp_mask)); \ 56620054ef0SBlue Swirl } while (0) 567eaa728eeSbellard #endif 568eaa728eeSbellard 569c0a04f0eSaliguori /* in 64-bit machines, this can overflow. 
So this segment addition macro 570c0a04f0eSaliguori * can be used to trim the value to 32-bit whenever needed */ 571c0a04f0eSaliguori #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) 572c0a04f0eSaliguori 573eaa728eeSbellard /* XXX: add a is_user flag to have proper security support */ 574100ec099SPavel Dovgalyuk #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \ 575eaa728eeSbellard { \ 576eaa728eeSbellard sp -= 2; \ 577100ec099SPavel Dovgalyuk cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \ 578eaa728eeSbellard } 579eaa728eeSbellard 580100ec099SPavel Dovgalyuk #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \ 581eaa728eeSbellard { \ 582eaa728eeSbellard sp -= 4; \ 583100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \ 584eaa728eeSbellard } 585eaa728eeSbellard 586100ec099SPavel Dovgalyuk #define POPW_RA(ssp, sp, sp_mask, val, ra) \ 587eaa728eeSbellard { \ 588100ec099SPavel Dovgalyuk val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \ 589eaa728eeSbellard sp += 2; \ 590eaa728eeSbellard } 591eaa728eeSbellard 592100ec099SPavel Dovgalyuk #define POPL_RA(ssp, sp, sp_mask, val, ra) \ 593eaa728eeSbellard { \ 594100ec099SPavel Dovgalyuk val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \ 595eaa728eeSbellard sp += 4; \ 596eaa728eeSbellard } 597eaa728eeSbellard 598100ec099SPavel Dovgalyuk #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0) 599100ec099SPavel Dovgalyuk #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0) 600100ec099SPavel Dovgalyuk #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0) 601100ec099SPavel Dovgalyuk #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0) 602100ec099SPavel Dovgalyuk 603eaa728eeSbellard /* protected mode interrupt */ 6042999a0b2SBlue Swirl static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, 6052999a0b2SBlue Swirl int error_code, unsigned int next_eip, 6062999a0b2SBlue Swirl int is_hw) 607eaa728eeSbellard { 608eaa728eeSbellard SegmentCache *dt; 609eaa728eeSbellard target_ulong ptr, ssp; 610eaa728eeSbellard int type, dpl, selector, ss_dpl, cpl; 611eaa728eeSbellard int has_error_code, new_stack, shift; 6121c918ebaSblueswir1 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; 613eaa728eeSbellard uint32_t old_eip, sp_mask; 61487446327SKevin O'Connor int vm86 = env->eflags & VM_MASK; 615eaa728eeSbellard 616eaa728eeSbellard has_error_code = 0; 61720054ef0SBlue Swirl if (!is_int && !is_hw) { 61820054ef0SBlue Swirl has_error_code = exception_has_error_code(intno); 61920054ef0SBlue Swirl } 62020054ef0SBlue Swirl if (is_int) { 621eaa728eeSbellard old_eip = next_eip; 62220054ef0SBlue Swirl } else { 623eaa728eeSbellard old_eip = env->eip; 62420054ef0SBlue Swirl } 625eaa728eeSbellard 626eaa728eeSbellard dt = &env->idt; 62720054ef0SBlue Swirl if (intno * 8 + 7 > dt->limit) { 62877b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 62920054ef0SBlue Swirl } 630eaa728eeSbellard ptr = dt->base + intno * 8; 631329e607dSBlue Swirl e1 = cpu_ldl_kernel(env, ptr); 632329e607dSBlue Swirl e2 = cpu_ldl_kernel(env, ptr + 4); 633eaa728eeSbellard /* check gate type */ 634eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 635eaa728eeSbellard switch (type) { 636eaa728eeSbellard case 5: /* task gate */ 6373df1a3d0SPeter Maydell case 6: /* 286 interrupt gate */ 6383df1a3d0SPeter Maydell case 7: /* 286 trap gate */ 6393df1a3d0SPeter Maydell case 14: /* 386 
interrupt gate */ 6403df1a3d0SPeter Maydell case 15: /* 386 trap gate */ 6413df1a3d0SPeter Maydell break; 6423df1a3d0SPeter Maydell default: 6433df1a3d0SPeter Maydell raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 6443df1a3d0SPeter Maydell break; 6453df1a3d0SPeter Maydell } 6463df1a3d0SPeter Maydell dpl = (e2 >> DESC_DPL_SHIFT) & 3; 6473df1a3d0SPeter Maydell cpl = env->hflags & HF_CPL_MASK; 6483df1a3d0SPeter Maydell /* check privilege if software int */ 6493df1a3d0SPeter Maydell if (is_int && dpl < cpl) { 6503df1a3d0SPeter Maydell raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 6513df1a3d0SPeter Maydell } 6523df1a3d0SPeter Maydell 6533df1a3d0SPeter Maydell if (type == 5) { 6543df1a3d0SPeter Maydell /* task gate */ 655eaa728eeSbellard /* must do that check here to return the correct error code */ 65620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 65777b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 65820054ef0SBlue Swirl } 6592999a0b2SBlue Swirl switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); 660eaa728eeSbellard if (has_error_code) { 661eaa728eeSbellard int type; 662eaa728eeSbellard uint32_t mask; 66320054ef0SBlue Swirl 664eaa728eeSbellard /* push the error code */ 665eaa728eeSbellard type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 666eaa728eeSbellard shift = type >> 3; 66720054ef0SBlue Swirl if (env->segs[R_SS].flags & DESC_B_MASK) { 668eaa728eeSbellard mask = 0xffffffff; 66920054ef0SBlue Swirl } else { 670eaa728eeSbellard mask = 0xffff; 67120054ef0SBlue Swirl } 67208b3ded6Sliguang esp = (env->regs[R_ESP] - (2 << shift)) & mask; 673eaa728eeSbellard ssp = env->segs[R_SS].base + esp; 67420054ef0SBlue Swirl if (shift) { 675329e607dSBlue Swirl cpu_stl_kernel(env, ssp, error_code); 67620054ef0SBlue Swirl } else { 677329e607dSBlue Swirl cpu_stw_kernel(env, ssp, error_code); 67820054ef0SBlue Swirl } 679eaa728eeSbellard SET_ESP(esp, mask); 680eaa728eeSbellard } 681eaa728eeSbellard return; 682eaa728eeSbellard } 6833df1a3d0SPeter Maydell 6843df1a3d0SPeter Maydell /* Otherwise, trap or interrupt gate */ 6853df1a3d0SPeter Maydell 686eaa728eeSbellard /* check valid bit */ 68720054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 68877b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 68920054ef0SBlue Swirl } 690eaa728eeSbellard selector = e1 >> 16; 691eaa728eeSbellard offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 69220054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 69377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, 0); 69420054ef0SBlue Swirl } 6952999a0b2SBlue Swirl if (load_segment(env, &e1, &e2, selector) != 0) { 69677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 69720054ef0SBlue Swirl } 69820054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 69977b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 70020054ef0SBlue Swirl } 701eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 70220054ef0SBlue Swirl if (dpl > cpl) { 70377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 70420054ef0SBlue Swirl } 70520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 70677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 70720054ef0SBlue Swirl } 7081110bfe6SPaolo Bonzini if (e2 & DESC_C_MASK) { 7091110bfe6SPaolo Bonzini dpl = cpl; 7101110bfe6SPaolo Bonzini } 7111110bfe6SPaolo Bonzini if (dpl < cpl) { 712eaa728eeSbellard /* to inner privilege */ 713100ec099SPavel Dovgalyuk get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); 
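        /*
         * Inner-privilege interrupt: the SS:ESP for the target DPL was just
         * fetched from the current TSS; it must name a present, writable
         * data segment whose RPL and DPL both equal the new DPL.  The old
         * SS:ESP (and, when coming from vm86 mode, GS/FS/DS/ES) are pushed
         * onto this new stack further below.
         */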
71420054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 71577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 71620054ef0SBlue Swirl } 71720054ef0SBlue Swirl if ((ss & 3) != dpl) { 71877b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 71920054ef0SBlue Swirl } 7202999a0b2SBlue Swirl if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { 72177b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 72220054ef0SBlue Swirl } 723eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 72420054ef0SBlue Swirl if (ss_dpl != dpl) { 72577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 72620054ef0SBlue Swirl } 727eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 728eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 72920054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 73077b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 73120054ef0SBlue Swirl } 73220054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 73377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 73420054ef0SBlue Swirl } 735eaa728eeSbellard new_stack = 1; 736eaa728eeSbellard sp_mask = get_sp_mask(ss_e2); 737eaa728eeSbellard ssp = get_seg_base(ss_e1, ss_e2); 7381110bfe6SPaolo Bonzini } else { 739eaa728eeSbellard /* to same privilege */ 74087446327SKevin O'Connor if (vm86) { 74177b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 74220054ef0SBlue Swirl } 743eaa728eeSbellard new_stack = 0; 744eaa728eeSbellard sp_mask = get_sp_mask(env->segs[R_SS].flags); 745eaa728eeSbellard ssp = env->segs[R_SS].base; 74608b3ded6Sliguang esp = env->regs[R_ESP]; 747eaa728eeSbellard } 748eaa728eeSbellard 749eaa728eeSbellard shift = type >> 3; 750eaa728eeSbellard 751eaa728eeSbellard #if 0 752eaa728eeSbellard /* XXX: check that enough room is available */ 753eaa728eeSbellard push_size = 6 + (new_stack << 2) + (has_error_code << 1); 75487446327SKevin O'Connor if (vm86) { 755eaa728eeSbellard push_size += 8; 75620054ef0SBlue Swirl } 757eaa728eeSbellard push_size <<= shift; 758eaa728eeSbellard #endif 759eaa728eeSbellard if (shift == 1) { 760eaa728eeSbellard if (new_stack) { 76187446327SKevin O'Connor if (vm86) { 762eaa728eeSbellard PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); 763eaa728eeSbellard PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); 764eaa728eeSbellard PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); 765eaa728eeSbellard PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); 766eaa728eeSbellard } 767eaa728eeSbellard PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); 76808b3ded6Sliguang PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]); 769eaa728eeSbellard } 770997ff0d9SBlue Swirl PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env)); 771eaa728eeSbellard PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); 772eaa728eeSbellard PUSHL(ssp, esp, sp_mask, old_eip); 773eaa728eeSbellard if (has_error_code) { 774eaa728eeSbellard PUSHL(ssp, esp, sp_mask, error_code); 775eaa728eeSbellard } 776eaa728eeSbellard } else { 777eaa728eeSbellard if (new_stack) { 77887446327SKevin O'Connor if (vm86) { 779eaa728eeSbellard PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); 780eaa728eeSbellard PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); 781eaa728eeSbellard PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); 782eaa728eeSbellard PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); 783eaa728eeSbellard } 784eaa728eeSbellard PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); 78508b3ded6Sliguang PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]); 786eaa728eeSbellard } 787997ff0d9SBlue 
Swirl PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env)); 788eaa728eeSbellard PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); 789eaa728eeSbellard PUSHW(ssp, esp, sp_mask, old_eip); 790eaa728eeSbellard if (has_error_code) { 791eaa728eeSbellard PUSHW(ssp, esp, sp_mask, error_code); 792eaa728eeSbellard } 793eaa728eeSbellard } 794eaa728eeSbellard 795fd460606SKevin O'Connor /* interrupt gate clear IF mask */ 796fd460606SKevin O'Connor if ((type & 1) == 0) { 797fd460606SKevin O'Connor env->eflags &= ~IF_MASK; 798fd460606SKevin O'Connor } 799fd460606SKevin O'Connor env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 800fd460606SKevin O'Connor 801eaa728eeSbellard if (new_stack) { 80287446327SKevin O'Connor if (vm86) { 803eaa728eeSbellard cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); 804eaa728eeSbellard cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); 805eaa728eeSbellard cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); 806eaa728eeSbellard cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); 807eaa728eeSbellard } 808eaa728eeSbellard ss = (ss & ~3) | dpl; 809eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 810eaa728eeSbellard ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); 811eaa728eeSbellard } 812eaa728eeSbellard SET_ESP(esp, sp_mask); 813eaa728eeSbellard 814eaa728eeSbellard selector = (selector & ~3) | dpl; 815eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 816eaa728eeSbellard get_seg_base(e1, e2), 817eaa728eeSbellard get_seg_limit(e1, e2), 818eaa728eeSbellard e2); 819eaa728eeSbellard env->eip = offset; 820eaa728eeSbellard } 821eaa728eeSbellard 822eaa728eeSbellard #ifdef TARGET_X86_64 823eaa728eeSbellard 824100ec099SPavel Dovgalyuk #define PUSHQ_RA(sp, val, ra) \ 825eaa728eeSbellard { \ 826eaa728eeSbellard sp -= 8; \ 827100ec099SPavel Dovgalyuk cpu_stq_kernel_ra(env, sp, (val), ra); \ 828eaa728eeSbellard } 829eaa728eeSbellard 830100ec099SPavel Dovgalyuk #define POPQ_RA(sp, val, ra) \ 831eaa728eeSbellard { \ 832100ec099SPavel Dovgalyuk val = cpu_ldq_kernel_ra(env, sp, ra); \ 833eaa728eeSbellard sp += 8; \ 834eaa728eeSbellard } 835eaa728eeSbellard 836100ec099SPavel Dovgalyuk #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0) 837100ec099SPavel Dovgalyuk #define POPQ(sp, val) POPQ_RA(sp, val, 0) 838100ec099SPavel Dovgalyuk 8392999a0b2SBlue Swirl static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) 840eaa728eeSbellard { 8416aa9e42fSRichard Henderson X86CPU *cpu = env_archcpu(env); 842eaa728eeSbellard int index; 843eaa728eeSbellard 844eaa728eeSbellard #if 0 845eaa728eeSbellard printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 846eaa728eeSbellard env->tr.base, env->tr.limit); 847eaa728eeSbellard #endif 848eaa728eeSbellard 84920054ef0SBlue Swirl if (!(env->tr.flags & DESC_P_MASK)) { 850a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss"); 85120054ef0SBlue Swirl } 852eaa728eeSbellard index = 8 * level + 4; 85320054ef0SBlue Swirl if ((index + 7) > env->tr.limit) { 85477b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); 85520054ef0SBlue Swirl } 856329e607dSBlue Swirl return cpu_ldq_kernel(env, env->tr.base + index); 857eaa728eeSbellard } 858eaa728eeSbellard 859eaa728eeSbellard /* 64 bit interrupt */ 8602999a0b2SBlue Swirl static void do_interrupt64(CPUX86State *env, int intno, int is_int, 8612999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 862eaa728eeSbellard { 863eaa728eeSbellard SegmentCache *dt; 864eaa728eeSbellard target_ulong ptr; 865eaa728eeSbellard int type, dpl, selector, cpl, ist; 866eaa728eeSbellard int has_error_code, 
new_stack; 867eaa728eeSbellard uint32_t e1, e2, e3, ss; 868eaa728eeSbellard target_ulong old_eip, esp, offset; 869eaa728eeSbellard 870eaa728eeSbellard has_error_code = 0; 87120054ef0SBlue Swirl if (!is_int && !is_hw) { 87220054ef0SBlue Swirl has_error_code = exception_has_error_code(intno); 87320054ef0SBlue Swirl } 87420054ef0SBlue Swirl if (is_int) { 875eaa728eeSbellard old_eip = next_eip; 87620054ef0SBlue Swirl } else { 877eaa728eeSbellard old_eip = env->eip; 87820054ef0SBlue Swirl } 879eaa728eeSbellard 880eaa728eeSbellard dt = &env->idt; 88120054ef0SBlue Swirl if (intno * 16 + 15 > dt->limit) { 88277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); 88320054ef0SBlue Swirl } 884eaa728eeSbellard ptr = dt->base + intno * 16; 885329e607dSBlue Swirl e1 = cpu_ldl_kernel(env, ptr); 886329e607dSBlue Swirl e2 = cpu_ldl_kernel(env, ptr + 4); 887329e607dSBlue Swirl e3 = cpu_ldl_kernel(env, ptr + 8); 888eaa728eeSbellard /* check gate type */ 889eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 890eaa728eeSbellard switch (type) { 891eaa728eeSbellard case 14: /* 386 interrupt gate */ 892eaa728eeSbellard case 15: /* 386 trap gate */ 893eaa728eeSbellard break; 894eaa728eeSbellard default: 89577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); 896eaa728eeSbellard break; 897eaa728eeSbellard } 898eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 899eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 9001235fc06Sths /* check privilege if software int */ 90120054ef0SBlue Swirl if (is_int && dpl < cpl) { 90277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); 90320054ef0SBlue Swirl } 904eaa728eeSbellard /* check valid bit */ 90520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 90677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2); 90720054ef0SBlue Swirl } 908eaa728eeSbellard selector = e1 >> 16; 909eaa728eeSbellard offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); 910eaa728eeSbellard ist = e2 & 7; 91120054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 91277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, 0); 91320054ef0SBlue Swirl } 914eaa728eeSbellard 9152999a0b2SBlue Swirl if (load_segment(env, &e1, &e2, selector) != 0) { 91677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 91720054ef0SBlue Swirl } 91820054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 91977b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 92020054ef0SBlue Swirl } 921eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 92220054ef0SBlue Swirl if (dpl > cpl) { 92377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 92420054ef0SBlue Swirl } 92520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 92677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 92720054ef0SBlue Swirl } 92820054ef0SBlue Swirl if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { 92977b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 93020054ef0SBlue Swirl } 9311110bfe6SPaolo Bonzini if (e2 & DESC_C_MASK) { 9321110bfe6SPaolo Bonzini dpl = cpl; 9331110bfe6SPaolo Bonzini } 9341110bfe6SPaolo Bonzini if (dpl < cpl || ist != 0) { 935eaa728eeSbellard /* to inner privilege */ 936eaa728eeSbellard new_stack = 1; 937ae67dc72SPaolo Bonzini esp = get_rsp_from_tss(env, ist != 0 ? 
ist + 3 : dpl); 938ae67dc72SPaolo Bonzini ss = 0; 9391110bfe6SPaolo Bonzini } else { 940eaa728eeSbellard /* to same privilege */ 94120054ef0SBlue Swirl if (env->eflags & VM_MASK) { 94277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 94320054ef0SBlue Swirl } 944eaa728eeSbellard new_stack = 0; 94508b3ded6Sliguang esp = env->regs[R_ESP]; 946e95e9b88SWu Xiang } 947ae67dc72SPaolo Bonzini esp &= ~0xfLL; /* align stack */ 948eaa728eeSbellard 949eaa728eeSbellard PUSHQ(esp, env->segs[R_SS].selector); 95008b3ded6Sliguang PUSHQ(esp, env->regs[R_ESP]); 951997ff0d9SBlue Swirl PUSHQ(esp, cpu_compute_eflags(env)); 952eaa728eeSbellard PUSHQ(esp, env->segs[R_CS].selector); 953eaa728eeSbellard PUSHQ(esp, old_eip); 954eaa728eeSbellard if (has_error_code) { 955eaa728eeSbellard PUSHQ(esp, error_code); 956eaa728eeSbellard } 957eaa728eeSbellard 958fd460606SKevin O'Connor /* interrupt gate clear IF mask */ 959fd460606SKevin O'Connor if ((type & 1) == 0) { 960fd460606SKevin O'Connor env->eflags &= ~IF_MASK; 961fd460606SKevin O'Connor } 962fd460606SKevin O'Connor env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 963fd460606SKevin O'Connor 964eaa728eeSbellard if (new_stack) { 965eaa728eeSbellard ss = 0 | dpl; 966e95e9b88SWu Xiang cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); 967eaa728eeSbellard } 96808b3ded6Sliguang env->regs[R_ESP] = esp; 969eaa728eeSbellard 970eaa728eeSbellard selector = (selector & ~3) | dpl; 971eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 972eaa728eeSbellard get_seg_base(e1, e2), 973eaa728eeSbellard get_seg_limit(e1, e2), 974eaa728eeSbellard e2); 975eaa728eeSbellard env->eip = offset; 976eaa728eeSbellard } 977eaa728eeSbellard #endif 978eaa728eeSbellard 979d9957a8bSblueswir1 #ifdef TARGET_X86_64 980eaa728eeSbellard #if defined(CONFIG_USER_ONLY) 9812999a0b2SBlue Swirl void helper_syscall(CPUX86State *env, int next_eip_addend) 982eaa728eeSbellard { 9836aa9e42fSRichard Henderson CPUState *cs = env_cpu(env); 98427103424SAndreas Färber 98527103424SAndreas Färber cs->exception_index = EXCP_SYSCALL; 98656bf1c49SDouglas Crosher env->exception_is_int = 0; 987eaa728eeSbellard env->exception_next_eip = env->eip + next_eip_addend; 9885638d180SAndreas Färber cpu_loop_exit(cs); 989eaa728eeSbellard } 990eaa728eeSbellard #else 9912999a0b2SBlue Swirl void helper_syscall(CPUX86State *env, int next_eip_addend) 992eaa728eeSbellard { 993eaa728eeSbellard int selector; 994eaa728eeSbellard 995eaa728eeSbellard if (!(env->efer & MSR_EFER_SCE)) { 996100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); 997eaa728eeSbellard } 998eaa728eeSbellard selector = (env->star >> 32) & 0xffff; 999eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1000eaa728eeSbellard int code64; 1001eaa728eeSbellard 1002a4165610Sliguang env->regs[R_ECX] = env->eip + next_eip_addend; 10031a1435ddSRudolf Marek env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK; 1004eaa728eeSbellard 1005eaa728eeSbellard code64 = env->hflags & HF_CS64_MASK; 1006eaa728eeSbellard 10071a1435ddSRudolf Marek env->eflags &= ~(env->fmask | RF_MASK); 1008fd460606SKevin O'Connor cpu_load_eflags(env, env->eflags, 0); 1009eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 1010eaa728eeSbellard 0, 0xffffffff, 1011eaa728eeSbellard DESC_G_MASK | DESC_P_MASK | 1012eaa728eeSbellard DESC_S_MASK | 101320054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 101420054ef0SBlue Swirl DESC_L_MASK); 1015eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 
0xfffc, 1016eaa728eeSbellard 0, 0xffffffff, 1017eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1018eaa728eeSbellard DESC_S_MASK | 1019eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 102020054ef0SBlue Swirl if (code64) { 1021eaa728eeSbellard env->eip = env->lstar; 102220054ef0SBlue Swirl } else { 1023eaa728eeSbellard env->eip = env->cstar; 102420054ef0SBlue Swirl } 1025d9957a8bSblueswir1 } else { 1026a4165610Sliguang env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend); 1027eaa728eeSbellard 1028fd460606SKevin O'Connor env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); 1029eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 1030eaa728eeSbellard 0, 0xffffffff, 1031eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1032eaa728eeSbellard DESC_S_MASK | 1033eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1034eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 1035eaa728eeSbellard 0, 0xffffffff, 1036eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1037eaa728eeSbellard DESC_S_MASK | 1038eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 1039eaa728eeSbellard env->eip = (uint32_t)env->star; 1040eaa728eeSbellard } 1041eaa728eeSbellard } 1042eaa728eeSbellard #endif 1043d9957a8bSblueswir1 #endif 1044eaa728eeSbellard 1045d9957a8bSblueswir1 #ifdef TARGET_X86_64 10462999a0b2SBlue Swirl void helper_sysret(CPUX86State *env, int dflag) 1047eaa728eeSbellard { 1048eaa728eeSbellard int cpl, selector; 1049eaa728eeSbellard 1050eaa728eeSbellard if (!(env->efer & MSR_EFER_SCE)) { 1051100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); 1052eaa728eeSbellard } 1053eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1054eaa728eeSbellard if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { 1055100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1056eaa728eeSbellard } 1057eaa728eeSbellard selector = (env->star >> 48) & 0xffff; 1058eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1059fd460606SKevin O'Connor cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK 1060fd460606SKevin O'Connor | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | 1061fd460606SKevin O'Connor NT_MASK); 1062eaa728eeSbellard if (dflag == 2) { 1063eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 1064eaa728eeSbellard 0, 0xffffffff, 1065eaa728eeSbellard DESC_G_MASK | DESC_P_MASK | 1066eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1067eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 1068eaa728eeSbellard DESC_L_MASK); 1069a4165610Sliguang env->eip = env->regs[R_ECX]; 1070eaa728eeSbellard } else { 1071eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1072eaa728eeSbellard 0, 0xffffffff, 1073eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1074eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1075eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1076a4165610Sliguang env->eip = (uint32_t)env->regs[R_ECX]; 1077eaa728eeSbellard } 1078ac576229SBill Paul cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1079eaa728eeSbellard 0, 0xffffffff, 1080eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1081eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1082eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 1083d9957a8bSblueswir1 } else { 1084fd460606SKevin O'Connor env->eflags |= IF_MASK; 1085eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1086eaa728eeSbellard 0, 0xffffffff, 1087eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | 
DESC_P_MASK | 1088eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1089eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1090a4165610Sliguang env->eip = (uint32_t)env->regs[R_ECX]; 1091ac576229SBill Paul cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1092eaa728eeSbellard 0, 0xffffffff, 1093eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1094eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1095eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 1096eaa728eeSbellard } 1097eaa728eeSbellard } 1098d9957a8bSblueswir1 #endif 1099eaa728eeSbellard 1100eaa728eeSbellard /* real mode interrupt */ 11012999a0b2SBlue Swirl static void do_interrupt_real(CPUX86State *env, int intno, int is_int, 11022999a0b2SBlue Swirl int error_code, unsigned int next_eip) 1103eaa728eeSbellard { 1104eaa728eeSbellard SegmentCache *dt; 1105eaa728eeSbellard target_ulong ptr, ssp; 1106eaa728eeSbellard int selector; 1107eaa728eeSbellard uint32_t offset, esp; 1108eaa728eeSbellard uint32_t old_cs, old_eip; 1109eaa728eeSbellard 1110eaa728eeSbellard /* real mode (simpler!) */ 1111eaa728eeSbellard dt = &env->idt; 111220054ef0SBlue Swirl if (intno * 4 + 3 > dt->limit) { 111377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 111420054ef0SBlue Swirl } 1115eaa728eeSbellard ptr = dt->base + intno * 4; 1116329e607dSBlue Swirl offset = cpu_lduw_kernel(env, ptr); 1117329e607dSBlue Swirl selector = cpu_lduw_kernel(env, ptr + 2); 111808b3ded6Sliguang esp = env->regs[R_ESP]; 1119eaa728eeSbellard ssp = env->segs[R_SS].base; 112020054ef0SBlue Swirl if (is_int) { 1121eaa728eeSbellard old_eip = next_eip; 112220054ef0SBlue Swirl } else { 1123eaa728eeSbellard old_eip = env->eip; 112420054ef0SBlue Swirl } 1125eaa728eeSbellard old_cs = env->segs[R_CS].selector; 1126eaa728eeSbellard /* XXX: use SS segment size? */ 1127997ff0d9SBlue Swirl PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env)); 1128eaa728eeSbellard PUSHW(ssp, esp, 0xffff, old_cs); 1129eaa728eeSbellard PUSHW(ssp, esp, 0xffff, old_eip); 1130eaa728eeSbellard 1131eaa728eeSbellard /* update processor state */ 113208b3ded6Sliguang env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff); 1133eaa728eeSbellard env->eip = offset; 1134eaa728eeSbellard env->segs[R_CS].selector = selector; 1135eaa728eeSbellard env->segs[R_CS].base = (selector << 4); 1136eaa728eeSbellard env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); 1137eaa728eeSbellard } 1138eaa728eeSbellard 1139e694d4e2SBlue Swirl #if defined(CONFIG_USER_ONLY) 114033271823SPeter Maydell /* fake user mode interrupt. is_int is TRUE if coming from the int 114133271823SPeter Maydell * instruction. next_eip is the env->eip value AFTER the interrupt 114233271823SPeter Maydell * instruction. It is only relevant if is_int is TRUE or if intno 114333271823SPeter Maydell * is EXCP_SYSCALL. 
114433271823SPeter Maydell */ 11452999a0b2SBlue Swirl static void do_interrupt_user(CPUX86State *env, int intno, int is_int, 11462999a0b2SBlue Swirl int error_code, target_ulong next_eip) 1147eaa728eeSbellard { 1148885b7c44SStanislav Shmarov if (is_int) { 1149eaa728eeSbellard SegmentCache *dt; 1150eaa728eeSbellard target_ulong ptr; 1151eaa728eeSbellard int dpl, cpl, shift; 1152eaa728eeSbellard uint32_t e2; 1153eaa728eeSbellard 1154eaa728eeSbellard dt = &env->idt; 1155eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1156eaa728eeSbellard shift = 4; 1157eaa728eeSbellard } else { 1158eaa728eeSbellard shift = 3; 1159eaa728eeSbellard } 1160eaa728eeSbellard ptr = dt->base + (intno << shift); 1161329e607dSBlue Swirl e2 = cpu_ldl_kernel(env, ptr + 4); 1162eaa728eeSbellard 1163eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1164eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 11651235fc06Sths /* check privilege if software int */ 1166885b7c44SStanislav Shmarov if (dpl < cpl) { 116777b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2); 116820054ef0SBlue Swirl } 1169885b7c44SStanislav Shmarov } 1170eaa728eeSbellard 1171eaa728eeSbellard /* Since we emulate only user space, we cannot do more than 1172eaa728eeSbellard exiting the emulation with the suitable exception and error 117347575997SJincheng Miao code. So update EIP for INT 0x80 and EXCP_SYSCALL. */ 117447575997SJincheng Miao if (is_int || intno == EXCP_SYSCALL) { 1175a78d0eabSliguang env->eip = next_eip; 1176eaa728eeSbellard } 117720054ef0SBlue Swirl } 1178eaa728eeSbellard 1179e694d4e2SBlue Swirl #else 1180e694d4e2SBlue Swirl 11812999a0b2SBlue Swirl static void handle_even_inj(CPUX86State *env, int intno, int is_int, 11822999a0b2SBlue Swirl int error_code, int is_hw, int rm) 11832ed51f5bSaliguori { 11846aa9e42fSRichard Henderson CPUState *cs = env_cpu(env); 1185b216aa6cSPaolo Bonzini uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, 118620054ef0SBlue Swirl control.event_inj)); 118720054ef0SBlue Swirl 11882ed51f5bSaliguori if (!(event_inj & SVM_EVTINJ_VALID)) { 11892ed51f5bSaliguori int type; 119020054ef0SBlue Swirl 119120054ef0SBlue Swirl if (is_int) { 11922ed51f5bSaliguori type = SVM_EVTINJ_TYPE_SOFT; 119320054ef0SBlue Swirl } else { 11942ed51f5bSaliguori type = SVM_EVTINJ_TYPE_EXEPT; 11952ed51f5bSaliguori } 119620054ef0SBlue Swirl event_inj = intno | type | SVM_EVTINJ_VALID; 119720054ef0SBlue Swirl if (!rm && exception_has_error_code(intno)) { 119820054ef0SBlue Swirl event_inj |= SVM_EVTINJ_VALID_ERR; 1199b216aa6cSPaolo Bonzini x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, 120020054ef0SBlue Swirl control.event_inj_err), 120120054ef0SBlue Swirl error_code); 120220054ef0SBlue Swirl } 1203b216aa6cSPaolo Bonzini x86_stl_phys(cs, 1204ab1da857SEdgar E. Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 120520054ef0SBlue Swirl event_inj); 12062ed51f5bSaliguori } 12072ed51f5bSaliguori } 120800ea18d1Saliguori #endif 12092ed51f5bSaliguori 1210eaa728eeSbellard /* 1211eaa728eeSbellard * Begin execution of an interruption. is_int is TRUE if coming from 1212a78d0eabSliguang * the int instruction. next_eip is the env->eip value AFTER the interrupt 1213eaa728eeSbellard * instruction. It is only relevant if is_int is TRUE. 
1214eaa728eeSbellard */ 1215ca4c810aSAndreas Färber static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, 12162999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 1217eaa728eeSbellard { 1218ca4c810aSAndreas Färber CPUX86State *env = &cpu->env; 1219ca4c810aSAndreas Färber 12208fec2b8cSaliguori if (qemu_loglevel_mask(CPU_LOG_INT)) { 1221eaa728eeSbellard if ((env->cr[0] & CR0_PE_MASK)) { 1222eaa728eeSbellard static int count; 122320054ef0SBlue Swirl 122420054ef0SBlue Swirl qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx 122520054ef0SBlue Swirl " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1226eaa728eeSbellard count, intno, error_code, is_int, 1227eaa728eeSbellard env->hflags & HF_CPL_MASK, 1228a78d0eabSliguang env->segs[R_CS].selector, env->eip, 1229a78d0eabSliguang (int)env->segs[R_CS].base + env->eip, 123008b3ded6Sliguang env->segs[R_SS].selector, env->regs[R_ESP]); 1231eaa728eeSbellard if (intno == 0x0e) { 123293fcfe39Saliguori qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1233eaa728eeSbellard } else { 12344b34e3adSliguang qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); 1235eaa728eeSbellard } 123693fcfe39Saliguori qemu_log("\n"); 1237a0762859SAndreas Färber log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); 1238eaa728eeSbellard #if 0 1239eaa728eeSbellard { 1240eaa728eeSbellard int i; 12419bd5494eSAdam Lackorzynski target_ulong ptr; 124220054ef0SBlue Swirl 124393fcfe39Saliguori qemu_log(" code="); 1244eaa728eeSbellard ptr = env->segs[R_CS].base + env->eip; 1245eaa728eeSbellard for (i = 0; i < 16; i++) { 124693fcfe39Saliguori qemu_log(" %02x", ldub(ptr + i)); 1247eaa728eeSbellard } 124893fcfe39Saliguori qemu_log("\n"); 1249eaa728eeSbellard } 1250eaa728eeSbellard #endif 1251eaa728eeSbellard count++; 1252eaa728eeSbellard } 1253eaa728eeSbellard } 1254eaa728eeSbellard if (env->cr[0] & CR0_PE_MASK) { 125500ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1256f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12572999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 0); 125820054ef0SBlue Swirl } 125900ea18d1Saliguori #endif 1260eb38c52cSblueswir1 #ifdef TARGET_X86_64 1261eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 12622999a0b2SBlue Swirl do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); 1263eaa728eeSbellard } else 1264eaa728eeSbellard #endif 1265eaa728eeSbellard { 12662999a0b2SBlue Swirl do_interrupt_protected(env, intno, is_int, error_code, next_eip, 12672999a0b2SBlue Swirl is_hw); 1268eaa728eeSbellard } 1269eaa728eeSbellard } else { 127000ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1271f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12722999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 1); 127320054ef0SBlue Swirl } 127400ea18d1Saliguori #endif 12752999a0b2SBlue Swirl do_interrupt_real(env, intno, is_int, error_code, next_eip); 1276eaa728eeSbellard } 12772ed51f5bSaliguori 127800ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1279f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 1280fdfba1a2SEdgar E. Iglesias CPUState *cs = CPU(cpu); 1281b216aa6cSPaolo Bonzini uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + 128220054ef0SBlue Swirl offsetof(struct vmcb, 128320054ef0SBlue Swirl control.event_inj)); 128420054ef0SBlue Swirl 1285b216aa6cSPaolo Bonzini x86_stl_phys(cs, 1286ab1da857SEdgar E. 
Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 128720054ef0SBlue Swirl event_inj & ~SVM_EVTINJ_VALID); 12882ed51f5bSaliguori } 128900ea18d1Saliguori #endif 1290eaa728eeSbellard } 1291eaa728eeSbellard 129297a8ea5aSAndreas Färber void x86_cpu_do_interrupt(CPUState *cs) 1293e694d4e2SBlue Swirl { 129497a8ea5aSAndreas Färber X86CPU *cpu = X86_CPU(cs); 129597a8ea5aSAndreas Färber CPUX86State *env = &cpu->env; 129697a8ea5aSAndreas Färber 1297e694d4e2SBlue Swirl #if defined(CONFIG_USER_ONLY) 1298e694d4e2SBlue Swirl /* if user mode only, we simulate a fake exception 1299e694d4e2SBlue Swirl which will be handled outside the cpu execution 1300e694d4e2SBlue Swirl loop */ 130127103424SAndreas Färber do_interrupt_user(env, cs->exception_index, 1302e694d4e2SBlue Swirl env->exception_is_int, 1303e694d4e2SBlue Swirl env->error_code, 1304e694d4e2SBlue Swirl env->exception_next_eip); 1305e694d4e2SBlue Swirl /* successfully delivered */ 1306e694d4e2SBlue Swirl env->old_exception = -1; 1307e694d4e2SBlue Swirl #else 130810cde894SPaolo Bonzini if (cs->exception_index >= EXCP_VMEXIT) { 130910cde894SPaolo Bonzini assert(env->old_exception == -1); 131010cde894SPaolo Bonzini do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code); 131110cde894SPaolo Bonzini } else { 131227103424SAndreas Färber do_interrupt_all(cpu, cs->exception_index, 1313e694d4e2SBlue Swirl env->exception_is_int, 1314e694d4e2SBlue Swirl env->error_code, 1315e694d4e2SBlue Swirl env->exception_next_eip, 0); 1316e694d4e2SBlue Swirl /* successfully delivered */ 1317e694d4e2SBlue Swirl env->old_exception = -1; 131810cde894SPaolo Bonzini } 1319e694d4e2SBlue Swirl #endif 1320e694d4e2SBlue Swirl } 1321e694d4e2SBlue Swirl 13222999a0b2SBlue Swirl void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1323e694d4e2SBlue Swirl { 13246aa9e42fSRichard Henderson do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1325e694d4e2SBlue Swirl } 1326e694d4e2SBlue Swirl 132742f53feaSRichard Henderson bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) 132842f53feaSRichard Henderson { 132942f53feaSRichard Henderson X86CPU *cpu = X86_CPU(cs); 133042f53feaSRichard Henderson CPUX86State *env = &cpu->env; 133192d5f1a4SPaolo Bonzini int intno; 133242f53feaSRichard Henderson 133392d5f1a4SPaolo Bonzini interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request); 133492d5f1a4SPaolo Bonzini if (!interrupt_request) { 133592d5f1a4SPaolo Bonzini return false; 133692d5f1a4SPaolo Bonzini } 133792d5f1a4SPaolo Bonzini 133892d5f1a4SPaolo Bonzini /* Don't process multiple interrupt requests in a single call. 133992d5f1a4SPaolo Bonzini * This is required to make icount-driven execution deterministic. 
134092d5f1a4SPaolo Bonzini */ 134192d5f1a4SPaolo Bonzini switch (interrupt_request) { 134242f53feaSRichard Henderson #if !defined(CONFIG_USER_ONLY) 134392d5f1a4SPaolo Bonzini case CPU_INTERRUPT_POLL: 134442f53feaSRichard Henderson cs->interrupt_request &= ~CPU_INTERRUPT_POLL; 134542f53feaSRichard Henderson apic_poll_irq(cpu->apic_state); 134692d5f1a4SPaolo Bonzini break; 134742f53feaSRichard Henderson #endif 134892d5f1a4SPaolo Bonzini case CPU_INTERRUPT_SIPI: 134942f53feaSRichard Henderson do_cpu_sipi(cpu); 135092d5f1a4SPaolo Bonzini break; 135192d5f1a4SPaolo Bonzini case CPU_INTERRUPT_SMI: 135265c9d60aSPaolo Bonzini cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); 135342f53feaSRichard Henderson cs->interrupt_request &= ~CPU_INTERRUPT_SMI; 135442f53feaSRichard Henderson do_smm_enter(cpu); 135592d5f1a4SPaolo Bonzini break; 135692d5f1a4SPaolo Bonzini case CPU_INTERRUPT_NMI: 135702f7fd25SJan Kiszka cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); 135842f53feaSRichard Henderson cs->interrupt_request &= ~CPU_INTERRUPT_NMI; 135942f53feaSRichard Henderson env->hflags2 |= HF2_NMI_MASK; 136042f53feaSRichard Henderson do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); 136192d5f1a4SPaolo Bonzini break; 136292d5f1a4SPaolo Bonzini case CPU_INTERRUPT_MCE: 136342f53feaSRichard Henderson cs->interrupt_request &= ~CPU_INTERRUPT_MCE; 136442f53feaSRichard Henderson do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); 136592d5f1a4SPaolo Bonzini break; 136692d5f1a4SPaolo Bonzini case CPU_INTERRUPT_HARD: 136765c9d60aSPaolo Bonzini cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); 136842f53feaSRichard Henderson cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | 136942f53feaSRichard Henderson CPU_INTERRUPT_VIRQ); 137042f53feaSRichard Henderson intno = cpu_get_pic_interrupt(env); 137142f53feaSRichard Henderson qemu_log_mask(CPU_LOG_TB_IN_ASM, 137242f53feaSRichard Henderson "Servicing hardware INT=0x%02x\n", intno); 137342f53feaSRichard Henderson do_interrupt_x86_hardirq(env, intno, 1); 137492d5f1a4SPaolo Bonzini break; 137542f53feaSRichard Henderson #if !defined(CONFIG_USER_ONLY) 137692d5f1a4SPaolo Bonzini case CPU_INTERRUPT_VIRQ: 137742f53feaSRichard Henderson /* FIXME: this should respect TPR */ 137865c9d60aSPaolo Bonzini cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0); 1379b216aa6cSPaolo Bonzini intno = x86_ldl_phys(cs, env->vm_vmcb 138042f53feaSRichard Henderson + offsetof(struct vmcb, control.int_vector)); 138142f53feaSRichard Henderson qemu_log_mask(CPU_LOG_TB_IN_ASM, 138242f53feaSRichard Henderson "Servicing virtual hardware INT=0x%02x\n", intno); 138342f53feaSRichard Henderson do_interrupt_x86_hardirq(env, intno, 1); 138442f53feaSRichard Henderson cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; 138592d5f1a4SPaolo Bonzini break; 138642f53feaSRichard Henderson #endif 138742f53feaSRichard Henderson } 138842f53feaSRichard Henderson 138992d5f1a4SPaolo Bonzini /* Ensure that no TB jump will be modified as the program flow was changed. 
*/ 139092d5f1a4SPaolo Bonzini return true; 139142f53feaSRichard Henderson } 139242f53feaSRichard Henderson 13932999a0b2SBlue Swirl void helper_lldt(CPUX86State *env, int selector) 1394eaa728eeSbellard { 1395eaa728eeSbellard SegmentCache *dt; 1396eaa728eeSbellard uint32_t e1, e2; 1397eaa728eeSbellard int index, entry_limit; 1398eaa728eeSbellard target_ulong ptr; 1399eaa728eeSbellard 1400eaa728eeSbellard selector &= 0xffff; 1401eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1402eaa728eeSbellard /* XXX: NULL selector case: invalid LDT */ 1403eaa728eeSbellard env->ldt.base = 0; 1404eaa728eeSbellard env->ldt.limit = 0; 1405eaa728eeSbellard } else { 140620054ef0SBlue Swirl if (selector & 0x4) { 1407100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 140820054ef0SBlue Swirl } 1409eaa728eeSbellard dt = &env->gdt; 1410eaa728eeSbellard index = selector & ~7; 1411eaa728eeSbellard #ifdef TARGET_X86_64 141220054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1413eaa728eeSbellard entry_limit = 15; 141420054ef0SBlue Swirl } else 1415eaa728eeSbellard #endif 141620054ef0SBlue Swirl { 1417eaa728eeSbellard entry_limit = 7; 141820054ef0SBlue Swirl } 141920054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1420100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142120054ef0SBlue Swirl } 1422eaa728eeSbellard ptr = dt->base + index; 1423100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1424100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 142520054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1426100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142720054ef0SBlue Swirl } 142820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1429100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 143020054ef0SBlue Swirl } 1431eaa728eeSbellard #ifdef TARGET_X86_64 1432eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1433eaa728eeSbellard uint32_t e3; 143420054ef0SBlue Swirl 1435100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1436eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1437eaa728eeSbellard env->ldt.base |= (target_ulong)e3 << 32; 1438eaa728eeSbellard } else 1439eaa728eeSbellard #endif 1440eaa728eeSbellard { 1441eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1442eaa728eeSbellard } 1443eaa728eeSbellard } 1444eaa728eeSbellard env->ldt.selector = selector; 1445eaa728eeSbellard } 1446eaa728eeSbellard 14472999a0b2SBlue Swirl void helper_ltr(CPUX86State *env, int selector) 1448eaa728eeSbellard { 1449eaa728eeSbellard SegmentCache *dt; 1450eaa728eeSbellard uint32_t e1, e2; 1451eaa728eeSbellard int index, type, entry_limit; 1452eaa728eeSbellard target_ulong ptr; 1453eaa728eeSbellard 1454eaa728eeSbellard selector &= 0xffff; 1455eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1456eaa728eeSbellard /* NULL selector case: invalid TR */ 1457eaa728eeSbellard env->tr.base = 0; 1458eaa728eeSbellard env->tr.limit = 0; 1459eaa728eeSbellard env->tr.flags = 0; 1460eaa728eeSbellard } else { 146120054ef0SBlue Swirl if (selector & 0x4) { 1462100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 146320054ef0SBlue Swirl } 1464eaa728eeSbellard dt = &env->gdt; 1465eaa728eeSbellard index = selector & ~7; 1466eaa728eeSbellard #ifdef TARGET_X86_64 146720054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 
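    /* In IA-32e mode the TSS descriptor occupies 16 bytes, so the limit
     * check below must span two 8-byte GDT slots. */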
1468eaa728eeSbellard entry_limit = 15; 146920054ef0SBlue Swirl } else 1470eaa728eeSbellard #endif 147120054ef0SBlue Swirl { 1472eaa728eeSbellard entry_limit = 7; 147320054ef0SBlue Swirl } 147420054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1475100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 147620054ef0SBlue Swirl } 1477eaa728eeSbellard ptr = dt->base + index; 1478100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1479100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1480eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1481eaa728eeSbellard if ((e2 & DESC_S_MASK) || 148220054ef0SBlue Swirl (type != 1 && type != 9)) { 1483100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 148420054ef0SBlue Swirl } 148520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1486100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 148720054ef0SBlue Swirl } 1488eaa728eeSbellard #ifdef TARGET_X86_64 1489eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1490eaa728eeSbellard uint32_t e3, e4; 149120054ef0SBlue Swirl 1492100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1493100ec099SPavel Dovgalyuk e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 149420054ef0SBlue Swirl if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1495100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 149620054ef0SBlue Swirl } 1497eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1498eaa728eeSbellard env->tr.base |= (target_ulong)e3 << 32; 1499eaa728eeSbellard } else 1500eaa728eeSbellard #endif 1501eaa728eeSbellard { 1502eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1503eaa728eeSbellard } 1504eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 1505100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1506eaa728eeSbellard } 1507eaa728eeSbellard env->tr.selector = selector; 1508eaa728eeSbellard } 1509eaa728eeSbellard 1510eaa728eeSbellard /* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ 15112999a0b2SBlue Swirl void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1512eaa728eeSbellard { 1513eaa728eeSbellard uint32_t e1, e2; 1514eaa728eeSbellard int cpl, dpl, rpl; 1515eaa728eeSbellard SegmentCache *dt; 1516eaa728eeSbellard int index; 1517eaa728eeSbellard target_ulong ptr; 1518eaa728eeSbellard 1519eaa728eeSbellard selector &= 0xffff; 1520eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1521eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1522eaa728eeSbellard /* null selector case */ 1523eaa728eeSbellard if (seg_reg == R_SS 1524eaa728eeSbellard #ifdef TARGET_X86_64 1525eaa728eeSbellard && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1526eaa728eeSbellard #endif 152720054ef0SBlue Swirl ) { 1528100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 152920054ef0SBlue Swirl } 1530eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1531eaa728eeSbellard } else { 1532eaa728eeSbellard 153320054ef0SBlue Swirl if (selector & 0x4) { 1534eaa728eeSbellard dt = &env->ldt; 153520054ef0SBlue Swirl } else { 1536eaa728eeSbellard dt = &env->gdt; 153720054ef0SBlue Swirl } 1538eaa728eeSbellard index = selector & ~7; 153920054ef0SBlue Swirl if ((index + 7) > dt->limit) { 1540100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 154120054ef0SBlue Swirl } 1542eaa728eeSbellard ptr = dt->base + index; 1543100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1544100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1545eaa728eeSbellard 154620054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 1547100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 154820054ef0SBlue Swirl } 1549eaa728eeSbellard rpl = selector & 3; 1550eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1551eaa728eeSbellard if (seg_reg == R_SS) { 1552eaa728eeSbellard /* must be writable segment */ 155320054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1554100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 155520054ef0SBlue Swirl } 155620054ef0SBlue Swirl if (rpl != cpl || dpl != cpl) { 1557100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 155820054ef0SBlue Swirl } 1559eaa728eeSbellard } else { 1560eaa728eeSbellard /* must be readable segment */ 156120054ef0SBlue Swirl if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1562100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 156320054ef0SBlue Swirl } 1564eaa728eeSbellard 1565eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1566eaa728eeSbellard /* if not conforming code, test rights */ 156720054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1568100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1569eaa728eeSbellard } 1570eaa728eeSbellard } 157120054ef0SBlue Swirl } 1572eaa728eeSbellard 1573eaa728eeSbellard if (!(e2 & DESC_P_MASK)) { 157420054ef0SBlue Swirl if (seg_reg == R_SS) { 1575100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 157620054ef0SBlue Swirl } else { 1577100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1578eaa728eeSbellard } 157920054ef0SBlue Swirl } 1580eaa728eeSbellard 1581eaa728eeSbellard /* set the access bit if not already set */ 1582eaa728eeSbellard if (!(e2 & DESC_A_MASK)) { 
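    /* The updated access byte is stored back into the GDT/LDT entry below,
     * so the accessed bit becomes visible in guest memory as on real CPUs. */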
1583eaa728eeSbellard e2 |= DESC_A_MASK; 1584100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1585eaa728eeSbellard } 1586eaa728eeSbellard 1587eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 1588eaa728eeSbellard get_seg_base(e1, e2), 1589eaa728eeSbellard get_seg_limit(e1, e2), 1590eaa728eeSbellard e2); 1591eaa728eeSbellard #if 0 159293fcfe39Saliguori qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1593eaa728eeSbellard selector, (unsigned long)sc->base, sc->limit, sc->flags); 1594eaa728eeSbellard #endif 1595eaa728eeSbellard } 1596eaa728eeSbellard } 1597eaa728eeSbellard 1598eaa728eeSbellard /* protected mode jump */ 15992999a0b2SBlue Swirl void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1600100ec099SPavel Dovgalyuk target_ulong next_eip) 1601eaa728eeSbellard { 1602eaa728eeSbellard int gate_cs, type; 1603eaa728eeSbellard uint32_t e1, e2, cpl, dpl, rpl, limit; 1604eaa728eeSbellard 160520054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1606100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 160720054ef0SBlue Swirl } 1608100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1609100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 161020054ef0SBlue Swirl } 1611eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1612eaa728eeSbellard if (e2 & DESC_S_MASK) { 161320054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1614100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 161520054ef0SBlue Swirl } 1616eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1617eaa728eeSbellard if (e2 & DESC_C_MASK) { 1618eaa728eeSbellard /* conforming code segment */ 161920054ef0SBlue Swirl if (dpl > cpl) { 1620100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 162120054ef0SBlue Swirl } 1622eaa728eeSbellard } else { 1623eaa728eeSbellard /* non conforming code segment */ 1624eaa728eeSbellard rpl = new_cs & 3; 162520054ef0SBlue Swirl if (rpl > cpl) { 1626100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1627eaa728eeSbellard } 162820054ef0SBlue Swirl if (dpl != cpl) { 1629100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 163020054ef0SBlue Swirl } 163120054ef0SBlue Swirl } 163220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1633100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 163420054ef0SBlue Swirl } 1635eaa728eeSbellard limit = get_seg_limit(e1, e2); 1636eaa728eeSbellard if (new_eip > limit && 1637db7196dbSAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1638db7196dbSAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 163920054ef0SBlue Swirl } 1640eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1641eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1642a78d0eabSliguang env->eip = new_eip; 1643eaa728eeSbellard } else { 1644eaa728eeSbellard /* jump to call or task gate */ 1645eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1646eaa728eeSbellard rpl = new_cs & 3; 1647eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1648eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 16490aca0605SAndrew Oates 16500aca0605SAndrew Oates #ifdef TARGET_X86_64 16510aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 16520aca0605SAndrew Oates if (type != 12) { 16530aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 16540aca0605SAndrew Oates } 16550aca0605SAndrew Oates } 16560aca0605SAndrew Oates #endif 1657eaa728eeSbellard switch (type) { 1658eaa728eeSbellard case 1: /* 286 TSS */ 1659eaa728eeSbellard case 9: /* 386 TSS */ 1660eaa728eeSbellard case 5: /* task gate */ 166120054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1662100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 166320054ef0SBlue Swirl } 1664100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1665eaa728eeSbellard break; 1666eaa728eeSbellard case 4: /* 286 call gate */ 1667eaa728eeSbellard case 12: /* 386 call gate */ 166820054ef0SBlue Swirl if ((dpl < cpl) || (dpl < rpl)) { 1669100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 167020054ef0SBlue Swirl } 167120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1672100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 167320054ef0SBlue Swirl } 1674eaa728eeSbellard gate_cs = e1 >> 16; 1675eaa728eeSbellard new_eip = (e1 & 0xffff); 167620054ef0SBlue Swirl if (type == 12) { 1677eaa728eeSbellard new_eip |= (e2 & 0xffff0000); 167820054ef0SBlue Swirl } 16790aca0605SAndrew Oates 16800aca0605SAndrew Oates #ifdef TARGET_X86_64 16810aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 16820aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 16830aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 16840aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 16850aca0605SAndrew Oates GETPC()); 16860aca0605SAndrew Oates } 16870aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 16880aca0605SAndrew Oates if (type != 0) { 16890aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 16900aca0605SAndrew Oates GETPC()); 16910aca0605SAndrew Oates } 16920aca0605SAndrew Oates new_eip |= ((target_ulong)e1) << 32; 16930aca0605SAndrew Oates } 16940aca0605SAndrew Oates #endif 16950aca0605SAndrew Oates 1696100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1697100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 169820054ef0SBlue Swirl } 1699eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1700eaa728eeSbellard /* must be code segment */ 1701eaa728eeSbellard if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 170220054ef0SBlue Swirl (DESC_S_MASK | DESC_CS_MASK))) { 1703100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 170420054ef0SBlue Swirl } 1705eaa728eeSbellard if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 170620054ef0SBlue Swirl (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1707100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 170820054ef0SBlue Swirl } 17090aca0605SAndrew Oates #ifdef TARGET_X86_64 17100aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17110aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 17120aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 17130aca0605SAndrew Oates } 17140aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 17150aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 17160aca0605SAndrew Oates } 17170aca0605SAndrew Oates } 17180aca0605SAndrew Oates #endif 171920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1720100ec099SPavel Dovgalyuk 
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 172120054ef0SBlue Swirl } 1722eaa728eeSbellard limit = get_seg_limit(e1, e2); 17230aca0605SAndrew Oates if (new_eip > limit && 17240aca0605SAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1725100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 172620054ef0SBlue Swirl } 1727eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1728eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1729a78d0eabSliguang env->eip = new_eip; 1730eaa728eeSbellard break; 1731eaa728eeSbellard default: 1732100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1733eaa728eeSbellard break; 1734eaa728eeSbellard } 1735eaa728eeSbellard } 1736eaa728eeSbellard } 1737eaa728eeSbellard 1738eaa728eeSbellard /* real mode call */ 17392999a0b2SBlue Swirl void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1, 1740eaa728eeSbellard int shift, int next_eip) 1741eaa728eeSbellard { 1742eaa728eeSbellard int new_eip; 1743eaa728eeSbellard uint32_t esp, esp_mask; 1744eaa728eeSbellard target_ulong ssp; 1745eaa728eeSbellard 1746eaa728eeSbellard new_eip = new_eip1; 174708b3ded6Sliguang esp = env->regs[R_ESP]; 1748eaa728eeSbellard esp_mask = get_sp_mask(env->segs[R_SS].flags); 1749eaa728eeSbellard ssp = env->segs[R_SS].base; 1750eaa728eeSbellard if (shift) { 1751100ec099SPavel Dovgalyuk PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); 1752100ec099SPavel Dovgalyuk PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC()); 1753eaa728eeSbellard } else { 1754100ec099SPavel Dovgalyuk PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC()); 1755100ec099SPavel Dovgalyuk PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC()); 1756eaa728eeSbellard } 1757eaa728eeSbellard 1758eaa728eeSbellard SET_ESP(esp, esp_mask); 1759eaa728eeSbellard env->eip = new_eip; 1760eaa728eeSbellard env->segs[R_CS].selector = new_cs; 1761eaa728eeSbellard env->segs[R_CS].base = (new_cs << 4); 1762eaa728eeSbellard } 1763eaa728eeSbellard 1764eaa728eeSbellard /* protected mode call */ 17652999a0b2SBlue Swirl void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1766100ec099SPavel Dovgalyuk int shift, target_ulong next_eip) 1767eaa728eeSbellard { 1768eaa728eeSbellard int new_stack, i; 17690aca0605SAndrew Oates uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 17700aca0605SAndrew Oates uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask; 1771eaa728eeSbellard uint32_t val, limit, old_sp_mask; 17720aca0605SAndrew Oates target_ulong ssp, old_ssp, offset, sp; 1773eaa728eeSbellard 17740aca0605SAndrew Oates LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 17756aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 177620054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1777100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 177820054ef0SBlue Swirl } 1779100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1780100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 178120054ef0SBlue Swirl } 1782eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1783d12d51d5Saliguori LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1784eaa728eeSbellard if (e2 & DESC_S_MASK) { 178520054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1786100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 178720054ef0SBlue Swirl } 
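        /* Direct far call to a code segment: the conforming/non-conforming
         * privilege checks mirror helper_ljmp_protected, after which the
         * return CS:EIP is pushed on the current stack. */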
1788eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1789eaa728eeSbellard if (e2 & DESC_C_MASK) { 1790eaa728eeSbellard /* conforming code segment */ 179120054ef0SBlue Swirl if (dpl > cpl) { 1792100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 179320054ef0SBlue Swirl } 1794eaa728eeSbellard } else { 1795eaa728eeSbellard /* non conforming code segment */ 1796eaa728eeSbellard rpl = new_cs & 3; 179720054ef0SBlue Swirl if (rpl > cpl) { 1798100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1799eaa728eeSbellard } 180020054ef0SBlue Swirl if (dpl != cpl) { 1801100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 180220054ef0SBlue Swirl } 180320054ef0SBlue Swirl } 180420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1805100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 180620054ef0SBlue Swirl } 1807eaa728eeSbellard 1808eaa728eeSbellard #ifdef TARGET_X86_64 1809eaa728eeSbellard /* XXX: check 16/32 bit cases in long mode */ 1810eaa728eeSbellard if (shift == 2) { 1811eaa728eeSbellard target_ulong rsp; 181220054ef0SBlue Swirl 1813eaa728eeSbellard /* 64 bit case */ 181408b3ded6Sliguang rsp = env->regs[R_ESP]; 1815100ec099SPavel Dovgalyuk PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC()); 1816100ec099SPavel Dovgalyuk PUSHQ_RA(rsp, next_eip, GETPC()); 1817eaa728eeSbellard /* from this point, not restartable */ 181808b3ded6Sliguang env->regs[R_ESP] = rsp; 1819eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1820eaa728eeSbellard get_seg_base(e1, e2), 1821eaa728eeSbellard get_seg_limit(e1, e2), e2); 1822a78d0eabSliguang env->eip = new_eip; 1823eaa728eeSbellard } else 1824eaa728eeSbellard #endif 1825eaa728eeSbellard { 182608b3ded6Sliguang sp = env->regs[R_ESP]; 1827eaa728eeSbellard sp_mask = get_sp_mask(env->segs[R_SS].flags); 1828eaa728eeSbellard ssp = env->segs[R_SS].base; 1829eaa728eeSbellard if (shift) { 1830100ec099SPavel Dovgalyuk PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 1831100ec099SPavel Dovgalyuk PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); 1832eaa728eeSbellard } else { 1833100ec099SPavel Dovgalyuk PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 1834100ec099SPavel Dovgalyuk PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); 1835eaa728eeSbellard } 1836eaa728eeSbellard 1837eaa728eeSbellard limit = get_seg_limit(e1, e2); 183820054ef0SBlue Swirl if (new_eip > limit) { 1839100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 184020054ef0SBlue Swirl } 1841eaa728eeSbellard /* from this point, not restartable */ 1842eaa728eeSbellard SET_ESP(sp, sp_mask); 1843eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1844eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1845a78d0eabSliguang env->eip = new_eip; 1846eaa728eeSbellard } 1847eaa728eeSbellard } else { 1848eaa728eeSbellard /* check gate type */ 1849eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1850eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1851eaa728eeSbellard rpl = new_cs & 3; 18520aca0605SAndrew Oates 18530aca0605SAndrew Oates #ifdef TARGET_X86_64 18540aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 18550aca0605SAndrew Oates if (type != 12) { 18560aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 18570aca0605SAndrew Oates } 18580aca0605SAndrew Oates } 18590aca0605SAndrew Oates #endif 18600aca0605SAndrew Oates 
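        /* System descriptor: a TSS or task gate performs a task switch, a
         * call gate redirects the call through the gate's target; any other
         * type raises #GP with the selector as error code. */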
1861eaa728eeSbellard switch (type) { 1862eaa728eeSbellard case 1: /* available 286 TSS */ 1863eaa728eeSbellard case 9: /* available 386 TSS */ 1864eaa728eeSbellard case 5: /* task gate */ 186520054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1866100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 186720054ef0SBlue Swirl } 1868100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1869eaa728eeSbellard return; 1870eaa728eeSbellard case 4: /* 286 call gate */ 1871eaa728eeSbellard case 12: /* 386 call gate */ 1872eaa728eeSbellard break; 1873eaa728eeSbellard default: 1874100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1875eaa728eeSbellard break; 1876eaa728eeSbellard } 1877eaa728eeSbellard shift = type >> 3; 1878eaa728eeSbellard 187920054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1880100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 188120054ef0SBlue Swirl } 1882eaa728eeSbellard /* check valid bit */ 188320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1884100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 188520054ef0SBlue Swirl } 1886eaa728eeSbellard selector = e1 >> 16; 1887eaa728eeSbellard param_count = e2 & 0x1f; 18880aca0605SAndrew Oates offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 18890aca0605SAndrew Oates #ifdef TARGET_X86_64 18900aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 18910aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 18920aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 18930aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 18940aca0605SAndrew Oates GETPC()); 18950aca0605SAndrew Oates } 18960aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 18970aca0605SAndrew Oates if (type != 0) { 18980aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 18990aca0605SAndrew Oates GETPC()); 19000aca0605SAndrew Oates } 19010aca0605SAndrew Oates offset |= ((target_ulong)e1) << 32; 19020aca0605SAndrew Oates } 19030aca0605SAndrew Oates #endif 190420054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 1905100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 190620054ef0SBlue Swirl } 1907eaa728eeSbellard 1908100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1909100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 191020054ef0SBlue Swirl } 191120054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1912100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 191320054ef0SBlue Swirl } 1914eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 191520054ef0SBlue Swirl if (dpl > cpl) { 1916100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 191720054ef0SBlue Swirl } 19180aca0605SAndrew Oates #ifdef TARGET_X86_64 19190aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 19200aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 19210aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 19220aca0605SAndrew Oates } 19230aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 19240aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 19250aca0605SAndrew Oates } 19260aca0605SAndrew Oates shift++; 19270aca0605SAndrew Oates } 
19280aca0605SAndrew Oates #endif 192920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1930100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 193120054ef0SBlue Swirl } 1932eaa728eeSbellard 1933eaa728eeSbellard if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1934eaa728eeSbellard /* to inner privilege */ 19350aca0605SAndrew Oates #ifdef TARGET_X86_64 19360aca0605SAndrew Oates if (shift == 2) { 19370aca0605SAndrew Oates sp = get_rsp_from_tss(env, dpl); 19380aca0605SAndrew Oates ss = dpl; /* SS = NULL selector with RPL = new CPL */ 19390aca0605SAndrew Oates new_stack = 1; 19400aca0605SAndrew Oates sp_mask = 0; 19410aca0605SAndrew Oates ssp = 0; /* SS base is always zero in IA-32e mode */ 19420aca0605SAndrew Oates LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 19430aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]); 19440aca0605SAndrew Oates } else 19450aca0605SAndrew Oates #endif 19460aca0605SAndrew Oates { 19470aca0605SAndrew Oates uint32_t sp32; 19480aca0605SAndrew Oates get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 194990a2541bSliguang LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 19500aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp32, param_count, 195190a2541bSliguang env->regs[R_ESP]); 19520aca0605SAndrew Oates sp = sp32; 195320054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 1954100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 195520054ef0SBlue Swirl } 195620054ef0SBlue Swirl if ((ss & 3) != dpl) { 1957100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 195820054ef0SBlue Swirl } 1959100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1960100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 196120054ef0SBlue Swirl } 1962eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 196320054ef0SBlue Swirl if (ss_dpl != dpl) { 1964100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 196520054ef0SBlue Swirl } 1966eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 1967eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 196820054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 1969100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 197020054ef0SBlue Swirl } 197120054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 1972100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 197320054ef0SBlue Swirl } 1974eaa728eeSbellard 19750aca0605SAndrew Oates sp_mask = get_sp_mask(ss_e2); 19760aca0605SAndrew Oates ssp = get_seg_base(ss_e1, ss_e2); 19770aca0605SAndrew Oates } 19780aca0605SAndrew Oates 197920054ef0SBlue Swirl /* push_size = ((param_count * 2) + 8) << shift; */ 1980eaa728eeSbellard 1981eaa728eeSbellard old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1982eaa728eeSbellard old_ssp = env->segs[R_SS].base; 19830aca0605SAndrew Oates #ifdef TARGET_X86_64 19840aca0605SAndrew Oates if (shift == 2) { 19850aca0605SAndrew Oates /* XXX: verify if new stack address is canonical */ 19860aca0605SAndrew Oates PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC()); 19870aca0605SAndrew Oates PUSHQ_RA(sp, env->regs[R_ESP], GETPC()); 19880aca0605SAndrew Oates /* parameters aren't supported for 64-bit call gates */ 19890aca0605SAndrew Oates } else 19900aca0605SAndrew Oates #endif 19910aca0605SAndrew Oates if (shift == 1) { 1992100ec099SPavel Dovgalyuk PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); 
1993100ec099SPavel Dovgalyuk PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); 1994eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 1995100ec099SPavel Dovgalyuk val = cpu_ldl_kernel_ra(env, old_ssp + 199690a2541bSliguang ((env->regs[R_ESP] + i * 4) & 1997100ec099SPavel Dovgalyuk old_sp_mask), GETPC()); 1998100ec099SPavel Dovgalyuk PUSHL_RA(ssp, sp, sp_mask, val, GETPC()); 1999eaa728eeSbellard } 2000eaa728eeSbellard } else { 2001100ec099SPavel Dovgalyuk PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC()); 2002100ec099SPavel Dovgalyuk PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC()); 2003eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 2004100ec099SPavel Dovgalyuk val = cpu_lduw_kernel_ra(env, old_ssp + 200590a2541bSliguang ((env->regs[R_ESP] + i * 2) & 2006100ec099SPavel Dovgalyuk old_sp_mask), GETPC()); 2007100ec099SPavel Dovgalyuk PUSHW_RA(ssp, sp, sp_mask, val, GETPC()); 2008eaa728eeSbellard } 2009eaa728eeSbellard } 2010eaa728eeSbellard new_stack = 1; 2011eaa728eeSbellard } else { 2012eaa728eeSbellard /* to same privilege */ 201308b3ded6Sliguang sp = env->regs[R_ESP]; 2014eaa728eeSbellard sp_mask = get_sp_mask(env->segs[R_SS].flags); 2015eaa728eeSbellard ssp = env->segs[R_SS].base; 201620054ef0SBlue Swirl /* push_size = (4 << shift); */ 2017eaa728eeSbellard new_stack = 0; 2018eaa728eeSbellard } 2019eaa728eeSbellard 20200aca0605SAndrew Oates #ifdef TARGET_X86_64 20210aca0605SAndrew Oates if (shift == 2) { 20220aca0605SAndrew Oates PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC()); 20230aca0605SAndrew Oates PUSHQ_RA(sp, next_eip, GETPC()); 20240aca0605SAndrew Oates } else 20250aca0605SAndrew Oates #endif 20260aca0605SAndrew Oates if (shift == 1) { 2027100ec099SPavel Dovgalyuk PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 2028100ec099SPavel Dovgalyuk PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC()); 2029eaa728eeSbellard } else { 2030100ec099SPavel Dovgalyuk PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC()); 2031100ec099SPavel Dovgalyuk PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC()); 2032eaa728eeSbellard } 2033eaa728eeSbellard 2034eaa728eeSbellard /* from this point, not restartable */ 2035eaa728eeSbellard 2036eaa728eeSbellard if (new_stack) { 20370aca0605SAndrew Oates #ifdef TARGET_X86_64 20380aca0605SAndrew Oates if (shift == 2) { 20390aca0605SAndrew Oates cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 20400aca0605SAndrew Oates } else 20410aca0605SAndrew Oates #endif 20420aca0605SAndrew Oates { 2043eaa728eeSbellard ss = (ss & ~3) | dpl; 2044eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 2045eaa728eeSbellard ssp, 2046eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2047eaa728eeSbellard ss_e2); 2048eaa728eeSbellard } 20490aca0605SAndrew Oates } 2050eaa728eeSbellard 2051eaa728eeSbellard selector = (selector & ~3) | dpl; 2052eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 2053eaa728eeSbellard get_seg_base(e1, e2), 2054eaa728eeSbellard get_seg_limit(e1, e2), 2055eaa728eeSbellard e2); 2056eaa728eeSbellard SET_ESP(sp, sp_mask); 2057a78d0eabSliguang env->eip = offset; 2058eaa728eeSbellard } 2059eaa728eeSbellard } 2060eaa728eeSbellard 2061eaa728eeSbellard /* real and vm86 mode iret */ 20622999a0b2SBlue Swirl void helper_iret_real(CPUX86State *env, int shift) 2063eaa728eeSbellard { 2064eaa728eeSbellard uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; 2065eaa728eeSbellard target_ulong ssp; 2066eaa728eeSbellard int eflags_mask; 2067eaa728eeSbellard 2068eaa728eeSbellard sp_mask = 0xffff; /* XXXX: use SS segment size? 
*/ 206908b3ded6Sliguang sp = env->regs[R_ESP]; 2070eaa728eeSbellard ssp = env->segs[R_SS].base; 2071eaa728eeSbellard if (shift == 1) { 2072eaa728eeSbellard /* 32 bits */ 2073100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_eip, GETPC()); 2074100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_cs, GETPC()); 2075eaa728eeSbellard new_cs &= 0xffff; 2076100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC()); 2077eaa728eeSbellard } else { 2078eaa728eeSbellard /* 16 bits */ 2079100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_eip, GETPC()); 2080100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_cs, GETPC()); 2081100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC()); 2082eaa728eeSbellard } 208308b3ded6Sliguang env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask); 2084bdadc0b5Smalc env->segs[R_CS].selector = new_cs; 2085bdadc0b5Smalc env->segs[R_CS].base = (new_cs << 4); 2086eaa728eeSbellard env->eip = new_eip; 208720054ef0SBlue Swirl if (env->eflags & VM_MASK) { 208820054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 208920054ef0SBlue Swirl NT_MASK; 209020054ef0SBlue Swirl } else { 209120054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 209220054ef0SBlue Swirl RF_MASK | NT_MASK; 209320054ef0SBlue Swirl } 209420054ef0SBlue Swirl if (shift == 0) { 2095eaa728eeSbellard eflags_mask &= 0xffff; 209620054ef0SBlue Swirl } 2097997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2098db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 2099eaa728eeSbellard } 2100eaa728eeSbellard 2101*c117e5b1SPhilippe Mathieu-Daudé static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 2102eaa728eeSbellard { 2103eaa728eeSbellard int dpl; 2104eaa728eeSbellard uint32_t e2; 2105eaa728eeSbellard 2106eaa728eeSbellard /* XXX: on x86_64, we do not want to nullify FS and GS because 2107eaa728eeSbellard they may still contain a valid base. 
I would be interested to 2108eaa728eeSbellard know how a real x86_64 CPU behaves */ 2109eaa728eeSbellard if ((seg_reg == R_FS || seg_reg == R_GS) && 211020054ef0SBlue Swirl (env->segs[seg_reg].selector & 0xfffc) == 0) { 2111eaa728eeSbellard return; 211220054ef0SBlue Swirl } 2113eaa728eeSbellard 2114eaa728eeSbellard e2 = env->segs[seg_reg].flags; 2115eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2116eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 2117eaa728eeSbellard /* data or non conforming code segment */ 2118eaa728eeSbellard if (dpl < cpl) { 2119c2ba0515SBin Meng cpu_x86_load_seg_cache(env, seg_reg, 0, 2120c2ba0515SBin Meng env->segs[seg_reg].base, 2121c2ba0515SBin Meng env->segs[seg_reg].limit, 2122c2ba0515SBin Meng env->segs[seg_reg].flags & ~DESC_P_MASK); 2123eaa728eeSbellard } 2124eaa728eeSbellard } 2125eaa728eeSbellard } 2126eaa728eeSbellard 2127eaa728eeSbellard /* protected mode iret */ 21282999a0b2SBlue Swirl static inline void helper_ret_protected(CPUX86State *env, int shift, 2129100ec099SPavel Dovgalyuk int is_iret, int addend, 2130100ec099SPavel Dovgalyuk uintptr_t retaddr) 2131eaa728eeSbellard { 2132eaa728eeSbellard uint32_t new_cs, new_eflags, new_ss; 2133eaa728eeSbellard uint32_t new_es, new_ds, new_fs, new_gs; 2134eaa728eeSbellard uint32_t e1, e2, ss_e1, ss_e2; 2135eaa728eeSbellard int cpl, dpl, rpl, eflags_mask, iopl; 2136eaa728eeSbellard target_ulong ssp, sp, new_eip, new_esp, sp_mask; 2137eaa728eeSbellard 2138eaa728eeSbellard #ifdef TARGET_X86_64 213920054ef0SBlue Swirl if (shift == 2) { 2140eaa728eeSbellard sp_mask = -1; 214120054ef0SBlue Swirl } else 2142eaa728eeSbellard #endif 214320054ef0SBlue Swirl { 2144eaa728eeSbellard sp_mask = get_sp_mask(env->segs[R_SS].flags); 214520054ef0SBlue Swirl } 214608b3ded6Sliguang sp = env->regs[R_ESP]; 2147eaa728eeSbellard ssp = env->segs[R_SS].base; 2148eaa728eeSbellard new_eflags = 0; /* avoid warning */ 2149eaa728eeSbellard #ifdef TARGET_X86_64 2150eaa728eeSbellard if (shift == 2) { 2151100ec099SPavel Dovgalyuk POPQ_RA(sp, new_eip, retaddr); 2152100ec099SPavel Dovgalyuk POPQ_RA(sp, new_cs, retaddr); 2153eaa728eeSbellard new_cs &= 0xffff; 2154eaa728eeSbellard if (is_iret) { 2155100ec099SPavel Dovgalyuk POPQ_RA(sp, new_eflags, retaddr); 2156eaa728eeSbellard } 2157eaa728eeSbellard } else 2158eaa728eeSbellard #endif 215920054ef0SBlue Swirl { 2160eaa728eeSbellard if (shift == 1) { 2161eaa728eeSbellard /* 32 bits */ 2162100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_eip, retaddr); 2163100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_cs, retaddr); 2164eaa728eeSbellard new_cs &= 0xffff; 2165eaa728eeSbellard if (is_iret) { 2166100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr); 216720054ef0SBlue Swirl if (new_eflags & VM_MASK) { 2168eaa728eeSbellard goto return_to_vm86; 2169eaa728eeSbellard } 217020054ef0SBlue Swirl } 2171eaa728eeSbellard } else { 2172eaa728eeSbellard /* 16 bits */ 2173100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_eip, retaddr); 2174100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_cs, retaddr); 217520054ef0SBlue Swirl if (is_iret) { 2176100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr); 2177eaa728eeSbellard } 217820054ef0SBlue Swirl } 217920054ef0SBlue Swirl } 2180d12d51d5Saliguori LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2181eaa728eeSbellard new_cs, new_eip, shift, addend); 21826aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 218320054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 
2184100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2185eaa728eeSbellard } 2186100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2187100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 218820054ef0SBlue Swirl } 218920054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || 219020054ef0SBlue Swirl !(e2 & DESC_CS_MASK)) { 2191100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 219220054ef0SBlue Swirl } 219320054ef0SBlue Swirl cpl = env->hflags & HF_CPL_MASK; 219420054ef0SBlue Swirl rpl = new_cs & 3; 219520054ef0SBlue Swirl if (rpl < cpl) { 2196100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 219720054ef0SBlue Swirl } 219820054ef0SBlue Swirl dpl = (e2 >> DESC_DPL_SHIFT) & 3; 219920054ef0SBlue Swirl if (e2 & DESC_C_MASK) { 220020054ef0SBlue Swirl if (dpl > rpl) { 2201100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 220220054ef0SBlue Swirl } 220320054ef0SBlue Swirl } else { 220420054ef0SBlue Swirl if (dpl != rpl) { 2205100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 220620054ef0SBlue Swirl } 220720054ef0SBlue Swirl } 220820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 2209100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 221020054ef0SBlue Swirl } 2211eaa728eeSbellard 2212eaa728eeSbellard sp += addend; 2213eaa728eeSbellard if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2214eaa728eeSbellard ((env->hflags & HF_CS64_MASK) && !is_iret))) { 22151235fc06Sths /* return to same privilege level */ 2216eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2217eaa728eeSbellard get_seg_base(e1, e2), 2218eaa728eeSbellard get_seg_limit(e1, e2), 2219eaa728eeSbellard e2); 2220eaa728eeSbellard } else { 2221eaa728eeSbellard /* return to different privilege level */ 2222eaa728eeSbellard #ifdef TARGET_X86_64 2223eaa728eeSbellard if (shift == 2) { 2224100ec099SPavel Dovgalyuk POPQ_RA(sp, new_esp, retaddr); 2225100ec099SPavel Dovgalyuk POPQ_RA(sp, new_ss, retaddr); 2226eaa728eeSbellard new_ss &= 0xffff; 2227eaa728eeSbellard } else 2228eaa728eeSbellard #endif 222920054ef0SBlue Swirl { 2230eaa728eeSbellard if (shift == 1) { 2231eaa728eeSbellard /* 32 bits */ 2232100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_esp, retaddr); 2233100ec099SPavel Dovgalyuk POPL_RA(ssp, sp, sp_mask, new_ss, retaddr); 2234eaa728eeSbellard new_ss &= 0xffff; 2235eaa728eeSbellard } else { 2236eaa728eeSbellard /* 16 bits */ 2237100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_esp, retaddr); 2238100ec099SPavel Dovgalyuk POPW_RA(ssp, sp, sp_mask, new_ss, retaddr); 2239eaa728eeSbellard } 224020054ef0SBlue Swirl } 2241d12d51d5Saliguori LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2242eaa728eeSbellard new_ss, new_esp); 2243eaa728eeSbellard if ((new_ss & 0xfffc) == 0) { 2244eaa728eeSbellard #ifdef TARGET_X86_64 2245eaa728eeSbellard /* NULL ss is allowed in long mode if cpl != 3 */ 2246eaa728eeSbellard /* XXX: test CS64? 
*/ 2247eaa728eeSbellard if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2248eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2249eaa728eeSbellard 0, 0xffffffff, 2250eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2251eaa728eeSbellard DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2252eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 2253eaa728eeSbellard ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */ 2254eaa728eeSbellard } else 2255eaa728eeSbellard #endif 2256eaa728eeSbellard { 2257100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2258eaa728eeSbellard } 2259eaa728eeSbellard } else { 226020054ef0SBlue Swirl if ((new_ss & 3) != rpl) { 2261100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 226220054ef0SBlue Swirl } 2263100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2264100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 226520054ef0SBlue Swirl } 2266eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 2267eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 226820054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 2269100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 227020054ef0SBlue Swirl } 2271eaa728eeSbellard dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 227220054ef0SBlue Swirl if (dpl != rpl) { 2273100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 227420054ef0SBlue Swirl } 227520054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 2276100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 227720054ef0SBlue Swirl } 2278eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2279eaa728eeSbellard get_seg_base(ss_e1, ss_e2), 2280eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2281eaa728eeSbellard ss_e2); 2282eaa728eeSbellard } 2283eaa728eeSbellard 2284eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2285eaa728eeSbellard get_seg_base(e1, e2), 2286eaa728eeSbellard get_seg_limit(e1, e2), 2287eaa728eeSbellard e2); 2288eaa728eeSbellard sp = new_esp; 2289eaa728eeSbellard #ifdef TARGET_X86_64 229020054ef0SBlue Swirl if (env->hflags & HF_CS64_MASK) { 2291eaa728eeSbellard sp_mask = -1; 229220054ef0SBlue Swirl } else 2293eaa728eeSbellard #endif 229420054ef0SBlue Swirl { 2295eaa728eeSbellard sp_mask = get_sp_mask(ss_e2); 229620054ef0SBlue Swirl } 2297eaa728eeSbellard 2298eaa728eeSbellard /* validate data segments */ 22992999a0b2SBlue Swirl validate_seg(env, R_ES, rpl); 23002999a0b2SBlue Swirl validate_seg(env, R_DS, rpl); 23012999a0b2SBlue Swirl validate_seg(env, R_FS, rpl); 23022999a0b2SBlue Swirl validate_seg(env, R_GS, rpl); 2303eaa728eeSbellard 2304eaa728eeSbellard sp += addend; 2305eaa728eeSbellard } 2306eaa728eeSbellard SET_ESP(sp, sp_mask); 2307eaa728eeSbellard env->eip = new_eip; 2308eaa728eeSbellard if (is_iret) { 2309eaa728eeSbellard /* NOTE: 'cpl' is the _old_ CPL */ 2310eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 231120054ef0SBlue Swirl if (cpl == 0) { 2312eaa728eeSbellard eflags_mask |= IOPL_MASK; 231320054ef0SBlue Swirl } 2314eaa728eeSbellard iopl = (env->eflags >> IOPL_SHIFT) & 3; 231520054ef0SBlue Swirl if (cpl <= iopl) { 2316eaa728eeSbellard eflags_mask |= IF_MASK; 231720054ef0SBlue Swirl } 231820054ef0SBlue Swirl if (shift == 0) { 2319eaa728eeSbellard eflags_mask &= 0xffff; 232020054ef0SBlue Swirl } 2321997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2322eaa728eeSbellard } 
2323eaa728eeSbellard     return;
2324eaa728eeSbellard
2325eaa728eeSbellard  return_to_vm86:
2326100ec099SPavel Dovgalyuk     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2327100ec099SPavel Dovgalyuk     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2328100ec099SPavel Dovgalyuk     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2329100ec099SPavel Dovgalyuk     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2330100ec099SPavel Dovgalyuk     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2331100ec099SPavel Dovgalyuk     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2332eaa728eeSbellard
2333eaa728eeSbellard     /* modify processor state */
2334997ff0d9SBlue Swirl     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2335997ff0d9SBlue Swirl                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2336997ff0d9SBlue Swirl                     VIP_MASK);
23372999a0b2SBlue Swirl     load_seg_vm(env, R_CS, new_cs & 0xffff);
23382999a0b2SBlue Swirl     load_seg_vm(env, R_SS, new_ss & 0xffff);
23392999a0b2SBlue Swirl     load_seg_vm(env, R_ES, new_es & 0xffff);
23402999a0b2SBlue Swirl     load_seg_vm(env, R_DS, new_ds & 0xffff);
23412999a0b2SBlue Swirl     load_seg_vm(env, R_FS, new_fs & 0xffff);
23422999a0b2SBlue Swirl     load_seg_vm(env, R_GS, new_gs & 0xffff);
2343eaa728eeSbellard
2344eaa728eeSbellard     env->eip = new_eip & 0xffff;
234508b3ded6Sliguang     env->regs[R_ESP] = new_esp;
2346eaa728eeSbellard }
2347eaa728eeSbellard
23482999a0b2SBlue Swirl void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2349eaa728eeSbellard {
2350eaa728eeSbellard     int tss_selector, type;
2351eaa728eeSbellard     uint32_t e1, e2;
2352eaa728eeSbellard
2353eaa728eeSbellard     /* specific case for TSS */
2354eaa728eeSbellard     if (env->eflags & NT_MASK) {
2355eaa728eeSbellard #ifdef TARGET_X86_64
235620054ef0SBlue Swirl         if (env->hflags & HF_LMA_MASK) {
2357100ec099SPavel Dovgalyuk             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
235820054ef0SBlue Swirl         }
2359eaa728eeSbellard #endif
2360100ec099SPavel Dovgalyuk         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
236120054ef0SBlue Swirl         if (tss_selector & 4) {
2362100ec099SPavel Dovgalyuk             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
236320054ef0SBlue Swirl         }
2364100ec099SPavel Dovgalyuk         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2365100ec099SPavel Dovgalyuk             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
236620054ef0SBlue Swirl         }
2367eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2368eaa728eeSbellard         /* NOTE: we check both segment and busy TSS */
236920054ef0SBlue Swirl         if (type != 3) {
2370100ec099SPavel Dovgalyuk             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
237120054ef0SBlue Swirl         }
2372100ec099SPavel Dovgalyuk         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2373eaa728eeSbellard     } else {
2374100ec099SPavel Dovgalyuk         helper_ret_protected(env, shift, 1, 0, GETPC());
2375eaa728eeSbellard     }
2376db620f46Sbellard     env->hflags2 &= ~HF2_NMI_MASK;
2377eaa728eeSbellard }
2378eaa728eeSbellard
23792999a0b2SBlue Swirl void helper_lret_protected(CPUX86State *env, int shift, int addend)
2380eaa728eeSbellard {
2381100ec099SPavel Dovgalyuk     helper_ret_protected(env, shift, 0, addend, GETPC());
2382eaa728eeSbellard }
2383eaa728eeSbellard
/*
 * SYSENTER: fast system call entry.  CS and SS are derived from the
 * SYSENTER_CS MSR, ESP/EIP from SYSENTER_ESP/EIP, and VM, IF and RF
 * are cleared on entry.
 */
23842999a0b2SBlue Swirl void helper_sysenter(CPUX86State *env)
2385eaa728eeSbellard {
2386eaa728eeSbellard     if (env->sysenter_cs == 0) {
2387100ec099SPavel Dovgalyuk         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2388eaa728eeSbellard     }
2389eaa728eeSbellard     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
23902436b61aSbalrog
23912436b61aSbalrog #ifdef TARGET_X86_64
23922436b61aSbalrog     if (env->hflags & HF_LMA_MASK) {
23932436b61aSbalrog         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
23942436b61aSbalrog                                0, 0xffffffff,
23952436b61aSbalrog                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
23962436b61aSbalrog                                DESC_S_MASK |
239720054ef0SBlue Swirl                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
239820054ef0SBlue Swirl                                DESC_L_MASK);
23992436b61aSbalrog     } else
24002436b61aSbalrog #endif
24012436b61aSbalrog     {
2402eaa728eeSbellard         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2403eaa728eeSbellard                                0, 0xffffffff,
2404eaa728eeSbellard                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2405eaa728eeSbellard                                DESC_S_MASK |
2406eaa728eeSbellard                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
24072436b61aSbalrog     }
2408eaa728eeSbellard     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2409eaa728eeSbellard                            0, 0xffffffff,
2410eaa728eeSbellard                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2411eaa728eeSbellard                            DESC_S_MASK |
2412eaa728eeSbellard                            DESC_W_MASK | DESC_A_MASK);
241308b3ded6Sliguang     env->regs[R_ESP] = env->sysenter_esp;
2414a78d0eabSliguang     env->eip = env->sysenter_eip;
2415eaa728eeSbellard }
2416eaa728eeSbellard
/*
 * SYSEXIT: fast return to CPL 3.  CS and SS are derived from the
 * SYSENTER_CS MSR; the new ESP and EIP are taken from ECX and EDX.
 */
24172999a0b2SBlue Swirl void helper_sysexit(CPUX86State *env, int dflag)
2418eaa728eeSbellard {
2419eaa728eeSbellard     int cpl;
2420eaa728eeSbellard
2421eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2422eaa728eeSbellard     if (env->sysenter_cs == 0 || cpl != 0) {
2423100ec099SPavel Dovgalyuk         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2424eaa728eeSbellard     }
24252436b61aSbalrog #ifdef TARGET_X86_64
24262436b61aSbalrog     if (dflag == 2) {
242720054ef0SBlue Swirl         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
242820054ef0SBlue Swirl                                3, 0, 0xffffffff,
24292436b61aSbalrog                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
24302436b61aSbalrog                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
243120054ef0SBlue Swirl                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
243220054ef0SBlue Swirl                                DESC_L_MASK);
243320054ef0SBlue Swirl         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
243420054ef0SBlue Swirl                                3, 0, 0xffffffff,
24352436b61aSbalrog                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
24362436b61aSbalrog                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
24372436b61aSbalrog                                DESC_W_MASK | DESC_A_MASK);
24382436b61aSbalrog     } else
24392436b61aSbalrog #endif
24402436b61aSbalrog     {
244120054ef0SBlue Swirl         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
244220054ef0SBlue Swirl                                3, 0, 0xffffffff,
2443eaa728eeSbellard                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2444eaa728eeSbellard                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2445eaa728eeSbellard                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
244620054ef0SBlue Swirl         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
244720054ef0SBlue Swirl                                3, 0, 0xffffffff,
2448eaa728eeSbellard                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2449eaa728eeSbellard                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2450eaa728eeSbellard                                DESC_W_MASK | DESC_A_MASK);
24512436b61aSbalrog     }
245208b3ded6Sliguang     env->regs[R_ESP] = env->regs[R_ECX];
2453a78d0eabSliguang     env->eip = env->regs[R_EDX];
2454eaa728eeSbellard }
2455eaa728eeSbellard
24562999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2457eaa728eeSbellard {
2458eaa728eeSbellard     unsigned int limit;
2459eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2460eaa728eeSbellard     int rpl, dpl, cpl, type;
2461eaa728eeSbellard
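    /*
     * LSL: on success the segment limit is returned and ZF is set in
     * the computed flags; any failure only clears ZF and returns 0,
     * no exception is raised.
     */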
2462eaa728eeSbellard     selector = selector1 & 0xffff;
2463f0967a1aSBlue Swirl     eflags = cpu_cc_compute_all(env, CC_OP);
246420054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2465dc1ded53Saliguori         goto fail;
246620054ef0SBlue Swirl     }
2467100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2468eaa728eeSbellard         goto fail;
246920054ef0SBlue Swirl     }
2470eaa728eeSbellard     rpl = selector & 3;
2471eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2472eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2473eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2474eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2475eaa728eeSbellard             /* conforming */
2476eaa728eeSbellard         } else {
247720054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2478eaa728eeSbellard                 goto fail;
2479eaa728eeSbellard             }
248020054ef0SBlue Swirl         }
2481eaa728eeSbellard     } else {
2482eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2483eaa728eeSbellard         switch (type) {
2484eaa728eeSbellard         case 1:
2485eaa728eeSbellard         case 2:
2486eaa728eeSbellard         case 3:
2487eaa728eeSbellard         case 9:
2488eaa728eeSbellard         case 11:
2489eaa728eeSbellard             break;
2490eaa728eeSbellard         default:
2491eaa728eeSbellard             goto fail;
2492eaa728eeSbellard         }
2493eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2494eaa728eeSbellard         fail:
2495eaa728eeSbellard             CC_SRC = eflags & ~CC_Z;
2496eaa728eeSbellard             return 0;
2497eaa728eeSbellard         }
2498eaa728eeSbellard     }
2499eaa728eeSbellard     limit = get_seg_limit(e1, e2);
2500eaa728eeSbellard     CC_SRC = eflags | CC_Z;
2501eaa728eeSbellard     return limit;
2502eaa728eeSbellard }
2503eaa728eeSbellard
/*
 * LAR: on success ZF is set and the descriptor's access rights and
 * attribute bits (e2 & 0x00f0ff00) are returned; otherwise ZF is
 * cleared and 0 is returned.
 */
25042999a0b2SBlue Swirl target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2505eaa728eeSbellard {
2506eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2507eaa728eeSbellard     int rpl, dpl, cpl, type;
2508eaa728eeSbellard
2509eaa728eeSbellard     selector = selector1 & 0xffff;
2510f0967a1aSBlue Swirl     eflags = cpu_cc_compute_all(env, CC_OP);
251120054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2512eaa728eeSbellard         goto fail;
251320054ef0SBlue Swirl     }
2514100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2515eaa728eeSbellard         goto fail;
251620054ef0SBlue Swirl     }
2517eaa728eeSbellard     rpl = selector & 3;
2518eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2519eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2520eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2521eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2522eaa728eeSbellard             /* conforming */
2523eaa728eeSbellard         } else {
252420054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2525eaa728eeSbellard                 goto fail;
2526eaa728eeSbellard             }
252720054ef0SBlue Swirl         }
2528eaa728eeSbellard     } else {
2529eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2530eaa728eeSbellard         switch (type) {
2531eaa728eeSbellard         case 1:
2532eaa728eeSbellard         case 2:
2533eaa728eeSbellard         case 3:
2534eaa728eeSbellard         case 4:
2535eaa728eeSbellard         case 5:
2536eaa728eeSbellard         case 9:
2537eaa728eeSbellard         case 11:
2538eaa728eeSbellard         case 12:
2539eaa728eeSbellard             break;
2540eaa728eeSbellard         default:
2541eaa728eeSbellard             goto fail;
2542eaa728eeSbellard         }
2543eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2544eaa728eeSbellard         fail:
2545eaa728eeSbellard             CC_SRC = eflags & ~CC_Z;
2546eaa728eeSbellard             return 0;
2547eaa728eeSbellard         }
2548eaa728eeSbellard     }
2549eaa728eeSbellard     CC_SRC = eflags | CC_Z;
2550eaa728eeSbellard     return e2 & 0x00f0ff00;
2551eaa728eeSbellard }
2552eaa728eeSbellard
/*
 * VERR: set ZF if the segment described by the selector is readable
 * at the current CPL and RPL; otherwise clear ZF.
 */
25532999a0b2SBlue Swirl void helper_verr(CPUX86State *env, target_ulong selector1)
2554eaa728eeSbellard {
2555eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2556eaa728eeSbellard     int rpl, dpl, cpl;
2557eaa728eeSbellard
2558eaa728eeSbellard     selector = selector1 & 0xffff;
2559f0967a1aSBlue Swirl     eflags = cpu_cc_compute_all(env, CC_OP);
256020054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2561eaa728eeSbellard         goto fail;
256220054ef0SBlue Swirl     }
2563100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2564eaa728eeSbellard         goto fail;
256520054ef0SBlue Swirl     }
256620054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2567eaa728eeSbellard         goto fail;
256820054ef0SBlue Swirl     }
2569eaa728eeSbellard     rpl = selector & 3;
2570eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2571eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2572eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
257320054ef0SBlue Swirl         if (!(e2 & DESC_R_MASK)) {
2574eaa728eeSbellard             goto fail;
257520054ef0SBlue Swirl         }
2576eaa728eeSbellard         if (!(e2 & DESC_C_MASK)) {
257720054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2578eaa728eeSbellard                 goto fail;
2579eaa728eeSbellard             }
258020054ef0SBlue Swirl         }
2581eaa728eeSbellard     } else {
2582eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2583eaa728eeSbellard         fail:
2584eaa728eeSbellard             CC_SRC = eflags & ~CC_Z;
2585eaa728eeSbellard             return;
2586eaa728eeSbellard         }
2587eaa728eeSbellard     }
2588eaa728eeSbellard     CC_SRC = eflags | CC_Z;
2589eaa728eeSbellard }
2590eaa728eeSbellard
/*
 * VERW: set ZF if the segment described by the selector is a data
 * segment writable at the current CPL and RPL; otherwise clear ZF.
 */
25912999a0b2SBlue Swirl void helper_verw(CPUX86State *env, target_ulong selector1)
2592eaa728eeSbellard {
2593eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2594eaa728eeSbellard     int rpl, dpl, cpl;
2595eaa728eeSbellard
2596eaa728eeSbellard     selector = selector1 & 0xffff;
2597f0967a1aSBlue Swirl     eflags = cpu_cc_compute_all(env, CC_OP);
259820054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2599eaa728eeSbellard         goto fail;
260020054ef0SBlue Swirl     }
2601100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2602eaa728eeSbellard         goto fail;
260320054ef0SBlue Swirl     }
260420054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2605eaa728eeSbellard         goto fail;
260620054ef0SBlue Swirl     }
2607eaa728eeSbellard     rpl = selector & 3;
2608eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2609eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2610eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
2611eaa728eeSbellard         goto fail;
2612eaa728eeSbellard     } else {
261320054ef0SBlue Swirl         if (dpl < cpl || dpl < rpl) {
2614eaa728eeSbellard             goto fail;
261520054ef0SBlue Swirl         }
2616eaa728eeSbellard         if (!(e2 & DESC_W_MASK)) {
2617eaa728eeSbellard         fail:
2618eaa728eeSbellard             CC_SRC = eflags & ~CC_Z;
2619eaa728eeSbellard             return;
2620eaa728eeSbellard         }
2621eaa728eeSbellard     }
2622eaa728eeSbellard     CC_SRC = eflags | CC_Z;
2623eaa728eeSbellard }
2624eaa728eeSbellard
26253e457172SBlue Swirl #if defined(CONFIG_USER_ONLY)
2626*c117e5b1SPhilippe Mathieu-Daudé void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector)
26273e457172SBlue Swirl {
26283e457172SBlue Swirl     if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2629b98dbc90SPaolo Bonzini         int dpl = (env->eflags & VM_MASK) ? 3 : 0;
26303e457172SBlue Swirl         selector &= 0xffff;
26313e457172SBlue Swirl         cpu_x86_load_seg_cache(env, seg_reg, selector,
2632b98dbc90SPaolo Bonzini                                (selector << 4), 0xffff,
2633b98dbc90SPaolo Bonzini                                DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2634b98dbc90SPaolo Bonzini                                DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
26353e457172SBlue Swirl     } else {
26362999a0b2SBlue Swirl         helper_load_seg(env, seg_reg, selector);
26373e457172SBlue Swirl     }
26383e457172SBlue Swirl }
26393e457172SBlue Swirl #endif
264081cf8d8aSPaolo Bonzini
264181cf8d8aSPaolo Bonzini /* check if Port I/O is allowed in TSS */
2642100ec099SPavel Dovgalyuk static inline void check_io(CPUX86State *env, int addr, int size,
2643100ec099SPavel Dovgalyuk                             uintptr_t retaddr)
264481cf8d8aSPaolo Bonzini {
264581cf8d8aSPaolo Bonzini     int io_offset, val, mask;
264681cf8d8aSPaolo Bonzini
264781cf8d8aSPaolo Bonzini     /* TSS must be a valid 32 bit one */
264881cf8d8aSPaolo Bonzini     if (!(env->tr.flags & DESC_P_MASK) ||
264981cf8d8aSPaolo Bonzini         ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
265081cf8d8aSPaolo Bonzini         env->tr.limit < 103) {
265181cf8d8aSPaolo Bonzini         goto fail;
265281cf8d8aSPaolo Bonzini     }
2653100ec099SPavel Dovgalyuk     io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
265481cf8d8aSPaolo Bonzini     io_offset += (addr >> 3);
265581cf8d8aSPaolo Bonzini     /* Note: the check needs two bytes */
265681cf8d8aSPaolo Bonzini     if ((io_offset + 1) > env->tr.limit) {
265781cf8d8aSPaolo Bonzini         goto fail;
265881cf8d8aSPaolo Bonzini     }
2659100ec099SPavel Dovgalyuk     val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
266081cf8d8aSPaolo Bonzini     val >>= (addr & 7);
266181cf8d8aSPaolo Bonzini     mask = (1 << size) - 1;
    /* e.g. a one byte access to port 0x3f8 tests bit 0 of the bitmap
       byte at offset 0x3f8 / 8 = 0x7f from the start of the I/O bitmap */
266281cf8d8aSPaolo Bonzini     /* all bits must be zero to allow the I/O */
266381cf8d8aSPaolo Bonzini     if ((val & mask) != 0) {
266481cf8d8aSPaolo Bonzini     fail:
2665100ec099SPavel Dovgalyuk         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
266681cf8d8aSPaolo Bonzini     }
266781cf8d8aSPaolo Bonzini }
266881cf8d8aSPaolo Bonzini
266981cf8d8aSPaolo Bonzini void helper_check_iob(CPUX86State *env, uint32_t t0)
267081cf8d8aSPaolo Bonzini {
2671100ec099SPavel Dovgalyuk     check_io(env, t0, 1, GETPC());
267281cf8d8aSPaolo Bonzini }
267381cf8d8aSPaolo Bonzini
267481cf8d8aSPaolo Bonzini void helper_check_iow(CPUX86State *env, uint32_t t0)
267581cf8d8aSPaolo Bonzini {
2676100ec099SPavel Dovgalyuk     check_io(env, t0, 2, GETPC());
267781cf8d8aSPaolo Bonzini }
267881cf8d8aSPaolo Bonzini
267981cf8d8aSPaolo Bonzini void helper_check_iol(CPUX86State *env, uint32_t t0)
268081cf8d8aSPaolo Bonzini {
2681100ec099SPavel Dovgalyuk     check_io(env, t0, 4, GETPC());
268281cf8d8aSPaolo Bonzini }
2683