1eaa728eeSbellard /* 210774999SBlue Swirl * x86 segmentation related helpers: 310774999SBlue Swirl * TSS, interrupts, system calls, jumps and call/task gates, descriptors 4eaa728eeSbellard * 5eaa728eeSbellard * Copyright (c) 2003 Fabrice Bellard 6eaa728eeSbellard * 7eaa728eeSbellard * This library is free software; you can redistribute it and/or 8eaa728eeSbellard * modify it under the terms of the GNU Lesser General Public 9eaa728eeSbellard * License as published by the Free Software Foundation; either 10d9ff33adSChetan Pant * version 2.1 of the License, or (at your option) any later version. 11eaa728eeSbellard * 12eaa728eeSbellard * This library is distributed in the hope that it will be useful, 13eaa728eeSbellard * but WITHOUT ANY WARRANTY; without even the implied warranty of 14eaa728eeSbellard * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15eaa728eeSbellard * Lesser General Public License for more details. 16eaa728eeSbellard * 17eaa728eeSbellard * You should have received a copy of the GNU Lesser General Public 188167ee88SBlue Swirl * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19eaa728eeSbellard */ 2083dae095SPaolo Bonzini 21b6a0aa05SPeter Maydell #include "qemu/osdep.h" 223e457172SBlue Swirl #include "cpu.h" 231de7afc9SPaolo Bonzini #include "qemu/log.h" 242ef6175aSRichard Henderson #include "exec/helper-proto.h" 2563c91552SPaolo Bonzini #include "exec/exec-all.h" 26f08b6170SPaolo Bonzini #include "exec/cpu_ldst.h" 27508127e2SPaolo Bonzini #include "exec/log.h" 28ed69e831SClaudio Fontana #include "helper-tcg.h" 2930493a03SClaudio Fontana #include "seg_helper.h" 308b131065SPaolo Bonzini #include "access.h" 318a201bd4SPaolo Bonzini 32059368bcSRichard Henderson #ifdef TARGET_X86_64 33059368bcSRichard Henderson #define SET_ESP(val, sp_mask) \ 34059368bcSRichard Henderson do { \ 35059368bcSRichard Henderson if ((sp_mask) == 0xffff) { \ 36059368bcSRichard Henderson env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ 37059368bcSRichard Henderson ((val) & 0xffff); \ 38059368bcSRichard Henderson } else if ((sp_mask) == 0xffffffffLL) { \ 39059368bcSRichard Henderson env->regs[R_ESP] = (uint32_t)(val); \ 40059368bcSRichard Henderson } else { \ 41059368bcSRichard Henderson env->regs[R_ESP] = (val); \ 42059368bcSRichard Henderson } \ 43059368bcSRichard Henderson } while (0) 44059368bcSRichard Henderson #else 45059368bcSRichard Henderson #define SET_ESP(val, sp_mask) \ 46059368bcSRichard Henderson do { \ 47059368bcSRichard Henderson env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ 48059368bcSRichard Henderson ((val) & (sp_mask)); \ 49059368bcSRichard Henderson } while (0) 50059368bcSRichard Henderson #endif 51059368bcSRichard Henderson 52059368bcSRichard Henderson /* XXX: use mmu_index to have proper DPL support */ 53059368bcSRichard Henderson typedef struct StackAccess 54059368bcSRichard Henderson { 55059368bcSRichard Henderson CPUX86State *env; 56059368bcSRichard Henderson uintptr_t ra; 57059368bcSRichard Henderson target_ulong ss_base; 58059368bcSRichard Henderson target_ulong sp; 59059368bcSRichard Henderson target_ulong sp_mask; 608053862aSPaolo Bonzini int mmu_index; 61059368bcSRichard Henderson } StackAccess; 62059368bcSRichard Henderson 63059368bcSRichard Henderson static void pushw(StackAccess *sa, uint16_t val) 64059368bcSRichard Henderson { 65059368bcSRichard Henderson sa->sp -= 2; 668053862aSPaolo Bonzini cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask), 678053862aSPaolo Bonzini val, sa->mmu_index, sa->ra); 
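    /*
     * Note: only the store address is masked; sa->sp itself stays
     * unmasked and the caller commits it once the whole push/pop
     * sequence has succeeded.  Typical pattern (sketch, as used by the
     * interrupt helpers further down in this file):
     *
     *     sa.sp = env->regs[R_ESP];
     *     pushw(&sa, value);           <- may fault, ESP not yet touched
     *     SET_ESP(sa.sp, sa.sp_mask);
     */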
68059368bcSRichard Henderson } 69059368bcSRichard Henderson 70059368bcSRichard Henderson static void pushl(StackAccess *sa, uint32_t val) 71059368bcSRichard Henderson { 72059368bcSRichard Henderson sa->sp -= 4; 738053862aSPaolo Bonzini cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask), 748053862aSPaolo Bonzini val, sa->mmu_index, sa->ra); 75059368bcSRichard Henderson } 76059368bcSRichard Henderson 77059368bcSRichard Henderson static uint16_t popw(StackAccess *sa) 78059368bcSRichard Henderson { 798053862aSPaolo Bonzini uint16_t ret = cpu_lduw_mmuidx_ra(sa->env, 80059368bcSRichard Henderson sa->ss_base + (sa->sp & sa->sp_mask), 818053862aSPaolo Bonzini sa->mmu_index, sa->ra); 82059368bcSRichard Henderson sa->sp += 2; 83059368bcSRichard Henderson return ret; 84059368bcSRichard Henderson } 85059368bcSRichard Henderson 86059368bcSRichard Henderson static uint32_t popl(StackAccess *sa) 87059368bcSRichard Henderson { 888053862aSPaolo Bonzini uint32_t ret = cpu_ldl_mmuidx_ra(sa->env, 89059368bcSRichard Henderson sa->ss_base + (sa->sp & sa->sp_mask), 908053862aSPaolo Bonzini sa->mmu_index, sa->ra); 91059368bcSRichard Henderson sa->sp += 4; 92059368bcSRichard Henderson return ret; 93059368bcSRichard Henderson } 94059368bcSRichard Henderson 9550fcc7cbSGareth Webb int get_pg_mode(CPUX86State *env) 9650fcc7cbSGareth Webb { 978fa11a4dSAlexander Graf int pg_mode = PG_MODE_PG; 9850fcc7cbSGareth Webb if (!(env->cr[0] & CR0_PG_MASK)) { 9950fcc7cbSGareth Webb return 0; 10050fcc7cbSGareth Webb } 10150fcc7cbSGareth Webb if (env->cr[0] & CR0_WP_MASK) { 10250fcc7cbSGareth Webb pg_mode |= PG_MODE_WP; 10350fcc7cbSGareth Webb } 10450fcc7cbSGareth Webb if (env->cr[4] & CR4_PAE_MASK) { 10550fcc7cbSGareth Webb pg_mode |= PG_MODE_PAE; 10650fcc7cbSGareth Webb if (env->efer & MSR_EFER_NXE) { 10750fcc7cbSGareth Webb pg_mode |= PG_MODE_NXE; 10850fcc7cbSGareth Webb } 10950fcc7cbSGareth Webb } 11050fcc7cbSGareth Webb if (env->cr[4] & CR4_PSE_MASK) { 11150fcc7cbSGareth Webb pg_mode |= PG_MODE_PSE; 11250fcc7cbSGareth Webb } 11350fcc7cbSGareth Webb if (env->cr[4] & CR4_SMEP_MASK) { 11450fcc7cbSGareth Webb pg_mode |= PG_MODE_SMEP; 11550fcc7cbSGareth Webb } 11650fcc7cbSGareth Webb if (env->hflags & HF_LMA_MASK) { 11750fcc7cbSGareth Webb pg_mode |= PG_MODE_LMA; 11850fcc7cbSGareth Webb if (env->cr[4] & CR4_PKE_MASK) { 11950fcc7cbSGareth Webb pg_mode |= PG_MODE_PKE; 12050fcc7cbSGareth Webb } 12150fcc7cbSGareth Webb if (env->cr[4] & CR4_PKS_MASK) { 12250fcc7cbSGareth Webb pg_mode |= PG_MODE_PKS; 12350fcc7cbSGareth Webb } 12450fcc7cbSGareth Webb if (env->cr[4] & CR4_LA57_MASK) { 12550fcc7cbSGareth Webb pg_mode |= PG_MODE_LA57; 12650fcc7cbSGareth Webb } 12750fcc7cbSGareth Webb } 12850fcc7cbSGareth Webb return pg_mode; 12950fcc7cbSGareth Webb } 13050fcc7cbSGareth Webb 131*611c34a7SPhilippe Mathieu-Daudé static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl) 132*611c34a7SPhilippe Mathieu-Daudé { 133*611c34a7SPhilippe Mathieu-Daudé int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1; 134*611c34a7SPhilippe Mathieu-Daudé int mmu_index_base = 135*611c34a7SPhilippe Mathieu-Daudé !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : 136*611c34a7SPhilippe Mathieu-Daudé (pl < 3 && (env->eflags & AC_MASK) 137*611c34a7SPhilippe Mathieu-Daudé ? 
MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX); 138*611c34a7SPhilippe Mathieu-Daudé 139*611c34a7SPhilippe Mathieu-Daudé return mmu_index_base + mmu_index_32; 140*611c34a7SPhilippe Mathieu-Daudé } 141*611c34a7SPhilippe Mathieu-Daudé 142*611c34a7SPhilippe Mathieu-Daudé int cpu_mmu_index_kernel(CPUX86State *env) 143*611c34a7SPhilippe Mathieu-Daudé { 144*611c34a7SPhilippe Mathieu-Daudé return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK); 145*611c34a7SPhilippe Mathieu-Daudé } 146*611c34a7SPhilippe Mathieu-Daudé 147eaa728eeSbellard /* return non zero if error */ 148100ec099SPavel Dovgalyuk static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, 149100ec099SPavel Dovgalyuk uint32_t *e2_ptr, int selector, 150100ec099SPavel Dovgalyuk uintptr_t retaddr) 151eaa728eeSbellard { 152eaa728eeSbellard SegmentCache *dt; 153eaa728eeSbellard int index; 154eaa728eeSbellard target_ulong ptr; 155eaa728eeSbellard 15620054ef0SBlue Swirl if (selector & 0x4) { 157eaa728eeSbellard dt = &env->ldt; 15820054ef0SBlue Swirl } else { 159eaa728eeSbellard dt = &env->gdt; 16020054ef0SBlue Swirl } 161eaa728eeSbellard index = selector & ~7; 16220054ef0SBlue Swirl if ((index + 7) > dt->limit) { 163eaa728eeSbellard return -1; 16420054ef0SBlue Swirl } 165eaa728eeSbellard ptr = dt->base + index; 166100ec099SPavel Dovgalyuk *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr); 167100ec099SPavel Dovgalyuk *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 168eaa728eeSbellard return 0; 169eaa728eeSbellard } 170eaa728eeSbellard 171100ec099SPavel Dovgalyuk static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, 172100ec099SPavel Dovgalyuk uint32_t *e2_ptr, int selector) 173100ec099SPavel Dovgalyuk { 174100ec099SPavel Dovgalyuk return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0); 175100ec099SPavel Dovgalyuk } 176100ec099SPavel Dovgalyuk 177eaa728eeSbellard static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) 178eaa728eeSbellard { 179eaa728eeSbellard unsigned int limit; 18020054ef0SBlue Swirl 181eaa728eeSbellard limit = (e1 & 0xffff) | (e2 & 0x000f0000); 18220054ef0SBlue Swirl if (e2 & DESC_G_MASK) { 183eaa728eeSbellard limit = (limit << 12) | 0xfff; 18420054ef0SBlue Swirl } 185eaa728eeSbellard return limit; 186eaa728eeSbellard } 187eaa728eeSbellard 188eaa728eeSbellard static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) 189eaa728eeSbellard { 19020054ef0SBlue Swirl return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000); 191eaa728eeSbellard } 192eaa728eeSbellard 19320054ef0SBlue Swirl static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, 19420054ef0SBlue Swirl uint32_t e2) 195eaa728eeSbellard { 196eaa728eeSbellard sc->base = get_seg_base(e1, e2); 197eaa728eeSbellard sc->limit = get_seg_limit(e1, e2); 198eaa728eeSbellard sc->flags = e2; 199eaa728eeSbellard } 200eaa728eeSbellard 201eaa728eeSbellard /* init the segment cache in vm86 mode. 
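   In vm86 mode there are no descriptor tables: the base is simply
   selector << 4, the limit is 64 KiB, and the cached flags describe a
   present, writable, accessed, DPL-3 data segment, as built below.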
*/ 2022999a0b2SBlue Swirl static inline void load_seg_vm(CPUX86State *env, int seg, int selector) 203eaa728eeSbellard { 204eaa728eeSbellard selector &= 0xffff; 205b98dbc90SPaolo Bonzini 206b98dbc90SPaolo Bonzini cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, 207b98dbc90SPaolo Bonzini DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 208b98dbc90SPaolo Bonzini DESC_A_MASK | (3 << DESC_DPL_SHIFT)); 209eaa728eeSbellard } 210eaa728eeSbellard 2112999a0b2SBlue Swirl static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, 212100ec099SPavel Dovgalyuk uint32_t *esp_ptr, int dpl, 213100ec099SPavel Dovgalyuk uintptr_t retaddr) 214eaa728eeSbellard { 2156aa9e42fSRichard Henderson X86CPU *cpu = env_archcpu(env); 216eaa728eeSbellard int type, index, shift; 217eaa728eeSbellard 218eaa728eeSbellard #if 0 219eaa728eeSbellard { 220eaa728eeSbellard int i; 221eaa728eeSbellard printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); 222eaa728eeSbellard for (i = 0; i < env->tr.limit; i++) { 223eaa728eeSbellard printf("%02x ", env->tr.base[i]); 22420054ef0SBlue Swirl if ((i & 7) == 7) { 22520054ef0SBlue Swirl printf("\n"); 22620054ef0SBlue Swirl } 227eaa728eeSbellard } 228eaa728eeSbellard printf("\n"); 229eaa728eeSbellard } 230eaa728eeSbellard #endif 231eaa728eeSbellard 23220054ef0SBlue Swirl if (!(env->tr.flags & DESC_P_MASK)) { 233a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss"); 23420054ef0SBlue Swirl } 235eaa728eeSbellard type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 23620054ef0SBlue Swirl if ((type & 7) != 1) { 237a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss type"); 23820054ef0SBlue Swirl } 239eaa728eeSbellard shift = type >> 3; 240eaa728eeSbellard index = (dpl * 4 + 2) << shift; 24120054ef0SBlue Swirl if (index + (4 << shift) - 1 > env->tr.limit) { 242100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr); 24320054ef0SBlue Swirl } 244eaa728eeSbellard if (shift == 0) { 245100ec099SPavel Dovgalyuk *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr); 246100ec099SPavel Dovgalyuk *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr); 247eaa728eeSbellard } else { 248100ec099SPavel Dovgalyuk *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr); 249100ec099SPavel Dovgalyuk *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr); 250eaa728eeSbellard } 251eaa728eeSbellard } 252eaa728eeSbellard 253c117e5b1SPhilippe Mathieu-Daudé static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector, 254c117e5b1SPhilippe Mathieu-Daudé int cpl, uintptr_t retaddr) 255eaa728eeSbellard { 256eaa728eeSbellard uint32_t e1, e2; 257d3b54918SPaolo Bonzini int rpl, dpl; 258eaa728eeSbellard 259eaa728eeSbellard if ((selector & 0xfffc) != 0) { 260100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) { 261100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 26220054ef0SBlue Swirl } 26320054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 264100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 26520054ef0SBlue Swirl } 266eaa728eeSbellard rpl = selector & 3; 267eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 268eaa728eeSbellard if (seg_reg == R_CS) { 26920054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 270100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 27120054ef0SBlue Swirl } 27220054ef0SBlue Swirl if (dpl != rpl) { 
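                /*
                 * Descriptor checks that fail during a task switch raise
                 * #TS with the faulting selector; the & 0xfffc keeps the
                 * index and TI bit and clears the RPL bits, whose
                 * positions are reused for the EXT/IDT flags of the
                 * error-code format.
                 */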
273100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 27420054ef0SBlue Swirl } 275eaa728eeSbellard } else if (seg_reg == R_SS) { 276eaa728eeSbellard /* SS must be writable data */ 27720054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 278100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 27920054ef0SBlue Swirl } 28020054ef0SBlue Swirl if (dpl != cpl || dpl != rpl) { 281100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 28220054ef0SBlue Swirl } 283eaa728eeSbellard } else { 284eaa728eeSbellard /* not readable code */ 28520054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { 286100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 28720054ef0SBlue Swirl } 288eaa728eeSbellard /* if data or non conforming code, checks the rights */ 289eaa728eeSbellard if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { 29020054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 291100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 292eaa728eeSbellard } 293eaa728eeSbellard } 29420054ef0SBlue Swirl } 29520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 296100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr); 29720054ef0SBlue Swirl } 298eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 299eaa728eeSbellard get_seg_base(e1, e2), 300eaa728eeSbellard get_seg_limit(e1, e2), 301eaa728eeSbellard e2); 302eaa728eeSbellard } else { 30320054ef0SBlue Swirl if (seg_reg == R_SS || seg_reg == R_CS) { 304100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 305eaa728eeSbellard } 306eaa728eeSbellard } 30720054ef0SBlue Swirl } 308eaa728eeSbellard 309a9089859SPaolo Bonzini static void tss_set_busy(CPUX86State *env, int tss_selector, bool value, 310a9089859SPaolo Bonzini uintptr_t retaddr) 311a9089859SPaolo Bonzini { 312c35b2fb1SPaolo Bonzini target_ulong ptr = env->gdt.base + (tss_selector & ~7); 313a9089859SPaolo Bonzini uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 314a9089859SPaolo Bonzini 315a9089859SPaolo Bonzini if (value) { 316a9089859SPaolo Bonzini e2 |= DESC_TSS_BUSY_MASK; 317a9089859SPaolo Bonzini } else { 318a9089859SPaolo Bonzini e2 &= ~DESC_TSS_BUSY_MASK; 319a9089859SPaolo Bonzini } 320a9089859SPaolo Bonzini 321a9089859SPaolo Bonzini cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); 322a9089859SPaolo Bonzini } 323a9089859SPaolo Bonzini 324eaa728eeSbellard #define SWITCH_TSS_JMP 0 325eaa728eeSbellard #define SWITCH_TSS_IRET 1 326eaa728eeSbellard #define SWITCH_TSS_CALL 2 327eaa728eeSbellard 32849958057SPaolo Bonzini /* return 0 if switching to a 16-bit selector */ 32949958057SPaolo Bonzini static int switch_tss_ra(CPUX86State *env, int tss_selector, 330eaa728eeSbellard uint32_t e1, uint32_t e2, int source, 331100ec099SPavel Dovgalyuk uint32_t next_eip, uintptr_t retaddr) 332eaa728eeSbellard { 3338b131065SPaolo Bonzini int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i; 334eaa728eeSbellard target_ulong tss_base; 335eaa728eeSbellard uint32_t new_regs[8], new_segs[6]; 336eaa728eeSbellard uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; 337eaa728eeSbellard uint32_t old_eflags, eflags_mask; 338eaa728eeSbellard SegmentCache *dt; 3398b131065SPaolo Bonzini int mmu_index, index; 340eaa728eeSbellard target_ulong ptr; 3418b131065SPaolo Bonzini X86Access old, new; 342eaa728eeSbellard 
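    /*
     * Rough flow of a hardware task switch, mirroring the code below:
     *   1. resolve a task gate to the target TSS descriptor if needed;
     *   2. validate the descriptor (present, busy state, limit);
     *   3. map both TSSs up front with X86Access so no memory fault can
     *      hit halfway through the switch;
     *   4. save the outgoing register state into the current TSS;
     *   5. load the new register, segment and LDT state, updating the
     *      busy bits and, for CALL, the back link and NT flag.
     */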
343eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 34420054ef0SBlue Swirl LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, 34520054ef0SBlue Swirl source); 346eaa728eeSbellard 347eaa728eeSbellard /* if task gate, we read the TSS segment and we load it */ 348eaa728eeSbellard if (type == 5) { 34920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 350100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 35120054ef0SBlue Swirl } 352eaa728eeSbellard tss_selector = e1 >> 16; 35320054ef0SBlue Swirl if (tss_selector & 4) { 354100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 35520054ef0SBlue Swirl } 356100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) { 357100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 358eaa728eeSbellard } 35920054ef0SBlue Swirl if (e2 & DESC_S_MASK) { 360100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 36120054ef0SBlue Swirl } 36220054ef0SBlue Swirl type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 36320054ef0SBlue Swirl if ((type & 7) != 1) { 364100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 36520054ef0SBlue Swirl } 36620054ef0SBlue Swirl } 367eaa728eeSbellard 36820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 369100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 37020054ef0SBlue Swirl } 371eaa728eeSbellard 37220054ef0SBlue Swirl if (type & 8) { 373eaa728eeSbellard tss_limit_max = 103; 37420054ef0SBlue Swirl } else { 375eaa728eeSbellard tss_limit_max = 43; 37620054ef0SBlue Swirl } 377eaa728eeSbellard tss_limit = get_seg_limit(e1, e2); 378eaa728eeSbellard tss_base = get_seg_base(e1, e2); 379eaa728eeSbellard if ((tss_selector & 4) != 0 || 38020054ef0SBlue Swirl tss_limit < tss_limit_max) { 381100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 38220054ef0SBlue Swirl } 383eaa728eeSbellard old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 38420054ef0SBlue Swirl if (old_type & 8) { 385eaa728eeSbellard old_tss_limit_max = 103; 38620054ef0SBlue Swirl } else { 387eaa728eeSbellard old_tss_limit_max = 43; 38820054ef0SBlue Swirl } 389eaa728eeSbellard 39005d41bbcSPaolo Bonzini /* new TSS must be busy iff the source is an IRET instruction */ 39105d41bbcSPaolo Bonzini if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) { 39205d41bbcSPaolo Bonzini raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 39305d41bbcSPaolo Bonzini } 39405d41bbcSPaolo Bonzini 3958b131065SPaolo Bonzini /* X86Access avoids memory exceptions during the task switch */ 3968b131065SPaolo Bonzini mmu_index = cpu_mmu_index_kernel(env); 397ded1db48SRichard Henderson access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1, 3988b131065SPaolo Bonzini MMU_DATA_STORE, mmu_index, retaddr); 3998b131065SPaolo Bonzini 4008b131065SPaolo Bonzini if (source == SWITCH_TSS_CALL) { 4018b131065SPaolo Bonzini /* Probe for future write of parent task */ 4028b131065SPaolo Bonzini probe_access(env, tss_base, 2, MMU_DATA_STORE, 4038b131065SPaolo Bonzini mmu_index, retaddr); 4048b131065SPaolo Bonzini } 405ded1db48SRichard Henderson /* While true tss_limit may be larger, we don't access the iopb here. 
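       The fixed part of a 32-bit TSS is 104 bytes; the I/O permission
       bitmap lives beyond it and is only consulted when port I/O is
       actually performed, so mapping tss_limit_max + 1 bytes is enough
       for the switch itself.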
*/ 406ded1db48SRichard Henderson access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1, 4078b131065SPaolo Bonzini MMU_DATA_LOAD, mmu_index, retaddr); 4088b131065SPaolo Bonzini 4096a079f2eSPaolo Bonzini /* save the current state in the old TSS */ 4106a079f2eSPaolo Bonzini old_eflags = cpu_compute_eflags(env); 4116a079f2eSPaolo Bonzini if (old_type & 8) { 4126a079f2eSPaolo Bonzini /* 32 bit */ 4136a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + 0x20, next_eip); 4146a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + 0x24, old_eflags); 4156a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]); 4166a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]); 4176a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]); 4186a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]); 4196a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]); 4206a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]); 4216a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]); 4226a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]); 4236a079f2eSPaolo Bonzini for (i = 0; i < 6; i++) { 4246a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x48 + i * 4), 4256a079f2eSPaolo Bonzini env->segs[i].selector); 4266a079f2eSPaolo Bonzini } 4276a079f2eSPaolo Bonzini } else { 4286a079f2eSPaolo Bonzini /* 16 bit */ 4296a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + 0x0e, next_eip); 4306a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + 0x10, old_eflags); 4316a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]); 4326a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]); 4336a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]); 4346a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]); 4356a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]); 4366a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]); 4376a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]); 4386a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]); 4396a079f2eSPaolo Bonzini for (i = 0; i < 4; i++) { 4406a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x22 + i * 2), 4416a079f2eSPaolo Bonzini env->segs[i].selector); 4426a079f2eSPaolo Bonzini } 4436a079f2eSPaolo Bonzini } 4446a079f2eSPaolo Bonzini 445eaa728eeSbellard /* read all the registers from the new TSS */ 446eaa728eeSbellard if (type & 8) { 447eaa728eeSbellard /* 32 bit */ 4488b131065SPaolo Bonzini new_cr3 = access_ldl(&new, tss_base + 0x1c); 4498b131065SPaolo Bonzini new_eip = access_ldl(&new, tss_base + 0x20); 4508b131065SPaolo Bonzini new_eflags = access_ldl(&new, tss_base + 0x24); 45120054ef0SBlue Swirl for (i = 0; i < 8; i++) { 4528b131065SPaolo Bonzini new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4)); 45320054ef0SBlue Swirl } 45420054ef0SBlue Swirl for (i = 0; i < 6; i++) { 4558b131065SPaolo Bonzini new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4)); 45620054ef0SBlue Swirl } 4578b131065SPaolo Bonzini new_ldt = access_ldw(&new, tss_base + 0x60); 4588b131065SPaolo Bonzini new_trap = access_ldl(&new, tss_base + 0x64); 
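        /*
         * The offsets above follow the 32-bit TSS layout: 0x1c CR3,
         * 0x20 EIP, 0x24 EFLAGS, 0x28-0x44 the eight GPRs, 0x48-0x5c the
         * six selectors in ES/CS/SS/DS/FS/GS order (matching QEMU's
         * segment register numbering), 0x60 the LDT selector, and 0x64
         * the T (debug trap) flag with the I/O map base in the upper
         * half of that dword.
         */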
459eaa728eeSbellard } else { 460eaa728eeSbellard /* 16 bit */ 461eaa728eeSbellard new_cr3 = 0; 4628b131065SPaolo Bonzini new_eip = access_ldw(&new, tss_base + 0x0e); 4638b131065SPaolo Bonzini new_eflags = access_ldw(&new, tss_base + 0x10); 46420054ef0SBlue Swirl for (i = 0; i < 8; i++) { 4658b131065SPaolo Bonzini new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2)); 46620054ef0SBlue Swirl } 46720054ef0SBlue Swirl for (i = 0; i < 4; i++) { 4688b131065SPaolo Bonzini new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2)); 46920054ef0SBlue Swirl } 4708b131065SPaolo Bonzini new_ldt = access_ldw(&new, tss_base + 0x2a); 471eaa728eeSbellard new_segs[R_FS] = 0; 472eaa728eeSbellard new_segs[R_GS] = 0; 473eaa728eeSbellard new_trap = 0; 474eaa728eeSbellard } 4754581cbcdSBlue Swirl /* XXX: avoid a compiler warning, see 4764581cbcdSBlue Swirl http://support.amd.com/us/Processor_TechDocs/24593.pdf 4774581cbcdSBlue Swirl chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ 4784581cbcdSBlue Swirl (void)new_trap; 479eaa728eeSbellard 480eaa728eeSbellard /* clear busy bit (it is restartable) */ 481eaa728eeSbellard if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { 482a9089859SPaolo Bonzini tss_set_busy(env, env->tr.selector, 0, retaddr); 483eaa728eeSbellard } 4846a079f2eSPaolo Bonzini 48520054ef0SBlue Swirl if (source == SWITCH_TSS_IRET) { 486eaa728eeSbellard old_eflags &= ~NT_MASK; 4871b627f38SPaolo Bonzini if (old_type & 8) { 4888b131065SPaolo Bonzini access_stl(&old, env->tr.base + 0x24, old_eflags); 489eaa728eeSbellard } else { 4908b131065SPaolo Bonzini access_stw(&old, env->tr.base + 0x10, old_eflags); 491eaa728eeSbellard } 49220054ef0SBlue Swirl } 493eaa728eeSbellard 494eaa728eeSbellard if (source == SWITCH_TSS_CALL) { 4958b131065SPaolo Bonzini /* 4968b131065SPaolo Bonzini * Thanks to the probe_access above, we know the first two 4978b131065SPaolo Bonzini * bytes addressed by &new are writable too. 
4988b131065SPaolo Bonzini */ 4998b131065SPaolo Bonzini access_stw(&new, tss_base, env->tr.selector); 500eaa728eeSbellard new_eflags |= NT_MASK; 501eaa728eeSbellard } 502eaa728eeSbellard 503eaa728eeSbellard /* set busy bit */ 504eaa728eeSbellard if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { 505a9089859SPaolo Bonzini tss_set_busy(env, tss_selector, 1, retaddr); 506eaa728eeSbellard } 507eaa728eeSbellard 508eaa728eeSbellard /* set the new CPU state */ 5096a079f2eSPaolo Bonzini 5106a079f2eSPaolo Bonzini /* now if an exception occurs, it will occur in the next task context */ 5116a079f2eSPaolo Bonzini 512eaa728eeSbellard env->cr[0] |= CR0_TS_MASK; 513eaa728eeSbellard env->hflags |= HF_TS_MASK; 514eaa728eeSbellard env->tr.selector = tss_selector; 515eaa728eeSbellard env->tr.base = tss_base; 516eaa728eeSbellard env->tr.limit = tss_limit; 517eaa728eeSbellard env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; 518eaa728eeSbellard 519eaa728eeSbellard if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { 520eaa728eeSbellard cpu_x86_update_cr3(env, new_cr3); 521eaa728eeSbellard } 522eaa728eeSbellard 523eaa728eeSbellard /* load all registers without an exception, then reload them with 524eaa728eeSbellard possible exception */ 525eaa728eeSbellard env->eip = new_eip; 526eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | 527eaa728eeSbellard IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; 528a5505f6bSPaolo Bonzini if (type & 8) { 529997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 530a5505f6bSPaolo Bonzini for (i = 0; i < 8; i++) { 531a5505f6bSPaolo Bonzini env->regs[i] = new_regs[i]; 532a5505f6bSPaolo Bonzini } 533a5505f6bSPaolo Bonzini } else { 534a5505f6bSPaolo Bonzini cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff); 535a5505f6bSPaolo Bonzini for (i = 0; i < 8; i++) { 536a5505f6bSPaolo Bonzini env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i]; 537a5505f6bSPaolo Bonzini } 538a5505f6bSPaolo Bonzini } 539eaa728eeSbellard if (new_eflags & VM_MASK) { 54020054ef0SBlue Swirl for (i = 0; i < 6; i++) { 5412999a0b2SBlue Swirl load_seg_vm(env, i, new_segs[i]); 54220054ef0SBlue Swirl } 543eaa728eeSbellard } else { 544eaa728eeSbellard /* first just selectors as the rest may trigger exceptions */ 54520054ef0SBlue Swirl for (i = 0; i < 6; i++) { 546eaa728eeSbellard cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); 547eaa728eeSbellard } 54820054ef0SBlue Swirl } 549eaa728eeSbellard 550eaa728eeSbellard env->ldt.selector = new_ldt & ~4; 551eaa728eeSbellard env->ldt.base = 0; 552eaa728eeSbellard env->ldt.limit = 0; 553eaa728eeSbellard env->ldt.flags = 0; 554eaa728eeSbellard 555eaa728eeSbellard /* load the LDT */ 55620054ef0SBlue Swirl if (new_ldt & 4) { 557100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 55820054ef0SBlue Swirl } 559eaa728eeSbellard 560eaa728eeSbellard if ((new_ldt & 0xfffc) != 0) { 561eaa728eeSbellard dt = &env->gdt; 562eaa728eeSbellard index = new_ldt & ~7; 56320054ef0SBlue Swirl if ((index + 7) > dt->limit) { 564100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 56520054ef0SBlue Swirl } 566eaa728eeSbellard ptr = dt->base + index; 567100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, retaddr); 568100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 56920054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 570100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 57120054ef0SBlue 
Swirl } 57220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 573100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 57420054ef0SBlue Swirl } 575eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 576eaa728eeSbellard } 577eaa728eeSbellard 578eaa728eeSbellard /* load the segments */ 579eaa728eeSbellard if (!(new_eflags & VM_MASK)) { 580d3b54918SPaolo Bonzini int cpl = new_segs[R_CS] & 3; 581100ec099SPavel Dovgalyuk tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr); 582100ec099SPavel Dovgalyuk tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr); 583100ec099SPavel Dovgalyuk tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr); 584100ec099SPavel Dovgalyuk tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr); 585100ec099SPavel Dovgalyuk tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr); 586100ec099SPavel Dovgalyuk tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr); 587eaa728eeSbellard } 588eaa728eeSbellard 589a78d0eabSliguang /* check that env->eip is in the CS segment limits */ 590eaa728eeSbellard if (new_eip > env->segs[R_CS].limit) { 591eaa728eeSbellard /* XXX: different exception if CALL? */ 592100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 593eaa728eeSbellard } 59401df040bSaliguori 59501df040bSaliguori #ifndef CONFIG_USER_ONLY 59601df040bSaliguori /* reset local breakpoints */ 597428065ceSliguang if (env->dr[7] & DR7_LOCAL_BP_MASK) { 59893d00d0fSRichard Henderson cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); 59901df040bSaliguori } 60001df040bSaliguori #endif 60149958057SPaolo Bonzini return type >> 3; 602eaa728eeSbellard } 603eaa728eeSbellard 60449958057SPaolo Bonzini static int switch_tss(CPUX86State *env, int tss_selector, 605100ec099SPavel Dovgalyuk uint32_t e1, uint32_t e2, int source, 606100ec099SPavel Dovgalyuk uint32_t next_eip) 607100ec099SPavel Dovgalyuk { 60849958057SPaolo Bonzini return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); 609100ec099SPavel Dovgalyuk } 610100ec099SPavel Dovgalyuk 611eaa728eeSbellard static inline unsigned int get_sp_mask(unsigned int e2) 612eaa728eeSbellard { 6130aca0605SAndrew Oates #ifdef TARGET_X86_64 6140aca0605SAndrew Oates if (e2 & DESC_L_MASK) { 6150aca0605SAndrew Oates return 0; 6160aca0605SAndrew Oates } else 6170aca0605SAndrew Oates #endif 61820054ef0SBlue Swirl if (e2 & DESC_B_MASK) { 619eaa728eeSbellard return 0xffffffff; 62020054ef0SBlue Swirl } else { 621eaa728eeSbellard return 0xffff; 622eaa728eeSbellard } 62320054ef0SBlue Swirl } 624eaa728eeSbellard 62569cb498cSPaolo Bonzini static int exception_is_fault(int intno) 62669cb498cSPaolo Bonzini { 62769cb498cSPaolo Bonzini switch (intno) { 62869cb498cSPaolo Bonzini /* 62969cb498cSPaolo Bonzini * #DB can be both fault- and trap-like, but it never sets RF=1 63069cb498cSPaolo Bonzini * in the RFLAGS value pushed on the stack. 63169cb498cSPaolo Bonzini */ 63269cb498cSPaolo Bonzini case EXCP01_DB: 63369cb498cSPaolo Bonzini case EXCP03_INT3: 63469cb498cSPaolo Bonzini case EXCP04_INTO: 63569cb498cSPaolo Bonzini case EXCP08_DBLE: 63669cb498cSPaolo Bonzini case EXCP12_MCHK: 63769cb498cSPaolo Bonzini return 0; 63869cb498cSPaolo Bonzini } 63969cb498cSPaolo Bonzini /* Everything else including reserved exception is a fault. 
*/ 64069cb498cSPaolo Bonzini return 1; 64169cb498cSPaolo Bonzini } 64269cb498cSPaolo Bonzini 64330493a03SClaudio Fontana int exception_has_error_code(int intno) 6442ed51f5bSaliguori { 6452ed51f5bSaliguori switch (intno) { 6462ed51f5bSaliguori case 8: 6472ed51f5bSaliguori case 10: 6482ed51f5bSaliguori case 11: 6492ed51f5bSaliguori case 12: 6502ed51f5bSaliguori case 13: 6512ed51f5bSaliguori case 14: 6522ed51f5bSaliguori case 17: 6532ed51f5bSaliguori return 1; 6542ed51f5bSaliguori } 6552ed51f5bSaliguori return 0; 6562ed51f5bSaliguori } 6572ed51f5bSaliguori 658eaa728eeSbellard /* protected mode interrupt */ 6592999a0b2SBlue Swirl static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, 6602999a0b2SBlue Swirl int error_code, unsigned int next_eip, 6612999a0b2SBlue Swirl int is_hw) 662eaa728eeSbellard { 663eaa728eeSbellard SegmentCache *dt; 664059368bcSRichard Henderson target_ulong ptr; 665eaa728eeSbellard int type, dpl, selector, ss_dpl, cpl; 666eaa728eeSbellard int has_error_code, new_stack, shift; 667059368bcSRichard Henderson uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0; 668059368bcSRichard Henderson uint32_t old_eip, eflags; 66987446327SKevin O'Connor int vm86 = env->eflags & VM_MASK; 670059368bcSRichard Henderson StackAccess sa; 67169cb498cSPaolo Bonzini bool set_rf; 672eaa728eeSbellard 673eaa728eeSbellard has_error_code = 0; 67420054ef0SBlue Swirl if (!is_int && !is_hw) { 67520054ef0SBlue Swirl has_error_code = exception_has_error_code(intno); 67620054ef0SBlue Swirl } 67720054ef0SBlue Swirl if (is_int) { 678eaa728eeSbellard old_eip = next_eip; 67969cb498cSPaolo Bonzini set_rf = false; 68020054ef0SBlue Swirl } else { 681eaa728eeSbellard old_eip = env->eip; 68269cb498cSPaolo Bonzini set_rf = exception_is_fault(intno); 68320054ef0SBlue Swirl } 684eaa728eeSbellard 685eaa728eeSbellard dt = &env->idt; 68620054ef0SBlue Swirl if (intno * 8 + 7 > dt->limit) { 68777b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 68820054ef0SBlue Swirl } 689eaa728eeSbellard ptr = dt->base + intno * 8; 690329e607dSBlue Swirl e1 = cpu_ldl_kernel(env, ptr); 691329e607dSBlue Swirl e2 = cpu_ldl_kernel(env, ptr + 4); 692eaa728eeSbellard /* check gate type */ 693eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 694eaa728eeSbellard switch (type) { 695eaa728eeSbellard case 5: /* task gate */ 6963df1a3d0SPeter Maydell case 6: /* 286 interrupt gate */ 6973df1a3d0SPeter Maydell case 7: /* 286 trap gate */ 6983df1a3d0SPeter Maydell case 14: /* 386 interrupt gate */ 6993df1a3d0SPeter Maydell case 15: /* 386 trap gate */ 7003df1a3d0SPeter Maydell break; 7013df1a3d0SPeter Maydell default: 7023df1a3d0SPeter Maydell raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 7033df1a3d0SPeter Maydell break; 7043df1a3d0SPeter Maydell } 7053df1a3d0SPeter Maydell dpl = (e2 >> DESC_DPL_SHIFT) & 3; 7063df1a3d0SPeter Maydell cpl = env->hflags & HF_CPL_MASK; 7073df1a3d0SPeter Maydell /* check privilege if software int */ 7083df1a3d0SPeter Maydell if (is_int && dpl < cpl) { 7093df1a3d0SPeter Maydell raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 7103df1a3d0SPeter Maydell } 7113df1a3d0SPeter Maydell 712059368bcSRichard Henderson sa.env = env; 713059368bcSRichard Henderson sa.ra = 0; 714059368bcSRichard Henderson 7153df1a3d0SPeter Maydell if (type == 5) { 7163df1a3d0SPeter Maydell /* task gate */ 717eaa728eeSbellard /* must do that check here to return the correct error code */ 71820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 71977b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, 
intno * 8 + 2); 72020054ef0SBlue Swirl } 72149958057SPaolo Bonzini shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); 722eaa728eeSbellard if (has_error_code) { 723e136648cSPaolo Bonzini /* push the error code on the destination stack */ 724e136648cSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 725e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 72620054ef0SBlue Swirl if (env->segs[R_SS].flags & DESC_B_MASK) { 727059368bcSRichard Henderson sa.sp_mask = 0xffffffff; 72820054ef0SBlue Swirl } else { 729059368bcSRichard Henderson sa.sp_mask = 0xffff; 73020054ef0SBlue Swirl } 731059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 732059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 73320054ef0SBlue Swirl if (shift) { 734059368bcSRichard Henderson pushl(&sa, error_code); 73520054ef0SBlue Swirl } else { 736059368bcSRichard Henderson pushw(&sa, error_code); 73720054ef0SBlue Swirl } 738059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 739eaa728eeSbellard } 740eaa728eeSbellard return; 741eaa728eeSbellard } 7423df1a3d0SPeter Maydell 7433df1a3d0SPeter Maydell /* Otherwise, trap or interrupt gate */ 7443df1a3d0SPeter Maydell 745eaa728eeSbellard /* check valid bit */ 74620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 74777b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 74820054ef0SBlue Swirl } 749eaa728eeSbellard selector = e1 >> 16; 750eaa728eeSbellard offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 75120054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 75277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, 0); 75320054ef0SBlue Swirl } 7542999a0b2SBlue Swirl if (load_segment(env, &e1, &e2, selector) != 0) { 75577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 75620054ef0SBlue Swirl } 75720054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 75877b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 75920054ef0SBlue Swirl } 760eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 76120054ef0SBlue Swirl if (dpl > cpl) { 76277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 76320054ef0SBlue Swirl } 76420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 76577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 76620054ef0SBlue Swirl } 7671110bfe6SPaolo Bonzini if (e2 & DESC_C_MASK) { 7681110bfe6SPaolo Bonzini dpl = cpl; 7691110bfe6SPaolo Bonzini } 770e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 7711110bfe6SPaolo Bonzini if (dpl < cpl) { 772eaa728eeSbellard /* to inner privilege */ 773059368bcSRichard Henderson uint32_t esp; 774100ec099SPavel Dovgalyuk get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); 77520054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 77677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 77720054ef0SBlue Swirl } 77820054ef0SBlue Swirl if ((ss & 3) != dpl) { 77977b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 78020054ef0SBlue Swirl } 7812999a0b2SBlue Swirl if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { 78277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 78320054ef0SBlue Swirl } 784eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 78520054ef0SBlue Swirl if (ss_dpl != dpl) { 78677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 78720054ef0SBlue Swirl } 788eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 789eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 79020054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 
79177b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 79220054ef0SBlue Swirl } 79320054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 79477b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 79520054ef0SBlue Swirl } 796eaa728eeSbellard new_stack = 1; 797059368bcSRichard Henderson sa.sp = esp; 798059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 799059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 8001110bfe6SPaolo Bonzini } else { 801eaa728eeSbellard /* to same privilege */ 80287446327SKevin O'Connor if (vm86) { 80377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 80420054ef0SBlue Swirl } 805eaa728eeSbellard new_stack = 0; 806059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 807059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 808059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 809eaa728eeSbellard } 810eaa728eeSbellard 811eaa728eeSbellard shift = type >> 3; 812eaa728eeSbellard 813eaa728eeSbellard #if 0 814eaa728eeSbellard /* XXX: check that enough room is available */ 815eaa728eeSbellard push_size = 6 + (new_stack << 2) + (has_error_code << 1); 81687446327SKevin O'Connor if (vm86) { 817eaa728eeSbellard push_size += 8; 81820054ef0SBlue Swirl } 819eaa728eeSbellard push_size <<= shift; 820eaa728eeSbellard #endif 82169cb498cSPaolo Bonzini eflags = cpu_compute_eflags(env); 82269cb498cSPaolo Bonzini /* 82369cb498cSPaolo Bonzini * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it 82469cb498cSPaolo Bonzini * as is. AMD behavior could be implemented in check_hw_breakpoints(). 82569cb498cSPaolo Bonzini */ 82669cb498cSPaolo Bonzini if (set_rf) { 82769cb498cSPaolo Bonzini eflags |= RF_MASK; 82869cb498cSPaolo Bonzini } 82969cb498cSPaolo Bonzini 830eaa728eeSbellard if (shift == 1) { 831eaa728eeSbellard if (new_stack) { 83287446327SKevin O'Connor if (vm86) { 833059368bcSRichard Henderson pushl(&sa, env->segs[R_GS].selector); 834059368bcSRichard Henderson pushl(&sa, env->segs[R_FS].selector); 835059368bcSRichard Henderson pushl(&sa, env->segs[R_DS].selector); 836059368bcSRichard Henderson pushl(&sa, env->segs[R_ES].selector); 837eaa728eeSbellard } 838059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 839059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 840eaa728eeSbellard } 841059368bcSRichard Henderson pushl(&sa, eflags); 842059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 843059368bcSRichard Henderson pushl(&sa, old_eip); 844eaa728eeSbellard if (has_error_code) { 845059368bcSRichard Henderson pushl(&sa, error_code); 846eaa728eeSbellard } 847eaa728eeSbellard } else { 848eaa728eeSbellard if (new_stack) { 84987446327SKevin O'Connor if (vm86) { 850059368bcSRichard Henderson pushw(&sa, env->segs[R_GS].selector); 851059368bcSRichard Henderson pushw(&sa, env->segs[R_FS].selector); 852059368bcSRichard Henderson pushw(&sa, env->segs[R_DS].selector); 853059368bcSRichard Henderson pushw(&sa, env->segs[R_ES].selector); 854eaa728eeSbellard } 855059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 856059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 857eaa728eeSbellard } 858059368bcSRichard Henderson pushw(&sa, eflags); 859059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 860059368bcSRichard Henderson pushw(&sa, old_eip); 861eaa728eeSbellard if (has_error_code) { 862059368bcSRichard Henderson pushw(&sa, error_code); 863eaa728eeSbellard } 864eaa728eeSbellard } 865eaa728eeSbellard 866fd460606SKevin 
O'Connor /* interrupt gate clear IF mask */ 867fd460606SKevin O'Connor if ((type & 1) == 0) { 868fd460606SKevin O'Connor env->eflags &= ~IF_MASK; 869fd460606SKevin O'Connor } 870fd460606SKevin O'Connor env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 871fd460606SKevin O'Connor 872eaa728eeSbellard if (new_stack) { 87387446327SKevin O'Connor if (vm86) { 874eaa728eeSbellard cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); 875eaa728eeSbellard cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); 876eaa728eeSbellard cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); 877eaa728eeSbellard cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); 878eaa728eeSbellard } 879eaa728eeSbellard ss = (ss & ~3) | dpl; 880059368bcSRichard Henderson cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base, 881059368bcSRichard Henderson get_seg_limit(ss_e1, ss_e2), ss_e2); 882eaa728eeSbellard } 883059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 884eaa728eeSbellard 885eaa728eeSbellard selector = (selector & ~3) | dpl; 886eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 887eaa728eeSbellard get_seg_base(e1, e2), 888eaa728eeSbellard get_seg_limit(e1, e2), 889eaa728eeSbellard e2); 890eaa728eeSbellard env->eip = offset; 891eaa728eeSbellard } 892eaa728eeSbellard 893eaa728eeSbellard #ifdef TARGET_X86_64 894eaa728eeSbellard 895059368bcSRichard Henderson static void pushq(StackAccess *sa, uint64_t val) 896059368bcSRichard Henderson { 897059368bcSRichard Henderson sa->sp -= 8; 8988053862aSPaolo Bonzini cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra); 899eaa728eeSbellard } 900eaa728eeSbellard 901059368bcSRichard Henderson static uint64_t popq(StackAccess *sa) 902059368bcSRichard Henderson { 9038053862aSPaolo Bonzini uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra); 904059368bcSRichard Henderson sa->sp += 8; 905059368bcSRichard Henderson return ret; 906eaa728eeSbellard } 907eaa728eeSbellard 9082999a0b2SBlue Swirl static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) 909eaa728eeSbellard { 9106aa9e42fSRichard Henderson X86CPU *cpu = env_archcpu(env); 91150fcc7cbSGareth Webb int index, pg_mode; 91250fcc7cbSGareth Webb target_ulong rsp; 91350fcc7cbSGareth Webb int32_t sext; 914eaa728eeSbellard 915eaa728eeSbellard #if 0 916eaa728eeSbellard printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 917eaa728eeSbellard env->tr.base, env->tr.limit); 918eaa728eeSbellard #endif 919eaa728eeSbellard 92020054ef0SBlue Swirl if (!(env->tr.flags & DESC_P_MASK)) { 921a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss"); 92220054ef0SBlue Swirl } 923eaa728eeSbellard index = 8 * level + 4; 92420054ef0SBlue Swirl if ((index + 7) > env->tr.limit) { 92577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); 92620054ef0SBlue Swirl } 92750fcc7cbSGareth Webb 92850fcc7cbSGareth Webb rsp = cpu_ldq_kernel(env, env->tr.base + index); 92950fcc7cbSGareth Webb 93050fcc7cbSGareth Webb /* test virtual address sign extension */ 93150fcc7cbSGareth Webb pg_mode = get_pg_mode(env); 93250fcc7cbSGareth Webb sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 
56 : 47); 93350fcc7cbSGareth Webb if (sext != 0 && sext != -1) { 93450fcc7cbSGareth Webb raise_exception_err(env, EXCP0C_STACK, 0); 93550fcc7cbSGareth Webb } 93650fcc7cbSGareth Webb 93750fcc7cbSGareth Webb return rsp; 938eaa728eeSbellard } 939eaa728eeSbellard 940eaa728eeSbellard /* 64 bit interrupt */ 9412999a0b2SBlue Swirl static void do_interrupt64(CPUX86State *env, int intno, int is_int, 9422999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 943eaa728eeSbellard { 944eaa728eeSbellard SegmentCache *dt; 945eaa728eeSbellard target_ulong ptr; 946eaa728eeSbellard int type, dpl, selector, cpl, ist; 947eaa728eeSbellard int has_error_code, new_stack; 948bde8adb8SPeter Maydell uint32_t e1, e2, e3, eflags; 949059368bcSRichard Henderson target_ulong old_eip, offset; 95069cb498cSPaolo Bonzini bool set_rf; 951059368bcSRichard Henderson StackAccess sa; 952eaa728eeSbellard 953eaa728eeSbellard has_error_code = 0; 95420054ef0SBlue Swirl if (!is_int && !is_hw) { 95520054ef0SBlue Swirl has_error_code = exception_has_error_code(intno); 95620054ef0SBlue Swirl } 95720054ef0SBlue Swirl if (is_int) { 958eaa728eeSbellard old_eip = next_eip; 95969cb498cSPaolo Bonzini set_rf = false; 96020054ef0SBlue Swirl } else { 961eaa728eeSbellard old_eip = env->eip; 96269cb498cSPaolo Bonzini set_rf = exception_is_fault(intno); 96320054ef0SBlue Swirl } 964eaa728eeSbellard 965eaa728eeSbellard dt = &env->idt; 96620054ef0SBlue Swirl if (intno * 16 + 15 > dt->limit) { 967b585edcaSJoe Richey raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 96820054ef0SBlue Swirl } 969eaa728eeSbellard ptr = dt->base + intno * 16; 970329e607dSBlue Swirl e1 = cpu_ldl_kernel(env, ptr); 971329e607dSBlue Swirl e2 = cpu_ldl_kernel(env, ptr + 4); 972329e607dSBlue Swirl e3 = cpu_ldl_kernel(env, ptr + 8); 973eaa728eeSbellard /* check gate type */ 974eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 975eaa728eeSbellard switch (type) { 976eaa728eeSbellard case 14: /* 386 interrupt gate */ 977eaa728eeSbellard case 15: /* 386 trap gate */ 978eaa728eeSbellard break; 979eaa728eeSbellard default: 980b585edcaSJoe Richey raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 981eaa728eeSbellard break; 982eaa728eeSbellard } 983eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 984eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 9851235fc06Sths /* check privilege if software int */ 98620054ef0SBlue Swirl if (is_int && dpl < cpl) { 987b585edcaSJoe Richey raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 98820054ef0SBlue Swirl } 989eaa728eeSbellard /* check valid bit */ 99020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 991b585edcaSJoe Richey raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 99220054ef0SBlue Swirl } 993eaa728eeSbellard selector = e1 >> 16; 994eaa728eeSbellard offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); 995eaa728eeSbellard ist = e2 & 7; 99620054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 99777b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, 0); 99820054ef0SBlue Swirl } 999eaa728eeSbellard 10002999a0b2SBlue Swirl if (load_segment(env, &e1, &e2, selector) != 0) { 100177b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 100220054ef0SBlue Swirl } 100320054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 100477b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 100520054ef0SBlue Swirl } 1006eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 100720054ef0SBlue Swirl if (dpl > cpl) { 100877b2bc2cSBlue Swirl 
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 100920054ef0SBlue Swirl } 101020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 101177b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 101220054ef0SBlue Swirl } 101320054ef0SBlue Swirl if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { 101477b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 101520054ef0SBlue Swirl } 10161110bfe6SPaolo Bonzini if (e2 & DESC_C_MASK) { 10171110bfe6SPaolo Bonzini dpl = cpl; 10181110bfe6SPaolo Bonzini } 1019059368bcSRichard Henderson 1020059368bcSRichard Henderson sa.env = env; 1021059368bcSRichard Henderson sa.ra = 0; 1022e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 1023059368bcSRichard Henderson sa.sp_mask = -1; 1024059368bcSRichard Henderson sa.ss_base = 0; 10251110bfe6SPaolo Bonzini if (dpl < cpl || ist != 0) { 1026eaa728eeSbellard /* to inner privilege */ 1027eaa728eeSbellard new_stack = 1; 1028059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl); 10291110bfe6SPaolo Bonzini } else { 1030eaa728eeSbellard /* to same privilege */ 103120054ef0SBlue Swirl if (env->eflags & VM_MASK) { 103277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 103320054ef0SBlue Swirl } 1034eaa728eeSbellard new_stack = 0; 1035059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1036e95e9b88SWu Xiang } 1037059368bcSRichard Henderson sa.sp &= ~0xfLL; /* align stack */ 1038eaa728eeSbellard 103969cb498cSPaolo Bonzini /* See do_interrupt_protected. */ 104069cb498cSPaolo Bonzini eflags = cpu_compute_eflags(env); 104169cb498cSPaolo Bonzini if (set_rf) { 104269cb498cSPaolo Bonzini eflags |= RF_MASK; 104369cb498cSPaolo Bonzini } 104469cb498cSPaolo Bonzini 1045059368bcSRichard Henderson pushq(&sa, env->segs[R_SS].selector); 1046059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 1047059368bcSRichard Henderson pushq(&sa, eflags); 1048059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1049059368bcSRichard Henderson pushq(&sa, old_eip); 1050eaa728eeSbellard if (has_error_code) { 1051059368bcSRichard Henderson pushq(&sa, error_code); 1052eaa728eeSbellard } 1053eaa728eeSbellard 1054fd460606SKevin O'Connor /* interrupt gate clear IF mask */ 1055fd460606SKevin O'Connor if ((type & 1) == 0) { 1056fd460606SKevin O'Connor env->eflags &= ~IF_MASK; 1057fd460606SKevin O'Connor } 1058fd460606SKevin O'Connor env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 1059fd460606SKevin O'Connor 1060eaa728eeSbellard if (new_stack) { 1061bde8adb8SPeter Maydell uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */ 1062e95e9b88SWu Xiang cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); 1063eaa728eeSbellard } 1064059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1065eaa728eeSbellard 1066eaa728eeSbellard selector = (selector & ~3) | dpl; 1067eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1068eaa728eeSbellard get_seg_base(e1, e2), 1069eaa728eeSbellard get_seg_limit(e1, e2), 1070eaa728eeSbellard e2); 1071eaa728eeSbellard env->eip = offset; 1072eaa728eeSbellard } 107363fd8ef0SPaolo Bonzini #endif /* TARGET_X86_64 */ 1074eaa728eeSbellard 10752999a0b2SBlue Swirl void helper_sysret(CPUX86State *env, int dflag) 1076eaa728eeSbellard { 1077eaa728eeSbellard int cpl, selector; 1078eaa728eeSbellard 1079eaa728eeSbellard if (!(env->efer & MSR_EFER_SCE)) { 1080100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); 1081eaa728eeSbellard } 1082eaa728eeSbellard cpl = 
env->hflags & HF_CPL_MASK; 1083eaa728eeSbellard if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { 1084100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1085eaa728eeSbellard } 1086eaa728eeSbellard selector = (env->star >> 48) & 0xffff; 108763fd8ef0SPaolo Bonzini #ifdef TARGET_X86_64 1088eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1089fd460606SKevin O'Connor cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK 1090fd460606SKevin O'Connor | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | 1091fd460606SKevin O'Connor NT_MASK); 1092eaa728eeSbellard if (dflag == 2) { 1093eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 1094eaa728eeSbellard 0, 0xffffffff, 1095eaa728eeSbellard DESC_G_MASK | DESC_P_MASK | 1096eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1097eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 1098eaa728eeSbellard DESC_L_MASK); 1099a4165610Sliguang env->eip = env->regs[R_ECX]; 1100eaa728eeSbellard } else { 1101eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1102eaa728eeSbellard 0, 0xffffffff, 1103eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1104eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1105eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1106a4165610Sliguang env->eip = (uint32_t)env->regs[R_ECX]; 1107eaa728eeSbellard } 1108ac576229SBill Paul cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1109eaa728eeSbellard 0, 0xffffffff, 1110eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1111eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1112eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 111363fd8ef0SPaolo Bonzini } else 111463fd8ef0SPaolo Bonzini #endif 111563fd8ef0SPaolo Bonzini { 1116fd460606SKevin O'Connor env->eflags |= IF_MASK; 1117eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1118eaa728eeSbellard 0, 0xffffffff, 1119eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1120eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1121eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1122a4165610Sliguang env->eip = (uint32_t)env->regs[R_ECX]; 1123ac576229SBill Paul cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1124eaa728eeSbellard 0, 0xffffffff, 1125eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1126eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1127eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 1128eaa728eeSbellard } 1129eaa728eeSbellard } 1130eaa728eeSbellard 1131eaa728eeSbellard /* real mode interrupt */ 11322999a0b2SBlue Swirl static void do_interrupt_real(CPUX86State *env, int intno, int is_int, 11332999a0b2SBlue Swirl int error_code, unsigned int next_eip) 1134eaa728eeSbellard { 1135eaa728eeSbellard SegmentCache *dt; 1136059368bcSRichard Henderson target_ulong ptr; 1137eaa728eeSbellard int selector; 1138059368bcSRichard Henderson uint32_t offset; 1139eaa728eeSbellard uint32_t old_cs, old_eip; 1140059368bcSRichard Henderson StackAccess sa; 1141eaa728eeSbellard 1142eaa728eeSbellard /* real mode (simpler!) 
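       in real mode the IDT register points at the interrupt vector
       table: each vector is a 4-byte real-mode CS:IP pair at
       base + intno * 4, and no descriptor or privilege checks apply.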
*/ 1143eaa728eeSbellard dt = &env->idt; 114420054ef0SBlue Swirl if (intno * 4 + 3 > dt->limit) { 114577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 114620054ef0SBlue Swirl } 1147eaa728eeSbellard ptr = dt->base + intno * 4; 1148329e607dSBlue Swirl offset = cpu_lduw_kernel(env, ptr); 1149329e607dSBlue Swirl selector = cpu_lduw_kernel(env, ptr + 2); 1150059368bcSRichard Henderson 1151059368bcSRichard Henderson sa.env = env; 1152059368bcSRichard Henderson sa.ra = 0; 1153059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1154059368bcSRichard Henderson sa.sp_mask = 0xffff; 1155059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1156e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1157059368bcSRichard Henderson 115820054ef0SBlue Swirl if (is_int) { 1159eaa728eeSbellard old_eip = next_eip; 116020054ef0SBlue Swirl } else { 1161eaa728eeSbellard old_eip = env->eip; 116220054ef0SBlue Swirl } 1163eaa728eeSbellard old_cs = env->segs[R_CS].selector; 1164eaa728eeSbellard /* XXX: use SS segment size? */ 1165059368bcSRichard Henderson pushw(&sa, cpu_compute_eflags(env)); 1166059368bcSRichard Henderson pushw(&sa, old_cs); 1167059368bcSRichard Henderson pushw(&sa, old_eip); 1168eaa728eeSbellard 1169eaa728eeSbellard /* update processor state */ 1170059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1171eaa728eeSbellard env->eip = offset; 1172eaa728eeSbellard env->segs[R_CS].selector = selector; 1173eaa728eeSbellard env->segs[R_CS].base = (selector << 4); 1174eaa728eeSbellard env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); 1175eaa728eeSbellard } 1176eaa728eeSbellard 1177eaa728eeSbellard /* 1178eaa728eeSbellard * Begin execution of an interruption. is_int is TRUE if coming from 1179a78d0eabSliguang * the int instruction. next_eip is the env->eip value AFTER the interrupt 1180eaa728eeSbellard * instruction. It is only relevant if is_int is TRUE. 
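 * is_hw marks events injected from outside the instruction stream
 * (e.g. hardware interrupts); an error code is pushed only when both
 * is_int and is_hw are false and the vector architecturally defines
 * one (see exception_has_error_code()).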
1181eaa728eeSbellard */ 118230493a03SClaudio Fontana void do_interrupt_all(X86CPU *cpu, int intno, int is_int, 11832999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 1184eaa728eeSbellard { 1185ca4c810aSAndreas Färber CPUX86State *env = &cpu->env; 1186ca4c810aSAndreas Färber 11878fec2b8cSaliguori if (qemu_loglevel_mask(CPU_LOG_INT)) { 1188eaa728eeSbellard if ((env->cr[0] & CR0_PE_MASK)) { 1189eaa728eeSbellard static int count; 119020054ef0SBlue Swirl 119120054ef0SBlue Swirl qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx 119220054ef0SBlue Swirl " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1193eaa728eeSbellard count, intno, error_code, is_int, 1194eaa728eeSbellard env->hflags & HF_CPL_MASK, 1195a78d0eabSliguang env->segs[R_CS].selector, env->eip, 1196a78d0eabSliguang (int)env->segs[R_CS].base + env->eip, 119708b3ded6Sliguang env->segs[R_SS].selector, env->regs[R_ESP]); 1198eaa728eeSbellard if (intno == 0x0e) { 119993fcfe39Saliguori qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1200eaa728eeSbellard } else { 12014b34e3adSliguang qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); 1202eaa728eeSbellard } 120393fcfe39Saliguori qemu_log("\n"); 1204a0762859SAndreas Färber log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); 1205eaa728eeSbellard #if 0 1206eaa728eeSbellard { 1207eaa728eeSbellard int i; 12089bd5494eSAdam Lackorzynski target_ulong ptr; 120920054ef0SBlue Swirl 121093fcfe39Saliguori qemu_log(" code="); 1211eaa728eeSbellard ptr = env->segs[R_CS].base + env->eip; 1212eaa728eeSbellard for (i = 0; i < 16; i++) { 121393fcfe39Saliguori qemu_log(" %02x", ldub(ptr + i)); 1214eaa728eeSbellard } 121593fcfe39Saliguori qemu_log("\n"); 1216eaa728eeSbellard } 1217eaa728eeSbellard #endif 1218eaa728eeSbellard count++; 1219eaa728eeSbellard } 1220eaa728eeSbellard } 1221eaa728eeSbellard if (env->cr[0] & CR0_PE_MASK) { 122200ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1223f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12242999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 0); 122520054ef0SBlue Swirl } 122600ea18d1Saliguori #endif 1227eb38c52cSblueswir1 #ifdef TARGET_X86_64 1228eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 12292999a0b2SBlue Swirl do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); 1230eaa728eeSbellard } else 1231eaa728eeSbellard #endif 1232eaa728eeSbellard { 12332999a0b2SBlue Swirl do_interrupt_protected(env, intno, is_int, error_code, next_eip, 12342999a0b2SBlue Swirl is_hw); 1235eaa728eeSbellard } 1236eaa728eeSbellard } else { 123700ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1238f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12392999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 1); 124020054ef0SBlue Swirl } 124100ea18d1Saliguori #endif 12422999a0b2SBlue Swirl do_interrupt_real(env, intno, is_int, error_code, next_eip); 1243eaa728eeSbellard } 12442ed51f5bSaliguori 124500ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1246f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 1247fdfba1a2SEdgar E. Iglesias CPUState *cs = CPU(cpu); 1248b216aa6cSPaolo Bonzini uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + 124920054ef0SBlue Swirl offsetof(struct vmcb, 125020054ef0SBlue Swirl control.event_inj)); 125120054ef0SBlue Swirl 1252b216aa6cSPaolo Bonzini x86_stl_phys(cs, 1253ab1da857SEdgar E. 
Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 125420054ef0SBlue Swirl event_inj & ~SVM_EVTINJ_VALID); 12552ed51f5bSaliguori } 125600ea18d1Saliguori #endif 1257eaa728eeSbellard } 1258eaa728eeSbellard 12592999a0b2SBlue Swirl void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1260e694d4e2SBlue Swirl { 12616aa9e42fSRichard Henderson do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1262e694d4e2SBlue Swirl } 1263e694d4e2SBlue Swirl 12642999a0b2SBlue Swirl void helper_lldt(CPUX86State *env, int selector) 1265eaa728eeSbellard { 1266eaa728eeSbellard SegmentCache *dt; 1267eaa728eeSbellard uint32_t e1, e2; 1268eaa728eeSbellard int index, entry_limit; 1269eaa728eeSbellard target_ulong ptr; 1270eaa728eeSbellard 1271eaa728eeSbellard selector &= 0xffff; 1272eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1273eaa728eeSbellard /* XXX: NULL selector case: invalid LDT */ 1274eaa728eeSbellard env->ldt.base = 0; 1275eaa728eeSbellard env->ldt.limit = 0; 1276eaa728eeSbellard } else { 127720054ef0SBlue Swirl if (selector & 0x4) { 1278100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 127920054ef0SBlue Swirl } 1280eaa728eeSbellard dt = &env->gdt; 1281eaa728eeSbellard index = selector & ~7; 1282eaa728eeSbellard #ifdef TARGET_X86_64 128320054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1284eaa728eeSbellard entry_limit = 15; 128520054ef0SBlue Swirl } else 1286eaa728eeSbellard #endif 128720054ef0SBlue Swirl { 1288eaa728eeSbellard entry_limit = 7; 128920054ef0SBlue Swirl } 129020054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1291100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 129220054ef0SBlue Swirl } 1293eaa728eeSbellard ptr = dt->base + index; 1294100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1295100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 129620054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1297100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 129820054ef0SBlue Swirl } 129920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1300100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 130120054ef0SBlue Swirl } 1302eaa728eeSbellard #ifdef TARGET_X86_64 1303eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1304eaa728eeSbellard uint32_t e3; 130520054ef0SBlue Swirl 1306100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1307eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1308eaa728eeSbellard env->ldt.base |= (target_ulong)e3 << 32; 1309eaa728eeSbellard } else 1310eaa728eeSbellard #endif 1311eaa728eeSbellard { 1312eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1313eaa728eeSbellard } 1314eaa728eeSbellard } 1315eaa728eeSbellard env->ldt.selector = selector; 1316eaa728eeSbellard } 1317eaa728eeSbellard 13182999a0b2SBlue Swirl void helper_ltr(CPUX86State *env, int selector) 1319eaa728eeSbellard { 1320eaa728eeSbellard SegmentCache *dt; 1321eaa728eeSbellard uint32_t e1, e2; 1322eaa728eeSbellard int index, type, entry_limit; 1323eaa728eeSbellard target_ulong ptr; 1324eaa728eeSbellard 1325eaa728eeSbellard selector &= 0xffff; 1326eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1327eaa728eeSbellard /* NULL selector case: invalid TR */ 1328eaa728eeSbellard env->tr.base = 0; 1329eaa728eeSbellard env->tr.limit = 0; 1330eaa728eeSbellard env->tr.flags = 0; 1331eaa728eeSbellard } 
else { 133220054ef0SBlue Swirl if (selector & 0x4) { 1333100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 133420054ef0SBlue Swirl } 1335eaa728eeSbellard dt = &env->gdt; 1336eaa728eeSbellard index = selector & ~7; 1337eaa728eeSbellard #ifdef TARGET_X86_64 133820054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1339eaa728eeSbellard entry_limit = 15; 134020054ef0SBlue Swirl } else 1341eaa728eeSbellard #endif 134220054ef0SBlue Swirl { 1343eaa728eeSbellard entry_limit = 7; 134420054ef0SBlue Swirl } 134520054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1346100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 134720054ef0SBlue Swirl } 1348eaa728eeSbellard ptr = dt->base + index; 1349100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1350100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1351eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1352eaa728eeSbellard if ((e2 & DESC_S_MASK) || 135320054ef0SBlue Swirl (type != 1 && type != 9)) { 1354100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 135520054ef0SBlue Swirl } 135620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1357100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 135820054ef0SBlue Swirl } 1359eaa728eeSbellard #ifdef TARGET_X86_64 1360eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1361eaa728eeSbellard uint32_t e3, e4; 136220054ef0SBlue Swirl 1363100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1364100ec099SPavel Dovgalyuk e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 136520054ef0SBlue Swirl if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1366100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 136720054ef0SBlue Swirl } 1368eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1369eaa728eeSbellard env->tr.base |= (target_ulong)e3 << 32; 1370eaa728eeSbellard } else 1371eaa728eeSbellard #endif 1372eaa728eeSbellard { 1373eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1374eaa728eeSbellard } 1375eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 1376100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1377eaa728eeSbellard } 1378eaa728eeSbellard env->tr.selector = selector; 1379eaa728eeSbellard } 1380eaa728eeSbellard 1381eaa728eeSbellard /* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ 13822999a0b2SBlue Swirl void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1383eaa728eeSbellard { 1384eaa728eeSbellard uint32_t e1, e2; 1385eaa728eeSbellard int cpl, dpl, rpl; 1386eaa728eeSbellard SegmentCache *dt; 1387eaa728eeSbellard int index; 1388eaa728eeSbellard target_ulong ptr; 1389eaa728eeSbellard 1390eaa728eeSbellard selector &= 0xffff; 1391eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1392eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1393eaa728eeSbellard /* null selector case */ 1394eaa728eeSbellard if (seg_reg == R_SS 1395eaa728eeSbellard #ifdef TARGET_X86_64 1396eaa728eeSbellard && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1397eaa728eeSbellard #endif 139820054ef0SBlue Swirl ) { 1399100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 140020054ef0SBlue Swirl } 1401eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1402eaa728eeSbellard } else { 1403eaa728eeSbellard 140420054ef0SBlue Swirl if (selector & 0x4) { 1405eaa728eeSbellard dt = &env->ldt; 140620054ef0SBlue Swirl } else { 1407eaa728eeSbellard dt = &env->gdt; 140820054ef0SBlue Swirl } 1409eaa728eeSbellard index = selector & ~7; 141020054ef0SBlue Swirl if ((index + 7) > dt->limit) { 1411100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141220054ef0SBlue Swirl } 1413eaa728eeSbellard ptr = dt->base + index; 1414100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1415100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1416eaa728eeSbellard 141720054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 1418100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141920054ef0SBlue Swirl } 1420eaa728eeSbellard rpl = selector & 3; 1421eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1422eaa728eeSbellard if (seg_reg == R_SS) { 1423eaa728eeSbellard /* must be writable segment */ 142420054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1425100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142620054ef0SBlue Swirl } 142720054ef0SBlue Swirl if (rpl != cpl || dpl != cpl) { 1428100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142920054ef0SBlue Swirl } 1430eaa728eeSbellard } else { 1431eaa728eeSbellard /* must be readable segment */ 143220054ef0SBlue Swirl if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1433100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 143420054ef0SBlue Swirl } 1435eaa728eeSbellard 1436eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1437eaa728eeSbellard /* if not conforming code, test rights */ 143820054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1439100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1440eaa728eeSbellard } 1441eaa728eeSbellard } 144220054ef0SBlue Swirl } 1443eaa728eeSbellard 1444eaa728eeSbellard if (!(e2 & DESC_P_MASK)) { 144520054ef0SBlue Swirl if (seg_reg == R_SS) { 1446100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 144720054ef0SBlue Swirl } else { 1448100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1449eaa728eeSbellard } 145020054ef0SBlue Swirl } 1451eaa728eeSbellard 1452eaa728eeSbellard /* set the access bit if not already set */ 1453eaa728eeSbellard if (!(e2 & DESC_A_MASK)) { 
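/*
 * Rough layout of the descriptor dword at ptr + 4 (e2), in terms of the
 * masks used in this file: bit 8 is A (DESC_A_MASK), bits 9-11 the rest
 * of the type field (W/R, E/C, code-vs-data), bit 12 S, bits 13-14 DPL,
 * bit 15 P, with the limit/base fragments and the AVL/L/B/G attributes
 * in the remaining bits; e.g. dpl == ((e2 >> DESC_DPL_SHIFT) & 3).
 * Only the A bit changes before the dword is stored back below.
 */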
1454eaa728eeSbellard e2 |= DESC_A_MASK; 1455100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1456eaa728eeSbellard } 1457eaa728eeSbellard 1458eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 1459eaa728eeSbellard get_seg_base(e1, e2), 1460eaa728eeSbellard get_seg_limit(e1, e2), 1461eaa728eeSbellard e2); 1462eaa728eeSbellard #if 0 146393fcfe39Saliguori qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1464eaa728eeSbellard selector, (unsigned long)sc->base, sc->limit, sc->flags); 1465eaa728eeSbellard #endif 1466eaa728eeSbellard } 1467eaa728eeSbellard } 1468eaa728eeSbellard 1469eaa728eeSbellard /* protected mode jump */ 14702999a0b2SBlue Swirl void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1471100ec099SPavel Dovgalyuk target_ulong next_eip) 1472eaa728eeSbellard { 1473eaa728eeSbellard int gate_cs, type; 1474eaa728eeSbellard uint32_t e1, e2, cpl, dpl, rpl, limit; 1475eaa728eeSbellard 147620054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1477100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 147820054ef0SBlue Swirl } 1479100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1480100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 148120054ef0SBlue Swirl } 1482eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1483eaa728eeSbellard if (e2 & DESC_S_MASK) { 148420054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1485100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 148620054ef0SBlue Swirl } 1487eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1488eaa728eeSbellard if (e2 & DESC_C_MASK) { 1489eaa728eeSbellard /* conforming code segment */ 149020054ef0SBlue Swirl if (dpl > cpl) { 1491100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 149220054ef0SBlue Swirl } 1493eaa728eeSbellard } else { 1494eaa728eeSbellard /* non conforming code segment */ 1495eaa728eeSbellard rpl = new_cs & 3; 149620054ef0SBlue Swirl if (rpl > cpl) { 1497100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1498eaa728eeSbellard } 149920054ef0SBlue Swirl if (dpl != cpl) { 1500100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 150120054ef0SBlue Swirl } 150220054ef0SBlue Swirl } 150320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1504100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 150520054ef0SBlue Swirl } 1506eaa728eeSbellard limit = get_seg_limit(e1, e2); 1507eaa728eeSbellard if (new_eip > limit && 1508db7196dbSAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1509db7196dbSAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 151020054ef0SBlue Swirl } 1511eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1512eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1513a78d0eabSliguang env->eip = new_eip; 1514eaa728eeSbellard } else { 1515eaa728eeSbellard /* jump to call or task gate */ 1516eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1517eaa728eeSbellard rpl = new_cs & 3; 1518eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1519eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 15200aca0605SAndrew Oates 15210aca0605SAndrew Oates #ifdef TARGET_X86_64 15220aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15230aca0605SAndrew Oates if (type != 12) { 15240aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 15250aca0605SAndrew Oates } 15260aca0605SAndrew Oates } 15270aca0605SAndrew Oates #endif 1528eaa728eeSbellard switch (type) { 1529eaa728eeSbellard case 1: /* 286 TSS */ 1530eaa728eeSbellard case 9: /* 386 TSS */ 1531eaa728eeSbellard case 5: /* task gate */ 153220054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1533100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 153420054ef0SBlue Swirl } 1535100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1536eaa728eeSbellard break; 1537eaa728eeSbellard case 4: /* 286 call gate */ 1538eaa728eeSbellard case 12: /* 386 call gate */ 153920054ef0SBlue Swirl if ((dpl < cpl) || (dpl < rpl)) { 1540100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 154120054ef0SBlue Swirl } 154220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1543100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 154420054ef0SBlue Swirl } 1545eaa728eeSbellard gate_cs = e1 >> 16; 1546eaa728eeSbellard new_eip = (e1 & 0xffff); 154720054ef0SBlue Swirl if (type == 12) { 1548eaa728eeSbellard new_eip |= (e2 & 0xffff0000); 154920054ef0SBlue Swirl } 15500aca0605SAndrew Oates 15510aca0605SAndrew Oates #ifdef TARGET_X86_64 15520aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15530aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 15540aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 15550aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15560aca0605SAndrew Oates GETPC()); 15570aca0605SAndrew Oates } 15580aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 15590aca0605SAndrew Oates if (type != 0) { 15600aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15610aca0605SAndrew Oates GETPC()); 15620aca0605SAndrew Oates } 15630aca0605SAndrew Oates new_eip |= ((target_ulong)e1) << 32; 15640aca0605SAndrew Oates } 15650aca0605SAndrew Oates #endif 15660aca0605SAndrew Oates 1567100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1568100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 156920054ef0SBlue Swirl } 1570eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1571eaa728eeSbellard /* must be code segment */ 1572eaa728eeSbellard if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 157320054ef0SBlue Swirl (DESC_S_MASK | DESC_CS_MASK))) { 1574100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 157520054ef0SBlue Swirl } 1576eaa728eeSbellard if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 157720054ef0SBlue Swirl (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1578100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 157920054ef0SBlue Swirl } 15800aca0605SAndrew Oates #ifdef TARGET_X86_64 15810aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15820aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 15830aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15840aca0605SAndrew Oates } 15850aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 15860aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15870aca0605SAndrew Oates } 15880aca0605SAndrew Oates } 15890aca0605SAndrew Oates #endif 159020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1591100ec099SPavel Dovgalyuk 
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 159220054ef0SBlue Swirl } 1593eaa728eeSbellard limit = get_seg_limit(e1, e2); 15940aca0605SAndrew Oates if (new_eip > limit && 15950aca0605SAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1596100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 159720054ef0SBlue Swirl } 1598eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1599eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1600a78d0eabSliguang env->eip = new_eip; 1601eaa728eeSbellard break; 1602eaa728eeSbellard default: 1603100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1604eaa728eeSbellard break; 1605eaa728eeSbellard } 1606eaa728eeSbellard } 1607eaa728eeSbellard } 1608eaa728eeSbellard 1609eaa728eeSbellard /* real mode call */ 16108c03ab9fSRichard Henderson void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, 16118c03ab9fSRichard Henderson int shift, uint32_t next_eip) 1612eaa728eeSbellard { 1613059368bcSRichard Henderson StackAccess sa; 1614eaa728eeSbellard 1615059368bcSRichard Henderson sa.env = env; 1616059368bcSRichard Henderson sa.ra = GETPC(); 1617059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1618059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1619059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1620e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1621059368bcSRichard Henderson 1622eaa728eeSbellard if (shift) { 1623059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1624059368bcSRichard Henderson pushl(&sa, next_eip); 1625eaa728eeSbellard } else { 1626059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1627059368bcSRichard Henderson pushw(&sa, next_eip); 1628eaa728eeSbellard } 1629eaa728eeSbellard 1630059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1631eaa728eeSbellard env->eip = new_eip; 1632eaa728eeSbellard env->segs[R_CS].selector = new_cs; 1633eaa728eeSbellard env->segs[R_CS].base = (new_cs << 4); 1634eaa728eeSbellard } 1635eaa728eeSbellard 1636eaa728eeSbellard /* protected mode call */ 16372999a0b2SBlue Swirl void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1638100ec099SPavel Dovgalyuk int shift, target_ulong next_eip) 1639eaa728eeSbellard { 1640eaa728eeSbellard int new_stack, i; 16410aca0605SAndrew Oates uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1642059368bcSRichard Henderson uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; 1643eaa728eeSbellard uint32_t val, limit, old_sp_mask; 1644059368bcSRichard Henderson target_ulong old_ssp, offset; 1645059368bcSRichard Henderson StackAccess sa; 1646eaa728eeSbellard 16470aca0605SAndrew Oates LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 16486aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 164920054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1650100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 165120054ef0SBlue Swirl } 1652100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1653100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 165420054ef0SBlue Swirl } 1655eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1656d12d51d5Saliguori LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1657059368bcSRichard Henderson 1658059368bcSRichard Henderson sa.env = env; 1659059368bcSRichard Henderson sa.ra = GETPC(); 
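/*
 * In outline, the rest of this helper handles two cases: if DESC_S_MASK
 * is set the target is an ordinary code segment and the far call runs on
 * the current stack; otherwise the selector names a system descriptor,
 * where TSS/task gates are forwarded to switch_tss_ra() and call gates
 * may switch to an inner stack fetched from the TSS before CS:EIP is
 * reloaded from the gate.  sa.sp, sa.sp_mask, sa.ss_base and sa.mmu_index
 * are therefore filled in per branch.
 */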
1660059368bcSRichard Henderson 1661eaa728eeSbellard if (e2 & DESC_S_MASK) { 1662e136648cSPaolo Bonzini /* "normal" far call, no stack switch possible */ 166320054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1664100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 166520054ef0SBlue Swirl } 1666eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1667eaa728eeSbellard if (e2 & DESC_C_MASK) { 1668eaa728eeSbellard /* conforming code segment */ 166920054ef0SBlue Swirl if (dpl > cpl) { 1670100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 167120054ef0SBlue Swirl } 1672eaa728eeSbellard } else { 1673eaa728eeSbellard /* non conforming code segment */ 1674eaa728eeSbellard rpl = new_cs & 3; 167520054ef0SBlue Swirl if (rpl > cpl) { 1676100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1677eaa728eeSbellard } 167820054ef0SBlue Swirl if (dpl != cpl) { 1679100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 168020054ef0SBlue Swirl } 168120054ef0SBlue Swirl } 168220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1683100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 168420054ef0SBlue Swirl } 1685eaa728eeSbellard 1686e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1687eaa728eeSbellard #ifdef TARGET_X86_64 1688eaa728eeSbellard /* XXX: check 16/32 bit cases in long mode */ 1689eaa728eeSbellard if (shift == 2) { 1690eaa728eeSbellard /* 64 bit case */ 1691059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1692059368bcSRichard Henderson sa.sp_mask = -1; 1693059368bcSRichard Henderson sa.ss_base = 0; 1694059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1695059368bcSRichard Henderson pushq(&sa, next_eip); 1696eaa728eeSbellard /* from this point, not restartable */ 1697059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1698eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1699eaa728eeSbellard get_seg_base(e1, e2), 1700eaa728eeSbellard get_seg_limit(e1, e2), e2); 1701a78d0eabSliguang env->eip = new_eip; 1702eaa728eeSbellard } else 1703eaa728eeSbellard #endif 1704eaa728eeSbellard { 1705059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1706059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1707059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1708eaa728eeSbellard if (shift) { 1709059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1710059368bcSRichard Henderson pushl(&sa, next_eip); 1711eaa728eeSbellard } else { 1712059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1713059368bcSRichard Henderson pushw(&sa, next_eip); 1714eaa728eeSbellard } 1715eaa728eeSbellard 1716eaa728eeSbellard limit = get_seg_limit(e1, e2); 171720054ef0SBlue Swirl if (new_eip > limit) { 1718100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 171920054ef0SBlue Swirl } 1720eaa728eeSbellard /* from this point, not restartable */ 1721059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1722eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1723eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1724a78d0eabSliguang env->eip = new_eip; 1725eaa728eeSbellard } 1726eaa728eeSbellard } else { 1727eaa728eeSbellard /* check gate type */ 1728eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1729eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1730eaa728eeSbellard 
rpl = new_cs & 3; 17310aca0605SAndrew Oates 17320aca0605SAndrew Oates #ifdef TARGET_X86_64 17330aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17340aca0605SAndrew Oates if (type != 12) { 17350aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 17360aca0605SAndrew Oates } 17370aca0605SAndrew Oates } 17380aca0605SAndrew Oates #endif 17390aca0605SAndrew Oates 1740eaa728eeSbellard switch (type) { 1741eaa728eeSbellard case 1: /* available 286 TSS */ 1742eaa728eeSbellard case 9: /* available 386 TSS */ 1743eaa728eeSbellard case 5: /* task gate */ 174420054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1745100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 174620054ef0SBlue Swirl } 1747100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1748eaa728eeSbellard return; 1749eaa728eeSbellard case 4: /* 286 call gate */ 1750eaa728eeSbellard case 12: /* 386 call gate */ 1751eaa728eeSbellard break; 1752eaa728eeSbellard default: 1753100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1754eaa728eeSbellard break; 1755eaa728eeSbellard } 1756eaa728eeSbellard shift = type >> 3; 1757eaa728eeSbellard 175820054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1759100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 176020054ef0SBlue Swirl } 1761eaa728eeSbellard /* check valid bit */ 176220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1763100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 176420054ef0SBlue Swirl } 1765eaa728eeSbellard selector = e1 >> 16; 1766eaa728eeSbellard param_count = e2 & 0x1f; 17670aca0605SAndrew Oates offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 17680aca0605SAndrew Oates #ifdef TARGET_X86_64 17690aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17700aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 17710aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 17720aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17730aca0605SAndrew Oates GETPC()); 17740aca0605SAndrew Oates } 17750aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 17760aca0605SAndrew Oates if (type != 0) { 17770aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17780aca0605SAndrew Oates GETPC()); 17790aca0605SAndrew Oates } 17800aca0605SAndrew Oates offset |= ((target_ulong)e1) << 32; 17810aca0605SAndrew Oates } 17820aca0605SAndrew Oates #endif 178320054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 1784100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 178520054ef0SBlue Swirl } 1786eaa728eeSbellard 1787100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1788100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 178920054ef0SBlue Swirl } 179020054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1791100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 179220054ef0SBlue Swirl } 1793eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 179420054ef0SBlue Swirl if (dpl > cpl) { 1795100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 179620054ef0SBlue Swirl } 17970aca0605SAndrew Oates #ifdef TARGET_X86_64 17980aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 
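/*
 * In IA-32e mode a call gate may only target a 64-bit code segment:
 * L must be set and D/B clear, otherwise #GP(selector).  shift is also
 * bumped below so the outer frame is pushed in 8-byte units.
 */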
17990aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 18000aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 18010aca0605SAndrew Oates } 18020aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 18030aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 18040aca0605SAndrew Oates } 18050aca0605SAndrew Oates shift++; 18060aca0605SAndrew Oates } 18070aca0605SAndrew Oates #endif 180820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1809100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 181020054ef0SBlue Swirl } 1811eaa728eeSbellard 1812eaa728eeSbellard if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1813eaa728eeSbellard /* to inner privilege */ 1814e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 18150aca0605SAndrew Oates #ifdef TARGET_X86_64 18160aca0605SAndrew Oates if (shift == 2) { 18170aca0605SAndrew Oates ss = dpl; /* SS = NULL selector with RPL = new CPL */ 18180aca0605SAndrew Oates new_stack = 1; 1819059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, dpl); 1820059368bcSRichard Henderson sa.sp_mask = -1; 1821059368bcSRichard Henderson sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ 18220aca0605SAndrew Oates LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1823059368bcSRichard Henderson TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); 18240aca0605SAndrew Oates } else 18250aca0605SAndrew Oates #endif 18260aca0605SAndrew Oates { 18270aca0605SAndrew Oates uint32_t sp32; 18280aca0605SAndrew Oates get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 182990a2541bSliguang LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 18300aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp32, param_count, 183190a2541bSliguang env->regs[R_ESP]); 183220054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 1833100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 183420054ef0SBlue Swirl } 183520054ef0SBlue Swirl if ((ss & 3) != dpl) { 1836100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 183720054ef0SBlue Swirl } 1838100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1839100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 184020054ef0SBlue Swirl } 1841eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 184220054ef0SBlue Swirl if (ss_dpl != dpl) { 1843100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 184420054ef0SBlue Swirl } 1845eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 1846eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 184720054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 1848100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 184920054ef0SBlue Swirl } 185020054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 1851100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 185220054ef0SBlue Swirl } 1853eaa728eeSbellard 1854059368bcSRichard Henderson sa.sp = sp32; 1855059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 1856059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 18570aca0605SAndrew Oates } 18580aca0605SAndrew Oates 185920054ef0SBlue Swirl /* push_size = ((param_count * 2) + 8) << shift; */ 1860eaa728eeSbellard old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1861eaa728eeSbellard old_ssp = env->segs[R_SS].base; 1862059368bcSRichard Henderson 18630aca0605SAndrew Oates #ifdef TARGET_X86_64 
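/*
 * Frame pushed onto the inner stack from here on: old SS and old (R)SP
 * first, then (for 16/32-bit gates only) param_count entries copied from
 * the caller's stack, and finally old CS and the return (R)IP further
 * down.  64-bit call gates never copy parameters.
 */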
18640aca0605SAndrew Oates if (shift == 2) { 18650aca0605SAndrew Oates /* XXX: verify if new stack address is canonical */ 1866059368bcSRichard Henderson pushq(&sa, env->segs[R_SS].selector); 1867059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 18680aca0605SAndrew Oates /* parameters aren't supported for 64-bit call gates */ 18690aca0605SAndrew Oates } else 18700aca0605SAndrew Oates #endif 18710aca0605SAndrew Oates if (shift == 1) { 1872059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 1873059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 1874eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18750bd385e7SPaolo Bonzini val = cpu_ldl_data_ra(env, 18760bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), 18770bd385e7SPaolo Bonzini GETPC()); 1878059368bcSRichard Henderson pushl(&sa, val); 1879eaa728eeSbellard } 1880eaa728eeSbellard } else { 1881059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 1882059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 1883eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18840bd385e7SPaolo Bonzini val = cpu_lduw_data_ra(env, 18850bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), 18860bd385e7SPaolo Bonzini GETPC()); 1887059368bcSRichard Henderson pushw(&sa, val); 1888eaa728eeSbellard } 1889eaa728eeSbellard } 1890eaa728eeSbellard new_stack = 1; 1891eaa728eeSbellard } else { 1892eaa728eeSbellard /* to same privilege */ 1893e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1894059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1895059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1896059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 189720054ef0SBlue Swirl /* push_size = (4 << shift); */ 1898eaa728eeSbellard new_stack = 0; 1899eaa728eeSbellard } 1900eaa728eeSbellard 19010aca0605SAndrew Oates #ifdef TARGET_X86_64 19020aca0605SAndrew Oates if (shift == 2) { 1903059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1904059368bcSRichard Henderson pushq(&sa, next_eip); 19050aca0605SAndrew Oates } else 19060aca0605SAndrew Oates #endif 19070aca0605SAndrew Oates if (shift == 1) { 1908059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1909059368bcSRichard Henderson pushl(&sa, next_eip); 1910eaa728eeSbellard } else { 1911059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1912059368bcSRichard Henderson pushw(&sa, next_eip); 1913eaa728eeSbellard } 1914eaa728eeSbellard 1915eaa728eeSbellard /* from this point, not restartable */ 1916eaa728eeSbellard 1917eaa728eeSbellard if (new_stack) { 19180aca0605SAndrew Oates #ifdef TARGET_X86_64 19190aca0605SAndrew Oates if (shift == 2) { 19200aca0605SAndrew Oates cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 19210aca0605SAndrew Oates } else 19220aca0605SAndrew Oates #endif 19230aca0605SAndrew Oates { 1924eaa728eeSbellard ss = (ss & ~3) | dpl; 1925eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 1926059368bcSRichard Henderson sa.ss_base, 1927eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 1928eaa728eeSbellard ss_e2); 1929eaa728eeSbellard } 19300aca0605SAndrew Oates } 1931eaa728eeSbellard 1932eaa728eeSbellard selector = (selector & ~3) | dpl; 1933eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1934eaa728eeSbellard get_seg_base(e1, e2), 1935eaa728eeSbellard get_seg_limit(e1, e2), 1936eaa728eeSbellard e2); 1937059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1938a78d0eabSliguang env->eip = offset; 
1939eaa728eeSbellard } 1940eaa728eeSbellard } 1941eaa728eeSbellard 1942eaa728eeSbellard /* real and vm86 mode iret */ 19432999a0b2SBlue Swirl void helper_iret_real(CPUX86State *env, int shift) 1944eaa728eeSbellard { 1945059368bcSRichard Henderson uint32_t new_cs, new_eip, new_eflags; 1946eaa728eeSbellard int eflags_mask; 1947059368bcSRichard Henderson StackAccess sa; 1948eaa728eeSbellard 1949059368bcSRichard Henderson sa.env = env; 1950059368bcSRichard Henderson sa.ra = GETPC(); 19518053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1952059368bcSRichard Henderson sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ 1953059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1954059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1955059368bcSRichard Henderson 1956eaa728eeSbellard if (shift == 1) { 1957eaa728eeSbellard /* 32 bits */ 1958059368bcSRichard Henderson new_eip = popl(&sa); 1959059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 1960059368bcSRichard Henderson new_eflags = popl(&sa); 1961eaa728eeSbellard } else { 1962eaa728eeSbellard /* 16 bits */ 1963059368bcSRichard Henderson new_eip = popw(&sa); 1964059368bcSRichard Henderson new_cs = popw(&sa); 1965059368bcSRichard Henderson new_eflags = popw(&sa); 1966eaa728eeSbellard } 1967059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1968bdadc0b5Smalc env->segs[R_CS].selector = new_cs; 1969bdadc0b5Smalc env->segs[R_CS].base = (new_cs << 4); 1970eaa728eeSbellard env->eip = new_eip; 197120054ef0SBlue Swirl if (env->eflags & VM_MASK) { 197220054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 197320054ef0SBlue Swirl NT_MASK; 197420054ef0SBlue Swirl } else { 197520054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 197620054ef0SBlue Swirl RF_MASK | NT_MASK; 197720054ef0SBlue Swirl } 197820054ef0SBlue Swirl if (shift == 0) { 1979eaa728eeSbellard eflags_mask &= 0xffff; 198020054ef0SBlue Swirl } 1981997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 1982db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 1983eaa728eeSbellard } 1984eaa728eeSbellard 1985c117e5b1SPhilippe Mathieu-Daudé static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 1986eaa728eeSbellard { 1987eaa728eeSbellard int dpl; 1988eaa728eeSbellard uint32_t e2; 1989eaa728eeSbellard 1990eaa728eeSbellard /* XXX: on x86_64, we do not want to nullify FS and GS because 1991eaa728eeSbellard they may still contain a valid base. 
I would be interested to 1992eaa728eeSbellard know how a real x86_64 CPU behaves */ 1993eaa728eeSbellard if ((seg_reg == R_FS || seg_reg == R_GS) && 199420054ef0SBlue Swirl (env->segs[seg_reg].selector & 0xfffc) == 0) { 1995eaa728eeSbellard return; 199620054ef0SBlue Swirl } 1997eaa728eeSbellard 1998eaa728eeSbellard e2 = env->segs[seg_reg].flags; 1999eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2000eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 2001eaa728eeSbellard /* data or non conforming code segment */ 2002eaa728eeSbellard if (dpl < cpl) { 2003c2ba0515SBin Meng cpu_x86_load_seg_cache(env, seg_reg, 0, 2004c2ba0515SBin Meng env->segs[seg_reg].base, 2005c2ba0515SBin Meng env->segs[seg_reg].limit, 2006c2ba0515SBin Meng env->segs[seg_reg].flags & ~DESC_P_MASK); 2007eaa728eeSbellard } 2008eaa728eeSbellard } 2009eaa728eeSbellard } 2010eaa728eeSbellard 2011eaa728eeSbellard /* protected mode iret */ 20122999a0b2SBlue Swirl static inline void helper_ret_protected(CPUX86State *env, int shift, 2013100ec099SPavel Dovgalyuk int is_iret, int addend, 2014100ec099SPavel Dovgalyuk uintptr_t retaddr) 2015eaa728eeSbellard { 2016eaa728eeSbellard uint32_t new_cs, new_eflags, new_ss; 2017eaa728eeSbellard uint32_t new_es, new_ds, new_fs, new_gs; 2018eaa728eeSbellard uint32_t e1, e2, ss_e1, ss_e2; 2019eaa728eeSbellard int cpl, dpl, rpl, eflags_mask, iopl; 2020059368bcSRichard Henderson target_ulong new_eip, new_esp; 2021059368bcSRichard Henderson StackAccess sa; 2022059368bcSRichard Henderson 20238053862aSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 20248053862aSPaolo Bonzini 2025059368bcSRichard Henderson sa.env = env; 2026059368bcSRichard Henderson sa.ra = retaddr; 20278053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 2028eaa728eeSbellard 2029eaa728eeSbellard #ifdef TARGET_X86_64 203020054ef0SBlue Swirl if (shift == 2) { 2031059368bcSRichard Henderson sa.sp_mask = -1; 203220054ef0SBlue Swirl } else 2033eaa728eeSbellard #endif 203420054ef0SBlue Swirl { 2035059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 203620054ef0SBlue Swirl } 2037059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 2038059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 2039eaa728eeSbellard new_eflags = 0; /* avoid warning */ 2040eaa728eeSbellard #ifdef TARGET_X86_64 2041eaa728eeSbellard if (shift == 2) { 2042059368bcSRichard Henderson new_eip = popq(&sa); 2043059368bcSRichard Henderson new_cs = popq(&sa) & 0xffff; 2044eaa728eeSbellard if (is_iret) { 2045059368bcSRichard Henderson new_eflags = popq(&sa); 2046eaa728eeSbellard } 2047eaa728eeSbellard } else 2048eaa728eeSbellard #endif 204920054ef0SBlue Swirl { 2050eaa728eeSbellard if (shift == 1) { 2051eaa728eeSbellard /* 32 bits */ 2052059368bcSRichard Henderson new_eip = popl(&sa); 2053059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 2054eaa728eeSbellard if (is_iret) { 2055059368bcSRichard Henderson new_eflags = popl(&sa); 205620054ef0SBlue Swirl if (new_eflags & VM_MASK) { 2057eaa728eeSbellard goto return_to_vm86; 2058eaa728eeSbellard } 205920054ef0SBlue Swirl } 2060eaa728eeSbellard } else { 2061eaa728eeSbellard /* 16 bits */ 2062059368bcSRichard Henderson new_eip = popw(&sa); 2063059368bcSRichard Henderson new_cs = popw(&sa); 206420054ef0SBlue Swirl if (is_iret) { 2065059368bcSRichard Henderson new_eflags = popw(&sa); 2066eaa728eeSbellard } 206720054ef0SBlue Swirl } 206820054ef0SBlue Swirl } 2069d12d51d5Saliguori LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2070eaa728eeSbellard 
new_cs, new_eip, shift, addend); 20716aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 207220054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 2073100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2074eaa728eeSbellard } 2075100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2076100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 207720054ef0SBlue Swirl } 207820054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || 207920054ef0SBlue Swirl !(e2 & DESC_CS_MASK)) { 2080100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 208120054ef0SBlue Swirl } 208220054ef0SBlue Swirl rpl = new_cs & 3; 208320054ef0SBlue Swirl if (rpl < cpl) { 2084100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 208520054ef0SBlue Swirl } 208620054ef0SBlue Swirl dpl = (e2 >> DESC_DPL_SHIFT) & 3; 208720054ef0SBlue Swirl if (e2 & DESC_C_MASK) { 208820054ef0SBlue Swirl if (dpl > rpl) { 2089100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 209020054ef0SBlue Swirl } 209120054ef0SBlue Swirl } else { 209220054ef0SBlue Swirl if (dpl != rpl) { 2093100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 209420054ef0SBlue Swirl } 209520054ef0SBlue Swirl } 209620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 2097100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 209820054ef0SBlue Swirl } 2099eaa728eeSbellard 2100059368bcSRichard Henderson sa.sp += addend; 2101eaa728eeSbellard if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2102eaa728eeSbellard ((env->hflags & HF_CS64_MASK) && !is_iret))) { 21031235fc06Sths /* return to same privilege level */ 2104eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2105eaa728eeSbellard get_seg_base(e1, e2), 2106eaa728eeSbellard get_seg_limit(e1, e2), 2107eaa728eeSbellard e2); 2108eaa728eeSbellard } else { 2109eaa728eeSbellard /* return to different privilege level */ 2110eaa728eeSbellard #ifdef TARGET_X86_64 2111eaa728eeSbellard if (shift == 2) { 2112059368bcSRichard Henderson new_esp = popq(&sa); 2113059368bcSRichard Henderson new_ss = popq(&sa) & 0xffff; 2114eaa728eeSbellard } else 2115eaa728eeSbellard #endif 211620054ef0SBlue Swirl { 2117eaa728eeSbellard if (shift == 1) { 2118eaa728eeSbellard /* 32 bits */ 2119059368bcSRichard Henderson new_esp = popl(&sa); 2120059368bcSRichard Henderson new_ss = popl(&sa) & 0xffff; 2121eaa728eeSbellard } else { 2122eaa728eeSbellard /* 16 bits */ 2123059368bcSRichard Henderson new_esp = popw(&sa); 2124059368bcSRichard Henderson new_ss = popw(&sa); 2125eaa728eeSbellard } 212620054ef0SBlue Swirl } 2127d12d51d5Saliguori LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2128eaa728eeSbellard new_ss, new_esp); 2129eaa728eeSbellard if ((new_ss & 0xfffc) == 0) { 2130eaa728eeSbellard #ifdef TARGET_X86_64 2131eaa728eeSbellard /* NULL ss is allowed in long mode if cpl != 3 */ 2132eaa728eeSbellard /* XXX: test CS64? */ 2133eaa728eeSbellard if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2134eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2135eaa728eeSbellard 0, 0xffffffff, 2136eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2137eaa728eeSbellard DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2138eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 2139eaa728eeSbellard ss_e2 = DESC_B_MASK; /* XXX: should not be needed? 
*/ 2140eaa728eeSbellard } else 2141eaa728eeSbellard #endif 2142eaa728eeSbellard { 2143100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2144eaa728eeSbellard } 2145eaa728eeSbellard } else { 214620054ef0SBlue Swirl if ((new_ss & 3) != rpl) { 2147100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 214820054ef0SBlue Swirl } 2149100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2150100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 215120054ef0SBlue Swirl } 2152eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 2153eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 215420054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 2155100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 215620054ef0SBlue Swirl } 2157eaa728eeSbellard dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 215820054ef0SBlue Swirl if (dpl != rpl) { 2159100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 216020054ef0SBlue Swirl } 216120054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 2162100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 216320054ef0SBlue Swirl } 2164eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2165eaa728eeSbellard get_seg_base(ss_e1, ss_e2), 2166eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2167eaa728eeSbellard ss_e2); 2168eaa728eeSbellard } 2169eaa728eeSbellard 2170eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2171eaa728eeSbellard get_seg_base(e1, e2), 2172eaa728eeSbellard get_seg_limit(e1, e2), 2173eaa728eeSbellard e2); 2174059368bcSRichard Henderson sa.sp = new_esp; 2175eaa728eeSbellard #ifdef TARGET_X86_64 217620054ef0SBlue Swirl if (env->hflags & HF_CS64_MASK) { 2177059368bcSRichard Henderson sa.sp_mask = -1; 217820054ef0SBlue Swirl } else 2179eaa728eeSbellard #endif 218020054ef0SBlue Swirl { 2181059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 218220054ef0SBlue Swirl } 2183eaa728eeSbellard 2184eaa728eeSbellard /* validate data segments */ 21852999a0b2SBlue Swirl validate_seg(env, R_ES, rpl); 21862999a0b2SBlue Swirl validate_seg(env, R_DS, rpl); 21872999a0b2SBlue Swirl validate_seg(env, R_FS, rpl); 21882999a0b2SBlue Swirl validate_seg(env, R_GS, rpl); 2189eaa728eeSbellard 2190059368bcSRichard Henderson sa.sp += addend; 2191eaa728eeSbellard } 2192059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 2193eaa728eeSbellard env->eip = new_eip; 2194eaa728eeSbellard if (is_iret) { 2195eaa728eeSbellard /* NOTE: 'cpl' is the _old_ CPL */ 2196eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 219720054ef0SBlue Swirl if (cpl == 0) { 2198eaa728eeSbellard eflags_mask |= IOPL_MASK; 219920054ef0SBlue Swirl } 2200eaa728eeSbellard iopl = (env->eflags >> IOPL_SHIFT) & 3; 220120054ef0SBlue Swirl if (cpl <= iopl) { 2202eaa728eeSbellard eflags_mask |= IF_MASK; 220320054ef0SBlue Swirl } 220420054ef0SBlue Swirl if (shift == 0) { 2205eaa728eeSbellard eflags_mask &= 0xffff; 220620054ef0SBlue Swirl } 2207997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2208eaa728eeSbellard } 2209eaa728eeSbellard return; 2210eaa728eeSbellard 2211eaa728eeSbellard return_to_vm86: 2212059368bcSRichard Henderson new_esp = popl(&sa); 2213059368bcSRichard Henderson new_ss = popl(&sa); 2214059368bcSRichard Henderson new_es = popl(&sa); 2215059368bcSRichard Henderson new_ds = popl(&sa); 2216059368bcSRichard Henderson new_fs = popl(&sa); 
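/*
 * new_gs below is the last dword of the frame that was pushed when an
 * interrupt or exception taken in virtual-8086 mode entered the CPL-0
 * handler: EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS, popped in that
 * order (EIP, CS and EFLAGS were already consumed before branching to
 * return_to_vm86).
 */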
2217059368bcSRichard Henderson new_gs = popl(&sa); 2218eaa728eeSbellard 2219eaa728eeSbellard /* modify processor state */ 2220997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | 2221997ff0d9SBlue Swirl IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | 2222997ff0d9SBlue Swirl VIP_MASK); 22232999a0b2SBlue Swirl load_seg_vm(env, R_CS, new_cs & 0xffff); 22242999a0b2SBlue Swirl load_seg_vm(env, R_SS, new_ss & 0xffff); 22252999a0b2SBlue Swirl load_seg_vm(env, R_ES, new_es & 0xffff); 22262999a0b2SBlue Swirl load_seg_vm(env, R_DS, new_ds & 0xffff); 22272999a0b2SBlue Swirl load_seg_vm(env, R_FS, new_fs & 0xffff); 22282999a0b2SBlue Swirl load_seg_vm(env, R_GS, new_gs & 0xffff); 2229eaa728eeSbellard 2230eaa728eeSbellard env->eip = new_eip & 0xffff; 223108b3ded6Sliguang env->regs[R_ESP] = new_esp; 2232eaa728eeSbellard } 2233eaa728eeSbellard 22342999a0b2SBlue Swirl void helper_iret_protected(CPUX86State *env, int shift, int next_eip) 2235eaa728eeSbellard { 2236eaa728eeSbellard int tss_selector, type; 2237eaa728eeSbellard uint32_t e1, e2; 2238eaa728eeSbellard 2239eaa728eeSbellard /* specific case for TSS */ 2240eaa728eeSbellard if (env->eflags & NT_MASK) { 2241eaa728eeSbellard #ifdef TARGET_X86_64 224220054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 2243100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 224420054ef0SBlue Swirl } 2245eaa728eeSbellard #endif 2246100ec099SPavel Dovgalyuk tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); 224720054ef0SBlue Swirl if (tss_selector & 4) { 2248100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 224920054ef0SBlue Swirl } 2250100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { 2251100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 225220054ef0SBlue Swirl } 2253eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2254eaa728eeSbellard /* NOTE: we check both segment and busy TSS */ 225520054ef0SBlue Swirl if (type != 3) { 2256100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 225720054ef0SBlue Swirl } 2258100ec099SPavel Dovgalyuk switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); 2259eaa728eeSbellard } else { 2260100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 1, 0, GETPC()); 2261eaa728eeSbellard } 2262db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 2263eaa728eeSbellard } 2264eaa728eeSbellard 22652999a0b2SBlue Swirl void helper_lret_protected(CPUX86State *env, int shift, int addend) 2266eaa728eeSbellard { 2267100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 0, addend, GETPC()); 2268eaa728eeSbellard } 2269eaa728eeSbellard 22702999a0b2SBlue Swirl void helper_sysenter(CPUX86State *env) 2271eaa728eeSbellard { 2272eaa728eeSbellard if (env->sysenter_cs == 0) { 2273100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2274eaa728eeSbellard } 2275eaa728eeSbellard env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 22762436b61aSbalrog 22772436b61aSbalrog #ifdef TARGET_X86_64 22782436b61aSbalrog if (env->hflags & HF_LMA_MASK) { 22792436b61aSbalrog cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 22802436b61aSbalrog 0, 0xffffffff, 22812436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22822436b61aSbalrog DESC_S_MASK | 228320054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 228420054ef0SBlue Swirl DESC_L_MASK); 
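/*
 * SYSENTER builds flat ring-0 segments from IA32_SYSENTER_CS: CS is the
 * MSR value with RPL forced to 0 and SS is CS + 8; in long mode the CS
 * descriptor additionally carries the L bit, as loaded just above.
 */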
22852436b61aSbalrog } else 22862436b61aSbalrog #endif 22872436b61aSbalrog { 2288eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2289eaa728eeSbellard 0, 0xffffffff, 2290eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2291eaa728eeSbellard DESC_S_MASK | 2292eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 22932436b61aSbalrog } 2294eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2295eaa728eeSbellard 0, 0xffffffff, 2296eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2297eaa728eeSbellard DESC_S_MASK | 2298eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 229908b3ded6Sliguang env->regs[R_ESP] = env->sysenter_esp; 2300a78d0eabSliguang env->eip = env->sysenter_eip; 2301eaa728eeSbellard } 2302eaa728eeSbellard 23032999a0b2SBlue Swirl void helper_sysexit(CPUX86State *env, int dflag) 2304eaa728eeSbellard { 2305eaa728eeSbellard int cpl; 2306eaa728eeSbellard 2307eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2308eaa728eeSbellard if (env->sysenter_cs == 0 || cpl != 0) { 2309100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2310eaa728eeSbellard } 23112436b61aSbalrog #ifdef TARGET_X86_64 23122436b61aSbalrog if (dflag == 2) { 231320054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 231420054ef0SBlue Swirl 3, 0, 0xffffffff, 23152436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23162436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 231720054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 231820054ef0SBlue Swirl DESC_L_MASK); 231920054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 232020054ef0SBlue Swirl 3, 0, 0xffffffff, 23212436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23222436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 23232436b61aSbalrog DESC_W_MASK | DESC_A_MASK); 23242436b61aSbalrog } else 23252436b61aSbalrog #endif 23262436b61aSbalrog { 232720054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 232820054ef0SBlue Swirl 3, 0, 0xffffffff, 2329eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2330eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2331eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 233220054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 233320054ef0SBlue Swirl 3, 0, 0xffffffff, 2334eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2335eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2336eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 23372436b61aSbalrog } 233808b3ded6Sliguang env->regs[R_ESP] = env->regs[R_ECX]; 2339a78d0eabSliguang env->eip = env->regs[R_EDX]; 2340eaa728eeSbellard } 2341eaa728eeSbellard 23422999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) 2343eaa728eeSbellard { 2344eaa728eeSbellard unsigned int limit; 2345ae541c0eSPaolo Bonzini uint32_t e1, e2, selector; 2346eaa728eeSbellard int rpl, dpl, cpl, type; 2347eaa728eeSbellard 2348eaa728eeSbellard selector = selector1 & 0xffff; 2349ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS); 235020054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2351dc1ded53Saliguori goto fail; 235220054ef0SBlue Swirl } 2353100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2354eaa728eeSbellard goto fail; 235520054ef0SBlue Swirl } 2356eaa728eeSbellard rpl = selector & 3; 2357eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 
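/*
 * LSL only reports a limit for descriptors visible at the current
 * privilege level: data and non-conforming code segments need
 * dpl >= max(cpl, rpl), and of the system types only LDT and TSS
 * descriptors qualify.  Success is signalled through ZF; since
 * CC_OP == CC_OP_EFLAGS here, the flags live in CC_SRC, which is why
 * CC_Z is set or cleared directly, e.g. (illustrative only):
 *     uint32_t limit = helper_lsl(env, sel);  // valid iff ZF ends up set
 */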
2358eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2359eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2360eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2361eaa728eeSbellard             /* conforming */
2362eaa728eeSbellard         } else {
236320054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2364eaa728eeSbellard                 goto fail;
2365eaa728eeSbellard             }
236620054ef0SBlue Swirl         }
2367eaa728eeSbellard     } else {
2368eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2369eaa728eeSbellard         switch (type) {
2370eaa728eeSbellard         case 1:
2371eaa728eeSbellard         case 2:
2372eaa728eeSbellard         case 3:
2373eaa728eeSbellard         case 9:
2374eaa728eeSbellard         case 11:
2375eaa728eeSbellard             break;
2376eaa728eeSbellard         default:
2377eaa728eeSbellard             goto fail;
2378eaa728eeSbellard         }
2379eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2380eaa728eeSbellard         fail:
2381ae541c0eSPaolo Bonzini             CC_SRC &= ~CC_Z;
2382eaa728eeSbellard             return 0;
2383eaa728eeSbellard         }
2384eaa728eeSbellard     }
2385eaa728eeSbellard     limit = get_seg_limit(e1, e2);
2386ae541c0eSPaolo Bonzini     CC_SRC |= CC_Z;
2387eaa728eeSbellard     return limit;
2388eaa728eeSbellard }
2389eaa728eeSbellard
23902999a0b2SBlue Swirl target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2391eaa728eeSbellard {
2392ae541c0eSPaolo Bonzini     uint32_t e1, e2, selector;
2393eaa728eeSbellard     int rpl, dpl, cpl, type;
2394eaa728eeSbellard
2395eaa728eeSbellard     selector = selector1 & 0xffff;
2396ae541c0eSPaolo Bonzini     assert(CC_OP == CC_OP_EFLAGS);
239720054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2398eaa728eeSbellard         goto fail;
239920054ef0SBlue Swirl     }
2400100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2401eaa728eeSbellard         goto fail;
240220054ef0SBlue Swirl     }
2403eaa728eeSbellard     rpl = selector & 3;
2404eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2405eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2406eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2407eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2408eaa728eeSbellard             /* conforming */
2409eaa728eeSbellard         } else {
241020054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2411eaa728eeSbellard                 goto fail;
2412eaa728eeSbellard             }
241320054ef0SBlue Swirl         }
2414eaa728eeSbellard     } else {
2415eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2416eaa728eeSbellard         switch (type) {
2417eaa728eeSbellard         case 1:
2418eaa728eeSbellard         case 2:
2419eaa728eeSbellard         case 3:
2420eaa728eeSbellard         case 4:
2421eaa728eeSbellard         case 5:
2422eaa728eeSbellard         case 9:
2423eaa728eeSbellard         case 11:
2424eaa728eeSbellard         case 12:
2425eaa728eeSbellard             break;
2426eaa728eeSbellard         default:
2427eaa728eeSbellard             goto fail;
2428eaa728eeSbellard         }
2429eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2430eaa728eeSbellard         fail:
2431ae541c0eSPaolo Bonzini             CC_SRC &= ~CC_Z;
2432eaa728eeSbellard             return 0;
2433eaa728eeSbellard         }
2434eaa728eeSbellard     }
2435ae541c0eSPaolo Bonzini     CC_SRC |= CC_Z;
2436eaa728eeSbellard     return e2 & 0x00f0ff00;
2437eaa728eeSbellard }
2438eaa728eeSbellard
24392999a0b2SBlue Swirl void helper_verr(CPUX86State *env, target_ulong selector1)
2440eaa728eeSbellard {
2441eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2442eaa728eeSbellard     int rpl, dpl, cpl;
2443eaa728eeSbellard
2444eaa728eeSbellard     selector = selector1 & 0xffff;
2445abdcc5c8SPaolo Bonzini     eflags = cpu_cc_compute_all(env) | CC_Z;
244620054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2447eaa728eeSbellard         goto fail;
244820054ef0SBlue Swirl     }
2449100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
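        /* e.g. the selector lies outside the GDT/LDT limit: VERR reports
         * this by clearing ZF rather than raising an exception */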
2450eaa728eeSbellard         goto fail;
245120054ef0SBlue Swirl     }
245220054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2453eaa728eeSbellard         goto fail;
245420054ef0SBlue Swirl     }
2455eaa728eeSbellard     rpl = selector & 3;
2456eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2457eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2458eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
245920054ef0SBlue Swirl         if (!(e2 & DESC_R_MASK)) {
2460eaa728eeSbellard             goto fail;
246120054ef0SBlue Swirl         }
2462eaa728eeSbellard         if (!(e2 & DESC_C_MASK)) {
246320054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2464eaa728eeSbellard                 goto fail;
2465eaa728eeSbellard             }
246620054ef0SBlue Swirl         }
2467eaa728eeSbellard     } else {
2468eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2469eaa728eeSbellard         fail:
2470abdcc5c8SPaolo Bonzini             eflags &= ~CC_Z;
2471eaa728eeSbellard         }
2472eaa728eeSbellard     }
2473abdcc5c8SPaolo Bonzini     CC_SRC = eflags;
2474abdcc5c8SPaolo Bonzini     CC_OP = CC_OP_EFLAGS;
2475eaa728eeSbellard }
2476eaa728eeSbellard
24772999a0b2SBlue Swirl void helper_verw(CPUX86State *env, target_ulong selector1)
2478eaa728eeSbellard {
2479eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2480eaa728eeSbellard     int rpl, dpl, cpl;
2481eaa728eeSbellard
2482eaa728eeSbellard     selector = selector1 & 0xffff;
2483abdcc5c8SPaolo Bonzini     eflags = cpu_cc_compute_all(env) | CC_Z;
248420054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2485eaa728eeSbellard         goto fail;
248620054ef0SBlue Swirl     }
2487100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2488eaa728eeSbellard         goto fail;
248920054ef0SBlue Swirl     }
249020054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2491eaa728eeSbellard         goto fail;
249220054ef0SBlue Swirl     }
2493eaa728eeSbellard     rpl = selector & 3;
2494eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2495eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2496eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
2497eaa728eeSbellard         goto fail;
2498eaa728eeSbellard     } else {
249920054ef0SBlue Swirl         if (dpl < cpl || dpl < rpl) {
2500eaa728eeSbellard             goto fail;
250120054ef0SBlue Swirl         }
2502eaa728eeSbellard         if (!(e2 & DESC_W_MASK)) {
2503eaa728eeSbellard         fail:
2504abdcc5c8SPaolo Bonzini             eflags &= ~CC_Z;
2505eaa728eeSbellard         }
2506eaa728eeSbellard     }
2507abdcc5c8SPaolo Bonzini     CC_SRC = eflags;
2508abdcc5c8SPaolo Bonzini     CC_OP = CC_OP_EFLAGS;
2509eaa728eeSbellard }
2510