/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;
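
/*
 * Stack push/pop helpers.  The linear address of every access is
 * ss_base plus the stack pointer wrapped by sp_mask (0xffff for a
 * 16-bit stack, 0xffffffff for a 32-bit one), and the access uses the
 * MMU index and return address captured in the StackAccess descriptor.
 */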
static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}

static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}
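
/*
 * Descriptors are handled as two 32-bit words throughout this file:
 * e1 is the low word (limit 15:0, base 15:0) and e2 is the high word
 * (base 23:16, type/DPL/P, limit 19:16, AVL/L/D-B/G, base 31:24),
 * which is the layout decoded by get_seg_base() and get_seg_limit()
 * below.
 */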
/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
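
/*
 * Fetch the inner-level stack pointer and stack segment from the
 * current TSS: a 32-bit TSS keeps an ESP/SS pair per ring at offset
 * 8 * dpl + 4, a 16-bit TSS keeps an SP/SS pair at offset 4 * dpl + 2,
 * which is what the (dpl * 4 + 2) << shift computation below expands to.
 */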
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
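
/*
 * Set or clear the busy flag of a TSS descriptor directly in the GDT.
 * The flag lives in the descriptor type field of the high word
 * (DESC_TSS_BUSY_MASK), so e.g. a busy 32-bit TSS has type 11 instead
 * of 9.
 */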
static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int mmu_index, index;
    target_ulong ptr;
    X86Access old, new;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
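
    /*
     * The new TSS must live in the GDT and be large enough for its
     * format: at least 103 (0x67) bytes for a 32-bit TSS and 43 (0x2b)
     * bytes for a 16-bit one, per the limits computed above.
     */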
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* X86Access avoids memory exceptions during the task switch */
    mmu_index = cpu_mmu_index_kernel(env);
    access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max,
                       MMU_DATA_STORE, mmu_index, retaddr);

    if (source == SWITCH_TSS_CALL) {
        /* Probe for future write of parent task */
        probe_access(env, tss_base, 2, MMU_DATA_STORE,
                     mmu_index, retaddr);
    }
    access_prepare_mmu(&new, env, tss_base, tss_limit,
                       MMU_DATA_LOAD, mmu_index, retaddr);

    /* save the current state in the old TSS */
    old_eflags = cpu_compute_eflags(env);
    if (old_type & 8) {
        /* 32 bit */
        access_stl(&old, env->tr.base + 0x20, next_eip);
        access_stl(&old, env->tr.base + 0x24, old_eflags);
        access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            access_stw(&old, env->tr.base + (0x48 + i * 4),
                       env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        access_stw(&old, env->tr.base + 0x0e, next_eip);
        access_stw(&old, env->tr.base + 0x10, old_eflags);
        access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            access_stw(&old, env->tr.base + (0x22 + i * 2),
                       env->segs[i].selector);
        }
    }
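
    /*
     * For reference, the offsets used above and below follow the
     * architectural TSS layouts: a 32-bit TSS holds CR3 at 0x1c, EIP at
     * 0x20, EFLAGS at 0x24, EAX..EDI at 0x28..0x44, the six segment
     * selectors at 0x48..0x5c, the LDT selector at 0x60 and the T
     * bit/I/O map base at 0x64; a 16-bit TSS holds IP at 0x0e, FLAGS at
     * 0x10, AX..DI at 0x12..0x20, ES/CS/SS/DS at 0x22..0x28 and the LDT
     * selector at 0x2a.
     */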
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = access_ldl(&new, tss_base + 0x1c);
        new_eip = access_ldl(&new, tss_base + 0x20);
        new_eflags = access_ldl(&new, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
        }
        new_ldt = access_ldw(&new, tss_base + 0x60);
        new_trap = access_ldl(&new, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = access_ldw(&new, tss_base + 0x0e);
        new_eflags = access_ldw(&new, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
        }
        new_ldt = access_ldw(&new, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }

    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
        if (old_type & 8) {
            access_stl(&old, env->tr.base + 0x24, old_eflags);
        } else {
            access_stw(&old, env->tr.base + 0x10, old_eflags);
        }
    }
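
    /*
     * A CALL-style switch nests the tasks: the outgoing TSS selector is
     * stored as the back link in the first word of the new TSS and NT
     * is set in the new EFLAGS so that a later IRET switches back to
     * the outgoing task.
     */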
    if (source == SWITCH_TSS_CALL) {
        /*
         * Thanks to the probe_access above, we know the first two
         * bytes addressed by &new are writable too.
         */
        access_stw(&new, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */

    /* now if an exception occurs, it will occur in the next task context */

    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       a possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
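
    /*
     * The new CPL is the RPL of the incoming CS selector; every segment
     * register is reloaded with full descriptor checks, raising #TS in
     * the context of the new task if one of them fails.
     */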
    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else including reserved exception is a fault. */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
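
/*
 * Deliver an interrupt or exception through the IDT while the CPU is in
 * protected mode: read and validate the gate, check its DPL against CPL
 * for software interrupts, either switch tasks (task gate) or optionally
 * switch to an inner-level stack fetched from the TSS, push the return
 * frame plus any error code, and finally load the handler's CS:EIP.
 */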
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, eflags;
    int vm86 = env->eflags & VM_MASK;
    StackAccess sa;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                sa.sp_mask = 0xffffffff;
            } else {
                sa.sp_mask = 0xffff;
            }
            sa.sp = env->regs[R_ESP];
            sa.ss_base = env->segs[R_SS].base;
            if (shift) {
                pushl(&sa, error_code);
            } else {
                pushw(&sa, error_code);
            }
            SET_ESP(sa.sp, sa.sp_mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        uint32_t esp;
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sa.sp = esp;
        sa.sp_mask = get_sp_mask(ss_e2);
        sa.ss_base = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
        sa.ss_base = env->segs[R_SS].base;
    }
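
    /*
     * Stack frame pushed below, from higher to lower addresses: GS, FS,
     * DS, ES (only when interrupting vm86 code), then SS and ESP (only
     * when switching to an inner stack), then EFLAGS, CS, EIP and
     * finally the error code if the exception defines one.  Slots are
     * 4 bytes wide for a 386 gate and 2 bytes for a 286 gate.
     */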
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF, Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                pushl(&sa, env->segs[R_GS].selector);
                pushl(&sa, env->segs[R_FS].selector);
                pushl(&sa, env->segs[R_DS].selector);
                pushl(&sa, env->segs[R_ES].selector);
            }
            pushl(&sa, env->segs[R_SS].selector);
            pushl(&sa, env->regs[R_ESP]);
        }
        pushl(&sa, eflags);
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, old_eip);
        if (has_error_code) {
            pushl(&sa, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                pushw(&sa, env->segs[R_GS].selector);
                pushw(&sa, env->segs[R_FS].selector);
                pushw(&sa, env->segs[R_DS].selector);
                pushw(&sa, env->segs[R_ES].selector);
            }
            pushw(&sa, env->segs[R_SS].selector);
            pushw(&sa, env->regs[R_ESP]);
        }
        pushw(&sa, eflags);
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, old_eip);
        if (has_error_code) {
            pushw(&sa, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
                               get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(sa.sp, sa.sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

static void pushq(StackAccess *sa, uint64_t val)
{
    sa->sp -= 8;
    cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
}

static uint64_t popq(StackAccess *sa)
{
    uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
    sa->sp += 8;
    return ret;
}

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);
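
    /*
     * The RSP values stored in the 64-bit TSS must be canonical: all
     * bits above bit 47 (bit 56 with LA57 paging) have to be copies of
     * the top implemented bit, otherwise a stack fault is raised.
     */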
    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss, eflags;
    target_ulong old_eip, offset;
    bool set_rf;
    StackAccess sa;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);
    sa.sp_mask = -1;
    sa.ss_base = 0;
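
    /*
     * If the gate's IST field is non-zero, RSP is always taken from the
     * corresponding IST slot of the 64-bit TSS: get_rsp_from_tss() maps
     * levels 0-2 to the RSP0-RSP2 fields and level ist + 3 to IST1-IST7.
     */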
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
    }
    sa.sp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected.  */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    pushq(&sa, env->segs[R_SS].selector);
    pushq(&sa, env->regs[R_ESP]);
    pushq(&sa, eflags);
    pushq(&sa, env->segs[R_CS].selector);
    pushq(&sa, old_eip);
    if (has_error_code) {
        pushq(&sa, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = sa.sp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */
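
/*
 * SYSRET: return to user mode after a SYSCALL fast system call.  The
 * target CS and SS selectors are derived from MSR_STAR bits 63:48; in
 * long mode RIP is reloaded from RCX and RFLAGS from R11, while outside
 * long mode EIP comes from ECX and only IF is forced set.
 */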
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

/* real mode interrupt */
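/*
 * In real mode the IDT is the 16-bit interrupt vector table: each entry
 * is a 4-byte offset:segment pair at idt.base + intno * 4, and only
 * FLAGS, CS and IP are pushed on the caller's stack.
 */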
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int selector;
    uint32_t offset;
    uint32_t old_cs, old_eip;
    StackAccess sa;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);

    sa.env = env;
    sa.ra = 0;
    sa.sp = env->regs[R_ESP];
    sa.sp_mask = 0xffff;
    sa.ss_base = env->segs[R_SS].base;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    pushw(&sa, cpu_compute_eflags(env));
    pushw(&sa, old_cs);
    pushw(&sa, old_eip);

    /* update processor state */
    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }
Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 123620054ef0SBlue Swirl event_inj & ~SVM_EVTINJ_VALID); 12372ed51f5bSaliguori } 123800ea18d1Saliguori #endif 1239eaa728eeSbellard } 1240eaa728eeSbellard 12412999a0b2SBlue Swirl void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1242e694d4e2SBlue Swirl { 12436aa9e42fSRichard Henderson do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1244e694d4e2SBlue Swirl } 1245e694d4e2SBlue Swirl 12462999a0b2SBlue Swirl void helper_lldt(CPUX86State *env, int selector) 1247eaa728eeSbellard { 1248eaa728eeSbellard SegmentCache *dt; 1249eaa728eeSbellard uint32_t e1, e2; 1250eaa728eeSbellard int index, entry_limit; 1251eaa728eeSbellard target_ulong ptr; 1252eaa728eeSbellard 1253eaa728eeSbellard selector &= 0xffff; 1254eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1255eaa728eeSbellard /* XXX: NULL selector case: invalid LDT */ 1256eaa728eeSbellard env->ldt.base = 0; 1257eaa728eeSbellard env->ldt.limit = 0; 1258eaa728eeSbellard } else { 125920054ef0SBlue Swirl if (selector & 0x4) { 1260100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 126120054ef0SBlue Swirl } 1262eaa728eeSbellard dt = &env->gdt; 1263eaa728eeSbellard index = selector & ~7; 1264eaa728eeSbellard #ifdef TARGET_X86_64 126520054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1266eaa728eeSbellard entry_limit = 15; 126720054ef0SBlue Swirl } else 1268eaa728eeSbellard #endif 126920054ef0SBlue Swirl { 1270eaa728eeSbellard entry_limit = 7; 127120054ef0SBlue Swirl } 127220054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1273100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 127420054ef0SBlue Swirl } 1275eaa728eeSbellard ptr = dt->base + index; 1276100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1277100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 127820054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1279100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 128020054ef0SBlue Swirl } 128120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1282100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 128320054ef0SBlue Swirl } 1284eaa728eeSbellard #ifdef TARGET_X86_64 1285eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1286eaa728eeSbellard uint32_t e3; 128720054ef0SBlue Swirl 1288100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1289eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1290eaa728eeSbellard env->ldt.base |= (target_ulong)e3 << 32; 1291eaa728eeSbellard } else 1292eaa728eeSbellard #endif 1293eaa728eeSbellard { 1294eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1295eaa728eeSbellard } 1296eaa728eeSbellard } 1297eaa728eeSbellard env->ldt.selector = selector; 1298eaa728eeSbellard } 1299eaa728eeSbellard 13002999a0b2SBlue Swirl void helper_ltr(CPUX86State *env, int selector) 1301eaa728eeSbellard { 1302eaa728eeSbellard SegmentCache *dt; 1303eaa728eeSbellard uint32_t e1, e2; 1304eaa728eeSbellard int index, type, entry_limit; 1305eaa728eeSbellard target_ulong ptr; 1306eaa728eeSbellard 1307eaa728eeSbellard selector &= 0xffff; 1308eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1309eaa728eeSbellard /* NULL selector case: invalid TR */ 1310eaa728eeSbellard env->tr.base = 0; 1311eaa728eeSbellard env->tr.limit = 0; 1312eaa728eeSbellard env->tr.flags = 0; 1313eaa728eeSbellard } 
else { 131420054ef0SBlue Swirl if (selector & 0x4) { 1315100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 131620054ef0SBlue Swirl } 1317eaa728eeSbellard dt = &env->gdt; 1318eaa728eeSbellard index = selector & ~7; 1319eaa728eeSbellard #ifdef TARGET_X86_64 132020054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1321eaa728eeSbellard entry_limit = 15; 132220054ef0SBlue Swirl } else 1323eaa728eeSbellard #endif 132420054ef0SBlue Swirl { 1325eaa728eeSbellard entry_limit = 7; 132620054ef0SBlue Swirl } 132720054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1328100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 132920054ef0SBlue Swirl } 1330eaa728eeSbellard ptr = dt->base + index; 1331100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1332100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1333eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1334eaa728eeSbellard if ((e2 & DESC_S_MASK) || 133520054ef0SBlue Swirl (type != 1 && type != 9)) { 1336100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 133720054ef0SBlue Swirl } 133820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1339100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 134020054ef0SBlue Swirl } 1341eaa728eeSbellard #ifdef TARGET_X86_64 1342eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1343eaa728eeSbellard uint32_t e3, e4; 134420054ef0SBlue Swirl 1345100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1346100ec099SPavel Dovgalyuk e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 134720054ef0SBlue Swirl if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1348100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 134920054ef0SBlue Swirl } 1350eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1351eaa728eeSbellard env->tr.base |= (target_ulong)e3 << 32; 1352eaa728eeSbellard } else 1353eaa728eeSbellard #endif 1354eaa728eeSbellard { 1355eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1356eaa728eeSbellard } 1357eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 1358100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1359eaa728eeSbellard } 1360eaa728eeSbellard env->tr.selector = selector; 1361eaa728eeSbellard } 1362eaa728eeSbellard 1363eaa728eeSbellard /* only works if protected mode and not VM86. 
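   Checks the descriptor type, DPL/RPL against CPL and the present bit, sets the accessed bit in the descriptor, then loads the segment cache.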
seg_reg must be != R_CS */ 13642999a0b2SBlue Swirl void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1365eaa728eeSbellard { 1366eaa728eeSbellard uint32_t e1, e2; 1367eaa728eeSbellard int cpl, dpl, rpl; 1368eaa728eeSbellard SegmentCache *dt; 1369eaa728eeSbellard int index; 1370eaa728eeSbellard target_ulong ptr; 1371eaa728eeSbellard 1372eaa728eeSbellard selector &= 0xffff; 1373eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1374eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1375eaa728eeSbellard /* null selector case */ 1376eaa728eeSbellard if (seg_reg == R_SS 1377eaa728eeSbellard #ifdef TARGET_X86_64 1378eaa728eeSbellard && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1379eaa728eeSbellard #endif 138020054ef0SBlue Swirl ) { 1381100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 138220054ef0SBlue Swirl } 1383eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1384eaa728eeSbellard } else { 1385eaa728eeSbellard 138620054ef0SBlue Swirl if (selector & 0x4) { 1387eaa728eeSbellard dt = &env->ldt; 138820054ef0SBlue Swirl } else { 1389eaa728eeSbellard dt = &env->gdt; 139020054ef0SBlue Swirl } 1391eaa728eeSbellard index = selector & ~7; 139220054ef0SBlue Swirl if ((index + 7) > dt->limit) { 1393100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 139420054ef0SBlue Swirl } 1395eaa728eeSbellard ptr = dt->base + index; 1396100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1397100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1398eaa728eeSbellard 139920054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 1400100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 140120054ef0SBlue Swirl } 1402eaa728eeSbellard rpl = selector & 3; 1403eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1404eaa728eeSbellard if (seg_reg == R_SS) { 1405eaa728eeSbellard /* must be writable segment */ 140620054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1407100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 140820054ef0SBlue Swirl } 140920054ef0SBlue Swirl if (rpl != cpl || dpl != cpl) { 1410100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141120054ef0SBlue Swirl } 1412eaa728eeSbellard } else { 1413eaa728eeSbellard /* must be readable segment */ 141420054ef0SBlue Swirl if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1415100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141620054ef0SBlue Swirl } 1417eaa728eeSbellard 1418eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1419eaa728eeSbellard /* if not conforming code, test rights */ 142020054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1421100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1422eaa728eeSbellard } 1423eaa728eeSbellard } 142420054ef0SBlue Swirl } 1425eaa728eeSbellard 1426eaa728eeSbellard if (!(e2 & DESC_P_MASK)) { 142720054ef0SBlue Swirl if (seg_reg == R_SS) { 1428100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 142920054ef0SBlue Swirl } else { 1430100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1431eaa728eeSbellard } 143220054ef0SBlue Swirl } 1433eaa728eeSbellard 1434eaa728eeSbellard /* set the access bit if not already set */ 1435eaa728eeSbellard if (!(e2 & DESC_A_MASK)) { 
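            /* the updated access rights are written back to the GDT/LDT entry */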
1436eaa728eeSbellard e2 |= DESC_A_MASK; 1437100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1438eaa728eeSbellard } 1439eaa728eeSbellard 1440eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 1441eaa728eeSbellard get_seg_base(e1, e2), 1442eaa728eeSbellard get_seg_limit(e1, e2), 1443eaa728eeSbellard e2); 1444eaa728eeSbellard #if 0 144593fcfe39Saliguori qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1446eaa728eeSbellard selector, (unsigned long)sc->base, sc->limit, sc->flags); 1447eaa728eeSbellard #endif 1448eaa728eeSbellard } 1449eaa728eeSbellard } 1450eaa728eeSbellard 1451eaa728eeSbellard /* protected mode jump */ 14522999a0b2SBlue Swirl void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1453100ec099SPavel Dovgalyuk target_ulong next_eip) 1454eaa728eeSbellard { 1455eaa728eeSbellard int gate_cs, type; 1456eaa728eeSbellard uint32_t e1, e2, cpl, dpl, rpl, limit; 1457eaa728eeSbellard 145820054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1459100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 146020054ef0SBlue Swirl } 1461100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1462100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 146320054ef0SBlue Swirl } 1464eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1465eaa728eeSbellard if (e2 & DESC_S_MASK) { 146620054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1467100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 146820054ef0SBlue Swirl } 1469eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1470eaa728eeSbellard if (e2 & DESC_C_MASK) { 1471eaa728eeSbellard /* conforming code segment */ 147220054ef0SBlue Swirl if (dpl > cpl) { 1473100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 147420054ef0SBlue Swirl } 1475eaa728eeSbellard } else { 1476eaa728eeSbellard /* non conforming code segment */ 1477eaa728eeSbellard rpl = new_cs & 3; 147820054ef0SBlue Swirl if (rpl > cpl) { 1479100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1480eaa728eeSbellard } 148120054ef0SBlue Swirl if (dpl != cpl) { 1482100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 148320054ef0SBlue Swirl } 148420054ef0SBlue Swirl } 148520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1486100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 148720054ef0SBlue Swirl } 1488eaa728eeSbellard limit = get_seg_limit(e1, e2); 1489eaa728eeSbellard if (new_eip > limit && 1490db7196dbSAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1491db7196dbSAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 149220054ef0SBlue Swirl } 1493eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1494eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1495a78d0eabSliguang env->eip = new_eip; 1496eaa728eeSbellard } else { 1497eaa728eeSbellard /* jump to call or task gate */ 1498eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1499eaa728eeSbellard rpl = new_cs & 3; 1500eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1501eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 15020aca0605SAndrew Oates 15030aca0605SAndrew Oates #ifdef TARGET_X86_64 15040aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15050aca0605SAndrew Oates if (type != 12) { 15060aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 15070aca0605SAndrew Oates } 15080aca0605SAndrew Oates } 15090aca0605SAndrew Oates #endif 1510eaa728eeSbellard switch (type) { 1511eaa728eeSbellard case 1: /* 286 TSS */ 1512eaa728eeSbellard case 9: /* 386 TSS */ 1513eaa728eeSbellard case 5: /* task gate */ 151420054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1515100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 151620054ef0SBlue Swirl } 1517100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1518eaa728eeSbellard break; 1519eaa728eeSbellard case 4: /* 286 call gate */ 1520eaa728eeSbellard case 12: /* 386 call gate */ 152120054ef0SBlue Swirl if ((dpl < cpl) || (dpl < rpl)) { 1522100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 152320054ef0SBlue Swirl } 152420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1525100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 152620054ef0SBlue Swirl } 1527eaa728eeSbellard gate_cs = e1 >> 16; 1528eaa728eeSbellard new_eip = (e1 & 0xffff); 152920054ef0SBlue Swirl if (type == 12) { 1530eaa728eeSbellard new_eip |= (e2 & 0xffff0000); 153120054ef0SBlue Swirl } 15320aca0605SAndrew Oates 15330aca0605SAndrew Oates #ifdef TARGET_X86_64 15340aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15350aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 15360aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 15370aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15380aca0605SAndrew Oates GETPC()); 15390aca0605SAndrew Oates } 15400aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 15410aca0605SAndrew Oates if (type != 0) { 15420aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15430aca0605SAndrew Oates GETPC()); 15440aca0605SAndrew Oates } 15450aca0605SAndrew Oates new_eip |= ((target_ulong)e1) << 32; 15460aca0605SAndrew Oates } 15470aca0605SAndrew Oates #endif 15480aca0605SAndrew Oates 1549100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1550100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 155120054ef0SBlue Swirl } 1552eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1553eaa728eeSbellard /* must be code segment */ 1554eaa728eeSbellard if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 155520054ef0SBlue Swirl (DESC_S_MASK | DESC_CS_MASK))) { 1556100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 155720054ef0SBlue Swirl } 1558eaa728eeSbellard if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 155920054ef0SBlue Swirl (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1560100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 156120054ef0SBlue Swirl } 15620aca0605SAndrew Oates #ifdef TARGET_X86_64 15630aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15640aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 15650aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15660aca0605SAndrew Oates } 15670aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 15680aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15690aca0605SAndrew Oates } 15700aca0605SAndrew Oates } 15710aca0605SAndrew Oates #endif 157220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1573100ec099SPavel Dovgalyuk 
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 157420054ef0SBlue Swirl } 1575eaa728eeSbellard limit = get_seg_limit(e1, e2); 15760aca0605SAndrew Oates if (new_eip > limit && 15770aca0605SAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1578100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 157920054ef0SBlue Swirl } 1580eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1581eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1582a78d0eabSliguang env->eip = new_eip; 1583eaa728eeSbellard break; 1584eaa728eeSbellard default: 1585100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1586eaa728eeSbellard break; 1587eaa728eeSbellard } 1588eaa728eeSbellard } 1589eaa728eeSbellard } 1590eaa728eeSbellard 1591eaa728eeSbellard /* real mode call */ 15928c03ab9fSRichard Henderson void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, 15938c03ab9fSRichard Henderson int shift, uint32_t next_eip) 1594eaa728eeSbellard { 1595059368bcSRichard Henderson StackAccess sa; 1596eaa728eeSbellard 1597059368bcSRichard Henderson sa.env = env; 1598059368bcSRichard Henderson sa.ra = GETPC(); 1599059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1600059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1601059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 16028053862aSPaolo Bonzini sa.mmu_index = cpu_mmu_index_kernel(env); 1603059368bcSRichard Henderson 1604eaa728eeSbellard if (shift) { 1605059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1606059368bcSRichard Henderson pushl(&sa, next_eip); 1607eaa728eeSbellard } else { 1608059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1609059368bcSRichard Henderson pushw(&sa, next_eip); 1610eaa728eeSbellard } 1611eaa728eeSbellard 1612059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1613eaa728eeSbellard env->eip = new_eip; 1614eaa728eeSbellard env->segs[R_CS].selector = new_cs; 1615eaa728eeSbellard env->segs[R_CS].base = (new_cs << 4); 1616eaa728eeSbellard } 1617eaa728eeSbellard 1618eaa728eeSbellard /* protected mode call */ 16192999a0b2SBlue Swirl void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1620100ec099SPavel Dovgalyuk int shift, target_ulong next_eip) 1621eaa728eeSbellard { 1622eaa728eeSbellard int new_stack, i; 16230aca0605SAndrew Oates uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1624059368bcSRichard Henderson uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; 1625eaa728eeSbellard uint32_t val, limit, old_sp_mask; 1626059368bcSRichard Henderson target_ulong old_ssp, offset; 1627059368bcSRichard Henderson StackAccess sa; 1628eaa728eeSbellard 16290aca0605SAndrew Oates LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 16306aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 163120054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1632100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 163320054ef0SBlue Swirl } 1634100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1635100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 163620054ef0SBlue Swirl } 1637eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1638d12d51d5Saliguori LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1639059368bcSRichard Henderson 1640059368bcSRichard Henderson sa.env = env; 1641059368bcSRichard Henderson sa.ra = GETPC(); 
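    /* all stack pushes in this helper use the MMU index resolved below */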
16428053862aSPaolo Bonzini sa.mmu_index = cpu_mmu_index_kernel(env); 1643059368bcSRichard Henderson 1644eaa728eeSbellard if (e2 & DESC_S_MASK) { 164520054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1646100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 164720054ef0SBlue Swirl } 1648eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1649eaa728eeSbellard if (e2 & DESC_C_MASK) { 1650eaa728eeSbellard /* conforming code segment */ 165120054ef0SBlue Swirl if (dpl > cpl) { 1652100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 165320054ef0SBlue Swirl } 1654eaa728eeSbellard } else { 1655eaa728eeSbellard /* non conforming code segment */ 1656eaa728eeSbellard rpl = new_cs & 3; 165720054ef0SBlue Swirl if (rpl > cpl) { 1658100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1659eaa728eeSbellard } 166020054ef0SBlue Swirl if (dpl != cpl) { 1661100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 166220054ef0SBlue Swirl } 166320054ef0SBlue Swirl } 166420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1665100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 166620054ef0SBlue Swirl } 1667eaa728eeSbellard 1668eaa728eeSbellard #ifdef TARGET_X86_64 1669eaa728eeSbellard /* XXX: check 16/32 bit cases in long mode */ 1670eaa728eeSbellard if (shift == 2) { 1671eaa728eeSbellard /* 64 bit case */ 1672059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1673059368bcSRichard Henderson sa.sp_mask = -1; 1674059368bcSRichard Henderson sa.ss_base = 0; 1675059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1676059368bcSRichard Henderson pushq(&sa, next_eip); 1677eaa728eeSbellard /* from this point, not restartable */ 1678059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1679eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1680eaa728eeSbellard get_seg_base(e1, e2), 1681eaa728eeSbellard get_seg_limit(e1, e2), e2); 1682a78d0eabSliguang env->eip = new_eip; 1683eaa728eeSbellard } else 1684eaa728eeSbellard #endif 1685eaa728eeSbellard { 1686059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1687059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1688059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1689eaa728eeSbellard if (shift) { 1690059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1691059368bcSRichard Henderson pushl(&sa, next_eip); 1692eaa728eeSbellard } else { 1693059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1694059368bcSRichard Henderson pushw(&sa, next_eip); 1695eaa728eeSbellard } 1696eaa728eeSbellard 1697eaa728eeSbellard limit = get_seg_limit(e1, e2); 169820054ef0SBlue Swirl if (new_eip > limit) { 1699100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 170020054ef0SBlue Swirl } 1701eaa728eeSbellard /* from this point, not restartable */ 1702059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1703eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1704eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1705a78d0eabSliguang env->eip = new_eip; 1706eaa728eeSbellard } 1707eaa728eeSbellard } else { 1708eaa728eeSbellard /* check gate type */ 1709eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1710eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1711eaa728eeSbellard rpl = new_cs & 3; 17120aca0605SAndrew Oates 17130aca0605SAndrew Oates #ifdef 
TARGET_X86_64 17140aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17150aca0605SAndrew Oates if (type != 12) { 17160aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 17170aca0605SAndrew Oates } 17180aca0605SAndrew Oates } 17190aca0605SAndrew Oates #endif 17200aca0605SAndrew Oates 1721eaa728eeSbellard switch (type) { 1722eaa728eeSbellard case 1: /* available 286 TSS */ 1723eaa728eeSbellard case 9: /* available 386 TSS */ 1724eaa728eeSbellard case 5: /* task gate */ 172520054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1726100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 172720054ef0SBlue Swirl } 1728100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1729eaa728eeSbellard return; 1730eaa728eeSbellard case 4: /* 286 call gate */ 1731eaa728eeSbellard case 12: /* 386 call gate */ 1732eaa728eeSbellard break; 1733eaa728eeSbellard default: 1734100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1735eaa728eeSbellard break; 1736eaa728eeSbellard } 1737eaa728eeSbellard shift = type >> 3; 1738eaa728eeSbellard 173920054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1740100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 174120054ef0SBlue Swirl } 1742eaa728eeSbellard /* check valid bit */ 174320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1744100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 174520054ef0SBlue Swirl } 1746eaa728eeSbellard selector = e1 >> 16; 1747eaa728eeSbellard param_count = e2 & 0x1f; 17480aca0605SAndrew Oates offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 17490aca0605SAndrew Oates #ifdef TARGET_X86_64 17500aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17510aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 17520aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 17530aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17540aca0605SAndrew Oates GETPC()); 17550aca0605SAndrew Oates } 17560aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 17570aca0605SAndrew Oates if (type != 0) { 17580aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17590aca0605SAndrew Oates GETPC()); 17600aca0605SAndrew Oates } 17610aca0605SAndrew Oates offset |= ((target_ulong)e1) << 32; 17620aca0605SAndrew Oates } 17630aca0605SAndrew Oates #endif 176420054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 1765100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 176620054ef0SBlue Swirl } 1767eaa728eeSbellard 1768100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1769100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 177020054ef0SBlue Swirl } 177120054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1772100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 177320054ef0SBlue Swirl } 1774eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 177520054ef0SBlue Swirl if (dpl > cpl) { 1776100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 177720054ef0SBlue Swirl } 17780aca0605SAndrew Oates #ifdef TARGET_X86_64 17790aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17800aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 17810aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 17820aca0605SAndrew Oates } 17830aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 17840aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 17850aca0605SAndrew Oates } 17860aca0605SAndrew Oates shift++; 17870aca0605SAndrew Oates } 17880aca0605SAndrew Oates #endif 178920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1790100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 179120054ef0SBlue Swirl } 1792eaa728eeSbellard 1793eaa728eeSbellard if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1794eaa728eeSbellard /* to inner privilege */ 17950aca0605SAndrew Oates #ifdef TARGET_X86_64 17960aca0605SAndrew Oates if (shift == 2) { 17970aca0605SAndrew Oates ss = dpl; /* SS = NULL selector with RPL = new CPL */ 17980aca0605SAndrew Oates new_stack = 1; 1799059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, dpl); 1800059368bcSRichard Henderson sa.sp_mask = -1; 1801059368bcSRichard Henderson sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ 18020aca0605SAndrew Oates LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1803059368bcSRichard Henderson TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); 18040aca0605SAndrew Oates } else 18050aca0605SAndrew Oates #endif 18060aca0605SAndrew Oates { 18070aca0605SAndrew Oates uint32_t sp32; 18080aca0605SAndrew Oates get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 180990a2541bSliguang LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 18100aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp32, param_count, 181190a2541bSliguang env->regs[R_ESP]); 181220054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 1813100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 181420054ef0SBlue Swirl } 181520054ef0SBlue Swirl if ((ss & 3) != dpl) { 1816100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 181720054ef0SBlue Swirl } 1818100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1819100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 182020054ef0SBlue Swirl } 1821eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 182220054ef0SBlue Swirl if (ss_dpl != dpl) { 1823100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 182420054ef0SBlue Swirl } 1825eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 1826eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 182720054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 1828100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 182920054ef0SBlue Swirl } 183020054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 1831100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 183220054ef0SBlue Swirl } 1833eaa728eeSbellard 1834059368bcSRichard Henderson sa.sp = sp32; 1835059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 1836059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 18370aca0605SAndrew Oates } 18380aca0605SAndrew Oates 183920054ef0SBlue Swirl /* push_size = ((param_count * 2) + 8) << shift; */ 1840eaa728eeSbellard old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1841eaa728eeSbellard old_ssp = env->segs[R_SS].base; 1842059368bcSRichard Henderson 18430aca0605SAndrew Oates #ifdef TARGET_X86_64 18440aca0605SAndrew Oates if (shift == 2) { 18450aca0605SAndrew Oates /* XXX: verify if new stack address is canonical */ 1846059368bcSRichard Henderson 
pushq(&sa, env->segs[R_SS].selector); 1847059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 18480aca0605SAndrew Oates /* parameters aren't supported for 64-bit call gates */ 18490aca0605SAndrew Oates } else 18500aca0605SAndrew Oates #endif 18510aca0605SAndrew Oates if (shift == 1) { 1852059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 1853059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 1854eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18550bd385e7SPaolo Bonzini val = cpu_ldl_data_ra(env, 18560bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), 18570bd385e7SPaolo Bonzini GETPC()); 1858059368bcSRichard Henderson pushl(&sa, val); 1859eaa728eeSbellard } 1860eaa728eeSbellard } else { 1861059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 1862059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 1863eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18640bd385e7SPaolo Bonzini val = cpu_lduw_data_ra(env, 18650bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), 18660bd385e7SPaolo Bonzini GETPC()); 1867059368bcSRichard Henderson pushw(&sa, val); 1868eaa728eeSbellard } 1869eaa728eeSbellard } 1870eaa728eeSbellard new_stack = 1; 1871eaa728eeSbellard } else { 1872eaa728eeSbellard /* to same privilege */ 1873059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1874059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1875059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 187620054ef0SBlue Swirl /* push_size = (4 << shift); */ 1877eaa728eeSbellard new_stack = 0; 1878eaa728eeSbellard } 1879eaa728eeSbellard 18800aca0605SAndrew Oates #ifdef TARGET_X86_64 18810aca0605SAndrew Oates if (shift == 2) { 1882059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1883059368bcSRichard Henderson pushq(&sa, next_eip); 18840aca0605SAndrew Oates } else 18850aca0605SAndrew Oates #endif 18860aca0605SAndrew Oates if (shift == 1) { 1887059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1888059368bcSRichard Henderson pushl(&sa, next_eip); 1889eaa728eeSbellard } else { 1890059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1891059368bcSRichard Henderson pushw(&sa, next_eip); 1892eaa728eeSbellard } 1893eaa728eeSbellard 1894eaa728eeSbellard /* from this point, not restartable */ 1895eaa728eeSbellard 1896eaa728eeSbellard if (new_stack) { 18970aca0605SAndrew Oates #ifdef TARGET_X86_64 18980aca0605SAndrew Oates if (shift == 2) { 18990aca0605SAndrew Oates cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 19000aca0605SAndrew Oates } else 19010aca0605SAndrew Oates #endif 19020aca0605SAndrew Oates { 1903eaa728eeSbellard ss = (ss & ~3) | dpl; 1904eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 1905059368bcSRichard Henderson sa.ss_base, 1906eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 1907eaa728eeSbellard ss_e2); 1908eaa728eeSbellard } 19090aca0605SAndrew Oates } 1910eaa728eeSbellard 1911eaa728eeSbellard selector = (selector & ~3) | dpl; 1912eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1913eaa728eeSbellard get_seg_base(e1, e2), 1914eaa728eeSbellard get_seg_limit(e1, e2), 1915eaa728eeSbellard e2); 1916059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1917a78d0eabSliguang env->eip = offset; 1918eaa728eeSbellard } 1919eaa728eeSbellard } 1920eaa728eeSbellard 1921eaa728eeSbellard /* real and vm86 mode iret */ 19222999a0b2SBlue Swirl void helper_iret_real(CPUX86State *env, int shift) 1923eaa728eeSbellard { 1924059368bcSRichard 
Henderson uint32_t new_cs, new_eip, new_eflags; 1925eaa728eeSbellard int eflags_mask; 1926059368bcSRichard Henderson StackAccess sa; 1927eaa728eeSbellard 1928059368bcSRichard Henderson sa.env = env; 1929059368bcSRichard Henderson sa.ra = GETPC(); 19308053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1931059368bcSRichard Henderson sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ 1932059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1933059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1934059368bcSRichard Henderson 1935eaa728eeSbellard if (shift == 1) { 1936eaa728eeSbellard /* 32 bits */ 1937059368bcSRichard Henderson new_eip = popl(&sa); 1938059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 1939059368bcSRichard Henderson new_eflags = popl(&sa); 1940eaa728eeSbellard } else { 1941eaa728eeSbellard /* 16 bits */ 1942059368bcSRichard Henderson new_eip = popw(&sa); 1943059368bcSRichard Henderson new_cs = popw(&sa); 1944059368bcSRichard Henderson new_eflags = popw(&sa); 1945eaa728eeSbellard } 1946059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1947bdadc0b5Smalc env->segs[R_CS].selector = new_cs; 1948bdadc0b5Smalc env->segs[R_CS].base = (new_cs << 4); 1949eaa728eeSbellard env->eip = new_eip; 195020054ef0SBlue Swirl if (env->eflags & VM_MASK) { 195120054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 195220054ef0SBlue Swirl NT_MASK; 195320054ef0SBlue Swirl } else { 195420054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 195520054ef0SBlue Swirl RF_MASK | NT_MASK; 195620054ef0SBlue Swirl } 195720054ef0SBlue Swirl if (shift == 0) { 1958eaa728eeSbellard eflags_mask &= 0xffff; 195920054ef0SBlue Swirl } 1960997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 1961db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 1962eaa728eeSbellard } 1963eaa728eeSbellard 1964c117e5b1SPhilippe Mathieu-Daudé static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 1965eaa728eeSbellard { 1966eaa728eeSbellard int dpl; 1967eaa728eeSbellard uint32_t e2; 1968eaa728eeSbellard 1969eaa728eeSbellard /* XXX: on x86_64, we do not want to nullify FS and GS because 1970eaa728eeSbellard they may still contain a valid base. 
I would be interested to 1971eaa728eeSbellard know how a real x86_64 CPU behaves */ 1972eaa728eeSbellard if ((seg_reg == R_FS || seg_reg == R_GS) && 197320054ef0SBlue Swirl (env->segs[seg_reg].selector & 0xfffc) == 0) { 1974eaa728eeSbellard return; 197520054ef0SBlue Swirl } 1976eaa728eeSbellard 1977eaa728eeSbellard e2 = env->segs[seg_reg].flags; 1978eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1979eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1980eaa728eeSbellard /* data or non conforming code segment */ 1981eaa728eeSbellard if (dpl < cpl) { 1982c2ba0515SBin Meng cpu_x86_load_seg_cache(env, seg_reg, 0, 1983c2ba0515SBin Meng env->segs[seg_reg].base, 1984c2ba0515SBin Meng env->segs[seg_reg].limit, 1985c2ba0515SBin Meng env->segs[seg_reg].flags & ~DESC_P_MASK); 1986eaa728eeSbellard } 1987eaa728eeSbellard } 1988eaa728eeSbellard } 1989eaa728eeSbellard 1990eaa728eeSbellard /* protected mode iret */ 19912999a0b2SBlue Swirl static inline void helper_ret_protected(CPUX86State *env, int shift, 1992100ec099SPavel Dovgalyuk int is_iret, int addend, 1993100ec099SPavel Dovgalyuk uintptr_t retaddr) 1994eaa728eeSbellard { 1995eaa728eeSbellard uint32_t new_cs, new_eflags, new_ss; 1996eaa728eeSbellard uint32_t new_es, new_ds, new_fs, new_gs; 1997eaa728eeSbellard uint32_t e1, e2, ss_e1, ss_e2; 1998eaa728eeSbellard int cpl, dpl, rpl, eflags_mask, iopl; 1999059368bcSRichard Henderson target_ulong new_eip, new_esp; 2000059368bcSRichard Henderson StackAccess sa; 2001059368bcSRichard Henderson 20028053862aSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 20038053862aSPaolo Bonzini 2004059368bcSRichard Henderson sa.env = env; 2005059368bcSRichard Henderson sa.ra = retaddr; 20068053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 2007eaa728eeSbellard 2008eaa728eeSbellard #ifdef TARGET_X86_64 200920054ef0SBlue Swirl if (shift == 2) { 2010059368bcSRichard Henderson sa.sp_mask = -1; 201120054ef0SBlue Swirl } else 2012eaa728eeSbellard #endif 201320054ef0SBlue Swirl { 2014059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 201520054ef0SBlue Swirl } 2016059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 2017059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 2018eaa728eeSbellard new_eflags = 0; /* avoid warning */ 2019eaa728eeSbellard #ifdef TARGET_X86_64 2020eaa728eeSbellard if (shift == 2) { 2021059368bcSRichard Henderson new_eip = popq(&sa); 2022059368bcSRichard Henderson new_cs = popq(&sa) & 0xffff; 2023eaa728eeSbellard if (is_iret) { 2024059368bcSRichard Henderson new_eflags = popq(&sa); 2025eaa728eeSbellard } 2026eaa728eeSbellard } else 2027eaa728eeSbellard #endif 202820054ef0SBlue Swirl { 2029eaa728eeSbellard if (shift == 1) { 2030eaa728eeSbellard /* 32 bits */ 2031059368bcSRichard Henderson new_eip = popl(&sa); 2032059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 2033eaa728eeSbellard if (is_iret) { 2034059368bcSRichard Henderson new_eflags = popl(&sa); 203520054ef0SBlue Swirl if (new_eflags & VM_MASK) { 2036eaa728eeSbellard goto return_to_vm86; 2037eaa728eeSbellard } 203820054ef0SBlue Swirl } 2039eaa728eeSbellard } else { 2040eaa728eeSbellard /* 16 bits */ 2041059368bcSRichard Henderson new_eip = popw(&sa); 2042059368bcSRichard Henderson new_cs = popw(&sa); 204320054ef0SBlue Swirl if (is_iret) { 2044059368bcSRichard Henderson new_eflags = popw(&sa); 2045eaa728eeSbellard } 204620054ef0SBlue Swirl } 204720054ef0SBlue Swirl } 2048d12d51d5Saliguori LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2049eaa728eeSbellard 
new_cs, new_eip, shift, addend); 20506aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 205120054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 2052100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2053eaa728eeSbellard } 2054100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2055100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 205620054ef0SBlue Swirl } 205720054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || 205820054ef0SBlue Swirl !(e2 & DESC_CS_MASK)) { 2059100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 206020054ef0SBlue Swirl } 206120054ef0SBlue Swirl rpl = new_cs & 3; 206220054ef0SBlue Swirl if (rpl < cpl) { 2063100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 206420054ef0SBlue Swirl } 206520054ef0SBlue Swirl dpl = (e2 >> DESC_DPL_SHIFT) & 3; 206620054ef0SBlue Swirl if (e2 & DESC_C_MASK) { 206720054ef0SBlue Swirl if (dpl > rpl) { 2068100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 206920054ef0SBlue Swirl } 207020054ef0SBlue Swirl } else { 207120054ef0SBlue Swirl if (dpl != rpl) { 2072100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 207320054ef0SBlue Swirl } 207420054ef0SBlue Swirl } 207520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 2076100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 207720054ef0SBlue Swirl } 2078eaa728eeSbellard 2079059368bcSRichard Henderson sa.sp += addend; 2080eaa728eeSbellard if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2081eaa728eeSbellard ((env->hflags & HF_CS64_MASK) && !is_iret))) { 20821235fc06Sths /* return to same privilege level */ 2083eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2084eaa728eeSbellard get_seg_base(e1, e2), 2085eaa728eeSbellard get_seg_limit(e1, e2), 2086eaa728eeSbellard e2); 2087eaa728eeSbellard } else { 2088eaa728eeSbellard /* return to different privilege level */ 2089eaa728eeSbellard #ifdef TARGET_X86_64 2090eaa728eeSbellard if (shift == 2) { 2091059368bcSRichard Henderson new_esp = popq(&sa); 2092059368bcSRichard Henderson new_ss = popq(&sa) & 0xffff; 2093eaa728eeSbellard } else 2094eaa728eeSbellard #endif 209520054ef0SBlue Swirl { 2096eaa728eeSbellard if (shift == 1) { 2097eaa728eeSbellard /* 32 bits */ 2098059368bcSRichard Henderson new_esp = popl(&sa); 2099059368bcSRichard Henderson new_ss = popl(&sa) & 0xffff; 2100eaa728eeSbellard } else { 2101eaa728eeSbellard /* 16 bits */ 2102059368bcSRichard Henderson new_esp = popw(&sa); 2103059368bcSRichard Henderson new_ss = popw(&sa); 2104eaa728eeSbellard } 210520054ef0SBlue Swirl } 2106d12d51d5Saliguori LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2107eaa728eeSbellard new_ss, new_esp); 2108eaa728eeSbellard if ((new_ss & 0xfffc) == 0) { 2109eaa728eeSbellard #ifdef TARGET_X86_64 2110eaa728eeSbellard /* NULL ss is allowed in long mode if cpl != 3 */ 2111eaa728eeSbellard /* XXX: test CS64? */ 2112eaa728eeSbellard if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2113eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2114eaa728eeSbellard 0, 0xffffffff, 2115eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2116eaa728eeSbellard DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2117eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 2118eaa728eeSbellard ss_e2 = DESC_B_MASK; /* XXX: should not be needed? 
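   (it makes get_sp_mask(ss_e2) below return a 32-bit stack mask when CS is not 64-bit)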
*/ 2119eaa728eeSbellard } else 2120eaa728eeSbellard #endif 2121eaa728eeSbellard { 2122100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2123eaa728eeSbellard } 2124eaa728eeSbellard } else { 212520054ef0SBlue Swirl if ((new_ss & 3) != rpl) { 2126100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 212720054ef0SBlue Swirl } 2128100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2129100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 213020054ef0SBlue Swirl } 2131eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 2132eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 213320054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 2134100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 213520054ef0SBlue Swirl } 2136eaa728eeSbellard dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 213720054ef0SBlue Swirl if (dpl != rpl) { 2138100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 213920054ef0SBlue Swirl } 214020054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 2141100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 214220054ef0SBlue Swirl } 2143eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2144eaa728eeSbellard get_seg_base(ss_e1, ss_e2), 2145eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2146eaa728eeSbellard ss_e2); 2147eaa728eeSbellard } 2148eaa728eeSbellard 2149eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2150eaa728eeSbellard get_seg_base(e1, e2), 2151eaa728eeSbellard get_seg_limit(e1, e2), 2152eaa728eeSbellard e2); 2153059368bcSRichard Henderson sa.sp = new_esp; 2154eaa728eeSbellard #ifdef TARGET_X86_64 215520054ef0SBlue Swirl if (env->hflags & HF_CS64_MASK) { 2156059368bcSRichard Henderson sa.sp_mask = -1; 215720054ef0SBlue Swirl } else 2158eaa728eeSbellard #endif 215920054ef0SBlue Swirl { 2160059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 216120054ef0SBlue Swirl } 2162eaa728eeSbellard 2163eaa728eeSbellard /* validate data segments */ 21642999a0b2SBlue Swirl validate_seg(env, R_ES, rpl); 21652999a0b2SBlue Swirl validate_seg(env, R_DS, rpl); 21662999a0b2SBlue Swirl validate_seg(env, R_FS, rpl); 21672999a0b2SBlue Swirl validate_seg(env, R_GS, rpl); 2168eaa728eeSbellard 2169059368bcSRichard Henderson sa.sp += addend; 2170eaa728eeSbellard } 2171059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 2172eaa728eeSbellard env->eip = new_eip; 2173eaa728eeSbellard if (is_iret) { 2174eaa728eeSbellard /* NOTE: 'cpl' is the _old_ CPL */ 2175eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 217620054ef0SBlue Swirl if (cpl == 0) { 2177eaa728eeSbellard eflags_mask |= IOPL_MASK; 217820054ef0SBlue Swirl } 2179eaa728eeSbellard iopl = (env->eflags >> IOPL_SHIFT) & 3; 218020054ef0SBlue Swirl if (cpl <= iopl) { 2181eaa728eeSbellard eflags_mask |= IF_MASK; 218220054ef0SBlue Swirl } 218320054ef0SBlue Swirl if (shift == 0) { 2184eaa728eeSbellard eflags_mask &= 0xffff; 218520054ef0SBlue Swirl } 2186997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2187eaa728eeSbellard } 2188eaa728eeSbellard return; 2189eaa728eeSbellard 2190eaa728eeSbellard return_to_vm86: 2191059368bcSRichard Henderson new_esp = popl(&sa); 2192059368bcSRichard Henderson new_ss = popl(&sa); 2193059368bcSRichard Henderson new_es = popl(&sa); 2194059368bcSRichard Henderson new_ds = popl(&sa); 2195059368bcSRichard Henderson new_fs = popl(&sa); 
2196059368bcSRichard Henderson new_gs = popl(&sa); 2197eaa728eeSbellard 2198eaa728eeSbellard /* modify processor state */ 2199997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | 2200997ff0d9SBlue Swirl IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | 2201997ff0d9SBlue Swirl VIP_MASK); 22022999a0b2SBlue Swirl load_seg_vm(env, R_CS, new_cs & 0xffff); 22032999a0b2SBlue Swirl load_seg_vm(env, R_SS, new_ss & 0xffff); 22042999a0b2SBlue Swirl load_seg_vm(env, R_ES, new_es & 0xffff); 22052999a0b2SBlue Swirl load_seg_vm(env, R_DS, new_ds & 0xffff); 22062999a0b2SBlue Swirl load_seg_vm(env, R_FS, new_fs & 0xffff); 22072999a0b2SBlue Swirl load_seg_vm(env, R_GS, new_gs & 0xffff); 2208eaa728eeSbellard 2209eaa728eeSbellard env->eip = new_eip & 0xffff; 221008b3ded6Sliguang env->regs[R_ESP] = new_esp; 2211eaa728eeSbellard } 2212eaa728eeSbellard 22132999a0b2SBlue Swirl void helper_iret_protected(CPUX86State *env, int shift, int next_eip) 2214eaa728eeSbellard { 2215eaa728eeSbellard int tss_selector, type; 2216eaa728eeSbellard uint32_t e1, e2; 2217eaa728eeSbellard 2218eaa728eeSbellard /* specific case for TSS */ 2219eaa728eeSbellard if (env->eflags & NT_MASK) { 2220eaa728eeSbellard #ifdef TARGET_X86_64 222120054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 2222100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 222320054ef0SBlue Swirl } 2224eaa728eeSbellard #endif 2225100ec099SPavel Dovgalyuk tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); 222620054ef0SBlue Swirl if (tss_selector & 4) { 2227100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 222820054ef0SBlue Swirl } 2229100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { 2230100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 223120054ef0SBlue Swirl } 2232eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2233eaa728eeSbellard /* NOTE: we check both segment and busy TSS */ 223420054ef0SBlue Swirl if (type != 3) { 2235100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 223620054ef0SBlue Swirl } 2237100ec099SPavel Dovgalyuk switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); 2238eaa728eeSbellard } else { 2239100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 1, 0, GETPC()); 2240eaa728eeSbellard } 2241db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 2242eaa728eeSbellard } 2243eaa728eeSbellard 22442999a0b2SBlue Swirl void helper_lret_protected(CPUX86State *env, int shift, int addend) 2245eaa728eeSbellard { 2246100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 0, addend, GETPC()); 2247eaa728eeSbellard } 2248eaa728eeSbellard 22492999a0b2SBlue Swirl void helper_sysenter(CPUX86State *env) 2250eaa728eeSbellard { 2251eaa728eeSbellard if (env->sysenter_cs == 0) { 2252100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2253eaa728eeSbellard } 2254eaa728eeSbellard env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 22552436b61aSbalrog 22562436b61aSbalrog #ifdef TARGET_X86_64 22572436b61aSbalrog if (env->hflags & HF_LMA_MASK) { 22582436b61aSbalrog cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 22592436b61aSbalrog 0, 0xffffffff, 22602436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22612436b61aSbalrog DESC_S_MASK | 226220054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 226320054ef0SBlue Swirl DESC_L_MASK); 
22642436b61aSbalrog } else 22652436b61aSbalrog #endif 22662436b61aSbalrog { 2267eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2268eaa728eeSbellard 0, 0xffffffff, 2269eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2270eaa728eeSbellard DESC_S_MASK | 2271eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 22722436b61aSbalrog } 2273eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2274eaa728eeSbellard 0, 0xffffffff, 2275eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2276eaa728eeSbellard DESC_S_MASK | 2277eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 227808b3ded6Sliguang env->regs[R_ESP] = env->sysenter_esp; 2279a78d0eabSliguang env->eip = env->sysenter_eip; 2280eaa728eeSbellard } 2281eaa728eeSbellard 22822999a0b2SBlue Swirl void helper_sysexit(CPUX86State *env, int dflag) 2283eaa728eeSbellard { 2284eaa728eeSbellard int cpl; 2285eaa728eeSbellard 2286eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2287eaa728eeSbellard if (env->sysenter_cs == 0 || cpl != 0) { 2288100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2289eaa728eeSbellard } 22902436b61aSbalrog #ifdef TARGET_X86_64 22912436b61aSbalrog if (dflag == 2) { 229220054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 229320054ef0SBlue Swirl 3, 0, 0xffffffff, 22942436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22952436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 229620054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 229720054ef0SBlue Swirl DESC_L_MASK); 229820054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 229920054ef0SBlue Swirl 3, 0, 0xffffffff, 23002436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23012436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 23022436b61aSbalrog DESC_W_MASK | DESC_A_MASK); 23032436b61aSbalrog } else 23042436b61aSbalrog #endif 23052436b61aSbalrog { 230620054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 230720054ef0SBlue Swirl 3, 0, 0xffffffff, 2308eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2309eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2310eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 231120054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 231220054ef0SBlue Swirl 3, 0, 0xffffffff, 2313eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2314eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2315eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 23162436b61aSbalrog } 231708b3ded6Sliguang env->regs[R_ESP] = env->regs[R_ECX]; 2318a78d0eabSliguang env->eip = env->regs[R_EDX]; 2319eaa728eeSbellard } 2320eaa728eeSbellard 23212999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) 2322eaa728eeSbellard { 2323eaa728eeSbellard unsigned int limit; 2324ae541c0eSPaolo Bonzini uint32_t e1, e2, selector; 2325eaa728eeSbellard int rpl, dpl, cpl, type; 2326eaa728eeSbellard 2327eaa728eeSbellard selector = selector1 & 0xffff; 2328ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS); 232920054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2330dc1ded53Saliguori goto fail; 233120054ef0SBlue Swirl } 2332100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2333eaa728eeSbellard goto fail; 233420054ef0SBlue Swirl } 2335eaa728eeSbellard rpl = selector & 3; 2336eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC |= CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    CC_SRC |= CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}
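/*
 * Note: helper_verr and helper_verw above report their result only through
 * ZF: the recomputed flags are stored in CC_SRC with CC_OP set to
 * CC_OP_EFLAGS, so the translated code reads the outcome straight from the
 * emulated EFLAGS condition codes.
 */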