/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;
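
/*
 * Stack push/pop helpers.  They adjust the virtual stack pointer held in
 * StackAccess by the operand size and access memory at
 * ss_base + (sp & sp_mask), through the MMU index and fault return address
 * recorded when the StackAccess was set up.  The 64-bit pushq/popq variants
 * further down use the flat sp directly.
 */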
static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}

static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
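
/*
 * e1/e2 are the low and high 32-bit words of an 8-byte segment descriptor.
 * The helpers below reassemble the 20-bit limit (scaled by 4K when
 * DESC_G_MASK is set) and the 32-bit base that are scattered across the
 * two words.
 */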
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
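    /*
     * A 16-bit TSS (shift == 0) keeps SP/SS for privilege level dpl in
     * 2-byte fields at offsets 4 * dpl + 2 and 4 * dpl + 4; a 32-bit TSS
     * keeps ESP at 8 * dpl + 4 and SS at 8 * dpl + 8, which is what the
     * index computed above points to.
     */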
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
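
/*
 * Set or clear the busy bit in the type field of the TSS descriptor that
 * lives in the GDT (available <-> busy).
 */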
static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* return 0 if switching to a 16-bit TSS */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }
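
    /*
     * Minimum TSS size: a 32-bit TSS (type & 8) must cover at least 104
     * bytes (limit >= 103), a 16-bit TSS at least 44 bytes (limit >= 43).
     */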
    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
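
    /*
     * Busy-bit bookkeeping: JMP and IRET clear the busy bit of the outgoing
     * TSS; JMP and CALL set it on the incoming one further down.  A CALL
     * also stores the old TR selector in the new TSS's previous task link
     * and sets NT in the flags the incoming task will run with.
     */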
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
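    /*
     * In protected mode the segment registers are reloaded in two passes:
     * first only the selectors, then, once the new LDT is in place, the
     * full descriptors via tss_load_seg() below, so that any fault is
     * raised with a consistent task state.  In vm86 mode the flat 16-bit
     * segments are loaded directly.
     */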
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_is_fault(int intno)
{
    switch (intno) {
    /*
     * #DB can be both fault- and trap-like, but it never sets RF=1
     * in the RFLAGS value pushed on the stack.
     */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else, including reserved exceptions, is a fault. */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, eflags;
    int vm86 = env->eflags & VM_MASK;
    StackAccess sa;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);
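
    /*
     * A task gate causes a task switch to the TSS referenced by the gate's
     * selector; a pending error code is then pushed on the incoming task's
     * stack rather than on the current one.
     */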
    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                sa.sp_mask = 0xffffffff;
            } else {
                sa.sp_mask = 0xffff;
            }
            sa.sp = env->regs[R_ESP];
            sa.ss_base = env->segs[R_SS].base;
            if (shift) {
                pushl(&sa, error_code);
            } else {
                pushw(&sa, error_code);
            }
            SET_ESP(sa.sp, sa.sp_mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
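    /*
     * A gate whose target code segment is more privileged than CPL (lower
     * DPL) switches to the inner stack taken from the TSS (SS:ESP for the
     * target DPL).  Conforming code segments run at the caller's privilege
     * and keep the current stack, as does a same-privilege target.
     */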
    if (dpl < cpl) {
        /* to inner privilege */
        uint32_t esp;
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sa.sp = esp;
        sa.sp_mask = get_sp_mask(ss_e2);
        sa.ss_base = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
        sa.ss_base = env->segs[R_SS].base;
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }
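
    /*
     * Frame pushed on the target stack: GS/FS/DS/ES when leaving vm86 mode,
     * then SS:ESP when a stack switch happens, then EFLAGS, CS, EIP and
     * finally the error code if the exception defines one.  All slots are
     * 32-bit for a 386 gate (shift == 1), 16-bit otherwise.
     */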
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                pushl(&sa, env->segs[R_GS].selector);
                pushl(&sa, env->segs[R_FS].selector);
                pushl(&sa, env->segs[R_DS].selector);
                pushl(&sa, env->segs[R_ES].selector);
            }
            pushl(&sa, env->segs[R_SS].selector);
            pushl(&sa, env->regs[R_ESP]);
        }
        pushl(&sa, eflags);
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, old_eip);
        if (has_error_code) {
            pushl(&sa, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                pushw(&sa, env->segs[R_GS].selector);
                pushw(&sa, env->segs[R_FS].selector);
                pushw(&sa, env->segs[R_DS].selector);
                pushw(&sa, env->segs[R_ES].selector);
            }
            pushw(&sa, env->segs[R_SS].selector);
            pushw(&sa, env->regs[R_ESP]);
        }
        pushw(&sa, eflags);
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, old_eip);
        if (has_error_code) {
            pushw(&sa, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
                               get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(sa.sp, sa.sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

static void pushq(StackAccess *sa, uint64_t val)
{
    sa->sp -= 8;
    cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
}

static uint64_t popq(StackAccess *sa)
{
    uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
    sa->sp += 8;
    return ret;
}
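
/*
 * Fetch a stack pointer from the 64-bit TSS: levels 0-2 map to RSP0-2 at
 * offset 8 * level + 4, and the IST slots are addressed as levels 4-10
 * (callers pass ist + 3).  The value read must be canonical, otherwise
 * #SS(0) is raised.
 */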
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss, eflags;
    target_ulong old_eip, offset;
    bool set_rf;
    StackAccess sa;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
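    /*
     * 64-bit IDT entries are 16 bytes: e3 supplies bits 63:32 of the
     * handler offset and the low three bits of e2 select an optional IST
     * stack.
     */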
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);
    sa.sp_mask = -1;
    sa.ss_base = 0;
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
    }
    sa.sp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected.  */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    pushq(&sa, env->segs[R_SS].selector);
    pushq(&sa, env->regs[R_ESP]);
    pushq(&sa, eflags);
    pushq(&sa, env->segs[R_CS].selector);
    pushq(&sa, old_eip);
    if (has_error_code) {
        pushq(&sa, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = sa.sp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */
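
/*
 * SYSRET: the target selectors come from MSR_STAR bits 63:48 (CS is that
 * value + 16 for a 64-bit return, the value itself for a 32-bit one, SS is
 * always the value + 8); RIP is restored from RCX and, in long mode,
 * RFLAGS from R11.
 */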
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
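
/*
 * Real-mode IVT entries are 4 bytes (offset, then segment); the handler is
 * entered with FLAGS, CS and IP pushed on the 16-bit stack and with
 * IF/TF/AC/RF cleared.
 */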
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int selector;
    uint32_t offset;
    uint32_t old_cs, old_eip;
    StackAccess sa;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);

    sa.env = env;
    sa.ra = 0;
    sa.sp = env->regs[R_ESP];
    sa.sp_mask = 0xffff;
    sa.ss_base = env->segs[R_SS].base;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    pushw(&sa, cpu_compute_eflags(env));
    pushw(&sa, old_cs);
    pushw(&sa, old_eip);

    /* update processor state */
    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
1152eaa728eeSbellard */ 115330493a03SClaudio Fontana void do_interrupt_all(X86CPU *cpu, int intno, int is_int, 11542999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 1155eaa728eeSbellard { 1156ca4c810aSAndreas Färber CPUX86State *env = &cpu->env; 1157ca4c810aSAndreas Färber 11588fec2b8cSaliguori if (qemu_loglevel_mask(CPU_LOG_INT)) { 1159eaa728eeSbellard if ((env->cr[0] & CR0_PE_MASK)) { 1160eaa728eeSbellard static int count; 116120054ef0SBlue Swirl 116220054ef0SBlue Swirl qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx 116320054ef0SBlue Swirl " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1164eaa728eeSbellard count, intno, error_code, is_int, 1165eaa728eeSbellard env->hflags & HF_CPL_MASK, 1166a78d0eabSliguang env->segs[R_CS].selector, env->eip, 1167a78d0eabSliguang (int)env->segs[R_CS].base + env->eip, 116808b3ded6Sliguang env->segs[R_SS].selector, env->regs[R_ESP]); 1169eaa728eeSbellard if (intno == 0x0e) { 117093fcfe39Saliguori qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1171eaa728eeSbellard } else { 11724b34e3adSliguang qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); 1173eaa728eeSbellard } 117493fcfe39Saliguori qemu_log("\n"); 1175a0762859SAndreas Färber log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); 1176eaa728eeSbellard #if 0 1177eaa728eeSbellard { 1178eaa728eeSbellard int i; 11799bd5494eSAdam Lackorzynski target_ulong ptr; 118020054ef0SBlue Swirl 118193fcfe39Saliguori qemu_log(" code="); 1182eaa728eeSbellard ptr = env->segs[R_CS].base + env->eip; 1183eaa728eeSbellard for (i = 0; i < 16; i++) { 118493fcfe39Saliguori qemu_log(" %02x", ldub(ptr + i)); 1185eaa728eeSbellard } 118693fcfe39Saliguori qemu_log("\n"); 1187eaa728eeSbellard } 1188eaa728eeSbellard #endif 1189eaa728eeSbellard count++; 1190eaa728eeSbellard } 1191eaa728eeSbellard } 1192eaa728eeSbellard if (env->cr[0] & CR0_PE_MASK) { 119300ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1194f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 11952999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 0); 119620054ef0SBlue Swirl } 119700ea18d1Saliguori #endif 1198eb38c52cSblueswir1 #ifdef TARGET_X86_64 1199eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 12002999a0b2SBlue Swirl do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); 1201eaa728eeSbellard } else 1202eaa728eeSbellard #endif 1203eaa728eeSbellard { 12042999a0b2SBlue Swirl do_interrupt_protected(env, intno, is_int, error_code, next_eip, 12052999a0b2SBlue Swirl is_hw); 1206eaa728eeSbellard } 1207eaa728eeSbellard } else { 120800ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1209f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12102999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 1); 121120054ef0SBlue Swirl } 121200ea18d1Saliguori #endif 12132999a0b2SBlue Swirl do_interrupt_real(env, intno, is_int, error_code, next_eip); 1214eaa728eeSbellard } 12152ed51f5bSaliguori 121600ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1217f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 1218fdfba1a2SEdgar E. Iglesias CPUState *cs = CPU(cpu); 1219b216aa6cSPaolo Bonzini uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + 122020054ef0SBlue Swirl offsetof(struct vmcb, 122120054ef0SBlue Swirl control.event_inj)); 122220054ef0SBlue Swirl 1223b216aa6cSPaolo Bonzini x86_stl_phys(cs, 1224ab1da857SEdgar E. 
Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 122520054ef0SBlue Swirl event_inj & ~SVM_EVTINJ_VALID); 12262ed51f5bSaliguori } 122700ea18d1Saliguori #endif 1228eaa728eeSbellard } 1229eaa728eeSbellard 12302999a0b2SBlue Swirl void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1231e694d4e2SBlue Swirl { 12326aa9e42fSRichard Henderson do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1233e694d4e2SBlue Swirl } 1234e694d4e2SBlue Swirl 12352999a0b2SBlue Swirl void helper_lldt(CPUX86State *env, int selector) 1236eaa728eeSbellard { 1237eaa728eeSbellard SegmentCache *dt; 1238eaa728eeSbellard uint32_t e1, e2; 1239eaa728eeSbellard int index, entry_limit; 1240eaa728eeSbellard target_ulong ptr; 1241eaa728eeSbellard 1242eaa728eeSbellard selector &= 0xffff; 1243eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1244eaa728eeSbellard /* XXX: NULL selector case: invalid LDT */ 1245eaa728eeSbellard env->ldt.base = 0; 1246eaa728eeSbellard env->ldt.limit = 0; 1247eaa728eeSbellard } else { 124820054ef0SBlue Swirl if (selector & 0x4) { 1249100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 125020054ef0SBlue Swirl } 1251eaa728eeSbellard dt = &env->gdt; 1252eaa728eeSbellard index = selector & ~7; 1253eaa728eeSbellard #ifdef TARGET_X86_64 125420054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1255eaa728eeSbellard entry_limit = 15; 125620054ef0SBlue Swirl } else 1257eaa728eeSbellard #endif 125820054ef0SBlue Swirl { 1259eaa728eeSbellard entry_limit = 7; 126020054ef0SBlue Swirl } 126120054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1262100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 126320054ef0SBlue Swirl } 1264eaa728eeSbellard ptr = dt->base + index; 1265100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1266100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 126720054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1268100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 126920054ef0SBlue Swirl } 127020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1271100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 127220054ef0SBlue Swirl } 1273eaa728eeSbellard #ifdef TARGET_X86_64 1274eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1275eaa728eeSbellard uint32_t e3; 127620054ef0SBlue Swirl 1277100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1278eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1279eaa728eeSbellard env->ldt.base |= (target_ulong)e3 << 32; 1280eaa728eeSbellard } else 1281eaa728eeSbellard #endif 1282eaa728eeSbellard { 1283eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1284eaa728eeSbellard } 1285eaa728eeSbellard } 1286eaa728eeSbellard env->ldt.selector = selector; 1287eaa728eeSbellard } 1288eaa728eeSbellard 12892999a0b2SBlue Swirl void helper_ltr(CPUX86State *env, int selector) 1290eaa728eeSbellard { 1291eaa728eeSbellard SegmentCache *dt; 1292eaa728eeSbellard uint32_t e1, e2; 1293eaa728eeSbellard int index, type, entry_limit; 1294eaa728eeSbellard target_ulong ptr; 1295eaa728eeSbellard 1296eaa728eeSbellard selector &= 0xffff; 1297eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1298eaa728eeSbellard /* NULL selector case: invalid TR */ 1299eaa728eeSbellard env->tr.base = 0; 1300eaa728eeSbellard env->tr.limit = 0; 1301eaa728eeSbellard env->tr.flags = 0; 1302eaa728eeSbellard } 
else { 130320054ef0SBlue Swirl if (selector & 0x4) { 1304100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 130520054ef0SBlue Swirl } 1306eaa728eeSbellard dt = &env->gdt; 1307eaa728eeSbellard index = selector & ~7; 1308eaa728eeSbellard #ifdef TARGET_X86_64 130920054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1310eaa728eeSbellard entry_limit = 15; 131120054ef0SBlue Swirl } else 1312eaa728eeSbellard #endif 131320054ef0SBlue Swirl { 1314eaa728eeSbellard entry_limit = 7; 131520054ef0SBlue Swirl } 131620054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1317100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 131820054ef0SBlue Swirl } 1319eaa728eeSbellard ptr = dt->base + index; 1320100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1321100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1322eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1323eaa728eeSbellard if ((e2 & DESC_S_MASK) || 132420054ef0SBlue Swirl (type != 1 && type != 9)) { 1325100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 132620054ef0SBlue Swirl } 132720054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1328100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 132920054ef0SBlue Swirl } 1330eaa728eeSbellard #ifdef TARGET_X86_64 1331eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1332eaa728eeSbellard uint32_t e3, e4; 133320054ef0SBlue Swirl 1334100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1335100ec099SPavel Dovgalyuk e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 133620054ef0SBlue Swirl if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1337100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 133820054ef0SBlue Swirl } 1339eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1340eaa728eeSbellard env->tr.base |= (target_ulong)e3 << 32; 1341eaa728eeSbellard } else 1342eaa728eeSbellard #endif 1343eaa728eeSbellard { 1344eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1345eaa728eeSbellard } 1346eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 1347100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1348eaa728eeSbellard } 1349eaa728eeSbellard env->tr.selector = selector; 1350eaa728eeSbellard } 1351eaa728eeSbellard 1352eaa728eeSbellard /* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ 13532999a0b2SBlue Swirl void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1354eaa728eeSbellard { 1355eaa728eeSbellard uint32_t e1, e2; 1356eaa728eeSbellard int cpl, dpl, rpl; 1357eaa728eeSbellard SegmentCache *dt; 1358eaa728eeSbellard int index; 1359eaa728eeSbellard target_ulong ptr; 1360eaa728eeSbellard 1361eaa728eeSbellard selector &= 0xffff; 1362eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1363eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1364eaa728eeSbellard /* null selector case */ 1365eaa728eeSbellard if (seg_reg == R_SS 1366eaa728eeSbellard #ifdef TARGET_X86_64 1367eaa728eeSbellard && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1368eaa728eeSbellard #endif 136920054ef0SBlue Swirl ) { 1370100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 137120054ef0SBlue Swirl } 1372eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1373eaa728eeSbellard } else { 1374eaa728eeSbellard 137520054ef0SBlue Swirl if (selector & 0x4) { 1376eaa728eeSbellard dt = &env->ldt; 137720054ef0SBlue Swirl } else { 1378eaa728eeSbellard dt = &env->gdt; 137920054ef0SBlue Swirl } 1380eaa728eeSbellard index = selector & ~7; 138120054ef0SBlue Swirl if ((index + 7) > dt->limit) { 1382100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 138320054ef0SBlue Swirl } 1384eaa728eeSbellard ptr = dt->base + index; 1385100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1386100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1387eaa728eeSbellard 138820054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 1389100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 139020054ef0SBlue Swirl } 1391eaa728eeSbellard rpl = selector & 3; 1392eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1393eaa728eeSbellard if (seg_reg == R_SS) { 1394eaa728eeSbellard /* must be writable segment */ 139520054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1396100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 139720054ef0SBlue Swirl } 139820054ef0SBlue Swirl if (rpl != cpl || dpl != cpl) { 1399100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 140020054ef0SBlue Swirl } 1401eaa728eeSbellard } else { 1402eaa728eeSbellard /* must be readable segment */ 140320054ef0SBlue Swirl if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1404100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 140520054ef0SBlue Swirl } 1406eaa728eeSbellard 1407eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1408eaa728eeSbellard /* if not conforming code, test rights */ 140920054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1410100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1411eaa728eeSbellard } 1412eaa728eeSbellard } 141320054ef0SBlue Swirl } 1414eaa728eeSbellard 1415eaa728eeSbellard if (!(e2 & DESC_P_MASK)) { 141620054ef0SBlue Swirl if (seg_reg == R_SS) { 1417100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 141820054ef0SBlue Swirl } else { 1419100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1420eaa728eeSbellard } 142120054ef0SBlue Swirl } 1422eaa728eeSbellard 1423eaa728eeSbellard /* set the access bit if not already set */ 1424eaa728eeSbellard if (!(e2 & DESC_A_MASK)) { 
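/* The updated flags dword is written back into the GDT/LDT entry at
   ptr + 4, so the accessed bit becomes visible in the descriptor table
   itself, matching what real hardware does on a segment load. */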
1425eaa728eeSbellard e2 |= DESC_A_MASK; 1426100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1427eaa728eeSbellard } 1428eaa728eeSbellard 1429eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 1430eaa728eeSbellard get_seg_base(e1, e2), 1431eaa728eeSbellard get_seg_limit(e1, e2), 1432eaa728eeSbellard e2); 1433eaa728eeSbellard #if 0 143493fcfe39Saliguori qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1435eaa728eeSbellard selector, (unsigned long)sc->base, sc->limit, sc->flags); 1436eaa728eeSbellard #endif 1437eaa728eeSbellard } 1438eaa728eeSbellard } 1439eaa728eeSbellard 1440eaa728eeSbellard /* protected mode jump */ 14412999a0b2SBlue Swirl void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1442100ec099SPavel Dovgalyuk target_ulong next_eip) 1443eaa728eeSbellard { 1444eaa728eeSbellard int gate_cs, type; 1445eaa728eeSbellard uint32_t e1, e2, cpl, dpl, rpl, limit; 1446eaa728eeSbellard 144720054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1448100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 144920054ef0SBlue Swirl } 1450100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1451100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 145220054ef0SBlue Swirl } 1453eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1454eaa728eeSbellard if (e2 & DESC_S_MASK) { 145520054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1456100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 145720054ef0SBlue Swirl } 1458eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1459eaa728eeSbellard if (e2 & DESC_C_MASK) { 1460eaa728eeSbellard /* conforming code segment */ 146120054ef0SBlue Swirl if (dpl > cpl) { 1462100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 146320054ef0SBlue Swirl } 1464eaa728eeSbellard } else { 1465eaa728eeSbellard /* non conforming code segment */ 1466eaa728eeSbellard rpl = new_cs & 3; 146720054ef0SBlue Swirl if (rpl > cpl) { 1468100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1469eaa728eeSbellard } 147020054ef0SBlue Swirl if (dpl != cpl) { 1471100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 147220054ef0SBlue Swirl } 147320054ef0SBlue Swirl } 147420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1475100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 147620054ef0SBlue Swirl } 1477eaa728eeSbellard limit = get_seg_limit(e1, e2); 1478eaa728eeSbellard if (new_eip > limit && 1479db7196dbSAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1480db7196dbSAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 148120054ef0SBlue Swirl } 1482eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1483eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1484a78d0eabSliguang env->eip = new_eip; 1485eaa728eeSbellard } else { 1486eaa728eeSbellard /* jump to call or task gate */ 1487eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1488eaa728eeSbellard rpl = new_cs & 3; 1489eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1490eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 14910aca0605SAndrew Oates 14920aca0605SAndrew Oates #ifdef TARGET_X86_64 14930aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 14940aca0605SAndrew Oates if (type != 12) { 14950aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 14960aca0605SAndrew Oates } 14970aca0605SAndrew Oates } 14980aca0605SAndrew Oates #endif 1499eaa728eeSbellard switch (type) { 1500eaa728eeSbellard case 1: /* 286 TSS */ 1501eaa728eeSbellard case 9: /* 386 TSS */ 1502eaa728eeSbellard case 5: /* task gate */ 150320054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1504100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 150520054ef0SBlue Swirl } 1506100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1507eaa728eeSbellard break; 1508eaa728eeSbellard case 4: /* 286 call gate */ 1509eaa728eeSbellard case 12: /* 386 call gate */ 151020054ef0SBlue Swirl if ((dpl < cpl) || (dpl < rpl)) { 1511100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 151220054ef0SBlue Swirl } 151320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1514100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 151520054ef0SBlue Swirl } 1516eaa728eeSbellard gate_cs = e1 >> 16; 1517eaa728eeSbellard new_eip = (e1 & 0xffff); 151820054ef0SBlue Swirl if (type == 12) { 1519eaa728eeSbellard new_eip |= (e2 & 0xffff0000); 152020054ef0SBlue Swirl } 15210aca0605SAndrew Oates 15220aca0605SAndrew Oates #ifdef TARGET_X86_64 15230aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15240aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 15250aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 15260aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15270aca0605SAndrew Oates GETPC()); 15280aca0605SAndrew Oates } 15290aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 15300aca0605SAndrew Oates if (type != 0) { 15310aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15320aca0605SAndrew Oates GETPC()); 15330aca0605SAndrew Oates } 15340aca0605SAndrew Oates new_eip |= ((target_ulong)e1) << 32; 15350aca0605SAndrew Oates } 15360aca0605SAndrew Oates #endif 15370aca0605SAndrew Oates 1538100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1539100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 154020054ef0SBlue Swirl } 1541eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1542eaa728eeSbellard /* must be code segment */ 1543eaa728eeSbellard if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 154420054ef0SBlue Swirl (DESC_S_MASK | DESC_CS_MASK))) { 1545100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 154620054ef0SBlue Swirl } 1547eaa728eeSbellard if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 154820054ef0SBlue Swirl (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1549100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 155020054ef0SBlue Swirl } 15510aca0605SAndrew Oates #ifdef TARGET_X86_64 15520aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15530aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 15540aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15550aca0605SAndrew Oates } 15560aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 15570aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15580aca0605SAndrew Oates } 15590aca0605SAndrew Oates } 15600aca0605SAndrew Oates #endif 156120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1562100ec099SPavel Dovgalyuk 
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 156320054ef0SBlue Swirl } 1564eaa728eeSbellard limit = get_seg_limit(e1, e2); 15650aca0605SAndrew Oates if (new_eip > limit && 15660aca0605SAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1567100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 156820054ef0SBlue Swirl } 1569eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1570eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1571a78d0eabSliguang env->eip = new_eip; 1572eaa728eeSbellard break; 1573eaa728eeSbellard default: 1574100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1575eaa728eeSbellard break; 1576eaa728eeSbellard } 1577eaa728eeSbellard } 1578eaa728eeSbellard } 1579eaa728eeSbellard 1580eaa728eeSbellard /* real mode call */ 15818c03ab9fSRichard Henderson void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, 15828c03ab9fSRichard Henderson int shift, uint32_t next_eip) 1583eaa728eeSbellard { 1584059368bcSRichard Henderson StackAccess sa; 1585eaa728eeSbellard 1586059368bcSRichard Henderson sa.env = env; 1587059368bcSRichard Henderson sa.ra = GETPC(); 1588059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1589059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1590059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 15918053862aSPaolo Bonzini sa.mmu_index = cpu_mmu_index_kernel(env); 1592059368bcSRichard Henderson 1593eaa728eeSbellard if (shift) { 1594059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1595059368bcSRichard Henderson pushl(&sa, next_eip); 1596eaa728eeSbellard } else { 1597059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1598059368bcSRichard Henderson pushw(&sa, next_eip); 1599eaa728eeSbellard } 1600eaa728eeSbellard 1601059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1602eaa728eeSbellard env->eip = new_eip; 1603eaa728eeSbellard env->segs[R_CS].selector = new_cs; 1604eaa728eeSbellard env->segs[R_CS].base = (new_cs << 4); 1605eaa728eeSbellard } 1606eaa728eeSbellard 1607eaa728eeSbellard /* protected mode call */ 16082999a0b2SBlue Swirl void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1609100ec099SPavel Dovgalyuk int shift, target_ulong next_eip) 1610eaa728eeSbellard { 1611eaa728eeSbellard int new_stack, i; 16120aca0605SAndrew Oates uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1613059368bcSRichard Henderson uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; 1614eaa728eeSbellard uint32_t val, limit, old_sp_mask; 1615059368bcSRichard Henderson target_ulong old_ssp, offset; 1616059368bcSRichard Henderson StackAccess sa; 1617eaa728eeSbellard 16180aca0605SAndrew Oates LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 16196aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 162020054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1621100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 162220054ef0SBlue Swirl } 1623100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1624100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 162520054ef0SBlue Swirl } 1626eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1627d12d51d5Saliguori LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1628059368bcSRichard Henderson 1629059368bcSRichard Henderson sa.env = env; 1630059368bcSRichard Henderson sa.ra = GETPC(); 
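/* Only the common StackAccess fields are set up here; sp, sp_mask and
   ss_base are filled in below, once it is known whether the call stays
   on the current stack or switches to the inner-privilege stack taken
   from the TSS. */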
16318053862aSPaolo Bonzini sa.mmu_index = cpu_mmu_index_kernel(env); 1632059368bcSRichard Henderson 1633eaa728eeSbellard if (e2 & DESC_S_MASK) { 163420054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1635100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 163620054ef0SBlue Swirl } 1637eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1638eaa728eeSbellard if (e2 & DESC_C_MASK) { 1639eaa728eeSbellard /* conforming code segment */ 164020054ef0SBlue Swirl if (dpl > cpl) { 1641100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 164220054ef0SBlue Swirl } 1643eaa728eeSbellard } else { 1644eaa728eeSbellard /* non conforming code segment */ 1645eaa728eeSbellard rpl = new_cs & 3; 164620054ef0SBlue Swirl if (rpl > cpl) { 1647100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1648eaa728eeSbellard } 164920054ef0SBlue Swirl if (dpl != cpl) { 1650100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 165120054ef0SBlue Swirl } 165220054ef0SBlue Swirl } 165320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1654100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 165520054ef0SBlue Swirl } 1656eaa728eeSbellard 1657eaa728eeSbellard #ifdef TARGET_X86_64 1658eaa728eeSbellard /* XXX: check 16/32 bit cases in long mode */ 1659eaa728eeSbellard if (shift == 2) { 1660eaa728eeSbellard /* 64 bit case */ 1661059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1662059368bcSRichard Henderson sa.sp_mask = -1; 1663059368bcSRichard Henderson sa.ss_base = 0; 1664059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1665059368bcSRichard Henderson pushq(&sa, next_eip); 1666eaa728eeSbellard /* from this point, not restartable */ 1667059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1668eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1669eaa728eeSbellard get_seg_base(e1, e2), 1670eaa728eeSbellard get_seg_limit(e1, e2), e2); 1671a78d0eabSliguang env->eip = new_eip; 1672eaa728eeSbellard } else 1673eaa728eeSbellard #endif 1674eaa728eeSbellard { 1675059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1676059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1677059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1678eaa728eeSbellard if (shift) { 1679059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1680059368bcSRichard Henderson pushl(&sa, next_eip); 1681eaa728eeSbellard } else { 1682059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1683059368bcSRichard Henderson pushw(&sa, next_eip); 1684eaa728eeSbellard } 1685eaa728eeSbellard 1686eaa728eeSbellard limit = get_seg_limit(e1, e2); 168720054ef0SBlue Swirl if (new_eip > limit) { 1688100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 168920054ef0SBlue Swirl } 1690eaa728eeSbellard /* from this point, not restartable */ 1691059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1692eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1693eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1694a78d0eabSliguang env->eip = new_eip; 1695eaa728eeSbellard } 1696eaa728eeSbellard } else { 1697eaa728eeSbellard /* check gate type */ 1698eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1699eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1700eaa728eeSbellard rpl = new_cs & 3; 17010aca0605SAndrew Oates 17020aca0605SAndrew Oates #ifdef 
TARGET_X86_64 17030aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17040aca0605SAndrew Oates if (type != 12) { 17050aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 17060aca0605SAndrew Oates } 17070aca0605SAndrew Oates } 17080aca0605SAndrew Oates #endif 17090aca0605SAndrew Oates 1710eaa728eeSbellard switch (type) { 1711eaa728eeSbellard case 1: /* available 286 TSS */ 1712eaa728eeSbellard case 9: /* available 386 TSS */ 1713eaa728eeSbellard case 5: /* task gate */ 171420054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1715100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 171620054ef0SBlue Swirl } 1717100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1718eaa728eeSbellard return; 1719eaa728eeSbellard case 4: /* 286 call gate */ 1720eaa728eeSbellard case 12: /* 386 call gate */ 1721eaa728eeSbellard break; 1722eaa728eeSbellard default: 1723100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1724eaa728eeSbellard break; 1725eaa728eeSbellard } 1726eaa728eeSbellard shift = type >> 3; 1727eaa728eeSbellard 172820054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1729100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 173020054ef0SBlue Swirl } 1731eaa728eeSbellard /* check valid bit */ 173220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1733100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 173420054ef0SBlue Swirl } 1735eaa728eeSbellard selector = e1 >> 16; 1736eaa728eeSbellard param_count = e2 & 0x1f; 17370aca0605SAndrew Oates offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 17380aca0605SAndrew Oates #ifdef TARGET_X86_64 17390aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17400aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 17410aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 17420aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17430aca0605SAndrew Oates GETPC()); 17440aca0605SAndrew Oates } 17450aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 17460aca0605SAndrew Oates if (type != 0) { 17470aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17480aca0605SAndrew Oates GETPC()); 17490aca0605SAndrew Oates } 17500aca0605SAndrew Oates offset |= ((target_ulong)e1) << 32; 17510aca0605SAndrew Oates } 17520aca0605SAndrew Oates #endif 175320054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 1754100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 175520054ef0SBlue Swirl } 1756eaa728eeSbellard 1757100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1758100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 175920054ef0SBlue Swirl } 176020054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1761100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 176220054ef0SBlue Swirl } 1763eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 176420054ef0SBlue Swirl if (dpl > cpl) { 1765100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 176620054ef0SBlue Swirl } 17670aca0605SAndrew Oates #ifdef TARGET_X86_64 17680aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17690aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 17700aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 17710aca0605SAndrew Oates } 17720aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 17730aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 17740aca0605SAndrew Oates } 17750aca0605SAndrew Oates shift++; 17760aca0605SAndrew Oates } 17770aca0605SAndrew Oates #endif 177820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1779100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 178020054ef0SBlue Swirl } 1781eaa728eeSbellard 1782eaa728eeSbellard if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1783eaa728eeSbellard /* to inner privilege */ 17840aca0605SAndrew Oates #ifdef TARGET_X86_64 17850aca0605SAndrew Oates if (shift == 2) { 17860aca0605SAndrew Oates ss = dpl; /* SS = NULL selector with RPL = new CPL */ 17870aca0605SAndrew Oates new_stack = 1; 1788059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, dpl); 1789059368bcSRichard Henderson sa.sp_mask = -1; 1790059368bcSRichard Henderson sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ 17910aca0605SAndrew Oates LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1792059368bcSRichard Henderson TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); 17930aca0605SAndrew Oates } else 17940aca0605SAndrew Oates #endif 17950aca0605SAndrew Oates { 17960aca0605SAndrew Oates uint32_t sp32; 17970aca0605SAndrew Oates get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 179890a2541bSliguang LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 17990aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp32, param_count, 180090a2541bSliguang env->regs[R_ESP]); 180120054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 1802100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 180320054ef0SBlue Swirl } 180420054ef0SBlue Swirl if ((ss & 3) != dpl) { 1805100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 180620054ef0SBlue Swirl } 1807100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1808100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 180920054ef0SBlue Swirl } 1810eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 181120054ef0SBlue Swirl if (ss_dpl != dpl) { 1812100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 181320054ef0SBlue Swirl } 1814eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 1815eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 181620054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 1817100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 181820054ef0SBlue Swirl } 181920054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 1820100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 182120054ef0SBlue Swirl } 1822eaa728eeSbellard 1823059368bcSRichard Henderson sa.sp = sp32; 1824059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 1825059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 18260aca0605SAndrew Oates } 18270aca0605SAndrew Oates 182820054ef0SBlue Swirl /* push_size = ((param_count * 2) + 8) << shift; */ 1829eaa728eeSbellard old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1830eaa728eeSbellard old_ssp = env->segs[R_SS].base; 1831059368bcSRichard Henderson 18320aca0605SAndrew Oates #ifdef TARGET_X86_64 18330aca0605SAndrew Oates if (shift == 2) { 18340aca0605SAndrew Oates /* XXX: verify if new stack address is canonical */ 1835059368bcSRichard Henderson 
pushq(&sa, env->segs[R_SS].selector); 1836059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 18370aca0605SAndrew Oates /* parameters aren't supported for 64-bit call gates */ 18380aca0605SAndrew Oates } else 18390aca0605SAndrew Oates #endif 18400aca0605SAndrew Oates if (shift == 1) { 1841059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 1842059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 1843eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18440bd385e7SPaolo Bonzini val = cpu_ldl_data_ra(env, 18450bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), 18460bd385e7SPaolo Bonzini GETPC()); 1847059368bcSRichard Henderson pushl(&sa, val); 1848eaa728eeSbellard } 1849eaa728eeSbellard } else { 1850059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 1851059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 1852eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18530bd385e7SPaolo Bonzini val = cpu_lduw_data_ra(env, 18540bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), 18550bd385e7SPaolo Bonzini GETPC()); 1856059368bcSRichard Henderson pushw(&sa, val); 1857eaa728eeSbellard } 1858eaa728eeSbellard } 1859eaa728eeSbellard new_stack = 1; 1860eaa728eeSbellard } else { 1861eaa728eeSbellard /* to same privilege */ 1862059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1863059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1864059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 186520054ef0SBlue Swirl /* push_size = (4 << shift); */ 1866eaa728eeSbellard new_stack = 0; 1867eaa728eeSbellard } 1868eaa728eeSbellard 18690aca0605SAndrew Oates #ifdef TARGET_X86_64 18700aca0605SAndrew Oates if (shift == 2) { 1871059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1872059368bcSRichard Henderson pushq(&sa, next_eip); 18730aca0605SAndrew Oates } else 18740aca0605SAndrew Oates #endif 18750aca0605SAndrew Oates if (shift == 1) { 1876059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1877059368bcSRichard Henderson pushl(&sa, next_eip); 1878eaa728eeSbellard } else { 1879059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1880059368bcSRichard Henderson pushw(&sa, next_eip); 1881eaa728eeSbellard } 1882eaa728eeSbellard 1883eaa728eeSbellard /* from this point, not restartable */ 1884eaa728eeSbellard 1885eaa728eeSbellard if (new_stack) { 18860aca0605SAndrew Oates #ifdef TARGET_X86_64 18870aca0605SAndrew Oates if (shift == 2) { 18880aca0605SAndrew Oates cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 18890aca0605SAndrew Oates } else 18900aca0605SAndrew Oates #endif 18910aca0605SAndrew Oates { 1892eaa728eeSbellard ss = (ss & ~3) | dpl; 1893eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 1894059368bcSRichard Henderson sa.ss_base, 1895eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 1896eaa728eeSbellard ss_e2); 1897eaa728eeSbellard } 18980aca0605SAndrew Oates } 1899eaa728eeSbellard 1900eaa728eeSbellard selector = (selector & ~3) | dpl; 1901eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1902eaa728eeSbellard get_seg_base(e1, e2), 1903eaa728eeSbellard get_seg_limit(e1, e2), 1904eaa728eeSbellard e2); 1905059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1906a78d0eabSliguang env->eip = offset; 1907eaa728eeSbellard } 1908eaa728eeSbellard } 1909eaa728eeSbellard 1910eaa728eeSbellard /* real and vm86 mode iret */ 19112999a0b2SBlue Swirl void helper_iret_real(CPUX86State *env, int shift) 1912eaa728eeSbellard { 1913059368bcSRichard 
Henderson uint32_t new_cs, new_eip, new_eflags; 1914eaa728eeSbellard int eflags_mask; 1915059368bcSRichard Henderson StackAccess sa; 1916eaa728eeSbellard 1917059368bcSRichard Henderson sa.env = env; 1918059368bcSRichard Henderson sa.ra = GETPC(); 19198053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1920059368bcSRichard Henderson sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ 1921059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1922059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1923059368bcSRichard Henderson 1924eaa728eeSbellard if (shift == 1) { 1925eaa728eeSbellard /* 32 bits */ 1926059368bcSRichard Henderson new_eip = popl(&sa); 1927059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 1928059368bcSRichard Henderson new_eflags = popl(&sa); 1929eaa728eeSbellard } else { 1930eaa728eeSbellard /* 16 bits */ 1931059368bcSRichard Henderson new_eip = popw(&sa); 1932059368bcSRichard Henderson new_cs = popw(&sa); 1933059368bcSRichard Henderson new_eflags = popw(&sa); 1934eaa728eeSbellard } 1935059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1936bdadc0b5Smalc env->segs[R_CS].selector = new_cs; 1937bdadc0b5Smalc env->segs[R_CS].base = (new_cs << 4); 1938eaa728eeSbellard env->eip = new_eip; 193920054ef0SBlue Swirl if (env->eflags & VM_MASK) { 194020054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 194120054ef0SBlue Swirl NT_MASK; 194220054ef0SBlue Swirl } else { 194320054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 194420054ef0SBlue Swirl RF_MASK | NT_MASK; 194520054ef0SBlue Swirl } 194620054ef0SBlue Swirl if (shift == 0) { 1947eaa728eeSbellard eflags_mask &= 0xffff; 194820054ef0SBlue Swirl } 1949997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 1950db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 1951eaa728eeSbellard } 1952eaa728eeSbellard 1953c117e5b1SPhilippe Mathieu-Daudé static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 1954eaa728eeSbellard { 1955eaa728eeSbellard int dpl; 1956eaa728eeSbellard uint32_t e2; 1957eaa728eeSbellard 1958eaa728eeSbellard /* XXX: on x86_64, we do not want to nullify FS and GS because 1959eaa728eeSbellard they may still contain a valid base. 
I would be interested to 1960eaa728eeSbellard know how a real x86_64 CPU behaves */ 1961eaa728eeSbellard if ((seg_reg == R_FS || seg_reg == R_GS) && 196220054ef0SBlue Swirl (env->segs[seg_reg].selector & 0xfffc) == 0) { 1963eaa728eeSbellard return; 196420054ef0SBlue Swirl } 1965eaa728eeSbellard 1966eaa728eeSbellard e2 = env->segs[seg_reg].flags; 1967eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1968eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1969eaa728eeSbellard /* data or non conforming code segment */ 1970eaa728eeSbellard if (dpl < cpl) { 1971c2ba0515SBin Meng cpu_x86_load_seg_cache(env, seg_reg, 0, 1972c2ba0515SBin Meng env->segs[seg_reg].base, 1973c2ba0515SBin Meng env->segs[seg_reg].limit, 1974c2ba0515SBin Meng env->segs[seg_reg].flags & ~DESC_P_MASK); 1975eaa728eeSbellard } 1976eaa728eeSbellard } 1977eaa728eeSbellard } 1978eaa728eeSbellard 1979eaa728eeSbellard /* protected mode iret */ 19802999a0b2SBlue Swirl static inline void helper_ret_protected(CPUX86State *env, int shift, 1981100ec099SPavel Dovgalyuk int is_iret, int addend, 1982100ec099SPavel Dovgalyuk uintptr_t retaddr) 1983eaa728eeSbellard { 1984eaa728eeSbellard uint32_t new_cs, new_eflags, new_ss; 1985eaa728eeSbellard uint32_t new_es, new_ds, new_fs, new_gs; 1986eaa728eeSbellard uint32_t e1, e2, ss_e1, ss_e2; 1987eaa728eeSbellard int cpl, dpl, rpl, eflags_mask, iopl; 1988059368bcSRichard Henderson target_ulong new_eip, new_esp; 1989059368bcSRichard Henderson StackAccess sa; 1990059368bcSRichard Henderson 19918053862aSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 19928053862aSPaolo Bonzini 1993059368bcSRichard Henderson sa.env = env; 1994059368bcSRichard Henderson sa.ra = retaddr; 19958053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1996eaa728eeSbellard 1997eaa728eeSbellard #ifdef TARGET_X86_64 199820054ef0SBlue Swirl if (shift == 2) { 1999059368bcSRichard Henderson sa.sp_mask = -1; 200020054ef0SBlue Swirl } else 2001eaa728eeSbellard #endif 200220054ef0SBlue Swirl { 2003059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 200420054ef0SBlue Swirl } 2005059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 2006059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 2007eaa728eeSbellard new_eflags = 0; /* avoid warning */ 2008eaa728eeSbellard #ifdef TARGET_X86_64 2009eaa728eeSbellard if (shift == 2) { 2010059368bcSRichard Henderson new_eip = popq(&sa); 2011059368bcSRichard Henderson new_cs = popq(&sa) & 0xffff; 2012eaa728eeSbellard if (is_iret) { 2013059368bcSRichard Henderson new_eflags = popq(&sa); 2014eaa728eeSbellard } 2015eaa728eeSbellard } else 2016eaa728eeSbellard #endif 201720054ef0SBlue Swirl { 2018eaa728eeSbellard if (shift == 1) { 2019eaa728eeSbellard /* 32 bits */ 2020059368bcSRichard Henderson new_eip = popl(&sa); 2021059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 2022eaa728eeSbellard if (is_iret) { 2023059368bcSRichard Henderson new_eflags = popl(&sa); 202420054ef0SBlue Swirl if (new_eflags & VM_MASK) { 2025eaa728eeSbellard goto return_to_vm86; 2026eaa728eeSbellard } 202720054ef0SBlue Swirl } 2028eaa728eeSbellard } else { 2029eaa728eeSbellard /* 16 bits */ 2030059368bcSRichard Henderson new_eip = popw(&sa); 2031059368bcSRichard Henderson new_cs = popw(&sa); 203220054ef0SBlue Swirl if (is_iret) { 2033059368bcSRichard Henderson new_eflags = popw(&sa); 2034eaa728eeSbellard } 203520054ef0SBlue Swirl } 203620054ef0SBlue Swirl } 2037d12d51d5Saliguori LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2038eaa728eeSbellard 
new_cs, new_eip, shift, addend); 20396aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 204020054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 2041100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2042eaa728eeSbellard } 2043100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2044100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 204520054ef0SBlue Swirl } 204620054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || 204720054ef0SBlue Swirl !(e2 & DESC_CS_MASK)) { 2048100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 204920054ef0SBlue Swirl } 205020054ef0SBlue Swirl rpl = new_cs & 3; 205120054ef0SBlue Swirl if (rpl < cpl) { 2052100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 205320054ef0SBlue Swirl } 205420054ef0SBlue Swirl dpl = (e2 >> DESC_DPL_SHIFT) & 3; 205520054ef0SBlue Swirl if (e2 & DESC_C_MASK) { 205620054ef0SBlue Swirl if (dpl > rpl) { 2057100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 205820054ef0SBlue Swirl } 205920054ef0SBlue Swirl } else { 206020054ef0SBlue Swirl if (dpl != rpl) { 2061100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 206220054ef0SBlue Swirl } 206320054ef0SBlue Swirl } 206420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 2065100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 206620054ef0SBlue Swirl } 2067eaa728eeSbellard 2068059368bcSRichard Henderson sa.sp += addend; 2069eaa728eeSbellard if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2070eaa728eeSbellard ((env->hflags & HF_CS64_MASK) && !is_iret))) { 20711235fc06Sths /* return to same privilege level */ 2072eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2073eaa728eeSbellard get_seg_base(e1, e2), 2074eaa728eeSbellard get_seg_limit(e1, e2), 2075eaa728eeSbellard e2); 2076eaa728eeSbellard } else { 2077eaa728eeSbellard /* return to different privilege level */ 2078eaa728eeSbellard #ifdef TARGET_X86_64 2079eaa728eeSbellard if (shift == 2) { 2080059368bcSRichard Henderson new_esp = popq(&sa); 2081059368bcSRichard Henderson new_ss = popq(&sa) & 0xffff; 2082eaa728eeSbellard } else 2083eaa728eeSbellard #endif 208420054ef0SBlue Swirl { 2085eaa728eeSbellard if (shift == 1) { 2086eaa728eeSbellard /* 32 bits */ 2087059368bcSRichard Henderson new_esp = popl(&sa); 2088059368bcSRichard Henderson new_ss = popl(&sa) & 0xffff; 2089eaa728eeSbellard } else { 2090eaa728eeSbellard /* 16 bits */ 2091059368bcSRichard Henderson new_esp = popw(&sa); 2092059368bcSRichard Henderson new_ss = popw(&sa); 2093eaa728eeSbellard } 209420054ef0SBlue Swirl } 2095d12d51d5Saliguori LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2096eaa728eeSbellard new_ss, new_esp); 2097eaa728eeSbellard if ((new_ss & 0xfffc) == 0) { 2098eaa728eeSbellard #ifdef TARGET_X86_64 2099eaa728eeSbellard /* NULL ss is allowed in long mode if cpl != 3 */ 2100eaa728eeSbellard /* XXX: test CS64? */ 2101eaa728eeSbellard if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2102eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2103eaa728eeSbellard 0, 0xffffffff, 2104eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2105eaa728eeSbellard DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2106eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 2107eaa728eeSbellard ss_e2 = DESC_B_MASK; /* XXX: should not be needed? 
*/ 2108eaa728eeSbellard } else 2109eaa728eeSbellard #endif 2110eaa728eeSbellard { 2111100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2112eaa728eeSbellard } 2113eaa728eeSbellard } else { 211420054ef0SBlue Swirl if ((new_ss & 3) != rpl) { 2115100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 211620054ef0SBlue Swirl } 2117100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2118100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 211920054ef0SBlue Swirl } 2120eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 2121eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 212220054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 2123100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 212420054ef0SBlue Swirl } 2125eaa728eeSbellard dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 212620054ef0SBlue Swirl if (dpl != rpl) { 2127100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 212820054ef0SBlue Swirl } 212920054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 2130100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 213120054ef0SBlue Swirl } 2132eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2133eaa728eeSbellard get_seg_base(ss_e1, ss_e2), 2134eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2135eaa728eeSbellard ss_e2); 2136eaa728eeSbellard } 2137eaa728eeSbellard 2138eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2139eaa728eeSbellard get_seg_base(e1, e2), 2140eaa728eeSbellard get_seg_limit(e1, e2), 2141eaa728eeSbellard e2); 2142059368bcSRichard Henderson sa.sp = new_esp; 2143eaa728eeSbellard #ifdef TARGET_X86_64 214420054ef0SBlue Swirl if (env->hflags & HF_CS64_MASK) { 2145059368bcSRichard Henderson sa.sp_mask = -1; 214620054ef0SBlue Swirl } else 2147eaa728eeSbellard #endif 214820054ef0SBlue Swirl { 2149059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 215020054ef0SBlue Swirl } 2151eaa728eeSbellard 2152eaa728eeSbellard /* validate data segments */ 21532999a0b2SBlue Swirl validate_seg(env, R_ES, rpl); 21542999a0b2SBlue Swirl validate_seg(env, R_DS, rpl); 21552999a0b2SBlue Swirl validate_seg(env, R_FS, rpl); 21562999a0b2SBlue Swirl validate_seg(env, R_GS, rpl); 2157eaa728eeSbellard 2158059368bcSRichard Henderson sa.sp += addend; 2159eaa728eeSbellard } 2160059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 2161eaa728eeSbellard env->eip = new_eip; 2162eaa728eeSbellard if (is_iret) { 2163eaa728eeSbellard /* NOTE: 'cpl' is the _old_ CPL */ 2164eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 216520054ef0SBlue Swirl if (cpl == 0) { 2166eaa728eeSbellard eflags_mask |= IOPL_MASK; 216720054ef0SBlue Swirl } 2168eaa728eeSbellard iopl = (env->eflags >> IOPL_SHIFT) & 3; 216920054ef0SBlue Swirl if (cpl <= iopl) { 2170eaa728eeSbellard eflags_mask |= IF_MASK; 217120054ef0SBlue Swirl } 217220054ef0SBlue Swirl if (shift == 0) { 2173eaa728eeSbellard eflags_mask &= 0xffff; 217420054ef0SBlue Swirl } 2175997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2176eaa728eeSbellard } 2177eaa728eeSbellard return; 2178eaa728eeSbellard 2179eaa728eeSbellard return_to_vm86: 2180059368bcSRichard Henderson new_esp = popl(&sa); 2181059368bcSRichard Henderson new_ss = popl(&sa); 2182059368bcSRichard Henderson new_es = popl(&sa); 2183059368bcSRichard Henderson new_ds = popl(&sa); 2184059368bcSRichard Henderson new_fs = popl(&sa); 
2185059368bcSRichard Henderson new_gs = popl(&sa); 2186eaa728eeSbellard 2187eaa728eeSbellard /* modify processor state */ 2188997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | 2189997ff0d9SBlue Swirl IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | 2190997ff0d9SBlue Swirl VIP_MASK); 21912999a0b2SBlue Swirl load_seg_vm(env, R_CS, new_cs & 0xffff); 21922999a0b2SBlue Swirl load_seg_vm(env, R_SS, new_ss & 0xffff); 21932999a0b2SBlue Swirl load_seg_vm(env, R_ES, new_es & 0xffff); 21942999a0b2SBlue Swirl load_seg_vm(env, R_DS, new_ds & 0xffff); 21952999a0b2SBlue Swirl load_seg_vm(env, R_FS, new_fs & 0xffff); 21962999a0b2SBlue Swirl load_seg_vm(env, R_GS, new_gs & 0xffff); 2197eaa728eeSbellard 2198eaa728eeSbellard env->eip = new_eip & 0xffff; 219908b3ded6Sliguang env->regs[R_ESP] = new_esp; 2200eaa728eeSbellard } 2201eaa728eeSbellard 22022999a0b2SBlue Swirl void helper_iret_protected(CPUX86State *env, int shift, int next_eip) 2203eaa728eeSbellard { 2204eaa728eeSbellard int tss_selector, type; 2205eaa728eeSbellard uint32_t e1, e2; 2206eaa728eeSbellard 2207eaa728eeSbellard /* specific case for TSS */ 2208eaa728eeSbellard if (env->eflags & NT_MASK) { 2209eaa728eeSbellard #ifdef TARGET_X86_64 221020054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 2211100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 221220054ef0SBlue Swirl } 2213eaa728eeSbellard #endif 2214100ec099SPavel Dovgalyuk tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); 221520054ef0SBlue Swirl if (tss_selector & 4) { 2216100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 221720054ef0SBlue Swirl } 2218100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { 2219100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 222020054ef0SBlue Swirl } 2221eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2222eaa728eeSbellard /* NOTE: we check both segment and busy TSS */ 222320054ef0SBlue Swirl if (type != 3) { 2224100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 222520054ef0SBlue Swirl } 2226100ec099SPavel Dovgalyuk switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); 2227eaa728eeSbellard } else { 2228100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 1, 0, GETPC()); 2229eaa728eeSbellard } 2230db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 2231eaa728eeSbellard } 2232eaa728eeSbellard 22332999a0b2SBlue Swirl void helper_lret_protected(CPUX86State *env, int shift, int addend) 2234eaa728eeSbellard { 2235100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 0, addend, GETPC()); 2236eaa728eeSbellard } 2237eaa728eeSbellard 22382999a0b2SBlue Swirl void helper_sysenter(CPUX86State *env) 2239eaa728eeSbellard { 2240eaa728eeSbellard if (env->sysenter_cs == 0) { 2241100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2242eaa728eeSbellard } 2243eaa728eeSbellard env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 22442436b61aSbalrog 22452436b61aSbalrog #ifdef TARGET_X86_64 22462436b61aSbalrog if (env->hflags & HF_LMA_MASK) { 22472436b61aSbalrog cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 22482436b61aSbalrog 0, 0xffffffff, 22492436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22502436b61aSbalrog DESC_S_MASK | 225120054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 225220054ef0SBlue Swirl DESC_L_MASK); 
22532436b61aSbalrog } else 22542436b61aSbalrog #endif 22552436b61aSbalrog { 2256eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2257eaa728eeSbellard 0, 0xffffffff, 2258eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2259eaa728eeSbellard DESC_S_MASK | 2260eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 22612436b61aSbalrog } 2262eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2263eaa728eeSbellard 0, 0xffffffff, 2264eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2265eaa728eeSbellard DESC_S_MASK | 2266eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 226708b3ded6Sliguang env->regs[R_ESP] = env->sysenter_esp; 2268a78d0eabSliguang env->eip = env->sysenter_eip; 2269eaa728eeSbellard } 2270eaa728eeSbellard 22712999a0b2SBlue Swirl void helper_sysexit(CPUX86State *env, int dflag) 2272eaa728eeSbellard { 2273eaa728eeSbellard int cpl; 2274eaa728eeSbellard 2275eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2276eaa728eeSbellard if (env->sysenter_cs == 0 || cpl != 0) { 2277100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2278eaa728eeSbellard } 22792436b61aSbalrog #ifdef TARGET_X86_64 22802436b61aSbalrog if (dflag == 2) { 228120054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 228220054ef0SBlue Swirl 3, 0, 0xffffffff, 22832436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22842436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 228520054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 228620054ef0SBlue Swirl DESC_L_MASK); 228720054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 228820054ef0SBlue Swirl 3, 0, 0xffffffff, 22892436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22902436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 22912436b61aSbalrog DESC_W_MASK | DESC_A_MASK); 22922436b61aSbalrog } else 22932436b61aSbalrog #endif 22942436b61aSbalrog { 229520054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 229620054ef0SBlue Swirl 3, 0, 0xffffffff, 2297eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2298eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2299eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 230020054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 230120054ef0SBlue Swirl 3, 0, 0xffffffff, 2302eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2303eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2304eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 23052436b61aSbalrog } 230608b3ded6Sliguang env->regs[R_ESP] = env->regs[R_ECX]; 2307a78d0eabSliguang env->eip = env->regs[R_EDX]; 2308eaa728eeSbellard } 2309eaa728eeSbellard 23102999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) 2311eaa728eeSbellard { 2312eaa728eeSbellard unsigned int limit; 2313ae541c0eSPaolo Bonzini uint32_t e1, e2, selector; 2314eaa728eeSbellard int rpl, dpl, cpl, type; 2315eaa728eeSbellard 2316eaa728eeSbellard selector = selector1 & 0xffff; 2317ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS); 231820054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2319dc1ded53Saliguori goto fail; 232020054ef0SBlue Swirl } 2321100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2322eaa728eeSbellard goto fail; 232320054ef0SBlue Swirl } 2324eaa728eeSbellard rpl = selector & 3; 2325eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 
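/* LSL only returns the limit when the descriptor is visible from the
   current context: conforming code segments are always accessible,
   other segments require DPL >= CPL and DPL >= RPL, and only the
   TSS/LDT system types (1, 2, 3, 9, 11) are accepted. On failure ZF is
   cleared and 0 is returned; on success ZF is set. */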
2326eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2327eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2328eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2329eaa728eeSbellard             /* conforming */
2330eaa728eeSbellard         } else {
233120054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2332eaa728eeSbellard                 goto fail;
2333eaa728eeSbellard             }
233420054ef0SBlue Swirl         }
2335eaa728eeSbellard     } else {
2336eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2337eaa728eeSbellard         switch (type) {
2338eaa728eeSbellard         case 1:
2339eaa728eeSbellard         case 2:
2340eaa728eeSbellard         case 3:
2341eaa728eeSbellard         case 9:
2342eaa728eeSbellard         case 11:
2343eaa728eeSbellard             break;
2344eaa728eeSbellard         default:
2345eaa728eeSbellard             goto fail;
2346eaa728eeSbellard         }
2347eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2348eaa728eeSbellard         fail:
2349ae541c0eSPaolo Bonzini             CC_SRC &= ~CC_Z;
2350eaa728eeSbellard             return 0;
2351eaa728eeSbellard         }
2352eaa728eeSbellard     }
2353eaa728eeSbellard     limit = get_seg_limit(e1, e2);
2354ae541c0eSPaolo Bonzini     CC_SRC |= CC_Z;
2355eaa728eeSbellard     return limit;
2356eaa728eeSbellard }
2357eaa728eeSbellard 
23582999a0b2SBlue Swirl target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2359eaa728eeSbellard {
2360ae541c0eSPaolo Bonzini     uint32_t e1, e2, selector;
2361eaa728eeSbellard     int rpl, dpl, cpl, type;
2362eaa728eeSbellard 
2363eaa728eeSbellard     selector = selector1 & 0xffff;
2364ae541c0eSPaolo Bonzini     assert(CC_OP == CC_OP_EFLAGS);
236520054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2366eaa728eeSbellard         goto fail;
236720054ef0SBlue Swirl     }
2368100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2369eaa728eeSbellard         goto fail;
237020054ef0SBlue Swirl     }
2371eaa728eeSbellard     rpl = selector & 3;
2372eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2373eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2374eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2375eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2376eaa728eeSbellard             /* conforming */
2377eaa728eeSbellard         } else {
237820054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2379eaa728eeSbellard                 goto fail;
2380eaa728eeSbellard             }
238120054ef0SBlue Swirl         }
2382eaa728eeSbellard     } else {
2383eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2384eaa728eeSbellard         switch (type) {
2385eaa728eeSbellard         case 1:
2386eaa728eeSbellard         case 2:
2387eaa728eeSbellard         case 3:
2388eaa728eeSbellard         case 4:
2389eaa728eeSbellard         case 5:
2390eaa728eeSbellard         case 9:
2391eaa728eeSbellard         case 11:
2392eaa728eeSbellard         case 12:
2393eaa728eeSbellard             break;
2394eaa728eeSbellard         default:
2395eaa728eeSbellard             goto fail;
2396eaa728eeSbellard         }
2397eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2398eaa728eeSbellard         fail:
2399ae541c0eSPaolo Bonzini             CC_SRC &= ~CC_Z;
2400eaa728eeSbellard             return 0;
2401eaa728eeSbellard         }
2402eaa728eeSbellard     }
2403ae541c0eSPaolo Bonzini     CC_SRC |= CC_Z;
2404eaa728eeSbellard     return e2 & 0x00f0ff00;
2405eaa728eeSbellard }
2406eaa728eeSbellard 
24072999a0b2SBlue Swirl void helper_verr(CPUX86State *env, target_ulong selector1)
2408eaa728eeSbellard {
2409eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2410eaa728eeSbellard     int rpl, dpl, cpl;
2411eaa728eeSbellard 
2412eaa728eeSbellard     selector = selector1 & 0xffff;
2413abdcc5c8SPaolo Bonzini     eflags = cpu_cc_compute_all(env) | CC_Z;
241420054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2415eaa728eeSbellard         goto fail;
241620054ef0SBlue Swirl     }
2417100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2418eaa728eeSbellard         goto fail;
241920054ef0SBlue Swirl     }
242020054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2421eaa728eeSbellard         goto fail;
242220054ef0SBlue Swirl     }
2423eaa728eeSbellard     rpl = selector & 3;
2424eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2425eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2426eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
242720054ef0SBlue Swirl         if (!(e2 & DESC_R_MASK)) {
2428eaa728eeSbellard             goto fail;
242920054ef0SBlue Swirl         }
2430eaa728eeSbellard         if (!(e2 & DESC_C_MASK)) {
243120054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2432eaa728eeSbellard                 goto fail;
2433eaa728eeSbellard             }
243420054ef0SBlue Swirl         }
2435eaa728eeSbellard     } else {
2436eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2437eaa728eeSbellard         fail:
2438abdcc5c8SPaolo Bonzini             eflags &= ~CC_Z;
2439eaa728eeSbellard         }
2440eaa728eeSbellard     }
2441abdcc5c8SPaolo Bonzini     CC_SRC = eflags;
2442abdcc5c8SPaolo Bonzini     CC_OP = CC_OP_EFLAGS;
2443eaa728eeSbellard }
2444eaa728eeSbellard 
24452999a0b2SBlue Swirl void helper_verw(CPUX86State *env, target_ulong selector1)
2446eaa728eeSbellard {
2447eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2448eaa728eeSbellard     int rpl, dpl, cpl;
2449eaa728eeSbellard 
2450eaa728eeSbellard     selector = selector1 & 0xffff;
2451abdcc5c8SPaolo Bonzini     eflags = cpu_cc_compute_all(env) | CC_Z;
245220054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2453eaa728eeSbellard         goto fail;
245420054ef0SBlue Swirl     }
2455100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2456eaa728eeSbellard         goto fail;
245720054ef0SBlue Swirl     }
245820054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2459eaa728eeSbellard         goto fail;
246020054ef0SBlue Swirl     }
2461eaa728eeSbellard     rpl = selector & 3;
2462eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2463eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2464eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
2465eaa728eeSbellard         goto fail;
2466eaa728eeSbellard     } else {
246720054ef0SBlue Swirl         if (dpl < cpl || dpl < rpl) {
2468eaa728eeSbellard             goto fail;
246920054ef0SBlue Swirl         }
2470eaa728eeSbellard         if (!(e2 & DESC_W_MASK)) {
2471eaa728eeSbellard         fail:
2472abdcc5c8SPaolo Bonzini             eflags &= ~CC_Z;
2473eaa728eeSbellard         }
2474eaa728eeSbellard     }
2475abdcc5c8SPaolo Bonzini     CC_SRC = eflags;
2476abdcc5c8SPaolo Bonzini     CC_OP = CC_OP_EFLAGS;
2477eaa728eeSbellard }
2478
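/*
 * Illustrative sketch (not part of the original file): the value returned by
 * helper_lar() above is the high descriptor word masked with 0x00f0ff00,
 * i.e. the access-rights byte (type, S, DPL, P) in bits 8..15 and the
 * AVL/L/D-B/G flags in bits 20..23.  The struct and function below are
 * invented for illustration only and are compiled out.
 */
#if 0
typedef struct LARDecoded {
    unsigned type;  /* descriptor type field, bits 8..11        */
    unsigned s;     /* S flag: 1 = code/data, 0 = system        */
    unsigned dpl;   /* descriptor privilege level, bits 13..14  */
    unsigned p;     /* present flag, bit 15                     */
    unsigned avl;   /* available-to-software flag, bit 20       */
    unsigned l;     /* 64-bit code segment flag, bit 21         */
    unsigned db;    /* default operand/stack size flag, bit 22  */
    unsigned g;     /* limit granularity flag, bit 23           */
} LARDecoded;

static inline LARDecoded lar_decode(uint32_t ar)
{
    LARDecoded d = {
        .type = (ar >> 8) & 0xf,
        .s    = (ar >> 12) & 1,
        .dpl  = (ar >> 13) & 3,
        .p    = (ar >> 15) & 1,
        .avl  = (ar >> 20) & 1,
        .l    = (ar >> 21) & 1,
        .db   = (ar >> 22) & 1,
        .g    = (ar >> 23) & 1,
    };
    return d;
}
#endif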