1eaa728eeSbellard /* 210774999SBlue Swirl * x86 segmentation related helpers: 310774999SBlue Swirl * TSS, interrupts, system calls, jumps and call/task gates, descriptors 4eaa728eeSbellard * 5eaa728eeSbellard * Copyright (c) 2003 Fabrice Bellard 6eaa728eeSbellard * 7eaa728eeSbellard * This library is free software; you can redistribute it and/or 8eaa728eeSbellard * modify it under the terms of the GNU Lesser General Public 9eaa728eeSbellard * License as published by the Free Software Foundation; either 10d9ff33adSChetan Pant * version 2.1 of the License, or (at your option) any later version. 11eaa728eeSbellard * 12eaa728eeSbellard * This library is distributed in the hope that it will be useful, 13eaa728eeSbellard * but WITHOUT ANY WARRANTY; without even the implied warranty of 14eaa728eeSbellard * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15eaa728eeSbellard * Lesser General Public License for more details. 16eaa728eeSbellard * 17eaa728eeSbellard * You should have received a copy of the GNU Lesser General Public 188167ee88SBlue Swirl * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19eaa728eeSbellard */ 2083dae095SPaolo Bonzini 21b6a0aa05SPeter Maydell #include "qemu/osdep.h" 223e457172SBlue Swirl #include "cpu.h" 231de7afc9SPaolo Bonzini #include "qemu/log.h" 242ef6175aSRichard Henderson #include "exec/helper-proto.h" 2563c91552SPaolo Bonzini #include "exec/exec-all.h" 26*42fa9665SPhilippe Mathieu-Daudé #include "accel/tcg/cpu-ldst.h" 27508127e2SPaolo Bonzini #include "exec/log.h" 28ed69e831SClaudio Fontana #include "helper-tcg.h" 2930493a03SClaudio Fontana #include "seg_helper.h" 308b131065SPaolo Bonzini #include "access.h" 318480f7c7SPhilippe Mathieu-Daudé #include "tcg-cpu.h" 328a201bd4SPaolo Bonzini 33059368bcSRichard Henderson #ifdef TARGET_X86_64 34059368bcSRichard Henderson #define SET_ESP(val, sp_mask) \ 35059368bcSRichard Henderson do { \ 36059368bcSRichard Henderson if ((sp_mask) == 0xffff) { \ 37059368bcSRichard Henderson env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ 38059368bcSRichard Henderson ((val) & 0xffff); \ 39059368bcSRichard Henderson } else if ((sp_mask) == 0xffffffffLL) { \ 40059368bcSRichard Henderson env->regs[R_ESP] = (uint32_t)(val); \ 41059368bcSRichard Henderson } else { \ 42059368bcSRichard Henderson env->regs[R_ESP] = (val); \ 43059368bcSRichard Henderson } \ 44059368bcSRichard Henderson } while (0) 45059368bcSRichard Henderson #else 46059368bcSRichard Henderson #define SET_ESP(val, sp_mask) \ 47059368bcSRichard Henderson do { \ 48059368bcSRichard Henderson env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ 49059368bcSRichard Henderson ((val) & (sp_mask)); \ 50059368bcSRichard Henderson } while (0) 51059368bcSRichard Henderson #endif 52059368bcSRichard Henderson 53059368bcSRichard Henderson /* XXX: use mmu_index to have proper DPL support */ 54059368bcSRichard Henderson typedef struct StackAccess 55059368bcSRichard Henderson { 56059368bcSRichard Henderson CPUX86State *env; 57059368bcSRichard Henderson uintptr_t ra; 58059368bcSRichard Henderson target_ulong ss_base; 59059368bcSRichard Henderson target_ulong sp; 60059368bcSRichard Henderson target_ulong sp_mask; 618053862aSPaolo Bonzini int mmu_index; 62059368bcSRichard Henderson } StackAccess; 63059368bcSRichard Henderson 64059368bcSRichard Henderson static void pushw(StackAccess *sa, uint16_t val) 65059368bcSRichard Henderson { 66059368bcSRichard Henderson sa->sp -= 2; 678053862aSPaolo Bonzini cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & 
sa->sp_mask), 688053862aSPaolo Bonzini val, sa->mmu_index, sa->ra); 69059368bcSRichard Henderson } 70059368bcSRichard Henderson 71059368bcSRichard Henderson static void pushl(StackAccess *sa, uint32_t val) 72059368bcSRichard Henderson { 73059368bcSRichard Henderson sa->sp -= 4; 748053862aSPaolo Bonzini cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask), 758053862aSPaolo Bonzini val, sa->mmu_index, sa->ra); 76059368bcSRichard Henderson } 77059368bcSRichard Henderson 78059368bcSRichard Henderson static uint16_t popw(StackAccess *sa) 79059368bcSRichard Henderson { 808053862aSPaolo Bonzini uint16_t ret = cpu_lduw_mmuidx_ra(sa->env, 81059368bcSRichard Henderson sa->ss_base + (sa->sp & sa->sp_mask), 828053862aSPaolo Bonzini sa->mmu_index, sa->ra); 83059368bcSRichard Henderson sa->sp += 2; 84059368bcSRichard Henderson return ret; 85059368bcSRichard Henderson } 86059368bcSRichard Henderson 87059368bcSRichard Henderson static uint32_t popl(StackAccess *sa) 88059368bcSRichard Henderson { 898053862aSPaolo Bonzini uint32_t ret = cpu_ldl_mmuidx_ra(sa->env, 90059368bcSRichard Henderson sa->ss_base + (sa->sp & sa->sp_mask), 918053862aSPaolo Bonzini sa->mmu_index, sa->ra); 92059368bcSRichard Henderson sa->sp += 4; 93059368bcSRichard Henderson return ret; 94059368bcSRichard Henderson } 95059368bcSRichard Henderson 9650fcc7cbSGareth Webb int get_pg_mode(CPUX86State *env) 9750fcc7cbSGareth Webb { 988fa11a4dSAlexander Graf int pg_mode = PG_MODE_PG; 9950fcc7cbSGareth Webb if (!(env->cr[0] & CR0_PG_MASK)) { 10050fcc7cbSGareth Webb return 0; 10150fcc7cbSGareth Webb } 10250fcc7cbSGareth Webb if (env->cr[0] & CR0_WP_MASK) { 10350fcc7cbSGareth Webb pg_mode |= PG_MODE_WP; 10450fcc7cbSGareth Webb } 10550fcc7cbSGareth Webb if (env->cr[4] & CR4_PAE_MASK) { 10650fcc7cbSGareth Webb pg_mode |= PG_MODE_PAE; 10750fcc7cbSGareth Webb if (env->efer & MSR_EFER_NXE) { 10850fcc7cbSGareth Webb pg_mode |= PG_MODE_NXE; 10950fcc7cbSGareth Webb } 11050fcc7cbSGareth Webb } 11150fcc7cbSGareth Webb if (env->cr[4] & CR4_PSE_MASK) { 11250fcc7cbSGareth Webb pg_mode |= PG_MODE_PSE; 11350fcc7cbSGareth Webb } 11450fcc7cbSGareth Webb if (env->cr[4] & CR4_SMEP_MASK) { 11550fcc7cbSGareth Webb pg_mode |= PG_MODE_SMEP; 11650fcc7cbSGareth Webb } 11750fcc7cbSGareth Webb if (env->hflags & HF_LMA_MASK) { 11850fcc7cbSGareth Webb pg_mode |= PG_MODE_LMA; 11950fcc7cbSGareth Webb if (env->cr[4] & CR4_PKE_MASK) { 12050fcc7cbSGareth Webb pg_mode |= PG_MODE_PKE; 12150fcc7cbSGareth Webb } 12250fcc7cbSGareth Webb if (env->cr[4] & CR4_PKS_MASK) { 12350fcc7cbSGareth Webb pg_mode |= PG_MODE_PKS; 12450fcc7cbSGareth Webb } 12550fcc7cbSGareth Webb if (env->cr[4] & CR4_LA57_MASK) { 12650fcc7cbSGareth Webb pg_mode |= PG_MODE_LA57; 12750fcc7cbSGareth Webb } 12850fcc7cbSGareth Webb } 12950fcc7cbSGareth Webb return pg_mode; 13050fcc7cbSGareth Webb } 13150fcc7cbSGareth Webb 132611c34a7SPhilippe Mathieu-Daudé static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl) 133611c34a7SPhilippe Mathieu-Daudé { 134611c34a7SPhilippe Mathieu-Daudé int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1; 135611c34a7SPhilippe Mathieu-Daudé int mmu_index_base = 136611c34a7SPhilippe Mathieu-Daudé !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : 137611c34a7SPhilippe Mathieu-Daudé (pl < 3 && (env->eflags & AC_MASK) 138611c34a7SPhilippe Mathieu-Daudé ? 
MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX); 139611c34a7SPhilippe Mathieu-Daudé 140611c34a7SPhilippe Mathieu-Daudé return mmu_index_base + mmu_index_32; 141611c34a7SPhilippe Mathieu-Daudé } 142611c34a7SPhilippe Mathieu-Daudé 143611c34a7SPhilippe Mathieu-Daudé int cpu_mmu_index_kernel(CPUX86State *env) 144611c34a7SPhilippe Mathieu-Daudé { 145611c34a7SPhilippe Mathieu-Daudé return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK); 146611c34a7SPhilippe Mathieu-Daudé } 147611c34a7SPhilippe Mathieu-Daudé 148eaa728eeSbellard /* return non zero if error */ 149100ec099SPavel Dovgalyuk static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, 150100ec099SPavel Dovgalyuk uint32_t *e2_ptr, int selector, 151100ec099SPavel Dovgalyuk uintptr_t retaddr) 152eaa728eeSbellard { 153eaa728eeSbellard SegmentCache *dt; 154eaa728eeSbellard int index; 155eaa728eeSbellard target_ulong ptr; 156eaa728eeSbellard 15720054ef0SBlue Swirl if (selector & 0x4) { 158eaa728eeSbellard dt = &env->ldt; 15920054ef0SBlue Swirl } else { 160eaa728eeSbellard dt = &env->gdt; 16120054ef0SBlue Swirl } 162eaa728eeSbellard index = selector & ~7; 16320054ef0SBlue Swirl if ((index + 7) > dt->limit) { 164eaa728eeSbellard return -1; 16520054ef0SBlue Swirl } 166eaa728eeSbellard ptr = dt->base + index; 167100ec099SPavel Dovgalyuk *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr); 168100ec099SPavel Dovgalyuk *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 169eaa728eeSbellard return 0; 170eaa728eeSbellard } 171eaa728eeSbellard 172100ec099SPavel Dovgalyuk static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, 173100ec099SPavel Dovgalyuk uint32_t *e2_ptr, int selector) 174100ec099SPavel Dovgalyuk { 175100ec099SPavel Dovgalyuk return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0); 176100ec099SPavel Dovgalyuk } 177100ec099SPavel Dovgalyuk 178eaa728eeSbellard static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) 179eaa728eeSbellard { 180eaa728eeSbellard unsigned int limit; 18120054ef0SBlue Swirl 182eaa728eeSbellard limit = (e1 & 0xffff) | (e2 & 0x000f0000); 18320054ef0SBlue Swirl if (e2 & DESC_G_MASK) { 184eaa728eeSbellard limit = (limit << 12) | 0xfff; 18520054ef0SBlue Swirl } 186eaa728eeSbellard return limit; 187eaa728eeSbellard } 188eaa728eeSbellard 189eaa728eeSbellard static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) 190eaa728eeSbellard { 19120054ef0SBlue Swirl return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000); 192eaa728eeSbellard } 193eaa728eeSbellard 19420054ef0SBlue Swirl static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, 19520054ef0SBlue Swirl uint32_t e2) 196eaa728eeSbellard { 197eaa728eeSbellard sc->base = get_seg_base(e1, e2); 198eaa728eeSbellard sc->limit = get_seg_limit(e1, e2); 199eaa728eeSbellard sc->flags = e2; 200eaa728eeSbellard } 201eaa728eeSbellard 202eaa728eeSbellard /* init the segment cache in vm86 mode. 
*/ 2032999a0b2SBlue Swirl static inline void load_seg_vm(CPUX86State *env, int seg, int selector) 204eaa728eeSbellard { 205eaa728eeSbellard selector &= 0xffff; 206b98dbc90SPaolo Bonzini 207b98dbc90SPaolo Bonzini cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, 208b98dbc90SPaolo Bonzini DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 209b98dbc90SPaolo Bonzini DESC_A_MASK | (3 << DESC_DPL_SHIFT)); 210eaa728eeSbellard } 211eaa728eeSbellard 2122999a0b2SBlue Swirl static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, 213100ec099SPavel Dovgalyuk uint32_t *esp_ptr, int dpl, 214100ec099SPavel Dovgalyuk uintptr_t retaddr) 215eaa728eeSbellard { 2166aa9e42fSRichard Henderson X86CPU *cpu = env_archcpu(env); 217eaa728eeSbellard int type, index, shift; 218eaa728eeSbellard 219eaa728eeSbellard #if 0 220eaa728eeSbellard { 221eaa728eeSbellard int i; 222eaa728eeSbellard printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); 223eaa728eeSbellard for (i = 0; i < env->tr.limit; i++) { 224eaa728eeSbellard printf("%02x ", env->tr.base[i]); 22520054ef0SBlue Swirl if ((i & 7) == 7) { 22620054ef0SBlue Swirl printf("\n"); 22720054ef0SBlue Swirl } 228eaa728eeSbellard } 229eaa728eeSbellard printf("\n"); 230eaa728eeSbellard } 231eaa728eeSbellard #endif 232eaa728eeSbellard 23320054ef0SBlue Swirl if (!(env->tr.flags & DESC_P_MASK)) { 234a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss"); 23520054ef0SBlue Swirl } 236eaa728eeSbellard type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 23720054ef0SBlue Swirl if ((type & 7) != 1) { 238a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss type"); 23920054ef0SBlue Swirl } 240eaa728eeSbellard shift = type >> 3; 241eaa728eeSbellard index = (dpl * 4 + 2) << shift; 24220054ef0SBlue Swirl if (index + (4 << shift) - 1 > env->tr.limit) { 243100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr); 24420054ef0SBlue Swirl } 245eaa728eeSbellard if (shift == 0) { 246100ec099SPavel Dovgalyuk *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr); 247100ec099SPavel Dovgalyuk *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr); 248eaa728eeSbellard } else { 249100ec099SPavel Dovgalyuk *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr); 250100ec099SPavel Dovgalyuk *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr); 251eaa728eeSbellard } 252eaa728eeSbellard } 253eaa728eeSbellard 254c117e5b1SPhilippe Mathieu-Daudé static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector, 255c117e5b1SPhilippe Mathieu-Daudé int cpl, uintptr_t retaddr) 256eaa728eeSbellard { 257eaa728eeSbellard uint32_t e1, e2; 258d3b54918SPaolo Bonzini int rpl, dpl; 259eaa728eeSbellard 260eaa728eeSbellard if ((selector & 0xfffc) != 0) { 261100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) { 262100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 26320054ef0SBlue Swirl } 26420054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 265100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 26620054ef0SBlue Swirl } 267eaa728eeSbellard rpl = selector & 3; 268eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 269eaa728eeSbellard if (seg_reg == R_CS) { 27020054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 271100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 27220054ef0SBlue Swirl } 27320054ef0SBlue Swirl if (dpl != rpl) { 
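                /* descriptive comment (not in the original source): #TS fault,
                   since a CS selector loaded from a TSS must have an RPL equal
                   to the code segment descriptor's DPL */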
274100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 27520054ef0SBlue Swirl } 276eaa728eeSbellard } else if (seg_reg == R_SS) { 277eaa728eeSbellard /* SS must be writable data */ 27820054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 279100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 28020054ef0SBlue Swirl } 28120054ef0SBlue Swirl if (dpl != cpl || dpl != rpl) { 282100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 28320054ef0SBlue Swirl } 284eaa728eeSbellard } else { 285eaa728eeSbellard /* not readable code */ 28620054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { 287100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 28820054ef0SBlue Swirl } 289eaa728eeSbellard /* if data or non conforming code, checks the rights */ 290eaa728eeSbellard if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { 29120054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 292100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 293eaa728eeSbellard } 294eaa728eeSbellard } 29520054ef0SBlue Swirl } 29620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 297100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr); 29820054ef0SBlue Swirl } 299eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 300eaa728eeSbellard get_seg_base(e1, e2), 301eaa728eeSbellard get_seg_limit(e1, e2), 302eaa728eeSbellard e2); 303eaa728eeSbellard } else { 30420054ef0SBlue Swirl if (seg_reg == R_SS || seg_reg == R_CS) { 305100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 306eaa728eeSbellard } 307eaa728eeSbellard } 30820054ef0SBlue Swirl } 309eaa728eeSbellard 310a9089859SPaolo Bonzini static void tss_set_busy(CPUX86State *env, int tss_selector, bool value, 311a9089859SPaolo Bonzini uintptr_t retaddr) 312a9089859SPaolo Bonzini { 313c35b2fb1SPaolo Bonzini target_ulong ptr = env->gdt.base + (tss_selector & ~7); 314a9089859SPaolo Bonzini uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 315a9089859SPaolo Bonzini 316a9089859SPaolo Bonzini if (value) { 317a9089859SPaolo Bonzini e2 |= DESC_TSS_BUSY_MASK; 318a9089859SPaolo Bonzini } else { 319a9089859SPaolo Bonzini e2 &= ~DESC_TSS_BUSY_MASK; 320a9089859SPaolo Bonzini } 321a9089859SPaolo Bonzini 322a9089859SPaolo Bonzini cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); 323a9089859SPaolo Bonzini } 324a9089859SPaolo Bonzini 325eaa728eeSbellard #define SWITCH_TSS_JMP 0 326eaa728eeSbellard #define SWITCH_TSS_IRET 1 327eaa728eeSbellard #define SWITCH_TSS_CALL 2 328eaa728eeSbellard 32949958057SPaolo Bonzini /* return 0 if switching to a 16-bit selector */ 33049958057SPaolo Bonzini static int switch_tss_ra(CPUX86State *env, int tss_selector, 331eaa728eeSbellard uint32_t e1, uint32_t e2, int source, 332100ec099SPavel Dovgalyuk uint32_t next_eip, uintptr_t retaddr) 333eaa728eeSbellard { 3348b131065SPaolo Bonzini int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i; 335eaa728eeSbellard target_ulong tss_base; 336eaa728eeSbellard uint32_t new_regs[8], new_segs[6]; 337eaa728eeSbellard uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; 338eaa728eeSbellard uint32_t old_eflags, eflags_mask; 339eaa728eeSbellard SegmentCache *dt; 3408b131065SPaolo Bonzini int mmu_index, index; 341eaa728eeSbellard target_ulong ptr; 3428b131065SPaolo Bonzini X86Access old, new; 343eaa728eeSbellard 
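    /*
     * Overall flow of the task switch below: resolve a task gate to its
     * target TSS if needed, validate the new TSS descriptor and limit,
     * save the outgoing context into the current TSS, update the busy
     * bits and the back link / NT flag, then load TR, CR3, EIP, EFLAGS,
     * the general registers, the LDT and finally the segment registers
     * from the new TSS.
     */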
344eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 34520054ef0SBlue Swirl LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, 34620054ef0SBlue Swirl source); 347eaa728eeSbellard 348eaa728eeSbellard /* if task gate, we read the TSS segment and we load it */ 349eaa728eeSbellard if (type == 5) { 35020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 351100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 35220054ef0SBlue Swirl } 353eaa728eeSbellard tss_selector = e1 >> 16; 35420054ef0SBlue Swirl if (tss_selector & 4) { 355100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 35620054ef0SBlue Swirl } 357100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) { 358100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 359eaa728eeSbellard } 36020054ef0SBlue Swirl if (e2 & DESC_S_MASK) { 361100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 36220054ef0SBlue Swirl } 36320054ef0SBlue Swirl type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 36420054ef0SBlue Swirl if ((type & 7) != 1) { 365100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 36620054ef0SBlue Swirl } 36720054ef0SBlue Swirl } 368eaa728eeSbellard 36920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 370100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 37120054ef0SBlue Swirl } 372eaa728eeSbellard 37320054ef0SBlue Swirl if (type & 8) { 374eaa728eeSbellard tss_limit_max = 103; 37520054ef0SBlue Swirl } else { 376eaa728eeSbellard tss_limit_max = 43; 37720054ef0SBlue Swirl } 378eaa728eeSbellard tss_limit = get_seg_limit(e1, e2); 379eaa728eeSbellard tss_base = get_seg_base(e1, e2); 380eaa728eeSbellard if ((tss_selector & 4) != 0 || 38120054ef0SBlue Swirl tss_limit < tss_limit_max) { 382100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 38320054ef0SBlue Swirl } 384eaa728eeSbellard old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 38520054ef0SBlue Swirl if (old_type & 8) { 386eaa728eeSbellard old_tss_limit_max = 103; 38720054ef0SBlue Swirl } else { 388eaa728eeSbellard old_tss_limit_max = 43; 38920054ef0SBlue Swirl } 390eaa728eeSbellard 39105d41bbcSPaolo Bonzini /* new TSS must be busy iff the source is an IRET instruction */ 39205d41bbcSPaolo Bonzini if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) { 39305d41bbcSPaolo Bonzini raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 39405d41bbcSPaolo Bonzini } 39505d41bbcSPaolo Bonzini 3968b131065SPaolo Bonzini /* X86Access avoids memory exceptions during the task switch */ 3978b131065SPaolo Bonzini mmu_index = cpu_mmu_index_kernel(env); 398ded1db48SRichard Henderson access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1, 3998b131065SPaolo Bonzini MMU_DATA_STORE, mmu_index, retaddr); 4008b131065SPaolo Bonzini 4018b131065SPaolo Bonzini if (source == SWITCH_TSS_CALL) { 4028b131065SPaolo Bonzini /* Probe for future write of parent task */ 4038b131065SPaolo Bonzini probe_access(env, tss_base, 2, MMU_DATA_STORE, 4048b131065SPaolo Bonzini mmu_index, retaddr); 4058b131065SPaolo Bonzini } 406ded1db48SRichard Henderson /* While true tss_limit may be larger, we don't access the iopb here. 
*/ 407ded1db48SRichard Henderson access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1, 4088b131065SPaolo Bonzini MMU_DATA_LOAD, mmu_index, retaddr); 4098b131065SPaolo Bonzini 4106a079f2eSPaolo Bonzini /* save the current state in the old TSS */ 4116a079f2eSPaolo Bonzini old_eflags = cpu_compute_eflags(env); 4126a079f2eSPaolo Bonzini if (old_type & 8) { 4136a079f2eSPaolo Bonzini /* 32 bit */ 4146a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + 0x20, next_eip); 4156a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + 0x24, old_eflags); 4166a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]); 4176a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]); 4186a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]); 4196a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]); 4206a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]); 4216a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]); 4226a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]); 4236a079f2eSPaolo Bonzini access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]); 4246a079f2eSPaolo Bonzini for (i = 0; i < 6; i++) { 4256a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x48 + i * 4), 4266a079f2eSPaolo Bonzini env->segs[i].selector); 4276a079f2eSPaolo Bonzini } 4286a079f2eSPaolo Bonzini } else { 4296a079f2eSPaolo Bonzini /* 16 bit */ 4306a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + 0x0e, next_eip); 4316a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + 0x10, old_eflags); 4326a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]); 4336a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]); 4346a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]); 4356a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]); 4366a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]); 4376a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]); 4386a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]); 4396a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]); 4406a079f2eSPaolo Bonzini for (i = 0; i < 4; i++) { 4416a079f2eSPaolo Bonzini access_stw(&old, env->tr.base + (0x22 + i * 2), 4426a079f2eSPaolo Bonzini env->segs[i].selector); 4436a079f2eSPaolo Bonzini } 4446a079f2eSPaolo Bonzini } 4456a079f2eSPaolo Bonzini 446eaa728eeSbellard /* read all the registers from the new TSS */ 447eaa728eeSbellard if (type & 8) { 448eaa728eeSbellard /* 32 bit */ 4498b131065SPaolo Bonzini new_cr3 = access_ldl(&new, tss_base + 0x1c); 4508b131065SPaolo Bonzini new_eip = access_ldl(&new, tss_base + 0x20); 4518b131065SPaolo Bonzini new_eflags = access_ldl(&new, tss_base + 0x24); 45220054ef0SBlue Swirl for (i = 0; i < 8; i++) { 4538b131065SPaolo Bonzini new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4)); 45420054ef0SBlue Swirl } 45520054ef0SBlue Swirl for (i = 0; i < 6; i++) { 4568b131065SPaolo Bonzini new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4)); 45720054ef0SBlue Swirl } 4588b131065SPaolo Bonzini new_ldt = access_ldw(&new, tss_base + 0x60); 4598b131065SPaolo Bonzini new_trap = access_ldl(&new, tss_base + 0x64); 
460eaa728eeSbellard } else { 461eaa728eeSbellard /* 16 bit */ 462eaa728eeSbellard new_cr3 = 0; 4638b131065SPaolo Bonzini new_eip = access_ldw(&new, tss_base + 0x0e); 4648b131065SPaolo Bonzini new_eflags = access_ldw(&new, tss_base + 0x10); 46520054ef0SBlue Swirl for (i = 0; i < 8; i++) { 4668b131065SPaolo Bonzini new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2)); 46720054ef0SBlue Swirl } 46820054ef0SBlue Swirl for (i = 0; i < 4; i++) { 4698b131065SPaolo Bonzini new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2)); 47020054ef0SBlue Swirl } 4718b131065SPaolo Bonzini new_ldt = access_ldw(&new, tss_base + 0x2a); 472eaa728eeSbellard new_segs[R_FS] = 0; 473eaa728eeSbellard new_segs[R_GS] = 0; 474eaa728eeSbellard new_trap = 0; 475eaa728eeSbellard } 4764581cbcdSBlue Swirl /* XXX: avoid a compiler warning, see 4774581cbcdSBlue Swirl http://support.amd.com/us/Processor_TechDocs/24593.pdf 4784581cbcdSBlue Swirl chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ 4794581cbcdSBlue Swirl (void)new_trap; 480eaa728eeSbellard 481eaa728eeSbellard /* clear busy bit (it is restartable) */ 482eaa728eeSbellard if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { 483a9089859SPaolo Bonzini tss_set_busy(env, env->tr.selector, 0, retaddr); 484eaa728eeSbellard } 4856a079f2eSPaolo Bonzini 48620054ef0SBlue Swirl if (source == SWITCH_TSS_IRET) { 487eaa728eeSbellard old_eflags &= ~NT_MASK; 4881b627f38SPaolo Bonzini if (old_type & 8) { 4898b131065SPaolo Bonzini access_stl(&old, env->tr.base + 0x24, old_eflags); 490eaa728eeSbellard } else { 4918b131065SPaolo Bonzini access_stw(&old, env->tr.base + 0x10, old_eflags); 492eaa728eeSbellard } 49320054ef0SBlue Swirl } 494eaa728eeSbellard 495eaa728eeSbellard if (source == SWITCH_TSS_CALL) { 4968b131065SPaolo Bonzini /* 4978b131065SPaolo Bonzini * Thanks to the probe_access above, we know the first two 4988b131065SPaolo Bonzini * bytes addressed by &new are writable too. 
4998b131065SPaolo Bonzini */ 5008b131065SPaolo Bonzini access_stw(&new, tss_base, env->tr.selector); 501eaa728eeSbellard new_eflags |= NT_MASK; 502eaa728eeSbellard } 503eaa728eeSbellard 504eaa728eeSbellard /* set busy bit */ 505eaa728eeSbellard if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { 506a9089859SPaolo Bonzini tss_set_busy(env, tss_selector, 1, retaddr); 507eaa728eeSbellard } 508eaa728eeSbellard 509eaa728eeSbellard /* set the new CPU state */ 5106a079f2eSPaolo Bonzini 5116a079f2eSPaolo Bonzini /* now if an exception occurs, it will occur in the next task context */ 5126a079f2eSPaolo Bonzini 513eaa728eeSbellard env->cr[0] |= CR0_TS_MASK; 514eaa728eeSbellard env->hflags |= HF_TS_MASK; 515eaa728eeSbellard env->tr.selector = tss_selector; 516eaa728eeSbellard env->tr.base = tss_base; 517eaa728eeSbellard env->tr.limit = tss_limit; 518eaa728eeSbellard env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; 519eaa728eeSbellard 520eaa728eeSbellard if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { 521eaa728eeSbellard cpu_x86_update_cr3(env, new_cr3); 522eaa728eeSbellard } 523eaa728eeSbellard 524eaa728eeSbellard /* load all registers without an exception, then reload them with 525eaa728eeSbellard possible exception */ 526eaa728eeSbellard env->eip = new_eip; 527eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | 528eaa728eeSbellard IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; 529a5505f6bSPaolo Bonzini if (type & 8) { 530997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 531a5505f6bSPaolo Bonzini for (i = 0; i < 8; i++) { 532a5505f6bSPaolo Bonzini env->regs[i] = new_regs[i]; 533a5505f6bSPaolo Bonzini } 534a5505f6bSPaolo Bonzini } else { 535a5505f6bSPaolo Bonzini cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff); 536a5505f6bSPaolo Bonzini for (i = 0; i < 8; i++) { 537a5505f6bSPaolo Bonzini env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i]; 538a5505f6bSPaolo Bonzini } 539a5505f6bSPaolo Bonzini } 540eaa728eeSbellard if (new_eflags & VM_MASK) { 54120054ef0SBlue Swirl for (i = 0; i < 6; i++) { 5422999a0b2SBlue Swirl load_seg_vm(env, i, new_segs[i]); 54320054ef0SBlue Swirl } 544eaa728eeSbellard } else { 545eaa728eeSbellard /* first just selectors as the rest may trigger exceptions */ 54620054ef0SBlue Swirl for (i = 0; i < 6; i++) { 547eaa728eeSbellard cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); 548eaa728eeSbellard } 54920054ef0SBlue Swirl } 550eaa728eeSbellard 551eaa728eeSbellard env->ldt.selector = new_ldt & ~4; 552eaa728eeSbellard env->ldt.base = 0; 553eaa728eeSbellard env->ldt.limit = 0; 554eaa728eeSbellard env->ldt.flags = 0; 555eaa728eeSbellard 556eaa728eeSbellard /* load the LDT */ 55720054ef0SBlue Swirl if (new_ldt & 4) { 558100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 55920054ef0SBlue Swirl } 560eaa728eeSbellard 561eaa728eeSbellard if ((new_ldt & 0xfffc) != 0) { 562eaa728eeSbellard dt = &env->gdt; 563eaa728eeSbellard index = new_ldt & ~7; 56420054ef0SBlue Swirl if ((index + 7) > dt->limit) { 565100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 56620054ef0SBlue Swirl } 567eaa728eeSbellard ptr = dt->base + index; 568100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, retaddr); 569100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 57020054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 571100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 57220054ef0SBlue 
Swirl } 57320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 574100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 57520054ef0SBlue Swirl } 576eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 577eaa728eeSbellard } 578eaa728eeSbellard 579eaa728eeSbellard /* load the segments */ 580eaa728eeSbellard if (!(new_eflags & VM_MASK)) { 581d3b54918SPaolo Bonzini int cpl = new_segs[R_CS] & 3; 582100ec099SPavel Dovgalyuk tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr); 583100ec099SPavel Dovgalyuk tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr); 584100ec099SPavel Dovgalyuk tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr); 585100ec099SPavel Dovgalyuk tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr); 586100ec099SPavel Dovgalyuk tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr); 587100ec099SPavel Dovgalyuk tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr); 588eaa728eeSbellard } 589eaa728eeSbellard 590a78d0eabSliguang /* check that env->eip is in the CS segment limits */ 591eaa728eeSbellard if (new_eip > env->segs[R_CS].limit) { 592eaa728eeSbellard /* XXX: different exception if CALL? */ 593100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 594eaa728eeSbellard } 59501df040bSaliguori 59601df040bSaliguori #ifndef CONFIG_USER_ONLY 59701df040bSaliguori /* reset local breakpoints */ 598428065ceSliguang if (env->dr[7] & DR7_LOCAL_BP_MASK) { 59993d00d0fSRichard Henderson cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); 60001df040bSaliguori } 60101df040bSaliguori #endif 60249958057SPaolo Bonzini return type >> 3; 603eaa728eeSbellard } 604eaa728eeSbellard 60549958057SPaolo Bonzini static int switch_tss(CPUX86State *env, int tss_selector, 606100ec099SPavel Dovgalyuk uint32_t e1, uint32_t e2, int source, 607100ec099SPavel Dovgalyuk uint32_t next_eip) 608100ec099SPavel Dovgalyuk { 60949958057SPaolo Bonzini return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); 610100ec099SPavel Dovgalyuk } 611100ec099SPavel Dovgalyuk 612eaa728eeSbellard static inline unsigned int get_sp_mask(unsigned int e2) 613eaa728eeSbellard { 6140aca0605SAndrew Oates #ifdef TARGET_X86_64 6150aca0605SAndrew Oates if (e2 & DESC_L_MASK) { 6160aca0605SAndrew Oates return 0; 6170aca0605SAndrew Oates } else 6180aca0605SAndrew Oates #endif 61920054ef0SBlue Swirl if (e2 & DESC_B_MASK) { 620eaa728eeSbellard return 0xffffffff; 62120054ef0SBlue Swirl } else { 622eaa728eeSbellard return 0xffff; 623eaa728eeSbellard } 62420054ef0SBlue Swirl } 625eaa728eeSbellard 62669cb498cSPaolo Bonzini static int exception_is_fault(int intno) 62769cb498cSPaolo Bonzini { 62869cb498cSPaolo Bonzini switch (intno) { 62969cb498cSPaolo Bonzini /* 63069cb498cSPaolo Bonzini * #DB can be both fault- and trap-like, but it never sets RF=1 63169cb498cSPaolo Bonzini * in the RFLAGS value pushed on the stack. 63269cb498cSPaolo Bonzini */ 63369cb498cSPaolo Bonzini case EXCP01_DB: 63469cb498cSPaolo Bonzini case EXCP03_INT3: 63569cb498cSPaolo Bonzini case EXCP04_INTO: 63669cb498cSPaolo Bonzini case EXCP08_DBLE: 63769cb498cSPaolo Bonzini case EXCP12_MCHK: 63869cb498cSPaolo Bonzini return 0; 63969cb498cSPaolo Bonzini } 64069cb498cSPaolo Bonzini /* Everything else including reserved exception is a fault. 
*/ 64169cb498cSPaolo Bonzini return 1; 64269cb498cSPaolo Bonzini } 64369cb498cSPaolo Bonzini 64430493a03SClaudio Fontana int exception_has_error_code(int intno) 6452ed51f5bSaliguori { 6462ed51f5bSaliguori switch (intno) { 6472ed51f5bSaliguori case 8: 6482ed51f5bSaliguori case 10: 6492ed51f5bSaliguori case 11: 6502ed51f5bSaliguori case 12: 6512ed51f5bSaliguori case 13: 6522ed51f5bSaliguori case 14: 6532ed51f5bSaliguori case 17: 6542ed51f5bSaliguori return 1; 6552ed51f5bSaliguori } 6562ed51f5bSaliguori return 0; 6572ed51f5bSaliguori } 6582ed51f5bSaliguori 659eaa728eeSbellard /* protected mode interrupt */ 6602999a0b2SBlue Swirl static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, 6612999a0b2SBlue Swirl int error_code, unsigned int next_eip, 6622999a0b2SBlue Swirl int is_hw) 663eaa728eeSbellard { 664eaa728eeSbellard SegmentCache *dt; 665059368bcSRichard Henderson target_ulong ptr; 666eaa728eeSbellard int type, dpl, selector, ss_dpl, cpl; 667eaa728eeSbellard int has_error_code, new_stack, shift; 668059368bcSRichard Henderson uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0; 669059368bcSRichard Henderson uint32_t old_eip, eflags; 67087446327SKevin O'Connor int vm86 = env->eflags & VM_MASK; 671059368bcSRichard Henderson StackAccess sa; 67269cb498cSPaolo Bonzini bool set_rf; 673eaa728eeSbellard 674eaa728eeSbellard has_error_code = 0; 67520054ef0SBlue Swirl if (!is_int && !is_hw) { 67620054ef0SBlue Swirl has_error_code = exception_has_error_code(intno); 67720054ef0SBlue Swirl } 67820054ef0SBlue Swirl if (is_int) { 679eaa728eeSbellard old_eip = next_eip; 68069cb498cSPaolo Bonzini set_rf = false; 68120054ef0SBlue Swirl } else { 682eaa728eeSbellard old_eip = env->eip; 68369cb498cSPaolo Bonzini set_rf = exception_is_fault(intno); 68420054ef0SBlue Swirl } 685eaa728eeSbellard 686eaa728eeSbellard dt = &env->idt; 68720054ef0SBlue Swirl if (intno * 8 + 7 > dt->limit) { 68877b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 68920054ef0SBlue Swirl } 690eaa728eeSbellard ptr = dt->base + intno * 8; 691329e607dSBlue Swirl e1 = cpu_ldl_kernel(env, ptr); 692329e607dSBlue Swirl e2 = cpu_ldl_kernel(env, ptr + 4); 693eaa728eeSbellard /* check gate type */ 694eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 695eaa728eeSbellard switch (type) { 696eaa728eeSbellard case 5: /* task gate */ 6973df1a3d0SPeter Maydell case 6: /* 286 interrupt gate */ 6983df1a3d0SPeter Maydell case 7: /* 286 trap gate */ 6993df1a3d0SPeter Maydell case 14: /* 386 interrupt gate */ 7003df1a3d0SPeter Maydell case 15: /* 386 trap gate */ 7013df1a3d0SPeter Maydell break; 7023df1a3d0SPeter Maydell default: 7033df1a3d0SPeter Maydell raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 7043df1a3d0SPeter Maydell break; 7053df1a3d0SPeter Maydell } 7063df1a3d0SPeter Maydell dpl = (e2 >> DESC_DPL_SHIFT) & 3; 7073df1a3d0SPeter Maydell cpl = env->hflags & HF_CPL_MASK; 7083df1a3d0SPeter Maydell /* check privilege if software int */ 7093df1a3d0SPeter Maydell if (is_int && dpl < cpl) { 7103df1a3d0SPeter Maydell raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 7113df1a3d0SPeter Maydell } 7123df1a3d0SPeter Maydell 713059368bcSRichard Henderson sa.env = env; 714059368bcSRichard Henderson sa.ra = 0; 715059368bcSRichard Henderson 7163df1a3d0SPeter Maydell if (type == 5) { 7173df1a3d0SPeter Maydell /* task gate */ 718eaa728eeSbellard /* must do that check here to return the correct error code */ 71920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 72077b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, 
intno * 8 + 2); 72120054ef0SBlue Swirl } 72249958057SPaolo Bonzini shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); 723eaa728eeSbellard if (has_error_code) { 724e136648cSPaolo Bonzini /* push the error code on the destination stack */ 725e136648cSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 726e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 72720054ef0SBlue Swirl if (env->segs[R_SS].flags & DESC_B_MASK) { 728059368bcSRichard Henderson sa.sp_mask = 0xffffffff; 72920054ef0SBlue Swirl } else { 730059368bcSRichard Henderson sa.sp_mask = 0xffff; 73120054ef0SBlue Swirl } 732059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 733059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 73420054ef0SBlue Swirl if (shift) { 735059368bcSRichard Henderson pushl(&sa, error_code); 73620054ef0SBlue Swirl } else { 737059368bcSRichard Henderson pushw(&sa, error_code); 73820054ef0SBlue Swirl } 739059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 740eaa728eeSbellard } 741eaa728eeSbellard return; 742eaa728eeSbellard } 7433df1a3d0SPeter Maydell 7443df1a3d0SPeter Maydell /* Otherwise, trap or interrupt gate */ 7453df1a3d0SPeter Maydell 746eaa728eeSbellard /* check valid bit */ 74720054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 74877b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 74920054ef0SBlue Swirl } 750eaa728eeSbellard selector = e1 >> 16; 751eaa728eeSbellard offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 75220054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 75377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, 0); 75420054ef0SBlue Swirl } 7552999a0b2SBlue Swirl if (load_segment(env, &e1, &e2, selector) != 0) { 75677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 75720054ef0SBlue Swirl } 75820054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 75977b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 76020054ef0SBlue Swirl } 761eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 76220054ef0SBlue Swirl if (dpl > cpl) { 76377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 76420054ef0SBlue Swirl } 76520054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 76677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 76720054ef0SBlue Swirl } 7681110bfe6SPaolo Bonzini if (e2 & DESC_C_MASK) { 7691110bfe6SPaolo Bonzini dpl = cpl; 7701110bfe6SPaolo Bonzini } 771e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 7721110bfe6SPaolo Bonzini if (dpl < cpl) { 773eaa728eeSbellard /* to inner privilege */ 774059368bcSRichard Henderson uint32_t esp; 775100ec099SPavel Dovgalyuk get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); 77620054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 77777b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 77820054ef0SBlue Swirl } 77920054ef0SBlue Swirl if ((ss & 3) != dpl) { 78077b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 78120054ef0SBlue Swirl } 7822999a0b2SBlue Swirl if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { 78377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 78420054ef0SBlue Swirl } 785eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 78620054ef0SBlue Swirl if (ss_dpl != dpl) { 78777b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 78820054ef0SBlue Swirl } 789eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 790eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 79120054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 
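            /* descriptive comment (not in the original source): #TS fault,
               since the stack segment taken from the TSS must be a writable
               data segment */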
79277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 79320054ef0SBlue Swirl } 79420054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 79577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 79620054ef0SBlue Swirl } 797eaa728eeSbellard new_stack = 1; 798059368bcSRichard Henderson sa.sp = esp; 799059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 800059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 8011110bfe6SPaolo Bonzini } else { 802eaa728eeSbellard /* to same privilege */ 80387446327SKevin O'Connor if (vm86) { 80477b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 80520054ef0SBlue Swirl } 806eaa728eeSbellard new_stack = 0; 807059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 808059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 809059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 810eaa728eeSbellard } 811eaa728eeSbellard 812eaa728eeSbellard shift = type >> 3; 813eaa728eeSbellard 814eaa728eeSbellard #if 0 815eaa728eeSbellard /* XXX: check that enough room is available */ 816eaa728eeSbellard push_size = 6 + (new_stack << 2) + (has_error_code << 1); 81787446327SKevin O'Connor if (vm86) { 818eaa728eeSbellard push_size += 8; 81920054ef0SBlue Swirl } 820eaa728eeSbellard push_size <<= shift; 821eaa728eeSbellard #endif 82269cb498cSPaolo Bonzini eflags = cpu_compute_eflags(env); 82369cb498cSPaolo Bonzini /* 82469cb498cSPaolo Bonzini * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it 82569cb498cSPaolo Bonzini * as is. AMD behavior could be implemented in check_hw_breakpoints(). 82669cb498cSPaolo Bonzini */ 82769cb498cSPaolo Bonzini if (set_rf) { 82869cb498cSPaolo Bonzini eflags |= RF_MASK; 82969cb498cSPaolo Bonzini } 83069cb498cSPaolo Bonzini 831eaa728eeSbellard if (shift == 1) { 832eaa728eeSbellard if (new_stack) { 83387446327SKevin O'Connor if (vm86) { 834059368bcSRichard Henderson pushl(&sa, env->segs[R_GS].selector); 835059368bcSRichard Henderson pushl(&sa, env->segs[R_FS].selector); 836059368bcSRichard Henderson pushl(&sa, env->segs[R_DS].selector); 837059368bcSRichard Henderson pushl(&sa, env->segs[R_ES].selector); 838eaa728eeSbellard } 839059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 840059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 841eaa728eeSbellard } 842059368bcSRichard Henderson pushl(&sa, eflags); 843059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 844059368bcSRichard Henderson pushl(&sa, old_eip); 845eaa728eeSbellard if (has_error_code) { 846059368bcSRichard Henderson pushl(&sa, error_code); 847eaa728eeSbellard } 848eaa728eeSbellard } else { 849eaa728eeSbellard if (new_stack) { 85087446327SKevin O'Connor if (vm86) { 851059368bcSRichard Henderson pushw(&sa, env->segs[R_GS].selector); 852059368bcSRichard Henderson pushw(&sa, env->segs[R_FS].selector); 853059368bcSRichard Henderson pushw(&sa, env->segs[R_DS].selector); 854059368bcSRichard Henderson pushw(&sa, env->segs[R_ES].selector); 855eaa728eeSbellard } 856059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 857059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 858eaa728eeSbellard } 859059368bcSRichard Henderson pushw(&sa, eflags); 860059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 861059368bcSRichard Henderson pushw(&sa, old_eip); 862eaa728eeSbellard if (has_error_code) { 863059368bcSRichard Henderson pushw(&sa, error_code); 864eaa728eeSbellard } 865eaa728eeSbellard } 866eaa728eeSbellard 867fd460606SKevin 
O'Connor /* interrupt gate clear IF mask */ 868fd460606SKevin O'Connor if ((type & 1) == 0) { 869fd460606SKevin O'Connor env->eflags &= ~IF_MASK; 870fd460606SKevin O'Connor } 871fd460606SKevin O'Connor env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 872fd460606SKevin O'Connor 873eaa728eeSbellard if (new_stack) { 87487446327SKevin O'Connor if (vm86) { 875eaa728eeSbellard cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); 876eaa728eeSbellard cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); 877eaa728eeSbellard cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); 878eaa728eeSbellard cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); 879eaa728eeSbellard } 880eaa728eeSbellard ss = (ss & ~3) | dpl; 881059368bcSRichard Henderson cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base, 882059368bcSRichard Henderson get_seg_limit(ss_e1, ss_e2), ss_e2); 883eaa728eeSbellard } 884059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 885eaa728eeSbellard 886eaa728eeSbellard selector = (selector & ~3) | dpl; 887eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 888eaa728eeSbellard get_seg_base(e1, e2), 889eaa728eeSbellard get_seg_limit(e1, e2), 890eaa728eeSbellard e2); 891eaa728eeSbellard env->eip = offset; 892eaa728eeSbellard } 893eaa728eeSbellard 894eaa728eeSbellard #ifdef TARGET_X86_64 895eaa728eeSbellard 896059368bcSRichard Henderson static void pushq(StackAccess *sa, uint64_t val) 897059368bcSRichard Henderson { 898059368bcSRichard Henderson sa->sp -= 8; 8998053862aSPaolo Bonzini cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra); 900eaa728eeSbellard } 901eaa728eeSbellard 902059368bcSRichard Henderson static uint64_t popq(StackAccess *sa) 903059368bcSRichard Henderson { 9048053862aSPaolo Bonzini uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra); 905059368bcSRichard Henderson sa->sp += 8; 906059368bcSRichard Henderson return ret; 907eaa728eeSbellard } 908eaa728eeSbellard 9092999a0b2SBlue Swirl static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) 910eaa728eeSbellard { 9116aa9e42fSRichard Henderson X86CPU *cpu = env_archcpu(env); 91250fcc7cbSGareth Webb int index, pg_mode; 91350fcc7cbSGareth Webb target_ulong rsp; 91450fcc7cbSGareth Webb int32_t sext; 915eaa728eeSbellard 916eaa728eeSbellard #if 0 917eaa728eeSbellard printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 918eaa728eeSbellard env->tr.base, env->tr.limit); 919eaa728eeSbellard #endif 920eaa728eeSbellard 92120054ef0SBlue Swirl if (!(env->tr.flags & DESC_P_MASK)) { 922a47dddd7SAndreas Färber cpu_abort(CPU(cpu), "invalid tss"); 92320054ef0SBlue Swirl } 924eaa728eeSbellard index = 8 * level + 4; 92520054ef0SBlue Swirl if ((index + 7) > env->tr.limit) { 92677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); 92720054ef0SBlue Swirl } 92850fcc7cbSGareth Webb 92950fcc7cbSGareth Webb rsp = cpu_ldq_kernel(env, env->tr.base + index); 93050fcc7cbSGareth Webb 93150fcc7cbSGareth Webb /* test virtual address sign extension */ 93250fcc7cbSGareth Webb pg_mode = get_pg_mode(env); 93350fcc7cbSGareth Webb sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 
56 : 47); 93450fcc7cbSGareth Webb if (sext != 0 && sext != -1) { 93550fcc7cbSGareth Webb raise_exception_err(env, EXCP0C_STACK, 0); 93650fcc7cbSGareth Webb } 93750fcc7cbSGareth Webb 93850fcc7cbSGareth Webb return rsp; 939eaa728eeSbellard } 940eaa728eeSbellard 941eaa728eeSbellard /* 64 bit interrupt */ 9422999a0b2SBlue Swirl static void do_interrupt64(CPUX86State *env, int intno, int is_int, 9432999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 944eaa728eeSbellard { 945eaa728eeSbellard SegmentCache *dt; 946eaa728eeSbellard target_ulong ptr; 947eaa728eeSbellard int type, dpl, selector, cpl, ist; 948eaa728eeSbellard int has_error_code, new_stack; 949bde8adb8SPeter Maydell uint32_t e1, e2, e3, eflags; 950059368bcSRichard Henderson target_ulong old_eip, offset; 95169cb498cSPaolo Bonzini bool set_rf; 952059368bcSRichard Henderson StackAccess sa; 953eaa728eeSbellard 954eaa728eeSbellard has_error_code = 0; 95520054ef0SBlue Swirl if (!is_int && !is_hw) { 95620054ef0SBlue Swirl has_error_code = exception_has_error_code(intno); 95720054ef0SBlue Swirl } 95820054ef0SBlue Swirl if (is_int) { 959eaa728eeSbellard old_eip = next_eip; 96069cb498cSPaolo Bonzini set_rf = false; 96120054ef0SBlue Swirl } else { 962eaa728eeSbellard old_eip = env->eip; 96369cb498cSPaolo Bonzini set_rf = exception_is_fault(intno); 96420054ef0SBlue Swirl } 965eaa728eeSbellard 966eaa728eeSbellard dt = &env->idt; 96720054ef0SBlue Swirl if (intno * 16 + 15 > dt->limit) { 968b585edcaSJoe Richey raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 96920054ef0SBlue Swirl } 970eaa728eeSbellard ptr = dt->base + intno * 16; 971329e607dSBlue Swirl e1 = cpu_ldl_kernel(env, ptr); 972329e607dSBlue Swirl e2 = cpu_ldl_kernel(env, ptr + 4); 973329e607dSBlue Swirl e3 = cpu_ldl_kernel(env, ptr + 8); 974eaa728eeSbellard /* check gate type */ 975eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 976eaa728eeSbellard switch (type) { 977eaa728eeSbellard case 14: /* 386 interrupt gate */ 978eaa728eeSbellard case 15: /* 386 trap gate */ 979eaa728eeSbellard break; 980eaa728eeSbellard default: 981b585edcaSJoe Richey raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 982eaa728eeSbellard break; 983eaa728eeSbellard } 984eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 985eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 9861235fc06Sths /* check privilege if software int */ 98720054ef0SBlue Swirl if (is_int && dpl < cpl) { 988b585edcaSJoe Richey raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 98920054ef0SBlue Swirl } 990eaa728eeSbellard /* check valid bit */ 99120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 992b585edcaSJoe Richey raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 99320054ef0SBlue Swirl } 994eaa728eeSbellard selector = e1 >> 16; 995eaa728eeSbellard offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); 996eaa728eeSbellard ist = e2 & 7; 99720054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 99877b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, 0); 99920054ef0SBlue Swirl } 1000eaa728eeSbellard 10012999a0b2SBlue Swirl if (load_segment(env, &e1, &e2, selector) != 0) { 100277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 100320054ef0SBlue Swirl } 100420054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 100577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 100620054ef0SBlue Swirl } 1007eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 100820054ef0SBlue Swirl if (dpl > cpl) { 100977b2bc2cSBlue Swirl 
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 101020054ef0SBlue Swirl } 101120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 101277b2bc2cSBlue Swirl raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 101320054ef0SBlue Swirl } 101420054ef0SBlue Swirl if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { 101577b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 101620054ef0SBlue Swirl } 10171110bfe6SPaolo Bonzini if (e2 & DESC_C_MASK) { 10181110bfe6SPaolo Bonzini dpl = cpl; 10191110bfe6SPaolo Bonzini } 1020059368bcSRichard Henderson 1021059368bcSRichard Henderson sa.env = env; 1022059368bcSRichard Henderson sa.ra = 0; 1023e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 1024059368bcSRichard Henderson sa.sp_mask = -1; 1025059368bcSRichard Henderson sa.ss_base = 0; 10261110bfe6SPaolo Bonzini if (dpl < cpl || ist != 0) { 1027eaa728eeSbellard /* to inner privilege */ 1028eaa728eeSbellard new_stack = 1; 1029059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl); 10301110bfe6SPaolo Bonzini } else { 1031eaa728eeSbellard /* to same privilege */ 103220054ef0SBlue Swirl if (env->eflags & VM_MASK) { 103377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 103420054ef0SBlue Swirl } 1035eaa728eeSbellard new_stack = 0; 1036059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1037e95e9b88SWu Xiang } 1038059368bcSRichard Henderson sa.sp &= ~0xfLL; /* align stack */ 1039eaa728eeSbellard 104069cb498cSPaolo Bonzini /* See do_interrupt_protected. */ 104169cb498cSPaolo Bonzini eflags = cpu_compute_eflags(env); 104269cb498cSPaolo Bonzini if (set_rf) { 104369cb498cSPaolo Bonzini eflags |= RF_MASK; 104469cb498cSPaolo Bonzini } 104569cb498cSPaolo Bonzini 1046059368bcSRichard Henderson pushq(&sa, env->segs[R_SS].selector); 1047059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 1048059368bcSRichard Henderson pushq(&sa, eflags); 1049059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1050059368bcSRichard Henderson pushq(&sa, old_eip); 1051eaa728eeSbellard if (has_error_code) { 1052059368bcSRichard Henderson pushq(&sa, error_code); 1053eaa728eeSbellard } 1054eaa728eeSbellard 1055fd460606SKevin O'Connor /* interrupt gate clear IF mask */ 1056fd460606SKevin O'Connor if ((type & 1) == 0) { 1057fd460606SKevin O'Connor env->eflags &= ~IF_MASK; 1058fd460606SKevin O'Connor } 1059fd460606SKevin O'Connor env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 1060fd460606SKevin O'Connor 1061eaa728eeSbellard if (new_stack) { 1062bde8adb8SPeter Maydell uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */ 1063e95e9b88SWu Xiang cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); 1064eaa728eeSbellard } 1065059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1066eaa728eeSbellard 1067eaa728eeSbellard selector = (selector & ~3) | dpl; 1068eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1069eaa728eeSbellard get_seg_base(e1, e2), 1070eaa728eeSbellard get_seg_limit(e1, e2), 1071eaa728eeSbellard e2); 1072eaa728eeSbellard env->eip = offset; 1073eaa728eeSbellard } 107463fd8ef0SPaolo Bonzini #endif /* TARGET_X86_64 */ 1075eaa728eeSbellard 10762999a0b2SBlue Swirl void helper_sysret(CPUX86State *env, int dflag) 1077eaa728eeSbellard { 1078eaa728eeSbellard int cpl, selector; 1079eaa728eeSbellard 1080eaa728eeSbellard if (!(env->efer & MSR_EFER_SCE)) { 1081100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); 1082eaa728eeSbellard } 1083eaa728eeSbellard cpl = 
env->hflags & HF_CPL_MASK; 1084eaa728eeSbellard if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { 1085100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1086eaa728eeSbellard } 1087eaa728eeSbellard selector = (env->star >> 48) & 0xffff; 108863fd8ef0SPaolo Bonzini #ifdef TARGET_X86_64 1089eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1090fd460606SKevin O'Connor cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK 1091fd460606SKevin O'Connor | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | 1092fd460606SKevin O'Connor NT_MASK); 1093eaa728eeSbellard if (dflag == 2) { 1094eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 1095eaa728eeSbellard 0, 0xffffffff, 1096eaa728eeSbellard DESC_G_MASK | DESC_P_MASK | 1097eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1098eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 1099eaa728eeSbellard DESC_L_MASK); 1100a4165610Sliguang env->eip = env->regs[R_ECX]; 1101eaa728eeSbellard } else { 1102eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1103eaa728eeSbellard 0, 0xffffffff, 1104eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1105eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1106eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1107a4165610Sliguang env->eip = (uint32_t)env->regs[R_ECX]; 1108eaa728eeSbellard } 1109ac576229SBill Paul cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1110eaa728eeSbellard 0, 0xffffffff, 1111eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1112eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1113eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 111463fd8ef0SPaolo Bonzini } else 111563fd8ef0SPaolo Bonzini #endif 111663fd8ef0SPaolo Bonzini { 1117fd460606SKevin O'Connor env->eflags |= IF_MASK; 1118eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1119eaa728eeSbellard 0, 0xffffffff, 1120eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1121eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1122eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1123a4165610Sliguang env->eip = (uint32_t)env->regs[R_ECX]; 1124ac576229SBill Paul cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1125eaa728eeSbellard 0, 0xffffffff, 1126eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1127eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1128eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 1129eaa728eeSbellard } 1130eaa728eeSbellard } 1131eaa728eeSbellard 1132eaa728eeSbellard /* real mode interrupt */ 11332999a0b2SBlue Swirl static void do_interrupt_real(CPUX86State *env, int intno, int is_int, 11342999a0b2SBlue Swirl int error_code, unsigned int next_eip) 1135eaa728eeSbellard { 1136eaa728eeSbellard SegmentCache *dt; 1137059368bcSRichard Henderson target_ulong ptr; 1138eaa728eeSbellard int selector; 1139059368bcSRichard Henderson uint32_t offset; 1140eaa728eeSbellard uint32_t old_cs, old_eip; 1141059368bcSRichard Henderson StackAccess sa; 1142eaa728eeSbellard 1143eaa728eeSbellard /* real mode (simpler!) 
*/ 1144eaa728eeSbellard dt = &env->idt; 114520054ef0SBlue Swirl if (intno * 4 + 3 > dt->limit) { 114677b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 114720054ef0SBlue Swirl } 1148eaa728eeSbellard ptr = dt->base + intno * 4; 1149329e607dSBlue Swirl offset = cpu_lduw_kernel(env, ptr); 1150329e607dSBlue Swirl selector = cpu_lduw_kernel(env, ptr + 2); 1151059368bcSRichard Henderson 1152059368bcSRichard Henderson sa.env = env; 1153059368bcSRichard Henderson sa.ra = 0; 1154059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1155059368bcSRichard Henderson sa.sp_mask = 0xffff; 1156059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1157e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1158059368bcSRichard Henderson 115920054ef0SBlue Swirl if (is_int) { 1160eaa728eeSbellard old_eip = next_eip; 116120054ef0SBlue Swirl } else { 1162eaa728eeSbellard old_eip = env->eip; 116320054ef0SBlue Swirl } 1164eaa728eeSbellard old_cs = env->segs[R_CS].selector; 1165eaa728eeSbellard /* XXX: use SS segment size? */ 1166059368bcSRichard Henderson pushw(&sa, cpu_compute_eflags(env)); 1167059368bcSRichard Henderson pushw(&sa, old_cs); 1168059368bcSRichard Henderson pushw(&sa, old_eip); 1169eaa728eeSbellard 1170eaa728eeSbellard /* update processor state */ 1171059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1172eaa728eeSbellard env->eip = offset; 1173eaa728eeSbellard env->segs[R_CS].selector = selector; 1174eaa728eeSbellard env->segs[R_CS].base = (selector << 4); 1175eaa728eeSbellard env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); 1176eaa728eeSbellard } 1177eaa728eeSbellard 1178eaa728eeSbellard /* 1179eaa728eeSbellard * Begin execution of an interruption. is_int is TRUE if coming from 1180a78d0eabSliguang * the int instruction. next_eip is the env->eip value AFTER the interrupt 1181eaa728eeSbellard * instruction. It is only relevant if is_int is TRUE. 
1182eaa728eeSbellard */ 118330493a03SClaudio Fontana void do_interrupt_all(X86CPU *cpu, int intno, int is_int, 11842999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 1185eaa728eeSbellard { 1186ca4c810aSAndreas Färber CPUX86State *env = &cpu->env; 1187ca4c810aSAndreas Färber 11888fec2b8cSaliguori if (qemu_loglevel_mask(CPU_LOG_INT)) { 1189eaa728eeSbellard if ((env->cr[0] & CR0_PE_MASK)) { 1190eaa728eeSbellard static int count; 119120054ef0SBlue Swirl 119220054ef0SBlue Swirl qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx 119320054ef0SBlue Swirl " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1194eaa728eeSbellard count, intno, error_code, is_int, 1195eaa728eeSbellard env->hflags & HF_CPL_MASK, 1196a78d0eabSliguang env->segs[R_CS].selector, env->eip, 1197a78d0eabSliguang (int)env->segs[R_CS].base + env->eip, 119808b3ded6Sliguang env->segs[R_SS].selector, env->regs[R_ESP]); 1199eaa728eeSbellard if (intno == 0x0e) { 120093fcfe39Saliguori qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1201eaa728eeSbellard } else { 12024b34e3adSliguang qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); 1203eaa728eeSbellard } 120493fcfe39Saliguori qemu_log("\n"); 1205a0762859SAndreas Färber log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); 1206eaa728eeSbellard #if 0 1207eaa728eeSbellard { 1208eaa728eeSbellard int i; 12099bd5494eSAdam Lackorzynski target_ulong ptr; 121020054ef0SBlue Swirl 121193fcfe39Saliguori qemu_log(" code="); 1212eaa728eeSbellard ptr = env->segs[R_CS].base + env->eip; 1213eaa728eeSbellard for (i = 0; i < 16; i++) { 121493fcfe39Saliguori qemu_log(" %02x", ldub(ptr + i)); 1215eaa728eeSbellard } 121693fcfe39Saliguori qemu_log("\n"); 1217eaa728eeSbellard } 1218eaa728eeSbellard #endif 1219eaa728eeSbellard count++; 1220eaa728eeSbellard } 1221eaa728eeSbellard } 1222eaa728eeSbellard if (env->cr[0] & CR0_PE_MASK) { 122300ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1224f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12252999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 0); 122620054ef0SBlue Swirl } 122700ea18d1Saliguori #endif 1228eb38c52cSblueswir1 #ifdef TARGET_X86_64 1229eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 12302999a0b2SBlue Swirl do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); 1231eaa728eeSbellard } else 1232eaa728eeSbellard #endif 1233eaa728eeSbellard { 12342999a0b2SBlue Swirl do_interrupt_protected(env, intno, is_int, error_code, next_eip, 12352999a0b2SBlue Swirl is_hw); 1236eaa728eeSbellard } 1237eaa728eeSbellard } else { 123800ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1239f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12402999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 1); 124120054ef0SBlue Swirl } 124200ea18d1Saliguori #endif 12432999a0b2SBlue Swirl do_interrupt_real(env, intno, is_int, error_code, next_eip); 1244eaa728eeSbellard } 12452ed51f5bSaliguori 124600ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1247f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 1248fdfba1a2SEdgar E. Iglesias CPUState *cs = CPU(cpu); 1249b216aa6cSPaolo Bonzini uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + 125020054ef0SBlue Swirl offsetof(struct vmcb, 125120054ef0SBlue Swirl control.event_inj)); 125220054ef0SBlue Swirl 1253b216aa6cSPaolo Bonzini x86_stl_phys(cs, 1254ab1da857SEdgar E. 
Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 125520054ef0SBlue Swirl event_inj & ~SVM_EVTINJ_VALID); 12562ed51f5bSaliguori } 125700ea18d1Saliguori #endif 1258eaa728eeSbellard } 1259eaa728eeSbellard 12602999a0b2SBlue Swirl void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1261e694d4e2SBlue Swirl { 12626aa9e42fSRichard Henderson do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1263e694d4e2SBlue Swirl } 1264e694d4e2SBlue Swirl 12652999a0b2SBlue Swirl void helper_lldt(CPUX86State *env, int selector) 1266eaa728eeSbellard { 1267eaa728eeSbellard SegmentCache *dt; 1268eaa728eeSbellard uint32_t e1, e2; 1269eaa728eeSbellard int index, entry_limit; 1270eaa728eeSbellard target_ulong ptr; 1271eaa728eeSbellard 1272eaa728eeSbellard selector &= 0xffff; 1273eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1274eaa728eeSbellard /* XXX: NULL selector case: invalid LDT */ 1275eaa728eeSbellard env->ldt.base = 0; 1276eaa728eeSbellard env->ldt.limit = 0; 1277eaa728eeSbellard } else { 127820054ef0SBlue Swirl if (selector & 0x4) { 1279100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 128020054ef0SBlue Swirl } 1281eaa728eeSbellard dt = &env->gdt; 1282eaa728eeSbellard index = selector & ~7; 1283eaa728eeSbellard #ifdef TARGET_X86_64 128420054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1285eaa728eeSbellard entry_limit = 15; 128620054ef0SBlue Swirl } else 1287eaa728eeSbellard #endif 128820054ef0SBlue Swirl { 1289eaa728eeSbellard entry_limit = 7; 129020054ef0SBlue Swirl } 129120054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1292100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 129320054ef0SBlue Swirl } 1294eaa728eeSbellard ptr = dt->base + index; 1295100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1296100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 129720054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1298100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 129920054ef0SBlue Swirl } 130020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1301100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 130220054ef0SBlue Swirl } 1303eaa728eeSbellard #ifdef TARGET_X86_64 1304eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1305eaa728eeSbellard uint32_t e3; 130620054ef0SBlue Swirl 1307100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1308eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1309eaa728eeSbellard env->ldt.base |= (target_ulong)e3 << 32; 1310eaa728eeSbellard } else 1311eaa728eeSbellard #endif 1312eaa728eeSbellard { 1313eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1314eaa728eeSbellard } 1315eaa728eeSbellard } 1316eaa728eeSbellard env->ldt.selector = selector; 1317eaa728eeSbellard } 1318eaa728eeSbellard 13192999a0b2SBlue Swirl void helper_ltr(CPUX86State *env, int selector) 1320eaa728eeSbellard { 1321eaa728eeSbellard SegmentCache *dt; 1322eaa728eeSbellard uint32_t e1, e2; 1323eaa728eeSbellard int index, type, entry_limit; 1324eaa728eeSbellard target_ulong ptr; 1325eaa728eeSbellard 1326eaa728eeSbellard selector &= 0xffff; 1327eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1328eaa728eeSbellard /* NULL selector case: invalid TR */ 1329eaa728eeSbellard env->tr.base = 0; 1330eaa728eeSbellard env->tr.limit = 0; 1331eaa728eeSbellard env->tr.flags = 0; 1332eaa728eeSbellard } 
else { 133320054ef0SBlue Swirl if (selector & 0x4) { 1334100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 133520054ef0SBlue Swirl } 1336eaa728eeSbellard dt = &env->gdt; 1337eaa728eeSbellard index = selector & ~7; 1338eaa728eeSbellard #ifdef TARGET_X86_64 133920054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1340eaa728eeSbellard entry_limit = 15; 134120054ef0SBlue Swirl } else 1342eaa728eeSbellard #endif 134320054ef0SBlue Swirl { 1344eaa728eeSbellard entry_limit = 7; 134520054ef0SBlue Swirl } 134620054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1347100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 134820054ef0SBlue Swirl } 1349eaa728eeSbellard ptr = dt->base + index; 1350100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1351100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1352eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1353eaa728eeSbellard if ((e2 & DESC_S_MASK) || 135420054ef0SBlue Swirl (type != 1 && type != 9)) { 1355100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 135620054ef0SBlue Swirl } 135720054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1358100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 135920054ef0SBlue Swirl } 1360eaa728eeSbellard #ifdef TARGET_X86_64 1361eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1362eaa728eeSbellard uint32_t e3, e4; 136320054ef0SBlue Swirl 1364100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1365100ec099SPavel Dovgalyuk e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 136620054ef0SBlue Swirl if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1367100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 136820054ef0SBlue Swirl } 1369eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1370eaa728eeSbellard env->tr.base |= (target_ulong)e3 << 32; 1371eaa728eeSbellard } else 1372eaa728eeSbellard #endif 1373eaa728eeSbellard { 1374eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1375eaa728eeSbellard } 1376eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 1377100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1378eaa728eeSbellard } 1379eaa728eeSbellard env->tr.selector = selector; 1380eaa728eeSbellard } 1381eaa728eeSbellard 1382eaa728eeSbellard /* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ 13832999a0b2SBlue Swirl void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1384eaa728eeSbellard { 1385eaa728eeSbellard uint32_t e1, e2; 1386eaa728eeSbellard int cpl, dpl, rpl; 1387eaa728eeSbellard SegmentCache *dt; 1388eaa728eeSbellard int index; 1389eaa728eeSbellard target_ulong ptr; 1390eaa728eeSbellard 1391eaa728eeSbellard selector &= 0xffff; 1392eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1393eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1394eaa728eeSbellard /* null selector case */ 1395eaa728eeSbellard if (seg_reg == R_SS 1396eaa728eeSbellard #ifdef TARGET_X86_64 1397eaa728eeSbellard && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1398eaa728eeSbellard #endif 139920054ef0SBlue Swirl ) { 1400100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 140120054ef0SBlue Swirl } 1402eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1403eaa728eeSbellard } else { 1404eaa728eeSbellard 140520054ef0SBlue Swirl if (selector & 0x4) { 1406eaa728eeSbellard dt = &env->ldt; 140720054ef0SBlue Swirl } else { 1408eaa728eeSbellard dt = &env->gdt; 140920054ef0SBlue Swirl } 1410eaa728eeSbellard index = selector & ~7; 141120054ef0SBlue Swirl if ((index + 7) > dt->limit) { 1412100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141320054ef0SBlue Swirl } 1414eaa728eeSbellard ptr = dt->base + index; 1415100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1416100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1417eaa728eeSbellard 141820054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 1419100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142020054ef0SBlue Swirl } 1421eaa728eeSbellard rpl = selector & 3; 1422eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1423eaa728eeSbellard if (seg_reg == R_SS) { 1424eaa728eeSbellard /* must be writable segment */ 142520054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1426100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142720054ef0SBlue Swirl } 142820054ef0SBlue Swirl if (rpl != cpl || dpl != cpl) { 1429100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 143020054ef0SBlue Swirl } 1431eaa728eeSbellard } else { 1432eaa728eeSbellard /* must be readable segment */ 143320054ef0SBlue Swirl if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1434100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 143520054ef0SBlue Swirl } 1436eaa728eeSbellard 1437eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1438eaa728eeSbellard /* if not conforming code, test rights */ 143920054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1440100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1441eaa728eeSbellard } 1442eaa728eeSbellard } 144320054ef0SBlue Swirl } 1444eaa728eeSbellard 1445eaa728eeSbellard if (!(e2 & DESC_P_MASK)) { 144620054ef0SBlue Swirl if (seg_reg == R_SS) { 1447100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 144820054ef0SBlue Swirl } else { 1449100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1450eaa728eeSbellard } 145120054ef0SBlue Swirl } 1452eaa728eeSbellard 1453eaa728eeSbellard /* set the access bit if not already set */ 1454eaa728eeSbellard if (!(e2 & DESC_A_MASK)) { 
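            /*
             * As on real hardware, the accessed bit is recorded in the
             * descriptor itself: e2 is updated and written back to the
             * GDT/LDT entry in guest memory below.
             */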
1455eaa728eeSbellard e2 |= DESC_A_MASK; 1456100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1457eaa728eeSbellard } 1458eaa728eeSbellard 1459eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 1460eaa728eeSbellard get_seg_base(e1, e2), 1461eaa728eeSbellard get_seg_limit(e1, e2), 1462eaa728eeSbellard e2); 1463eaa728eeSbellard #if 0 146493fcfe39Saliguori qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1465eaa728eeSbellard selector, (unsigned long)sc->base, sc->limit, sc->flags); 1466eaa728eeSbellard #endif 1467eaa728eeSbellard } 1468eaa728eeSbellard } 1469eaa728eeSbellard 1470eaa728eeSbellard /* protected mode jump */ 14712999a0b2SBlue Swirl void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1472100ec099SPavel Dovgalyuk target_ulong next_eip) 1473eaa728eeSbellard { 1474eaa728eeSbellard int gate_cs, type; 1475eaa728eeSbellard uint32_t e1, e2, cpl, dpl, rpl, limit; 1476eaa728eeSbellard 147720054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1478100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 147920054ef0SBlue Swirl } 1480100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1481100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 148220054ef0SBlue Swirl } 1483eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1484eaa728eeSbellard if (e2 & DESC_S_MASK) { 148520054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1486100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 148720054ef0SBlue Swirl } 1488eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1489eaa728eeSbellard if (e2 & DESC_C_MASK) { 1490eaa728eeSbellard /* conforming code segment */ 149120054ef0SBlue Swirl if (dpl > cpl) { 1492100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 149320054ef0SBlue Swirl } 1494eaa728eeSbellard } else { 1495eaa728eeSbellard /* non conforming code segment */ 1496eaa728eeSbellard rpl = new_cs & 3; 149720054ef0SBlue Swirl if (rpl > cpl) { 1498100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1499eaa728eeSbellard } 150020054ef0SBlue Swirl if (dpl != cpl) { 1501100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 150220054ef0SBlue Swirl } 150320054ef0SBlue Swirl } 150420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1505100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 150620054ef0SBlue Swirl } 1507eaa728eeSbellard limit = get_seg_limit(e1, e2); 1508eaa728eeSbellard if (new_eip > limit && 1509db7196dbSAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1510db7196dbSAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 151120054ef0SBlue Swirl } 1512eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1513eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1514a78d0eabSliguang env->eip = new_eip; 1515eaa728eeSbellard } else { 1516eaa728eeSbellard /* jump to call or task gate */ 1517eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1518eaa728eeSbellard rpl = new_cs & 3; 1519eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1520eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 15210aca0605SAndrew Oates 15220aca0605SAndrew Oates #ifdef TARGET_X86_64 15230aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15240aca0605SAndrew Oates if (type != 12) { 15250aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 15260aca0605SAndrew Oates } 15270aca0605SAndrew Oates } 15280aca0605SAndrew Oates #endif 1529eaa728eeSbellard switch (type) { 1530eaa728eeSbellard case 1: /* 286 TSS */ 1531eaa728eeSbellard case 9: /* 386 TSS */ 1532eaa728eeSbellard case 5: /* task gate */ 153320054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1534100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 153520054ef0SBlue Swirl } 1536100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1537eaa728eeSbellard break; 1538eaa728eeSbellard case 4: /* 286 call gate */ 1539eaa728eeSbellard case 12: /* 386 call gate */ 154020054ef0SBlue Swirl if ((dpl < cpl) || (dpl < rpl)) { 1541100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 154220054ef0SBlue Swirl } 154320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1544100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 154520054ef0SBlue Swirl } 1546eaa728eeSbellard gate_cs = e1 >> 16; 1547eaa728eeSbellard new_eip = (e1 & 0xffff); 154820054ef0SBlue Swirl if (type == 12) { 1549eaa728eeSbellard new_eip |= (e2 & 0xffff0000); 155020054ef0SBlue Swirl } 15510aca0605SAndrew Oates 15520aca0605SAndrew Oates #ifdef TARGET_X86_64 15530aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15540aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 15550aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 15560aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15570aca0605SAndrew Oates GETPC()); 15580aca0605SAndrew Oates } 15590aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 15600aca0605SAndrew Oates if (type != 0) { 15610aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15620aca0605SAndrew Oates GETPC()); 15630aca0605SAndrew Oates } 15640aca0605SAndrew Oates new_eip |= ((target_ulong)e1) << 32; 15650aca0605SAndrew Oates } 15660aca0605SAndrew Oates #endif 15670aca0605SAndrew Oates 1568100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1569100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 157020054ef0SBlue Swirl } 1571eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1572eaa728eeSbellard /* must be code segment */ 1573eaa728eeSbellard if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 157420054ef0SBlue Swirl (DESC_S_MASK | DESC_CS_MASK))) { 1575100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 157620054ef0SBlue Swirl } 1577eaa728eeSbellard if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 157820054ef0SBlue Swirl (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1579100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 158020054ef0SBlue Swirl } 15810aca0605SAndrew Oates #ifdef TARGET_X86_64 15820aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15830aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 15840aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15850aca0605SAndrew Oates } 15860aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 15870aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15880aca0605SAndrew Oates } 15890aca0605SAndrew Oates } 15900aca0605SAndrew Oates #endif 159120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1592100ec099SPavel Dovgalyuk 
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 159320054ef0SBlue Swirl } 1594eaa728eeSbellard limit = get_seg_limit(e1, e2); 15950aca0605SAndrew Oates if (new_eip > limit && 15960aca0605SAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1597100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 159820054ef0SBlue Swirl } 1599eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1600eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1601a78d0eabSliguang env->eip = new_eip; 1602eaa728eeSbellard break; 1603eaa728eeSbellard default: 1604100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1605eaa728eeSbellard break; 1606eaa728eeSbellard } 1607eaa728eeSbellard } 1608eaa728eeSbellard } 1609eaa728eeSbellard 1610eaa728eeSbellard /* real mode call */ 16118c03ab9fSRichard Henderson void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, 16128c03ab9fSRichard Henderson int shift, uint32_t next_eip) 1613eaa728eeSbellard { 1614059368bcSRichard Henderson StackAccess sa; 1615eaa728eeSbellard 1616059368bcSRichard Henderson sa.env = env; 1617059368bcSRichard Henderson sa.ra = GETPC(); 1618059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1619059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1620059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1621e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1622059368bcSRichard Henderson 1623eaa728eeSbellard if (shift) { 1624059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1625059368bcSRichard Henderson pushl(&sa, next_eip); 1626eaa728eeSbellard } else { 1627059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1628059368bcSRichard Henderson pushw(&sa, next_eip); 1629eaa728eeSbellard } 1630eaa728eeSbellard 1631059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1632eaa728eeSbellard env->eip = new_eip; 1633eaa728eeSbellard env->segs[R_CS].selector = new_cs; 1634eaa728eeSbellard env->segs[R_CS].base = (new_cs << 4); 1635eaa728eeSbellard } 1636eaa728eeSbellard 1637eaa728eeSbellard /* protected mode call */ 16382999a0b2SBlue Swirl void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1639100ec099SPavel Dovgalyuk int shift, target_ulong next_eip) 1640eaa728eeSbellard { 1641eaa728eeSbellard int new_stack, i; 16420aca0605SAndrew Oates uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1643059368bcSRichard Henderson uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; 1644eaa728eeSbellard uint32_t val, limit, old_sp_mask; 1645059368bcSRichard Henderson target_ulong old_ssp, offset; 1646059368bcSRichard Henderson StackAccess sa; 1647eaa728eeSbellard 16480aca0605SAndrew Oates LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 16496aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 165020054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1651100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 165220054ef0SBlue Swirl } 1653100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1654100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 165520054ef0SBlue Swirl } 1656eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1657d12d51d5Saliguori LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1658059368bcSRichard Henderson 1659059368bcSRichard Henderson sa.env = env; 1660059368bcSRichard Henderson sa.ra = GETPC(); 
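    /*
     * Two paths follow: a selector for a code segment (DESC_S_MASK set)
     * is a plain far call with no stack switch, while a system descriptor
     * (TSS, task gate or call gate) may switch tasks or, for a call gate
     * into a more privileged segment, switch to the stack taken from the
     * TSS before pushing the return address.
     *
     * Illustrative guest-side sketch only (not part of this file; the
     * selector value is invented): a 32-bit guest might reach this helper
     * with
     *
     *     lcall $0x33, $func        # far call, GDT index 6, RPL 3
     *
     * so that new_cs == 0x33 and new_eip is the offset of func.
     */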
1661059368bcSRichard Henderson 1662eaa728eeSbellard if (e2 & DESC_S_MASK) { 1663e136648cSPaolo Bonzini /* "normal" far call, no stack switch possible */ 166420054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1665100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 166620054ef0SBlue Swirl } 1667eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1668eaa728eeSbellard if (e2 & DESC_C_MASK) { 1669eaa728eeSbellard /* conforming code segment */ 167020054ef0SBlue Swirl if (dpl > cpl) { 1671100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 167220054ef0SBlue Swirl } 1673eaa728eeSbellard } else { 1674eaa728eeSbellard /* non conforming code segment */ 1675eaa728eeSbellard rpl = new_cs & 3; 167620054ef0SBlue Swirl if (rpl > cpl) { 1677100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1678eaa728eeSbellard } 167920054ef0SBlue Swirl if (dpl != cpl) { 1680100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 168120054ef0SBlue Swirl } 168220054ef0SBlue Swirl } 168320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1684100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 168520054ef0SBlue Swirl } 1686eaa728eeSbellard 1687e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1688eaa728eeSbellard #ifdef TARGET_X86_64 1689eaa728eeSbellard /* XXX: check 16/32 bit cases in long mode */ 1690eaa728eeSbellard if (shift == 2) { 1691eaa728eeSbellard /* 64 bit case */ 1692059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1693059368bcSRichard Henderson sa.sp_mask = -1; 1694059368bcSRichard Henderson sa.ss_base = 0; 1695059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1696059368bcSRichard Henderson pushq(&sa, next_eip); 1697eaa728eeSbellard /* from this point, not restartable */ 1698059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1699eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1700eaa728eeSbellard get_seg_base(e1, e2), 1701eaa728eeSbellard get_seg_limit(e1, e2), e2); 1702a78d0eabSliguang env->eip = new_eip; 1703eaa728eeSbellard } else 1704eaa728eeSbellard #endif 1705eaa728eeSbellard { 1706059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1707059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1708059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1709eaa728eeSbellard if (shift) { 1710059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1711059368bcSRichard Henderson pushl(&sa, next_eip); 1712eaa728eeSbellard } else { 1713059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1714059368bcSRichard Henderson pushw(&sa, next_eip); 1715eaa728eeSbellard } 1716eaa728eeSbellard 1717eaa728eeSbellard limit = get_seg_limit(e1, e2); 171820054ef0SBlue Swirl if (new_eip > limit) { 1719100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 172020054ef0SBlue Swirl } 1721eaa728eeSbellard /* from this point, not restartable */ 1722059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1723eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1724eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1725a78d0eabSliguang env->eip = new_eip; 1726eaa728eeSbellard } 1727eaa728eeSbellard } else { 1728eaa728eeSbellard /* check gate type */ 1729eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1730eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1731eaa728eeSbellard 
rpl = new_cs & 3; 17320aca0605SAndrew Oates 17330aca0605SAndrew Oates #ifdef TARGET_X86_64 17340aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17350aca0605SAndrew Oates if (type != 12) { 17360aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 17370aca0605SAndrew Oates } 17380aca0605SAndrew Oates } 17390aca0605SAndrew Oates #endif 17400aca0605SAndrew Oates 1741eaa728eeSbellard switch (type) { 1742eaa728eeSbellard case 1: /* available 286 TSS */ 1743eaa728eeSbellard case 9: /* available 386 TSS */ 1744eaa728eeSbellard case 5: /* task gate */ 174520054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1746100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 174720054ef0SBlue Swirl } 1748100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1749eaa728eeSbellard return; 1750eaa728eeSbellard case 4: /* 286 call gate */ 1751eaa728eeSbellard case 12: /* 386 call gate */ 1752eaa728eeSbellard break; 1753eaa728eeSbellard default: 1754100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1755eaa728eeSbellard break; 1756eaa728eeSbellard } 1757eaa728eeSbellard shift = type >> 3; 1758eaa728eeSbellard 175920054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1760100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 176120054ef0SBlue Swirl } 1762eaa728eeSbellard /* check valid bit */ 176320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1764100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 176520054ef0SBlue Swirl } 1766eaa728eeSbellard selector = e1 >> 16; 1767eaa728eeSbellard param_count = e2 & 0x1f; 17680aca0605SAndrew Oates offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 17690aca0605SAndrew Oates #ifdef TARGET_X86_64 17700aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17710aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 17720aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 17730aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17740aca0605SAndrew Oates GETPC()); 17750aca0605SAndrew Oates } 17760aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 17770aca0605SAndrew Oates if (type != 0) { 17780aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17790aca0605SAndrew Oates GETPC()); 17800aca0605SAndrew Oates } 17810aca0605SAndrew Oates offset |= ((target_ulong)e1) << 32; 17820aca0605SAndrew Oates } 17830aca0605SAndrew Oates #endif 178420054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 1785100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 178620054ef0SBlue Swirl } 1787eaa728eeSbellard 1788100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1789100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 179020054ef0SBlue Swirl } 179120054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1792100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 179320054ef0SBlue Swirl } 1794eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 179520054ef0SBlue Swirl if (dpl > cpl) { 1796100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 179720054ef0SBlue Swirl } 17980aca0605SAndrew Oates #ifdef TARGET_X86_64 17990aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 
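            /*
             * In IA-32e mode the code segment referenced by a 64-bit call
             * gate must itself be a 64-bit segment: L set and D/B clear.
             * shift is then bumped so the pushes below use 8-byte slots.
             */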
18000aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 18010aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 18020aca0605SAndrew Oates } 18030aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 18040aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 18050aca0605SAndrew Oates } 18060aca0605SAndrew Oates shift++; 18070aca0605SAndrew Oates } 18080aca0605SAndrew Oates #endif 180920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1810100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 181120054ef0SBlue Swirl } 1812eaa728eeSbellard 1813eaa728eeSbellard if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1814eaa728eeSbellard /* to inner privilege */ 1815e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 18160aca0605SAndrew Oates #ifdef TARGET_X86_64 18170aca0605SAndrew Oates if (shift == 2) { 18180aca0605SAndrew Oates ss = dpl; /* SS = NULL selector with RPL = new CPL */ 18190aca0605SAndrew Oates new_stack = 1; 1820059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, dpl); 1821059368bcSRichard Henderson sa.sp_mask = -1; 1822059368bcSRichard Henderson sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ 18230aca0605SAndrew Oates LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1824059368bcSRichard Henderson TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); 18250aca0605SAndrew Oates } else 18260aca0605SAndrew Oates #endif 18270aca0605SAndrew Oates { 18280aca0605SAndrew Oates uint32_t sp32; 18290aca0605SAndrew Oates get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 183090a2541bSliguang LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 18310aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp32, param_count, 183290a2541bSliguang env->regs[R_ESP]); 183320054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 1834100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 183520054ef0SBlue Swirl } 183620054ef0SBlue Swirl if ((ss & 3) != dpl) { 1837100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 183820054ef0SBlue Swirl } 1839100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1840100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 184120054ef0SBlue Swirl } 1842eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 184320054ef0SBlue Swirl if (ss_dpl != dpl) { 1844100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 184520054ef0SBlue Swirl } 1846eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 1847eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 184820054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 1849100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 185020054ef0SBlue Swirl } 185120054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 1852100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 185320054ef0SBlue Swirl } 1854eaa728eeSbellard 1855059368bcSRichard Henderson sa.sp = sp32; 1856059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 1857059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 18580aca0605SAndrew Oates } 18590aca0605SAndrew Oates 186020054ef0SBlue Swirl /* push_size = ((param_count * 2) + 8) << shift; */ 1861eaa728eeSbellard old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1862eaa728eeSbellard old_ssp = env->segs[R_SS].base; 1863059368bcSRichard Henderson 18640aca0605SAndrew Oates #ifdef TARGET_X86_64 
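            /*
             * old_ssp/old_sp_mask describe the caller's stack: parameters
             * are still read from there while sa now points at the new,
             * more privileged stack.  64-bit call gates save the old
             * SS:RSP but never copy parameters.
             */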
18650aca0605SAndrew Oates if (shift == 2) { 18660aca0605SAndrew Oates /* XXX: verify if new stack address is canonical */ 1867059368bcSRichard Henderson pushq(&sa, env->segs[R_SS].selector); 1868059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 18690aca0605SAndrew Oates /* parameters aren't supported for 64-bit call gates */ 18700aca0605SAndrew Oates } else 18710aca0605SAndrew Oates #endif 18720aca0605SAndrew Oates if (shift == 1) { 1873059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 1874059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 1875eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18760bd385e7SPaolo Bonzini val = cpu_ldl_data_ra(env, 18770bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), 18780bd385e7SPaolo Bonzini GETPC()); 1879059368bcSRichard Henderson pushl(&sa, val); 1880eaa728eeSbellard } 1881eaa728eeSbellard } else { 1882059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 1883059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 1884eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18850bd385e7SPaolo Bonzini val = cpu_lduw_data_ra(env, 18860bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), 18870bd385e7SPaolo Bonzini GETPC()); 1888059368bcSRichard Henderson pushw(&sa, val); 1889eaa728eeSbellard } 1890eaa728eeSbellard } 1891eaa728eeSbellard new_stack = 1; 1892eaa728eeSbellard } else { 1893eaa728eeSbellard /* to same privilege */ 1894e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1895059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1896059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1897059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 189820054ef0SBlue Swirl /* push_size = (4 << shift); */ 1899eaa728eeSbellard new_stack = 0; 1900eaa728eeSbellard } 1901eaa728eeSbellard 19020aca0605SAndrew Oates #ifdef TARGET_X86_64 19030aca0605SAndrew Oates if (shift == 2) { 1904059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1905059368bcSRichard Henderson pushq(&sa, next_eip); 19060aca0605SAndrew Oates } else 19070aca0605SAndrew Oates #endif 19080aca0605SAndrew Oates if (shift == 1) { 1909059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1910059368bcSRichard Henderson pushl(&sa, next_eip); 1911eaa728eeSbellard } else { 1912059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1913059368bcSRichard Henderson pushw(&sa, next_eip); 1914eaa728eeSbellard } 1915eaa728eeSbellard 1916eaa728eeSbellard /* from this point, not restartable */ 1917eaa728eeSbellard 1918eaa728eeSbellard if (new_stack) { 19190aca0605SAndrew Oates #ifdef TARGET_X86_64 19200aca0605SAndrew Oates if (shift == 2) { 19210aca0605SAndrew Oates cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 19220aca0605SAndrew Oates } else 19230aca0605SAndrew Oates #endif 19240aca0605SAndrew Oates { 1925eaa728eeSbellard ss = (ss & ~3) | dpl; 1926eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 1927059368bcSRichard Henderson sa.ss_base, 1928eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 1929eaa728eeSbellard ss_e2); 1930eaa728eeSbellard } 19310aca0605SAndrew Oates } 1932eaa728eeSbellard 1933eaa728eeSbellard selector = (selector & ~3) | dpl; 1934eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1935eaa728eeSbellard get_seg_base(e1, e2), 1936eaa728eeSbellard get_seg_limit(e1, e2), 1937eaa728eeSbellard e2); 1938059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1939a78d0eabSliguang env->eip = offset; 
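        /*
         * At this point the far call through the gate is complete: CS.RPL
         * was replaced with the callee's DPL (the new CPL) above, and
         * ESP/EIP were committed last, after the "not restartable" point.
         */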
1940eaa728eeSbellard } 1941eaa728eeSbellard } 1942eaa728eeSbellard 1943eaa728eeSbellard /* real and vm86 mode iret */ 19442999a0b2SBlue Swirl void helper_iret_real(CPUX86State *env, int shift) 1945eaa728eeSbellard { 1946059368bcSRichard Henderson uint32_t new_cs, new_eip, new_eflags; 1947eaa728eeSbellard int eflags_mask; 1948059368bcSRichard Henderson StackAccess sa; 1949eaa728eeSbellard 1950059368bcSRichard Henderson sa.env = env; 1951059368bcSRichard Henderson sa.ra = GETPC(); 19528053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1953059368bcSRichard Henderson sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ 1954059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1955059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1956059368bcSRichard Henderson 1957eaa728eeSbellard if (shift == 1) { 1958eaa728eeSbellard /* 32 bits */ 1959059368bcSRichard Henderson new_eip = popl(&sa); 1960059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 1961059368bcSRichard Henderson new_eflags = popl(&sa); 1962eaa728eeSbellard } else { 1963eaa728eeSbellard /* 16 bits */ 1964059368bcSRichard Henderson new_eip = popw(&sa); 1965059368bcSRichard Henderson new_cs = popw(&sa); 1966059368bcSRichard Henderson new_eflags = popw(&sa); 1967eaa728eeSbellard } 1968059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1969bdadc0b5Smalc env->segs[R_CS].selector = new_cs; 1970bdadc0b5Smalc env->segs[R_CS].base = (new_cs << 4); 1971eaa728eeSbellard env->eip = new_eip; 197220054ef0SBlue Swirl if (env->eflags & VM_MASK) { 197320054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 197420054ef0SBlue Swirl NT_MASK; 197520054ef0SBlue Swirl } else { 197620054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 197720054ef0SBlue Swirl RF_MASK | NT_MASK; 197820054ef0SBlue Swirl } 197920054ef0SBlue Swirl if (shift == 0) { 1980eaa728eeSbellard eflags_mask &= 0xffff; 198120054ef0SBlue Swirl } 1982997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 1983db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 1984eaa728eeSbellard } 1985eaa728eeSbellard 1986c117e5b1SPhilippe Mathieu-Daudé static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 1987eaa728eeSbellard { 1988eaa728eeSbellard int dpl; 1989eaa728eeSbellard uint32_t e2; 1990eaa728eeSbellard 1991eaa728eeSbellard /* XXX: on x86_64, we do not want to nullify FS and GS because 1992eaa728eeSbellard they may still contain a valid base. 
I would be interested to 1993eaa728eeSbellard know how a real x86_64 CPU behaves */ 1994eaa728eeSbellard if ((seg_reg == R_FS || seg_reg == R_GS) && 199520054ef0SBlue Swirl (env->segs[seg_reg].selector & 0xfffc) == 0) { 1996eaa728eeSbellard return; 199720054ef0SBlue Swirl } 1998eaa728eeSbellard 1999eaa728eeSbellard e2 = env->segs[seg_reg].flags; 2000eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2001eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 2002eaa728eeSbellard /* data or non conforming code segment */ 2003eaa728eeSbellard if (dpl < cpl) { 2004c2ba0515SBin Meng cpu_x86_load_seg_cache(env, seg_reg, 0, 2005c2ba0515SBin Meng env->segs[seg_reg].base, 2006c2ba0515SBin Meng env->segs[seg_reg].limit, 2007c2ba0515SBin Meng env->segs[seg_reg].flags & ~DESC_P_MASK); 2008eaa728eeSbellard } 2009eaa728eeSbellard } 2010eaa728eeSbellard } 2011eaa728eeSbellard 2012eaa728eeSbellard /* protected mode iret */ 20132999a0b2SBlue Swirl static inline void helper_ret_protected(CPUX86State *env, int shift, 2014100ec099SPavel Dovgalyuk int is_iret, int addend, 2015100ec099SPavel Dovgalyuk uintptr_t retaddr) 2016eaa728eeSbellard { 2017eaa728eeSbellard uint32_t new_cs, new_eflags, new_ss; 2018eaa728eeSbellard uint32_t new_es, new_ds, new_fs, new_gs; 2019eaa728eeSbellard uint32_t e1, e2, ss_e1, ss_e2; 2020eaa728eeSbellard int cpl, dpl, rpl, eflags_mask, iopl; 2021059368bcSRichard Henderson target_ulong new_eip, new_esp; 2022059368bcSRichard Henderson StackAccess sa; 2023059368bcSRichard Henderson 20248053862aSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 20258053862aSPaolo Bonzini 2026059368bcSRichard Henderson sa.env = env; 2027059368bcSRichard Henderson sa.ra = retaddr; 20288053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 2029eaa728eeSbellard 2030eaa728eeSbellard #ifdef TARGET_X86_64 203120054ef0SBlue Swirl if (shift == 2) { 2032059368bcSRichard Henderson sa.sp_mask = -1; 203320054ef0SBlue Swirl } else 2034eaa728eeSbellard #endif 203520054ef0SBlue Swirl { 2036059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 203720054ef0SBlue Swirl } 2038059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 2039059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 2040eaa728eeSbellard new_eflags = 0; /* avoid warning */ 2041eaa728eeSbellard #ifdef TARGET_X86_64 2042eaa728eeSbellard if (shift == 2) { 2043059368bcSRichard Henderson new_eip = popq(&sa); 2044059368bcSRichard Henderson new_cs = popq(&sa) & 0xffff; 2045eaa728eeSbellard if (is_iret) { 2046059368bcSRichard Henderson new_eflags = popq(&sa); 2047eaa728eeSbellard } 2048eaa728eeSbellard } else 2049eaa728eeSbellard #endif 205020054ef0SBlue Swirl { 2051eaa728eeSbellard if (shift == 1) { 2052eaa728eeSbellard /* 32 bits */ 2053059368bcSRichard Henderson new_eip = popl(&sa); 2054059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 2055eaa728eeSbellard if (is_iret) { 2056059368bcSRichard Henderson new_eflags = popl(&sa); 205720054ef0SBlue Swirl if (new_eflags & VM_MASK) { 2058eaa728eeSbellard goto return_to_vm86; 2059eaa728eeSbellard } 206020054ef0SBlue Swirl } 2061eaa728eeSbellard } else { 2062eaa728eeSbellard /* 16 bits */ 2063059368bcSRichard Henderson new_eip = popw(&sa); 2064059368bcSRichard Henderson new_cs = popw(&sa); 206520054ef0SBlue Swirl if (is_iret) { 2066059368bcSRichard Henderson new_eflags = popw(&sa); 2067eaa728eeSbellard } 206820054ef0SBlue Swirl } 206920054ef0SBlue Swirl } 2070d12d51d5Saliguori LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2071eaa728eeSbellard 
new_cs, new_eip, shift, addend); 20726aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 207320054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 2074100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2075eaa728eeSbellard } 2076100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2077100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 207820054ef0SBlue Swirl } 207920054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || 208020054ef0SBlue Swirl !(e2 & DESC_CS_MASK)) { 2081100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 208220054ef0SBlue Swirl } 208320054ef0SBlue Swirl rpl = new_cs & 3; 208420054ef0SBlue Swirl if (rpl < cpl) { 2085100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 208620054ef0SBlue Swirl } 208720054ef0SBlue Swirl dpl = (e2 >> DESC_DPL_SHIFT) & 3; 208820054ef0SBlue Swirl if (e2 & DESC_C_MASK) { 208920054ef0SBlue Swirl if (dpl > rpl) { 2090100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 209120054ef0SBlue Swirl } 209220054ef0SBlue Swirl } else { 209320054ef0SBlue Swirl if (dpl != rpl) { 2094100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 209520054ef0SBlue Swirl } 209620054ef0SBlue Swirl } 209720054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 2098100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 209920054ef0SBlue Swirl } 2100eaa728eeSbellard 2101059368bcSRichard Henderson sa.sp += addend; 2102eaa728eeSbellard if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2103eaa728eeSbellard ((env->hflags & HF_CS64_MASK) && !is_iret))) { 21041235fc06Sths /* return to same privilege level */ 2105eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2106eaa728eeSbellard get_seg_base(e1, e2), 2107eaa728eeSbellard get_seg_limit(e1, e2), 2108eaa728eeSbellard e2); 2109eaa728eeSbellard } else { 2110eaa728eeSbellard /* return to different privilege level */ 2111eaa728eeSbellard #ifdef TARGET_X86_64 2112eaa728eeSbellard if (shift == 2) { 2113059368bcSRichard Henderson new_esp = popq(&sa); 2114059368bcSRichard Henderson new_ss = popq(&sa) & 0xffff; 2115eaa728eeSbellard } else 2116eaa728eeSbellard #endif 211720054ef0SBlue Swirl { 2118eaa728eeSbellard if (shift == 1) { 2119eaa728eeSbellard /* 32 bits */ 2120059368bcSRichard Henderson new_esp = popl(&sa); 2121059368bcSRichard Henderson new_ss = popl(&sa) & 0xffff; 2122eaa728eeSbellard } else { 2123eaa728eeSbellard /* 16 bits */ 2124059368bcSRichard Henderson new_esp = popw(&sa); 2125059368bcSRichard Henderson new_ss = popw(&sa); 2126eaa728eeSbellard } 212720054ef0SBlue Swirl } 2128d12d51d5Saliguori LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2129eaa728eeSbellard new_ss, new_esp); 2130eaa728eeSbellard if ((new_ss & 0xfffc) == 0) { 2131eaa728eeSbellard #ifdef TARGET_X86_64 2132eaa728eeSbellard /* NULL ss is allowed in long mode if cpl != 3 */ 2133eaa728eeSbellard /* XXX: test CS64? */ 2134eaa728eeSbellard if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2135eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2136eaa728eeSbellard 0, 0xffffffff, 2137eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2138eaa728eeSbellard DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2139eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 2140eaa728eeSbellard ss_e2 = DESC_B_MASK; /* XXX: should not be needed? 
*/ 2141eaa728eeSbellard } else 2142eaa728eeSbellard #endif 2143eaa728eeSbellard { 2144100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2145eaa728eeSbellard } 2146eaa728eeSbellard } else { 214720054ef0SBlue Swirl if ((new_ss & 3) != rpl) { 2148100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 214920054ef0SBlue Swirl } 2150100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2151100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 215220054ef0SBlue Swirl } 2153eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 2154eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 215520054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 2156100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 215720054ef0SBlue Swirl } 2158eaa728eeSbellard dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 215920054ef0SBlue Swirl if (dpl != rpl) { 2160100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 216120054ef0SBlue Swirl } 216220054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 2163100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 216420054ef0SBlue Swirl } 2165eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2166eaa728eeSbellard get_seg_base(ss_e1, ss_e2), 2167eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2168eaa728eeSbellard ss_e2); 2169eaa728eeSbellard } 2170eaa728eeSbellard 2171eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2172eaa728eeSbellard get_seg_base(e1, e2), 2173eaa728eeSbellard get_seg_limit(e1, e2), 2174eaa728eeSbellard e2); 2175059368bcSRichard Henderson sa.sp = new_esp; 2176eaa728eeSbellard #ifdef TARGET_X86_64 217720054ef0SBlue Swirl if (env->hflags & HF_CS64_MASK) { 2178059368bcSRichard Henderson sa.sp_mask = -1; 217920054ef0SBlue Swirl } else 2180eaa728eeSbellard #endif 218120054ef0SBlue Swirl { 2182059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 218320054ef0SBlue Swirl } 2184eaa728eeSbellard 2185eaa728eeSbellard /* validate data segments */ 21862999a0b2SBlue Swirl validate_seg(env, R_ES, rpl); 21872999a0b2SBlue Swirl validate_seg(env, R_DS, rpl); 21882999a0b2SBlue Swirl validate_seg(env, R_FS, rpl); 21892999a0b2SBlue Swirl validate_seg(env, R_GS, rpl); 2190eaa728eeSbellard 2191059368bcSRichard Henderson sa.sp += addend; 2192eaa728eeSbellard } 2193059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 2194eaa728eeSbellard env->eip = new_eip; 2195eaa728eeSbellard if (is_iret) { 2196eaa728eeSbellard /* NOTE: 'cpl' is the _old_ CPL */ 2197eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 219820054ef0SBlue Swirl if (cpl == 0) { 2199eaa728eeSbellard eflags_mask |= IOPL_MASK; 220020054ef0SBlue Swirl } 2201eaa728eeSbellard iopl = (env->eflags >> IOPL_SHIFT) & 3; 220220054ef0SBlue Swirl if (cpl <= iopl) { 2203eaa728eeSbellard eflags_mask |= IF_MASK; 220420054ef0SBlue Swirl } 220520054ef0SBlue Swirl if (shift == 0) { 2206eaa728eeSbellard eflags_mask &= 0xffff; 220720054ef0SBlue Swirl } 2208997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2209eaa728eeSbellard } 2210eaa728eeSbellard return; 2211eaa728eeSbellard 2212eaa728eeSbellard return_to_vm86: 2213059368bcSRichard Henderson new_esp = popl(&sa); 2214059368bcSRichard Henderson new_ss = popl(&sa); 2215059368bcSRichard Henderson new_es = popl(&sa); 2216059368bcSRichard Henderson new_ds = popl(&sa); 2217059368bcSRichard Henderson new_fs = popl(&sa); 
2218059368bcSRichard Henderson new_gs = popl(&sa); 2219eaa728eeSbellard 2220eaa728eeSbellard /* modify processor state */ 2221997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | 2222997ff0d9SBlue Swirl IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | 2223997ff0d9SBlue Swirl VIP_MASK); 22242999a0b2SBlue Swirl load_seg_vm(env, R_CS, new_cs & 0xffff); 22252999a0b2SBlue Swirl load_seg_vm(env, R_SS, new_ss & 0xffff); 22262999a0b2SBlue Swirl load_seg_vm(env, R_ES, new_es & 0xffff); 22272999a0b2SBlue Swirl load_seg_vm(env, R_DS, new_ds & 0xffff); 22282999a0b2SBlue Swirl load_seg_vm(env, R_FS, new_fs & 0xffff); 22292999a0b2SBlue Swirl load_seg_vm(env, R_GS, new_gs & 0xffff); 2230eaa728eeSbellard 2231eaa728eeSbellard env->eip = new_eip & 0xffff; 223208b3ded6Sliguang env->regs[R_ESP] = new_esp; 2233eaa728eeSbellard } 2234eaa728eeSbellard 22352999a0b2SBlue Swirl void helper_iret_protected(CPUX86State *env, int shift, int next_eip) 2236eaa728eeSbellard { 2237eaa728eeSbellard int tss_selector, type; 2238eaa728eeSbellard uint32_t e1, e2; 2239eaa728eeSbellard 2240eaa728eeSbellard /* specific case for TSS */ 2241eaa728eeSbellard if (env->eflags & NT_MASK) { 2242eaa728eeSbellard #ifdef TARGET_X86_64 224320054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 2244100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 224520054ef0SBlue Swirl } 2246eaa728eeSbellard #endif 2247100ec099SPavel Dovgalyuk tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); 224820054ef0SBlue Swirl if (tss_selector & 4) { 2249100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 225020054ef0SBlue Swirl } 2251100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { 2252100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 225320054ef0SBlue Swirl } 2254eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2255eaa728eeSbellard /* NOTE: we check both segment and busy TSS */ 225620054ef0SBlue Swirl if (type != 3) { 2257100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 225820054ef0SBlue Swirl } 2259100ec099SPavel Dovgalyuk switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); 2260eaa728eeSbellard } else { 2261100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 1, 0, GETPC()); 2262eaa728eeSbellard } 2263db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 2264eaa728eeSbellard } 2265eaa728eeSbellard 22662999a0b2SBlue Swirl void helper_lret_protected(CPUX86State *env, int shift, int addend) 2267eaa728eeSbellard { 2268100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 0, addend, GETPC()); 2269eaa728eeSbellard } 2270eaa728eeSbellard 22712999a0b2SBlue Swirl void helper_sysenter(CPUX86State *env) 2272eaa728eeSbellard { 2273eaa728eeSbellard if (env->sysenter_cs == 0) { 2274100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2275eaa728eeSbellard } 2276eaa728eeSbellard env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 22772436b61aSbalrog 22782436b61aSbalrog #ifdef TARGET_X86_64 22792436b61aSbalrog if (env->hflags & HF_LMA_MASK) { 22802436b61aSbalrog cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 22812436b61aSbalrog 0, 0xffffffff, 22822436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22832436b61aSbalrog DESC_S_MASK | 228420054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 228520054ef0SBlue Swirl DESC_L_MASK); 
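        /*
         * In long mode SYSENTER enters 64-bit mode: CS is loaded with the
         * L bit set, whereas the legacy path below loads a flat 32-bit CS.
         * Either way the selector's RPL is forced to 0, so execution
         * continues at CPL 0.
         */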
22862436b61aSbalrog } else 22872436b61aSbalrog #endif 22882436b61aSbalrog { 2289eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2290eaa728eeSbellard 0, 0xffffffff, 2291eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2292eaa728eeSbellard DESC_S_MASK | 2293eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 22942436b61aSbalrog } 2295eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2296eaa728eeSbellard 0, 0xffffffff, 2297eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2298eaa728eeSbellard DESC_S_MASK | 2299eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 230008b3ded6Sliguang env->regs[R_ESP] = env->sysenter_esp; 2301a78d0eabSliguang env->eip = env->sysenter_eip; 2302eaa728eeSbellard } 2303eaa728eeSbellard 23042999a0b2SBlue Swirl void helper_sysexit(CPUX86State *env, int dflag) 2305eaa728eeSbellard { 2306eaa728eeSbellard int cpl; 2307eaa728eeSbellard 2308eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2309eaa728eeSbellard if (env->sysenter_cs == 0 || cpl != 0) { 2310100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2311eaa728eeSbellard } 23122436b61aSbalrog #ifdef TARGET_X86_64 23132436b61aSbalrog if (dflag == 2) { 231420054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 231520054ef0SBlue Swirl 3, 0, 0xffffffff, 23162436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23172436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 231820054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 231920054ef0SBlue Swirl DESC_L_MASK); 232020054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 232120054ef0SBlue Swirl 3, 0, 0xffffffff, 23222436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23232436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 23242436b61aSbalrog DESC_W_MASK | DESC_A_MASK); 23252436b61aSbalrog } else 23262436b61aSbalrog #endif 23272436b61aSbalrog { 232820054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 232920054ef0SBlue Swirl 3, 0, 0xffffffff, 2330eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2331eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2332eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 233320054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 233420054ef0SBlue Swirl 3, 0, 0xffffffff, 2335eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2336eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2337eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 23382436b61aSbalrog } 233908b3ded6Sliguang env->regs[R_ESP] = env->regs[R_ECX]; 2340a78d0eabSliguang env->eip = env->regs[R_EDX]; 2341eaa728eeSbellard } 2342eaa728eeSbellard 23432999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) 2344eaa728eeSbellard { 2345eaa728eeSbellard unsigned int limit; 2346ae541c0eSPaolo Bonzini uint32_t e1, e2, selector; 2347eaa728eeSbellard int rpl, dpl, cpl, type; 2348eaa728eeSbellard 2349eaa728eeSbellard selector = selector1 & 0xffff; 2350ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS); 235120054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2352dc1ded53Saliguori goto fail; 235320054ef0SBlue Swirl } 2354100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2355eaa728eeSbellard goto fail; 235620054ef0SBlue Swirl } 2357eaa728eeSbellard rpl = selector & 3; 2358eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 
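    /*
     * A selector that fails the checks below does not fault: LSL just
     * clears the Z bit in CC_SRC and returns 0, while success sets Z and
     * returns the limit (flags are in CC_OP_EFLAGS form, see the assert
     * above).
     */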
2359eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2360eaa728eeSbellard if (e2 & DESC_S_MASK) { 2361eaa728eeSbellard if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { 2362eaa728eeSbellard /* conforming */ 2363eaa728eeSbellard } else { 236420054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 2365eaa728eeSbellard goto fail; 2366eaa728eeSbellard } 236720054ef0SBlue Swirl } 2368eaa728eeSbellard } else { 2369eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 2370eaa728eeSbellard switch (type) { 2371eaa728eeSbellard case 1: 2372eaa728eeSbellard case 2: 2373eaa728eeSbellard case 3: 2374eaa728eeSbellard case 9: 2375eaa728eeSbellard case 11: 2376eaa728eeSbellard break; 2377eaa728eeSbellard default: 2378eaa728eeSbellard goto fail; 2379eaa728eeSbellard } 2380eaa728eeSbellard if (dpl < cpl || dpl < rpl) { 2381eaa728eeSbellard fail: 2382ae541c0eSPaolo Bonzini CC_SRC &= ~CC_Z; 2383eaa728eeSbellard return 0; 2384eaa728eeSbellard } 2385eaa728eeSbellard } 2386eaa728eeSbellard limit = get_seg_limit(e1, e2); 2387ae541c0eSPaolo Bonzini CC_SRC |= CC_Z; 2388eaa728eeSbellard return limit; 2389eaa728eeSbellard } 2390eaa728eeSbellard 23912999a0b2SBlue Swirl target_ulong helper_lar(CPUX86State *env, target_ulong selector1) 2392eaa728eeSbellard { 2393ae541c0eSPaolo Bonzini uint32_t e1, e2, selector; 2394eaa728eeSbellard int rpl, dpl, cpl, type; 2395eaa728eeSbellard 2396eaa728eeSbellard selector = selector1 & 0xffff; 2397ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS); 239820054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2399eaa728eeSbellard goto fail; 240020054ef0SBlue Swirl } 2401100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2402eaa728eeSbellard goto fail; 240320054ef0SBlue Swirl } 2404eaa728eeSbellard rpl = selector & 3; 2405eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2406eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2407eaa728eeSbellard if (e2 & DESC_S_MASK) { 2408eaa728eeSbellard if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { 2409eaa728eeSbellard /* conforming */ 2410eaa728eeSbellard } else { 241120054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 2412eaa728eeSbellard goto fail; 2413eaa728eeSbellard } 241420054ef0SBlue Swirl } 2415eaa728eeSbellard } else { 2416eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 2417eaa728eeSbellard switch (type) { 2418eaa728eeSbellard case 1: 2419eaa728eeSbellard case 2: 2420eaa728eeSbellard case 3: 2421eaa728eeSbellard case 4: 2422eaa728eeSbellard case 5: 2423eaa728eeSbellard case 9: 2424eaa728eeSbellard case 11: 2425eaa728eeSbellard case 12: 2426eaa728eeSbellard break; 2427eaa728eeSbellard default: 2428eaa728eeSbellard goto fail; 2429eaa728eeSbellard } 2430eaa728eeSbellard if (dpl < cpl || dpl < rpl) { 2431eaa728eeSbellard fail: 2432ae541c0eSPaolo Bonzini CC_SRC &= ~CC_Z; 2433eaa728eeSbellard return 0; 2434eaa728eeSbellard } 2435eaa728eeSbellard } 2436ae541c0eSPaolo Bonzini CC_SRC |= CC_Z; 2437eaa728eeSbellard return e2 & 0x00f0ff00; 2438eaa728eeSbellard } 2439eaa728eeSbellard 24402999a0b2SBlue Swirl void helper_verr(CPUX86State *env, target_ulong selector1) 2441eaa728eeSbellard { 2442eaa728eeSbellard uint32_t e1, e2, eflags, selector; 2443eaa728eeSbellard int rpl, dpl, cpl; 2444eaa728eeSbellard 2445eaa728eeSbellard selector = selector1 & 0xffff; 2446abdcc5c8SPaolo Bonzini eflags = cpu_cc_compute_all(env) | CC_Z; 244720054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2448eaa728eeSbellard goto fail; 244920054ef0SBlue Swirl } 2450100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) 
{ 2451eaa728eeSbellard goto fail; 245220054ef0SBlue Swirl } 245320054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 2454eaa728eeSbellard goto fail; 245520054ef0SBlue Swirl } 2456eaa728eeSbellard rpl = selector & 3; 2457eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2458eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2459eaa728eeSbellard if (e2 & DESC_CS_MASK) { 246020054ef0SBlue Swirl if (!(e2 & DESC_R_MASK)) { 2461eaa728eeSbellard goto fail; 246220054ef0SBlue Swirl } 2463eaa728eeSbellard if (!(e2 & DESC_C_MASK)) { 246420054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 2465eaa728eeSbellard goto fail; 2466eaa728eeSbellard } 246720054ef0SBlue Swirl } 2468eaa728eeSbellard } else { 2469eaa728eeSbellard if (dpl < cpl || dpl < rpl) { 2470eaa728eeSbellard fail: 2471abdcc5c8SPaolo Bonzini eflags &= ~CC_Z; 2472eaa728eeSbellard } 2473eaa728eeSbellard } 2474abdcc5c8SPaolo Bonzini CC_SRC = eflags; 2475abdcc5c8SPaolo Bonzini CC_OP = CC_OP_EFLAGS; 2476eaa728eeSbellard } 2477eaa728eeSbellard 24782999a0b2SBlue Swirl void helper_verw(CPUX86State *env, target_ulong selector1) 2479eaa728eeSbellard { 2480eaa728eeSbellard uint32_t e1, e2, eflags, selector; 2481eaa728eeSbellard int rpl, dpl, cpl; 2482eaa728eeSbellard 2483eaa728eeSbellard selector = selector1 & 0xffff; 2484abdcc5c8SPaolo Bonzini eflags = cpu_cc_compute_all(env) | CC_Z; 248520054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2486eaa728eeSbellard goto fail; 248720054ef0SBlue Swirl } 2488100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2489eaa728eeSbellard goto fail; 249020054ef0SBlue Swirl } 249120054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 2492eaa728eeSbellard goto fail; 249320054ef0SBlue Swirl } 2494eaa728eeSbellard rpl = selector & 3; 2495eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2496eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2497eaa728eeSbellard if (e2 & DESC_CS_MASK) { 2498eaa728eeSbellard goto fail; 2499eaa728eeSbellard } else { 250020054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 2501eaa728eeSbellard goto fail; 250220054ef0SBlue Swirl } 2503eaa728eeSbellard if (!(e2 & DESC_W_MASK)) { 2504eaa728eeSbellard fail: 2505abdcc5c8SPaolo Bonzini eflags &= ~CC_Z; 2506eaa728eeSbellard } 2507eaa728eeSbellard } 2508abdcc5c8SPaolo Bonzini CC_SRC = eflags; 2509abdcc5c8SPaolo Bonzini CC_OP = CC_OP_EFLAGS; 2510eaa728eeSbellard } 2511