/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"
#include "tcg-cpu.h"

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;

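/*
 * Stack push/pop helpers (descriptive note).  They operate on a
 * StackAccess descriptor filled in by the caller: each access goes
 * through ss_base plus the stack pointer masked with sp_mask, and uses
 * the caller-provided MMU index and return address for any fault that
 * the access may raise.
 */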
static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}

static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = PG_MODE_PG;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

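/*
 * Rough summary, added for readability: kernel-mode accesses pick an MMU
 * index based on SMAP.  If SMAP is disabled, or EFLAGS.AC is set and the
 * requested privilege level is below 3, the "no SMAP check" index is
 * used, otherwise the SMAP-checking index.  The +1 offset selects the
 * 32-bit variant when the CPU is not in long mode.
 */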
static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
{
    int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
    int mmu_index_base =
        !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
        (pl < 3 && (env->eflags & AC_MASK)
         ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);

    return mmu_index_base + mmu_index_32;
}

int cpu_mmu_index_kernel(CPUX86State *env)
{
    return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
}

/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

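/*
 * Hardware task switch, shared by JMP/CALL/IRET through a TSS or task
 * gate.  In outline: validate the new TSS descriptor, map both the old
 * and the new TSS with X86Access so that no memory fault can hit in the
 * middle of the switch, save the current register state into the old
 * TSS, load the new register and segment state, update the busy bits
 * and CR0.TS, and finally push the error code (if any) on the new stack.
 */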
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, bool has_error_code,
                          uint32_t error_code, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int mmu_index, index;
    target_ulong ptr;
    X86Access old, new;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* X86Access avoids memory exceptions during the task switch */
    mmu_index = cpu_mmu_index_kernel(env);
    access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
                       MMU_DATA_STORE, mmu_index, retaddr);

    if (source == SWITCH_TSS_CALL) {
        /* Probe for future write of parent task */
        probe_access(env, tss_base, 2, MMU_DATA_STORE,
                     mmu_index, retaddr);
    }
    /* While true tss_limit may be larger, we don't access the iopb here. */
    access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
                       MMU_DATA_LOAD, mmu_index, retaddr);

    /* save the current state in the old TSS */
    old_eflags = cpu_compute_eflags(env);
    if (old_type & 8) {
        /* 32 bit */
        access_stl(&old, env->tr.base + 0x20, next_eip);
        access_stl(&old, env->tr.base + 0x24, old_eflags);
        access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            access_stw(&old, env->tr.base + (0x48 + i * 4),
                       env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        access_stw(&old, env->tr.base + 0x0e, next_eip);
        access_stw(&old, env->tr.base + 0x10, old_eflags);
        access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            access_stw(&old, env->tr.base + (0x22 + i * 2),
                       env->segs[i].selector);
        }
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = access_ldl(&new, tss_base + 0x1c);
        new_eip = access_ldl(&new, tss_base + 0x20);
        new_eflags = access_ldl(&new, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
        }
        new_ldt = access_ldw(&new, tss_base + 0x60);
        new_trap = access_ldl(&new, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = access_ldw(&new, tss_base + 0x0e);
        new_eflags = access_ldw(&new, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
        }
        new_ldt = access_ldw(&new, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }

    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
        if (old_type & 8) {
            access_stl(&old, env->tr.base + 0x24, old_eflags);
        } else {
            access_stw(&old, env->tr.base + 0x10, old_eflags);
        }
    }

    if (source == SWITCH_TSS_CALL) {
        /*
         * Thanks to the probe_access above, we know the first two
         * bytes addressed by &new are writable too.
         */
        access_stw(&new, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */

    /* now if an exception occurs, it will occur in the next task context */

    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif

    if (has_error_code) {
        int cpl = env->hflags & HF_CPL_MASK;
        StackAccess sa;

        /* push the error code */
        sa.env = env;
        sa.ra = retaddr;
        sa.mmu_index = x86_mmu_index_pl(env, cpl);
        sa.sp = env->regs[R_ESP];
        if (env->segs[R_SS].flags & DESC_B_MASK) {
            sa.sp_mask = 0xffffffff;
        } else {
            sa.sp_mask = 0xffff;
        }
        sa.ss_base = env->segs[R_SS].base;
        if (type & 8) {
            pushl(&sa, error_code);
        } else {
            pushw(&sa, error_code);
        }
        SET_ESP(sa.sp, sa.sp_mask);
    }
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip, bool has_error_code,
                       int error_code)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip,
                  has_error_code, error_code, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else including reserved exception is a fault.  */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

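/*
 * Interrupt and exception delivery when the CPU is in protected mode
 * (see do_interrupt_all below): the vector is looked up in the IDT,
 * task gates are forwarded to switch_tss, and trap/interrupt gates
 * check the gate and target code segment privileges, optionally switch
 * to the inner-level stack described by the TSS, push the return frame
 * (plus the error code, if any) and update EFLAGS.
 */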
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, eflags;
    int vm86 = env->eflags & VM_MASK;
    StackAccess sa;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    sa.env = env;
    sa.ra = 0;

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip,
                   has_error_code, error_code);
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    sa.mmu_index = x86_mmu_index_pl(env, dpl);
    if (dpl < cpl) {
        /* to inner privilege */
        uint32_t esp;
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sa.sp = esp;
        sa.sp_mask = get_sp_mask(ss_e2);
        sa.ss_base = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
        sa.ss_base = env->segs[R_SS].base;
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                pushl(&sa, env->segs[R_GS].selector);
                pushl(&sa, env->segs[R_FS].selector);
                pushl(&sa, env->segs[R_DS].selector);
                pushl(&sa, env->segs[R_ES].selector);
            }
            pushl(&sa, env->segs[R_SS].selector);
            pushl(&sa, env->regs[R_ESP]);
        }
        pushl(&sa, eflags);
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, old_eip);
        if (has_error_code) {
            pushl(&sa, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                pushw(&sa, env->segs[R_GS].selector);
                pushw(&sa, env->segs[R_FS].selector);
                pushw(&sa, env->segs[R_DS].selector);
                pushw(&sa, env->segs[R_ES].selector);
            }
            pushw(&sa, env->segs[R_SS].selector);
            pushw(&sa, env->regs[R_ESP]);
        }
        pushw(&sa, eflags);
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, old_eip);
        if (has_error_code) {
            pushw(&sa, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
                               get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(sa.sp, sa.sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

static void pushq(StackAccess *sa, uint64_t val)
{
    sa->sp -= 8;
    cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
}

static uint64_t popq(StackAccess *sa)
{
    uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
    sa->sp += 8;
    return ret;
}

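/*
 * Read the new RSP from the 64-bit TSS: level 0-2 selects RSPn, while
 * IST entries are selected by passing ist + 3.  A non-canonical value
 * raises #SS(0).
 */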
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, eflags;
    target_ulong old_eip, offset;
    bool set_rf;
    StackAccess sa;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = x86_mmu_index_pl(env, dpl);
    sa.sp_mask = -1;
    sa.ss_base = 0;
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
    }
    sa.sp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected.  */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    pushq(&sa, env->segs[R_SS].selector);
    pushq(&sa, env->regs[R_ESP]);
    pushq(&sa, eflags);
    pushq(&sa, env->segs[R_CS].selector);
    pushq(&sa, old_eip);
    if (has_error_code) {
        pushq(&sa, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = sa.sp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

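/*
 * SYSRET: return to user mode from a SYSCALL fast system call.  The
 * target CS/SS selectors are derived from MSR_STAR[63:48]; in long mode
 * RFLAGS is restored from R11 and RIP is loaded from RCX (ECX for a
 * 32-bit return), otherwise only IF is set and EIP is loaded from ECX.
 */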
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

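/*
 * Real mode delivery is much simpler: the vector is a 4-byte IP:CS pair
 * read from the interrupt vector table at the IDT base, FLAGS/CS/IP are
 * pushed on the 16-bit stack, and IF/TF/AC/RF are cleared.
 */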
*/ 1151eaa728eeSbellard dt = &env->idt; 115220054ef0SBlue Swirl if (intno * 4 + 3 > dt->limit) { 115377b2bc2cSBlue Swirl raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 115420054ef0SBlue Swirl } 1155eaa728eeSbellard ptr = dt->base + intno * 4; 1156329e607dSBlue Swirl offset = cpu_lduw_kernel(env, ptr); 1157329e607dSBlue Swirl selector = cpu_lduw_kernel(env, ptr + 2); 1158059368bcSRichard Henderson 1159059368bcSRichard Henderson sa.env = env; 1160059368bcSRichard Henderson sa.ra = 0; 1161059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1162059368bcSRichard Henderson sa.sp_mask = 0xffff; 1163059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1164e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1165059368bcSRichard Henderson 116620054ef0SBlue Swirl if (is_int) { 1167eaa728eeSbellard old_eip = next_eip; 116820054ef0SBlue Swirl } else { 1169eaa728eeSbellard old_eip = env->eip; 117020054ef0SBlue Swirl } 1171eaa728eeSbellard old_cs = env->segs[R_CS].selector; 1172eaa728eeSbellard /* XXX: use SS segment size? */ 1173059368bcSRichard Henderson pushw(&sa, cpu_compute_eflags(env)); 1174059368bcSRichard Henderson pushw(&sa, old_cs); 1175059368bcSRichard Henderson pushw(&sa, old_eip); 1176eaa728eeSbellard 1177eaa728eeSbellard /* update processor state */ 1178059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1179eaa728eeSbellard env->eip = offset; 1180eaa728eeSbellard env->segs[R_CS].selector = selector; 1181eaa728eeSbellard env->segs[R_CS].base = (selector << 4); 1182eaa728eeSbellard env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); 1183eaa728eeSbellard } 1184eaa728eeSbellard 1185eaa728eeSbellard /* 1186eaa728eeSbellard * Begin execution of an interruption. is_int is TRUE if coming from 1187a78d0eabSliguang * the int instruction. next_eip is the env->eip value AFTER the interrupt 1188eaa728eeSbellard * instruction. It is only relevant if is_int is TRUE. 
1189eaa728eeSbellard */ 119030493a03SClaudio Fontana void do_interrupt_all(X86CPU *cpu, int intno, int is_int, 11912999a0b2SBlue Swirl int error_code, target_ulong next_eip, int is_hw) 1192eaa728eeSbellard { 1193ca4c810aSAndreas Färber CPUX86State *env = &cpu->env; 1194ca4c810aSAndreas Färber 11958fec2b8cSaliguori if (qemu_loglevel_mask(CPU_LOG_INT)) { 1196eaa728eeSbellard if ((env->cr[0] & CR0_PE_MASK)) { 1197eaa728eeSbellard static int count; 119820054ef0SBlue Swirl 119920054ef0SBlue Swirl qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx 120020054ef0SBlue Swirl " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1201eaa728eeSbellard count, intno, error_code, is_int, 1202eaa728eeSbellard env->hflags & HF_CPL_MASK, 1203a78d0eabSliguang env->segs[R_CS].selector, env->eip, 1204a78d0eabSliguang (int)env->segs[R_CS].base + env->eip, 120508b3ded6Sliguang env->segs[R_SS].selector, env->regs[R_ESP]); 1206eaa728eeSbellard if (intno == 0x0e) { 120793fcfe39Saliguori qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1208eaa728eeSbellard } else { 12094b34e3adSliguang qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); 1210eaa728eeSbellard } 121193fcfe39Saliguori qemu_log("\n"); 1212a0762859SAndreas Färber log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); 1213eaa728eeSbellard #if 0 1214eaa728eeSbellard { 1215eaa728eeSbellard int i; 12169bd5494eSAdam Lackorzynski target_ulong ptr; 121720054ef0SBlue Swirl 121893fcfe39Saliguori qemu_log(" code="); 1219eaa728eeSbellard ptr = env->segs[R_CS].base + env->eip; 1220eaa728eeSbellard for (i = 0; i < 16; i++) { 122193fcfe39Saliguori qemu_log(" %02x", ldub(ptr + i)); 1222eaa728eeSbellard } 122393fcfe39Saliguori qemu_log("\n"); 1224eaa728eeSbellard } 1225eaa728eeSbellard #endif 1226eaa728eeSbellard count++; 1227eaa728eeSbellard } 1228eaa728eeSbellard } 1229eaa728eeSbellard if (env->cr[0] & CR0_PE_MASK) { 123000ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1231f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12322999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 0); 123320054ef0SBlue Swirl } 123400ea18d1Saliguori #endif 1235eb38c52cSblueswir1 #ifdef TARGET_X86_64 1236eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 12372999a0b2SBlue Swirl do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); 1238eaa728eeSbellard } else 1239eaa728eeSbellard #endif 1240eaa728eeSbellard { 12412999a0b2SBlue Swirl do_interrupt_protected(env, intno, is_int, error_code, next_eip, 12422999a0b2SBlue Swirl is_hw); 1243eaa728eeSbellard } 1244eaa728eeSbellard } else { 124500ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1246f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 12472999a0b2SBlue Swirl handle_even_inj(env, intno, is_int, error_code, is_hw, 1); 124820054ef0SBlue Swirl } 124900ea18d1Saliguori #endif 12502999a0b2SBlue Swirl do_interrupt_real(env, intno, is_int, error_code, next_eip); 1251eaa728eeSbellard } 12522ed51f5bSaliguori 125300ea18d1Saliguori #if !defined(CONFIG_USER_ONLY) 1254f8dc4c64SPaolo Bonzini if (env->hflags & HF_GUEST_MASK) { 1255fdfba1a2SEdgar E. Iglesias CPUState *cs = CPU(cpu); 1256b216aa6cSPaolo Bonzini uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + 125720054ef0SBlue Swirl offsetof(struct vmcb, 125820054ef0SBlue Swirl control.event_inj)); 125920054ef0SBlue Swirl 1260b216aa6cSPaolo Bonzini x86_stl_phys(cs, 1261ab1da857SEdgar E. 
Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 126220054ef0SBlue Swirl event_inj & ~SVM_EVTINJ_VALID); 12632ed51f5bSaliguori } 126400ea18d1Saliguori #endif 1265eaa728eeSbellard } 1266eaa728eeSbellard 12672999a0b2SBlue Swirl void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1268e694d4e2SBlue Swirl { 12696aa9e42fSRichard Henderson do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1270e694d4e2SBlue Swirl } 1271e694d4e2SBlue Swirl 12722999a0b2SBlue Swirl void helper_lldt(CPUX86State *env, int selector) 1273eaa728eeSbellard { 1274eaa728eeSbellard SegmentCache *dt; 1275eaa728eeSbellard uint32_t e1, e2; 1276eaa728eeSbellard int index, entry_limit; 1277eaa728eeSbellard target_ulong ptr; 1278eaa728eeSbellard 1279eaa728eeSbellard selector &= 0xffff; 1280eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1281eaa728eeSbellard /* XXX: NULL selector case: invalid LDT */ 1282eaa728eeSbellard env->ldt.base = 0; 1283eaa728eeSbellard env->ldt.limit = 0; 1284eaa728eeSbellard } else { 128520054ef0SBlue Swirl if (selector & 0x4) { 1286100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 128720054ef0SBlue Swirl } 1288eaa728eeSbellard dt = &env->gdt; 1289eaa728eeSbellard index = selector & ~7; 1290eaa728eeSbellard #ifdef TARGET_X86_64 129120054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1292eaa728eeSbellard entry_limit = 15; 129320054ef0SBlue Swirl } else 1294eaa728eeSbellard #endif 129520054ef0SBlue Swirl { 1296eaa728eeSbellard entry_limit = 7; 129720054ef0SBlue Swirl } 129820054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1299100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 130020054ef0SBlue Swirl } 1301eaa728eeSbellard ptr = dt->base + index; 1302100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1303100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 130420054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1305100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 130620054ef0SBlue Swirl } 130720054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1308100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 130920054ef0SBlue Swirl } 1310eaa728eeSbellard #ifdef TARGET_X86_64 1311eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1312eaa728eeSbellard uint32_t e3; 131320054ef0SBlue Swirl 1314100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1315eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1316eaa728eeSbellard env->ldt.base |= (target_ulong)e3 << 32; 1317eaa728eeSbellard } else 1318eaa728eeSbellard #endif 1319eaa728eeSbellard { 1320eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1321eaa728eeSbellard } 1322eaa728eeSbellard } 1323eaa728eeSbellard env->ldt.selector = selector; 1324eaa728eeSbellard } 1325eaa728eeSbellard 13262999a0b2SBlue Swirl void helper_ltr(CPUX86State *env, int selector) 1327eaa728eeSbellard { 1328eaa728eeSbellard SegmentCache *dt; 1329eaa728eeSbellard uint32_t e1, e2; 1330eaa728eeSbellard int index, type, entry_limit; 1331eaa728eeSbellard target_ulong ptr; 1332eaa728eeSbellard 1333eaa728eeSbellard selector &= 0xffff; 1334eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1335eaa728eeSbellard /* NULL selector case: invalid TR */ 1336eaa728eeSbellard env->tr.base = 0; 1337eaa728eeSbellard env->tr.limit = 0; 1338eaa728eeSbellard env->tr.flags = 0; 1339eaa728eeSbellard } 
else { 134020054ef0SBlue Swirl if (selector & 0x4) { 1341100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 134220054ef0SBlue Swirl } 1343eaa728eeSbellard dt = &env->gdt; 1344eaa728eeSbellard index = selector & ~7; 1345eaa728eeSbellard #ifdef TARGET_X86_64 134620054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1347eaa728eeSbellard entry_limit = 15; 134820054ef0SBlue Swirl } else 1349eaa728eeSbellard #endif 135020054ef0SBlue Swirl { 1351eaa728eeSbellard entry_limit = 7; 135220054ef0SBlue Swirl } 135320054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1354100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 135520054ef0SBlue Swirl } 1356eaa728eeSbellard ptr = dt->base + index; 1357100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1358100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1359eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1360eaa728eeSbellard if ((e2 & DESC_S_MASK) || 136120054ef0SBlue Swirl (type != 1 && type != 9)) { 1362100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 136320054ef0SBlue Swirl } 136420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1365100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 136620054ef0SBlue Swirl } 1367eaa728eeSbellard #ifdef TARGET_X86_64 1368eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1369eaa728eeSbellard uint32_t e3, e4; 137020054ef0SBlue Swirl 1371100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1372100ec099SPavel Dovgalyuk e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 137320054ef0SBlue Swirl if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1374100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 137520054ef0SBlue Swirl } 1376eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1377eaa728eeSbellard env->tr.base |= (target_ulong)e3 << 32; 1378eaa728eeSbellard } else 1379eaa728eeSbellard #endif 1380eaa728eeSbellard { 1381eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1382eaa728eeSbellard } 1383eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 1384100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1385eaa728eeSbellard } 1386eaa728eeSbellard env->tr.selector = selector; 1387eaa728eeSbellard } 1388eaa728eeSbellard 1389eaa728eeSbellard /* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ 13902999a0b2SBlue Swirl void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1391eaa728eeSbellard { 1392eaa728eeSbellard uint32_t e1, e2; 1393eaa728eeSbellard int cpl, dpl, rpl; 1394eaa728eeSbellard SegmentCache *dt; 1395eaa728eeSbellard int index; 1396eaa728eeSbellard target_ulong ptr; 1397eaa728eeSbellard 1398eaa728eeSbellard selector &= 0xffff; 1399eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1400eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1401eaa728eeSbellard /* null selector case */ 1402eaa728eeSbellard if (seg_reg == R_SS 1403eaa728eeSbellard #ifdef TARGET_X86_64 1404eaa728eeSbellard && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1405eaa728eeSbellard #endif 140620054ef0SBlue Swirl ) { 1407100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 140820054ef0SBlue Swirl } 1409eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1410eaa728eeSbellard } else { 1411eaa728eeSbellard 141220054ef0SBlue Swirl if (selector & 0x4) { 1413eaa728eeSbellard dt = &env->ldt; 141420054ef0SBlue Swirl } else { 1415eaa728eeSbellard dt = &env->gdt; 141620054ef0SBlue Swirl } 1417eaa728eeSbellard index = selector & ~7; 141820054ef0SBlue Swirl if ((index + 7) > dt->limit) { 1419100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142020054ef0SBlue Swirl } 1421eaa728eeSbellard ptr = dt->base + index; 1422100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1423100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1424eaa728eeSbellard 142520054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 1426100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 142720054ef0SBlue Swirl } 1428eaa728eeSbellard rpl = selector & 3; 1429eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1430eaa728eeSbellard if (seg_reg == R_SS) { 1431eaa728eeSbellard /* must be writable segment */ 143220054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1433100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 143420054ef0SBlue Swirl } 143520054ef0SBlue Swirl if (rpl != cpl || dpl != cpl) { 1436100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 143720054ef0SBlue Swirl } 1438eaa728eeSbellard } else { 1439eaa728eeSbellard /* must be readable segment */ 144020054ef0SBlue Swirl if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1441100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 144220054ef0SBlue Swirl } 1443eaa728eeSbellard 1444eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1445eaa728eeSbellard /* if not conforming code, test rights */ 144620054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1447100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1448eaa728eeSbellard } 1449eaa728eeSbellard } 145020054ef0SBlue Swirl } 1451eaa728eeSbellard 1452eaa728eeSbellard if (!(e2 & DESC_P_MASK)) { 145320054ef0SBlue Swirl if (seg_reg == R_SS) { 1454100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 145520054ef0SBlue Swirl } else { 1456100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1457eaa728eeSbellard } 145820054ef0SBlue Swirl } 1459eaa728eeSbellard 1460eaa728eeSbellard /* set the access bit if not already set */ 1461eaa728eeSbellard if (!(e2 & DESC_A_MASK)) { 
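/* write the updated descriptor word back so the accessed bit becomes visible in the GDT/LDT entry */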
1462eaa728eeSbellard e2 |= DESC_A_MASK; 1463100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1464eaa728eeSbellard } 1465eaa728eeSbellard 1466eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 1467eaa728eeSbellard get_seg_base(e1, e2), 1468eaa728eeSbellard get_seg_limit(e1, e2), 1469eaa728eeSbellard e2); 1470eaa728eeSbellard #if 0 147193fcfe39Saliguori qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1472eaa728eeSbellard selector, (unsigned long)sc->base, sc->limit, sc->flags); 1473eaa728eeSbellard #endif 1474eaa728eeSbellard } 1475eaa728eeSbellard } 1476eaa728eeSbellard 1477eaa728eeSbellard /* protected mode jump */ 14782999a0b2SBlue Swirl void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1479100ec099SPavel Dovgalyuk target_ulong next_eip) 1480eaa728eeSbellard { 1481eaa728eeSbellard int gate_cs, type; 1482eaa728eeSbellard uint32_t e1, e2, cpl, dpl, rpl, limit; 1483eaa728eeSbellard 148420054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1485100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 148620054ef0SBlue Swirl } 1487100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1488100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 148920054ef0SBlue Swirl } 1490eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1491eaa728eeSbellard if (e2 & DESC_S_MASK) { 149220054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1493100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 149420054ef0SBlue Swirl } 1495eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1496eaa728eeSbellard if (e2 & DESC_C_MASK) { 1497eaa728eeSbellard /* conforming code segment */ 149820054ef0SBlue Swirl if (dpl > cpl) { 1499100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 150020054ef0SBlue Swirl } 1501eaa728eeSbellard } else { 1502eaa728eeSbellard /* non conforming code segment */ 1503eaa728eeSbellard rpl = new_cs & 3; 150420054ef0SBlue Swirl if (rpl > cpl) { 1505100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1506eaa728eeSbellard } 150720054ef0SBlue Swirl if (dpl != cpl) { 1508100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 150920054ef0SBlue Swirl } 151020054ef0SBlue Swirl } 151120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1512100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 151320054ef0SBlue Swirl } 1514eaa728eeSbellard limit = get_seg_limit(e1, e2); 1515eaa728eeSbellard if (new_eip > limit && 1516db7196dbSAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1517db7196dbSAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 151820054ef0SBlue Swirl } 1519eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1520eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1521a78d0eabSliguang env->eip = new_eip; 1522eaa728eeSbellard } else { 1523eaa728eeSbellard /* jump to call or task gate */ 1524eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1525eaa728eeSbellard rpl = new_cs & 3; 1526eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1527eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 15280aca0605SAndrew Oates 15290aca0605SAndrew Oates #ifdef TARGET_X86_64 15300aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15310aca0605SAndrew Oates if (type != 12) { 15320aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 15330aca0605SAndrew Oates } 15340aca0605SAndrew Oates } 15350aca0605SAndrew Oates #endif 1536eaa728eeSbellard switch (type) { 1537eaa728eeSbellard case 1: /* 286 TSS */ 1538eaa728eeSbellard case 9: /* 386 TSS */ 1539eaa728eeSbellard case 5: /* task gate */ 154020054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1541100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 154220054ef0SBlue Swirl } 1543*c7c33283SPaolo Bonzini switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, 1544*c7c33283SPaolo Bonzini false, 0, GETPC()); 1545eaa728eeSbellard break; 1546eaa728eeSbellard case 4: /* 286 call gate */ 1547eaa728eeSbellard case 12: /* 386 call gate */ 154820054ef0SBlue Swirl if ((dpl < cpl) || (dpl < rpl)) { 1549100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 155020054ef0SBlue Swirl } 155120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1552100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 155320054ef0SBlue Swirl } 1554eaa728eeSbellard gate_cs = e1 >> 16; 1555eaa728eeSbellard new_eip = (e1 & 0xffff); 155620054ef0SBlue Swirl if (type == 12) { 1557eaa728eeSbellard new_eip |= (e2 & 0xffff0000); 155820054ef0SBlue Swirl } 15590aca0605SAndrew Oates 15600aca0605SAndrew Oates #ifdef TARGET_X86_64 15610aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15620aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 15630aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 15640aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15650aca0605SAndrew Oates GETPC()); 15660aca0605SAndrew Oates } 15670aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 15680aca0605SAndrew Oates if (type != 0) { 15690aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15700aca0605SAndrew Oates GETPC()); 15710aca0605SAndrew Oates } 15720aca0605SAndrew Oates new_eip |= ((target_ulong)e1) << 32; 15730aca0605SAndrew Oates } 15740aca0605SAndrew Oates #endif 15750aca0605SAndrew Oates 1576100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1577100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 157820054ef0SBlue Swirl } 1579eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1580eaa728eeSbellard /* must be code segment */ 1581eaa728eeSbellard if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 158220054ef0SBlue Swirl (DESC_S_MASK | DESC_CS_MASK))) { 1583100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 158420054ef0SBlue Swirl } 1585eaa728eeSbellard if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 158620054ef0SBlue Swirl (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1587100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 158820054ef0SBlue Swirl } 15890aca0605SAndrew Oates #ifdef TARGET_X86_64 15900aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15910aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 15920aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15930aca0605SAndrew Oates } 15940aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 15950aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15960aca0605SAndrew Oates } 15970aca0605SAndrew Oates } 15980aca0605SAndrew Oates #endif 159920054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 
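/* the code segment referenced by the call gate must be present */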
1600100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 160120054ef0SBlue Swirl } 1602eaa728eeSbellard limit = get_seg_limit(e1, e2); 16030aca0605SAndrew Oates if (new_eip > limit && 16040aca0605SAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1605100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 160620054ef0SBlue Swirl } 1607eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1608eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1609a78d0eabSliguang env->eip = new_eip; 1610eaa728eeSbellard break; 1611eaa728eeSbellard default: 1612100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1613eaa728eeSbellard break; 1614eaa728eeSbellard } 1615eaa728eeSbellard } 1616eaa728eeSbellard } 1617eaa728eeSbellard 1618eaa728eeSbellard /* real mode call */ 16198c03ab9fSRichard Henderson void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, 16208c03ab9fSRichard Henderson int shift, uint32_t next_eip) 1621eaa728eeSbellard { 1622059368bcSRichard Henderson StackAccess sa; 1623eaa728eeSbellard 1624059368bcSRichard Henderson sa.env = env; 1625059368bcSRichard Henderson sa.ra = GETPC(); 1626059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1627059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1628059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1629e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1630059368bcSRichard Henderson 1631eaa728eeSbellard if (shift) { 1632059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1633059368bcSRichard Henderson pushl(&sa, next_eip); 1634eaa728eeSbellard } else { 1635059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1636059368bcSRichard Henderson pushw(&sa, next_eip); 1637eaa728eeSbellard } 1638eaa728eeSbellard 1639059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1640eaa728eeSbellard env->eip = new_eip; 1641eaa728eeSbellard env->segs[R_CS].selector = new_cs; 1642eaa728eeSbellard env->segs[R_CS].base = (new_cs << 4); 1643eaa728eeSbellard } 1644eaa728eeSbellard 1645eaa728eeSbellard /* protected mode call */ 16462999a0b2SBlue Swirl void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1647100ec099SPavel Dovgalyuk int shift, target_ulong next_eip) 1648eaa728eeSbellard { 1649eaa728eeSbellard int new_stack, i; 16500aca0605SAndrew Oates uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1651059368bcSRichard Henderson uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; 1652eaa728eeSbellard uint32_t val, limit, old_sp_mask; 1653059368bcSRichard Henderson target_ulong old_ssp, offset; 1654059368bcSRichard Henderson StackAccess sa; 1655eaa728eeSbellard 16560aca0605SAndrew Oates LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 16576aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 165820054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1659100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 166020054ef0SBlue Swirl } 1661100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1662100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 166320054ef0SBlue Swirl } 1664eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1665d12d51d5Saliguori LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1666059368bcSRichard Henderson 1667059368bcSRichard Henderson sa.env = env; 1668059368bcSRichard Henderson 
sa.ra = GETPC(); 1669059368bcSRichard Henderson 1670eaa728eeSbellard if (e2 & DESC_S_MASK) { 1671e136648cSPaolo Bonzini /* "normal" far call, no stack switch possible */ 167220054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1673100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 167420054ef0SBlue Swirl } 1675eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1676eaa728eeSbellard if (e2 & DESC_C_MASK) { 1677eaa728eeSbellard /* conforming code segment */ 167820054ef0SBlue Swirl if (dpl > cpl) { 1679100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 168020054ef0SBlue Swirl } 1681eaa728eeSbellard } else { 1682eaa728eeSbellard /* non conforming code segment */ 1683eaa728eeSbellard rpl = new_cs & 3; 168420054ef0SBlue Swirl if (rpl > cpl) { 1685100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1686eaa728eeSbellard } 168720054ef0SBlue Swirl if (dpl != cpl) { 1688100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 168920054ef0SBlue Swirl } 169020054ef0SBlue Swirl } 169120054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1692100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 169320054ef0SBlue Swirl } 1694eaa728eeSbellard 1695e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1696eaa728eeSbellard #ifdef TARGET_X86_64 1697eaa728eeSbellard /* XXX: check 16/32 bit cases in long mode */ 1698eaa728eeSbellard if (shift == 2) { 1699eaa728eeSbellard /* 64 bit case */ 1700059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1701059368bcSRichard Henderson sa.sp_mask = -1; 1702059368bcSRichard Henderson sa.ss_base = 0; 1703059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1704059368bcSRichard Henderson pushq(&sa, next_eip); 1705eaa728eeSbellard /* from this point, not restartable */ 1706059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1707eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1708eaa728eeSbellard get_seg_base(e1, e2), 1709eaa728eeSbellard get_seg_limit(e1, e2), e2); 1710a78d0eabSliguang env->eip = new_eip; 1711eaa728eeSbellard } else 1712eaa728eeSbellard #endif 1713eaa728eeSbellard { 1714059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1715059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1716059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1717eaa728eeSbellard if (shift) { 1718059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1719059368bcSRichard Henderson pushl(&sa, next_eip); 1720eaa728eeSbellard } else { 1721059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1722059368bcSRichard Henderson pushw(&sa, next_eip); 1723eaa728eeSbellard } 1724eaa728eeSbellard 1725eaa728eeSbellard limit = get_seg_limit(e1, e2); 172620054ef0SBlue Swirl if (new_eip > limit) { 1727100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 172820054ef0SBlue Swirl } 1729eaa728eeSbellard /* from this point, not restartable */ 1730059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1731eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1732eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1733a78d0eabSliguang env->eip = new_eip; 1734eaa728eeSbellard } 1735eaa728eeSbellard } else { 1736eaa728eeSbellard /* check gate type */ 1737eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1738eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 
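/* In long mode (EFER.LMA) only 64-bit call gates (type 12) are legal far-call targets; any other system descriptor type takes the #GP path below. */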
1739eaa728eeSbellard rpl = new_cs & 3; 17400aca0605SAndrew Oates 17410aca0605SAndrew Oates #ifdef TARGET_X86_64 17420aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17430aca0605SAndrew Oates if (type != 12) { 17440aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 17450aca0605SAndrew Oates } 17460aca0605SAndrew Oates } 17470aca0605SAndrew Oates #endif 17480aca0605SAndrew Oates 1749eaa728eeSbellard switch (type) { 1750eaa728eeSbellard case 1: /* available 286 TSS */ 1751eaa728eeSbellard case 9: /* available 386 TSS */ 1752eaa728eeSbellard case 5: /* task gate */ 175320054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1754100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 175520054ef0SBlue Swirl } 1756*c7c33283SPaolo Bonzini switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, 1757*c7c33283SPaolo Bonzini false, 0, GETPC()); 1758eaa728eeSbellard return; 1759eaa728eeSbellard case 4: /* 286 call gate */ 1760eaa728eeSbellard case 12: /* 386 call gate */ 1761eaa728eeSbellard break; 1762eaa728eeSbellard default: 1763100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1764eaa728eeSbellard break; 1765eaa728eeSbellard } 1766eaa728eeSbellard shift = type >> 3; 1767eaa728eeSbellard 176820054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1769100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 177020054ef0SBlue Swirl } 1771eaa728eeSbellard /* check valid bit */ 177220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1773100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 177420054ef0SBlue Swirl } 1775eaa728eeSbellard selector = e1 >> 16; 1776eaa728eeSbellard param_count = e2 & 0x1f; 17770aca0605SAndrew Oates offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 17780aca0605SAndrew Oates #ifdef TARGET_X86_64 17790aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17800aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 17810aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 17820aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17830aca0605SAndrew Oates GETPC()); 17840aca0605SAndrew Oates } 17850aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 17860aca0605SAndrew Oates if (type != 0) { 17870aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17880aca0605SAndrew Oates GETPC()); 17890aca0605SAndrew Oates } 17900aca0605SAndrew Oates offset |= ((target_ulong)e1) << 32; 17910aca0605SAndrew Oates } 17920aca0605SAndrew Oates #endif 179320054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 1794100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 179520054ef0SBlue Swirl } 1796eaa728eeSbellard 1797100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1798100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 179920054ef0SBlue Swirl } 180020054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1801100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 180220054ef0SBlue Swirl } 1803eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 180420054ef0SBlue Swirl if (dpl > cpl) { 1805100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 180620054ef0SBlue Swirl } 18070aca0605SAndrew Oates #ifdef TARGET_X86_64 
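/* A 64-bit call gate must point at a 64-bit code segment (L set, D/B clear); shift is bumped to 2 so the stack pushes below use 8-byte slots. */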
18080aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 18090aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 18100aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 18110aca0605SAndrew Oates } 18120aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 18130aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 18140aca0605SAndrew Oates } 18150aca0605SAndrew Oates shift++; 18160aca0605SAndrew Oates } 18170aca0605SAndrew Oates #endif 181820054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1819100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 182020054ef0SBlue Swirl } 1821eaa728eeSbellard 1822eaa728eeSbellard if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1823eaa728eeSbellard /* to inner privilege */ 1824e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 18250aca0605SAndrew Oates #ifdef TARGET_X86_64 18260aca0605SAndrew Oates if (shift == 2) { 18270aca0605SAndrew Oates ss = dpl; /* SS = NULL selector with RPL = new CPL */ 18280aca0605SAndrew Oates new_stack = 1; 1829059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, dpl); 1830059368bcSRichard Henderson sa.sp_mask = -1; 1831059368bcSRichard Henderson sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ 18320aca0605SAndrew Oates LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1833059368bcSRichard Henderson TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); 18340aca0605SAndrew Oates } else 18350aca0605SAndrew Oates #endif 18360aca0605SAndrew Oates { 18370aca0605SAndrew Oates uint32_t sp32; 18380aca0605SAndrew Oates get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 183990a2541bSliguang LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 18400aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp32, param_count, 184190a2541bSliguang env->regs[R_ESP]); 184220054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 1843100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 184420054ef0SBlue Swirl } 184520054ef0SBlue Swirl if ((ss & 3) != dpl) { 1846100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 184720054ef0SBlue Swirl } 1848100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1849100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 185020054ef0SBlue Swirl } 1851eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 185220054ef0SBlue Swirl if (ss_dpl != dpl) { 1853100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 185420054ef0SBlue Swirl } 1855eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 1856eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 185720054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 1858100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 185920054ef0SBlue Swirl } 186020054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 1861100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 186220054ef0SBlue Swirl } 1863eaa728eeSbellard 1864059368bcSRichard Henderson sa.sp = sp32; 1865059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 1866059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 18670aca0605SAndrew Oates } 18680aca0605SAndrew Oates 186920054ef0SBlue Swirl /* push_size = ((param_count * 2) + 8) << shift; */ 1870eaa728eeSbellard old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1871eaa728eeSbellard old_ssp = env->segs[R_SS].base; 1872059368bcSRichard 
Henderson 18730aca0605SAndrew Oates #ifdef TARGET_X86_64 18740aca0605SAndrew Oates if (shift == 2) { 18750aca0605SAndrew Oates /* XXX: verify if new stack address is canonical */ 1876059368bcSRichard Henderson pushq(&sa, env->segs[R_SS].selector); 1877059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 18780aca0605SAndrew Oates /* parameters aren't supported for 64-bit call gates */ 18790aca0605SAndrew Oates } else 18800aca0605SAndrew Oates #endif 18810aca0605SAndrew Oates if (shift == 1) { 1882059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 1883059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 1884eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18850bd385e7SPaolo Bonzini val = cpu_ldl_data_ra(env, 18860bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), 18870bd385e7SPaolo Bonzini GETPC()); 1888059368bcSRichard Henderson pushl(&sa, val); 1889eaa728eeSbellard } 1890eaa728eeSbellard } else { 1891059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 1892059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 1893eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18940bd385e7SPaolo Bonzini val = cpu_lduw_data_ra(env, 18950bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), 18960bd385e7SPaolo Bonzini GETPC()); 1897059368bcSRichard Henderson pushw(&sa, val); 1898eaa728eeSbellard } 1899eaa728eeSbellard } 1900eaa728eeSbellard new_stack = 1; 1901eaa728eeSbellard } else { 1902eaa728eeSbellard /* to same privilege */ 1903e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1904059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1905059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1906059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 190720054ef0SBlue Swirl /* push_size = (4 << shift); */ 1908eaa728eeSbellard new_stack = 0; 1909eaa728eeSbellard } 1910eaa728eeSbellard 19110aca0605SAndrew Oates #ifdef TARGET_X86_64 19120aca0605SAndrew Oates if (shift == 2) { 1913059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1914059368bcSRichard Henderson pushq(&sa, next_eip); 19150aca0605SAndrew Oates } else 19160aca0605SAndrew Oates #endif 19170aca0605SAndrew Oates if (shift == 1) { 1918059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1919059368bcSRichard Henderson pushl(&sa, next_eip); 1920eaa728eeSbellard } else { 1921059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1922059368bcSRichard Henderson pushw(&sa, next_eip); 1923eaa728eeSbellard } 1924eaa728eeSbellard 1925eaa728eeSbellard /* from this point, not restartable */ 1926eaa728eeSbellard 1927eaa728eeSbellard if (new_stack) { 19280aca0605SAndrew Oates #ifdef TARGET_X86_64 19290aca0605SAndrew Oates if (shift == 2) { 19300aca0605SAndrew Oates cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 19310aca0605SAndrew Oates } else 19320aca0605SAndrew Oates #endif 19330aca0605SAndrew Oates { 1934eaa728eeSbellard ss = (ss & ~3) | dpl; 1935eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 1936059368bcSRichard Henderson sa.ss_base, 1937eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 1938eaa728eeSbellard ss_e2); 1939eaa728eeSbellard } 19400aca0605SAndrew Oates } 1941eaa728eeSbellard 1942eaa728eeSbellard selector = (selector & ~3) | dpl; 1943eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1944eaa728eeSbellard get_seg_base(e1, e2), 1945eaa728eeSbellard get_seg_limit(e1, e2), 1946eaa728eeSbellard e2); 1947059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 
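/* Finally load the target instruction pointer taken from the gate. For a call to inner privilege, SS:ESP was switched via the TSS above and the old SS:ESP (plus any copied parameters) already sit on the new stack. */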
1948a78d0eabSliguang env->eip = offset; 1949eaa728eeSbellard } 1950eaa728eeSbellard } 1951eaa728eeSbellard 1952eaa728eeSbellard /* real and vm86 mode iret */ 19532999a0b2SBlue Swirl void helper_iret_real(CPUX86State *env, int shift) 1954eaa728eeSbellard { 1955059368bcSRichard Henderson uint32_t new_cs, new_eip, new_eflags; 1956eaa728eeSbellard int eflags_mask; 1957059368bcSRichard Henderson StackAccess sa; 1958eaa728eeSbellard 1959059368bcSRichard Henderson sa.env = env; 1960059368bcSRichard Henderson sa.ra = GETPC(); 19618053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1962059368bcSRichard Henderson sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ 1963059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1964059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1965059368bcSRichard Henderson 1966eaa728eeSbellard if (shift == 1) { 1967eaa728eeSbellard /* 32 bits */ 1968059368bcSRichard Henderson new_eip = popl(&sa); 1969059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 1970059368bcSRichard Henderson new_eflags = popl(&sa); 1971eaa728eeSbellard } else { 1972eaa728eeSbellard /* 16 bits */ 1973059368bcSRichard Henderson new_eip = popw(&sa); 1974059368bcSRichard Henderson new_cs = popw(&sa); 1975059368bcSRichard Henderson new_eflags = popw(&sa); 1976eaa728eeSbellard } 1977059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1978bdadc0b5Smalc env->segs[R_CS].selector = new_cs; 1979bdadc0b5Smalc env->segs[R_CS].base = (new_cs << 4); 1980eaa728eeSbellard env->eip = new_eip; 198120054ef0SBlue Swirl if (env->eflags & VM_MASK) { 198220054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 198320054ef0SBlue Swirl NT_MASK; 198420054ef0SBlue Swirl } else { 198520054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 198620054ef0SBlue Swirl RF_MASK | NT_MASK; 198720054ef0SBlue Swirl } 198820054ef0SBlue Swirl if (shift == 0) { 1989eaa728eeSbellard eflags_mask &= 0xffff; 199020054ef0SBlue Swirl } 1991997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 1992db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 1993eaa728eeSbellard } 1994eaa728eeSbellard 1995c117e5b1SPhilippe Mathieu-Daudé static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 1996eaa728eeSbellard { 1997eaa728eeSbellard int dpl; 1998eaa728eeSbellard uint32_t e2; 1999eaa728eeSbellard 2000eaa728eeSbellard /* XXX: on x86_64, we do not want to nullify FS and GS because 2001eaa728eeSbellard they may still contain a valid base. 
I would be interested to 2002eaa728eeSbellard know how a real x86_64 CPU behaves */ 2003eaa728eeSbellard if ((seg_reg == R_FS || seg_reg == R_GS) && 200420054ef0SBlue Swirl (env->segs[seg_reg].selector & 0xfffc) == 0) { 2005eaa728eeSbellard return; 200620054ef0SBlue Swirl } 2007eaa728eeSbellard 2008eaa728eeSbellard e2 = env->segs[seg_reg].flags; 2009eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2010eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 2011eaa728eeSbellard /* data or non conforming code segment */ 2012eaa728eeSbellard if (dpl < cpl) { 2013c2ba0515SBin Meng cpu_x86_load_seg_cache(env, seg_reg, 0, 2014c2ba0515SBin Meng env->segs[seg_reg].base, 2015c2ba0515SBin Meng env->segs[seg_reg].limit, 2016c2ba0515SBin Meng env->segs[seg_reg].flags & ~DESC_P_MASK); 2017eaa728eeSbellard } 2018eaa728eeSbellard } 2019eaa728eeSbellard } 2020eaa728eeSbellard 2021eaa728eeSbellard /* protected mode iret */ 20222999a0b2SBlue Swirl static inline void helper_ret_protected(CPUX86State *env, int shift, 2023100ec099SPavel Dovgalyuk int is_iret, int addend, 2024100ec099SPavel Dovgalyuk uintptr_t retaddr) 2025eaa728eeSbellard { 2026eaa728eeSbellard uint32_t new_cs, new_eflags, new_ss; 2027eaa728eeSbellard uint32_t new_es, new_ds, new_fs, new_gs; 2028eaa728eeSbellard uint32_t e1, e2, ss_e1, ss_e2; 2029eaa728eeSbellard int cpl, dpl, rpl, eflags_mask, iopl; 2030059368bcSRichard Henderson target_ulong new_eip, new_esp; 2031059368bcSRichard Henderson StackAccess sa; 2032059368bcSRichard Henderson 20338053862aSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 20348053862aSPaolo Bonzini 2035059368bcSRichard Henderson sa.env = env; 2036059368bcSRichard Henderson sa.ra = retaddr; 20378053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 2038eaa728eeSbellard 2039eaa728eeSbellard #ifdef TARGET_X86_64 204020054ef0SBlue Swirl if (shift == 2) { 2041059368bcSRichard Henderson sa.sp_mask = -1; 204220054ef0SBlue Swirl } else 2043eaa728eeSbellard #endif 204420054ef0SBlue Swirl { 2045059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 204620054ef0SBlue Swirl } 2047059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 2048059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 2049eaa728eeSbellard new_eflags = 0; /* avoid warning */ 2050eaa728eeSbellard #ifdef TARGET_X86_64 2051eaa728eeSbellard if (shift == 2) { 2052059368bcSRichard Henderson new_eip = popq(&sa); 2053059368bcSRichard Henderson new_cs = popq(&sa) & 0xffff; 2054eaa728eeSbellard if (is_iret) { 2055059368bcSRichard Henderson new_eflags = popq(&sa); 2056eaa728eeSbellard } 2057eaa728eeSbellard } else 2058eaa728eeSbellard #endif 205920054ef0SBlue Swirl { 2060eaa728eeSbellard if (shift == 1) { 2061eaa728eeSbellard /* 32 bits */ 2062059368bcSRichard Henderson new_eip = popl(&sa); 2063059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 2064eaa728eeSbellard if (is_iret) { 2065059368bcSRichard Henderson new_eflags = popl(&sa); 206620054ef0SBlue Swirl if (new_eflags & VM_MASK) { 2067eaa728eeSbellard goto return_to_vm86; 2068eaa728eeSbellard } 206920054ef0SBlue Swirl } 2070eaa728eeSbellard } else { 2071eaa728eeSbellard /* 16 bits */ 2072059368bcSRichard Henderson new_eip = popw(&sa); 2073059368bcSRichard Henderson new_cs = popw(&sa); 207420054ef0SBlue Swirl if (is_iret) { 2075059368bcSRichard Henderson new_eflags = popw(&sa); 2076eaa728eeSbellard } 207720054ef0SBlue Swirl } 207820054ef0SBlue Swirl } 2079d12d51d5Saliguori LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2080eaa728eeSbellard 
new_cs, new_eip, shift, addend); 20816aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 208220054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 2083100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2084eaa728eeSbellard } 2085100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2086100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 208720054ef0SBlue Swirl } 208820054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || 208920054ef0SBlue Swirl !(e2 & DESC_CS_MASK)) { 2090100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 209120054ef0SBlue Swirl } 209220054ef0SBlue Swirl rpl = new_cs & 3; 209320054ef0SBlue Swirl if (rpl < cpl) { 2094100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 209520054ef0SBlue Swirl } 209620054ef0SBlue Swirl dpl = (e2 >> DESC_DPL_SHIFT) & 3; 209720054ef0SBlue Swirl if (e2 & DESC_C_MASK) { 209820054ef0SBlue Swirl if (dpl > rpl) { 2099100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 210020054ef0SBlue Swirl } 210120054ef0SBlue Swirl } else { 210220054ef0SBlue Swirl if (dpl != rpl) { 2103100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 210420054ef0SBlue Swirl } 210520054ef0SBlue Swirl } 210620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 2107100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 210820054ef0SBlue Swirl } 2109eaa728eeSbellard 2110059368bcSRichard Henderson sa.sp += addend; 2111eaa728eeSbellard if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2112eaa728eeSbellard ((env->hflags & HF_CS64_MASK) && !is_iret))) { 21131235fc06Sths /* return to same privilege level */ 2114eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2115eaa728eeSbellard get_seg_base(e1, e2), 2116eaa728eeSbellard get_seg_limit(e1, e2), 2117eaa728eeSbellard e2); 2118eaa728eeSbellard } else { 2119eaa728eeSbellard /* return to different privilege level */ 2120eaa728eeSbellard #ifdef TARGET_X86_64 2121eaa728eeSbellard if (shift == 2) { 2122059368bcSRichard Henderson new_esp = popq(&sa); 2123059368bcSRichard Henderson new_ss = popq(&sa) & 0xffff; 2124eaa728eeSbellard } else 2125eaa728eeSbellard #endif 212620054ef0SBlue Swirl { 2127eaa728eeSbellard if (shift == 1) { 2128eaa728eeSbellard /* 32 bits */ 2129059368bcSRichard Henderson new_esp = popl(&sa); 2130059368bcSRichard Henderson new_ss = popl(&sa) & 0xffff; 2131eaa728eeSbellard } else { 2132eaa728eeSbellard /* 16 bits */ 2133059368bcSRichard Henderson new_esp = popw(&sa); 2134059368bcSRichard Henderson new_ss = popw(&sa); 2135eaa728eeSbellard } 213620054ef0SBlue Swirl } 2137d12d51d5Saliguori LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2138eaa728eeSbellard new_ss, new_esp); 2139eaa728eeSbellard if ((new_ss & 0xfffc) == 0) { 2140eaa728eeSbellard #ifdef TARGET_X86_64 2141eaa728eeSbellard /* NULL ss is allowed in long mode if cpl != 3 */ 2142eaa728eeSbellard /* XXX: test CS64? */ 2143eaa728eeSbellard if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2144eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2145eaa728eeSbellard 0, 0xffffffff, 2146eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2147eaa728eeSbellard DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2148eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 2149eaa728eeSbellard ss_e2 = DESC_B_MASK; /* XXX: should not be needed? 
*/ 2150eaa728eeSbellard } else 2151eaa728eeSbellard #endif 2152eaa728eeSbellard { 2153100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2154eaa728eeSbellard } 2155eaa728eeSbellard } else { 215620054ef0SBlue Swirl if ((new_ss & 3) != rpl) { 2157100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 215820054ef0SBlue Swirl } 2159100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2160100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 216120054ef0SBlue Swirl } 2162eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 2163eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 216420054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 2165100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 216620054ef0SBlue Swirl } 2167eaa728eeSbellard dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 216820054ef0SBlue Swirl if (dpl != rpl) { 2169100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 217020054ef0SBlue Swirl } 217120054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 2172100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 217320054ef0SBlue Swirl } 2174eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2175eaa728eeSbellard get_seg_base(ss_e1, ss_e2), 2176eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2177eaa728eeSbellard ss_e2); 2178eaa728eeSbellard } 2179eaa728eeSbellard 2180eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2181eaa728eeSbellard get_seg_base(e1, e2), 2182eaa728eeSbellard get_seg_limit(e1, e2), 2183eaa728eeSbellard e2); 2184059368bcSRichard Henderson sa.sp = new_esp; 2185eaa728eeSbellard #ifdef TARGET_X86_64 218620054ef0SBlue Swirl if (env->hflags & HF_CS64_MASK) { 2187059368bcSRichard Henderson sa.sp_mask = -1; 218820054ef0SBlue Swirl } else 2189eaa728eeSbellard #endif 219020054ef0SBlue Swirl { 2191059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 219220054ef0SBlue Swirl } 2193eaa728eeSbellard 2194eaa728eeSbellard /* validate data segments */ 21952999a0b2SBlue Swirl validate_seg(env, R_ES, rpl); 21962999a0b2SBlue Swirl validate_seg(env, R_DS, rpl); 21972999a0b2SBlue Swirl validate_seg(env, R_FS, rpl); 21982999a0b2SBlue Swirl validate_seg(env, R_GS, rpl); 2199eaa728eeSbellard 2200059368bcSRichard Henderson sa.sp += addend; 2201eaa728eeSbellard } 2202059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 2203eaa728eeSbellard env->eip = new_eip; 2204eaa728eeSbellard if (is_iret) { 2205eaa728eeSbellard /* NOTE: 'cpl' is the _old_ CPL */ 2206eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 220720054ef0SBlue Swirl if (cpl == 0) { 2208eaa728eeSbellard eflags_mask |= IOPL_MASK; 220920054ef0SBlue Swirl } 2210eaa728eeSbellard iopl = (env->eflags >> IOPL_SHIFT) & 3; 221120054ef0SBlue Swirl if (cpl <= iopl) { 2212eaa728eeSbellard eflags_mask |= IF_MASK; 221320054ef0SBlue Swirl } 221420054ef0SBlue Swirl if (shift == 0) { 2215eaa728eeSbellard eflags_mask &= 0xffff; 221620054ef0SBlue Swirl } 2217997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2218eaa728eeSbellard } 2219eaa728eeSbellard return; 2220eaa728eeSbellard 2221eaa728eeSbellard return_to_vm86: 2222059368bcSRichard Henderson new_esp = popl(&sa); 2223059368bcSRichard Henderson new_ss = popl(&sa); 2224059368bcSRichard Henderson new_es = popl(&sa); 2225059368bcSRichard Henderson new_ds = popl(&sa); 2226059368bcSRichard Henderson new_fs = popl(&sa); 
2227059368bcSRichard Henderson new_gs = popl(&sa); 2228eaa728eeSbellard 2229eaa728eeSbellard /* modify processor state */ 2230997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | 2231997ff0d9SBlue Swirl IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | 2232997ff0d9SBlue Swirl VIP_MASK); 22332999a0b2SBlue Swirl load_seg_vm(env, R_CS, new_cs & 0xffff); 22342999a0b2SBlue Swirl load_seg_vm(env, R_SS, new_ss & 0xffff); 22352999a0b2SBlue Swirl load_seg_vm(env, R_ES, new_es & 0xffff); 22362999a0b2SBlue Swirl load_seg_vm(env, R_DS, new_ds & 0xffff); 22372999a0b2SBlue Swirl load_seg_vm(env, R_FS, new_fs & 0xffff); 22382999a0b2SBlue Swirl load_seg_vm(env, R_GS, new_gs & 0xffff); 2239eaa728eeSbellard 2240eaa728eeSbellard env->eip = new_eip & 0xffff; 224108b3ded6Sliguang env->regs[R_ESP] = new_esp; 2242eaa728eeSbellard } 2243eaa728eeSbellard 22442999a0b2SBlue Swirl void helper_iret_protected(CPUX86State *env, int shift, int next_eip) 2245eaa728eeSbellard { 2246eaa728eeSbellard int tss_selector, type; 2247eaa728eeSbellard uint32_t e1, e2; 2248eaa728eeSbellard 2249eaa728eeSbellard /* specific case for TSS */ 2250eaa728eeSbellard if (env->eflags & NT_MASK) { 2251eaa728eeSbellard #ifdef TARGET_X86_64 225220054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 2253100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 225420054ef0SBlue Swirl } 2255eaa728eeSbellard #endif 2256100ec099SPavel Dovgalyuk tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); 225720054ef0SBlue Swirl if (tss_selector & 4) { 2258100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 225920054ef0SBlue Swirl } 2260100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { 2261100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 226220054ef0SBlue Swirl } 2263eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2264eaa728eeSbellard /* NOTE: we check both segment and busy TSS */ 226520054ef0SBlue Swirl if (type != 3) { 2266100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 226720054ef0SBlue Swirl } 2268*c7c33283SPaolo Bonzini switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, 2269*c7c33283SPaolo Bonzini false, 0, GETPC()); 2270eaa728eeSbellard } else { 2271100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 1, 0, GETPC()); 2272eaa728eeSbellard } 2273db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 2274eaa728eeSbellard } 2275eaa728eeSbellard 22762999a0b2SBlue Swirl void helper_lret_protected(CPUX86State *env, int shift, int addend) 2277eaa728eeSbellard { 2278100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 0, addend, GETPC()); 2279eaa728eeSbellard } 2280eaa728eeSbellard 22812999a0b2SBlue Swirl void helper_sysenter(CPUX86State *env) 2282eaa728eeSbellard { 2283eaa728eeSbellard if (env->sysenter_cs == 0) { 2284100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2285eaa728eeSbellard } 2286eaa728eeSbellard env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 22872436b61aSbalrog 22882436b61aSbalrog #ifdef TARGET_X86_64 22892436b61aSbalrog if (env->hflags & HF_LMA_MASK) { 22902436b61aSbalrog cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 22912436b61aSbalrog 0, 0xffffffff, 22922436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22932436b61aSbalrog DESC_S_MASK | 229420054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 229520054ef0SBlue 
Swirl DESC_L_MASK); 22962436b61aSbalrog } else 22972436b61aSbalrog #endif 22982436b61aSbalrog { 2299eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2300eaa728eeSbellard 0, 0xffffffff, 2301eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2302eaa728eeSbellard DESC_S_MASK | 2303eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 23042436b61aSbalrog } 2305eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2306eaa728eeSbellard 0, 0xffffffff, 2307eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2308eaa728eeSbellard DESC_S_MASK | 2309eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 231008b3ded6Sliguang env->regs[R_ESP] = env->sysenter_esp; 2311a78d0eabSliguang env->eip = env->sysenter_eip; 2312eaa728eeSbellard } 2313eaa728eeSbellard 23142999a0b2SBlue Swirl void helper_sysexit(CPUX86State *env, int dflag) 2315eaa728eeSbellard { 2316eaa728eeSbellard int cpl; 2317eaa728eeSbellard 2318eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2319eaa728eeSbellard if (env->sysenter_cs == 0 || cpl != 0) { 2320100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2321eaa728eeSbellard } 23222436b61aSbalrog #ifdef TARGET_X86_64 23232436b61aSbalrog if (dflag == 2) { 232420054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 232520054ef0SBlue Swirl 3, 0, 0xffffffff, 23262436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23272436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 232820054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 232920054ef0SBlue Swirl DESC_L_MASK); 233020054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 233120054ef0SBlue Swirl 3, 0, 0xffffffff, 23322436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23332436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 23342436b61aSbalrog DESC_W_MASK | DESC_A_MASK); 23352436b61aSbalrog } else 23362436b61aSbalrog #endif 23372436b61aSbalrog { 233820054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 233920054ef0SBlue Swirl 3, 0, 0xffffffff, 2340eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2341eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2342eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 234320054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 234420054ef0SBlue Swirl 3, 0, 0xffffffff, 2345eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2346eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2347eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 23482436b61aSbalrog } 234908b3ded6Sliguang env->regs[R_ESP] = env->regs[R_ECX]; 2350a78d0eabSliguang env->eip = env->regs[R_EDX]; 2351eaa728eeSbellard } 2352eaa728eeSbellard 23532999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) 2354eaa728eeSbellard { 2355eaa728eeSbellard unsigned int limit; 2356ae541c0eSPaolo Bonzini uint32_t e1, e2, selector; 2357eaa728eeSbellard int rpl, dpl, cpl, type; 2358eaa728eeSbellard 2359eaa728eeSbellard selector = selector1 & 0xffff; 2360ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS); 236120054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2362dc1ded53Saliguori goto fail; 236320054ef0SBlue Swirl } 2364100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2365eaa728eeSbellard goto fail; 236620054ef0SBlue Swirl } 2367eaa728eeSbellard rpl = selector & 3; 2368eaa728eeSbellard dpl = (e2 >> 
23532999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2354eaa728eeSbellard {
2355eaa728eeSbellard unsigned int limit;
2356ae541c0eSPaolo Bonzini uint32_t e1, e2, selector;
2357eaa728eeSbellard int rpl, dpl, cpl, type;
2358eaa728eeSbellard
2359eaa728eeSbellard selector = selector1 & 0xffff;
2360ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS);
236120054ef0SBlue Swirl if ((selector & 0xfffc) == 0) {
2362dc1ded53Saliguori goto fail;
236320054ef0SBlue Swirl }
2364100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2365eaa728eeSbellard goto fail;
236620054ef0SBlue Swirl }
2367eaa728eeSbellard rpl = selector & 3;
2368eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2369eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK;
2370eaa728eeSbellard if (e2 & DESC_S_MASK) {
2371eaa728eeSbellard if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2372eaa728eeSbellard /* conforming */
2373eaa728eeSbellard } else {
237420054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) {
2375eaa728eeSbellard goto fail;
2376eaa728eeSbellard }
237720054ef0SBlue Swirl }
2378eaa728eeSbellard } else {
2379eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2380eaa728eeSbellard switch (type) {
2381eaa728eeSbellard case 1:
2382eaa728eeSbellard case 2:
2383eaa728eeSbellard case 3:
2384eaa728eeSbellard case 9:
2385eaa728eeSbellard case 11:
2386eaa728eeSbellard break;
2387eaa728eeSbellard default:
2388eaa728eeSbellard goto fail;
2389eaa728eeSbellard }
2390eaa728eeSbellard if (dpl < cpl || dpl < rpl) {
2391eaa728eeSbellard fail:
2392ae541c0eSPaolo Bonzini CC_SRC &= ~CC_Z;
2393eaa728eeSbellard return 0;
2394eaa728eeSbellard }
2395eaa728eeSbellard }
2396eaa728eeSbellard limit = get_seg_limit(e1, e2);
2397ae541c0eSPaolo Bonzini CC_SRC |= CC_Z;
2398eaa728eeSbellard return limit;
2399eaa728eeSbellard }
2400eaa728eeSbellard
24012999a0b2SBlue Swirl target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2402eaa728eeSbellard {
2403ae541c0eSPaolo Bonzini uint32_t e1, e2, selector;
2404eaa728eeSbellard int rpl, dpl, cpl, type;
2405eaa728eeSbellard
2406eaa728eeSbellard selector = selector1 & 0xffff;
2407ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS);
240820054ef0SBlue Swirl if ((selector & 0xfffc) == 0) {
2409eaa728eeSbellard goto fail;
241020054ef0SBlue Swirl }
2411100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2412eaa728eeSbellard goto fail;
241320054ef0SBlue Swirl }
2414eaa728eeSbellard rpl = selector & 3;
2415eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2416eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK;
2417eaa728eeSbellard if (e2 & DESC_S_MASK) {
2418eaa728eeSbellard if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2419eaa728eeSbellard /* conforming */
2420eaa728eeSbellard } else {
242120054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) {
2422eaa728eeSbellard goto fail;
2423eaa728eeSbellard }
242420054ef0SBlue Swirl }
2425eaa728eeSbellard } else {
2426eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2427eaa728eeSbellard switch (type) {
2428eaa728eeSbellard case 1:
2429eaa728eeSbellard case 2:
2430eaa728eeSbellard case 3:
2431eaa728eeSbellard case 4:
2432eaa728eeSbellard case 5:
2433eaa728eeSbellard case 9:
2434eaa728eeSbellard case 11:
2435eaa728eeSbellard case 12:
2436eaa728eeSbellard break;
2437eaa728eeSbellard default:
2438eaa728eeSbellard goto fail;
2439eaa728eeSbellard }
2440eaa728eeSbellard if (dpl < cpl || dpl < rpl) {
2441eaa728eeSbellard fail:
2442ae541c0eSPaolo Bonzini CC_SRC &= ~CC_Z;
2443eaa728eeSbellard return 0;
2444eaa728eeSbellard }
2445eaa728eeSbellard }
2446ae541c0eSPaolo Bonzini CC_SRC |= CC_Z;
2447eaa728eeSbellard return e2 & 0x00f0ff00;
2448eaa728eeSbellard }
2449eaa728eeSbellard
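/*
 * VERR and VERW: set ZF if the segment selected by selector1 is readable
 * (VERR) or writable (VERW) at the current privilege level, and clear it
 * otherwise.  The result is delivered through CC_SRC with CC_OP_EFLAGS.
 */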
24502999a0b2SBlue Swirl void helper_verr(CPUX86State *env, target_ulong selector1)
2451eaa728eeSbellard {
2452eaa728eeSbellard uint32_t e1, e2, eflags, selector;
2453eaa728eeSbellard int rpl, dpl, cpl;
2454eaa728eeSbellard
2455eaa728eeSbellard selector = selector1 & 0xffff;
2456abdcc5c8SPaolo Bonzini eflags = cpu_cc_compute_all(env) | CC_Z;
245720054ef0SBlue Swirl if ((selector & 0xfffc) == 0) {
2458eaa728eeSbellard goto fail;
245920054ef0SBlue Swirl }
2460100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2461eaa728eeSbellard goto fail;
246220054ef0SBlue Swirl }
246320054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) {
2464eaa728eeSbellard goto fail;
246520054ef0SBlue Swirl }
2466eaa728eeSbellard rpl = selector & 3;
2467eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2468eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK;
2469eaa728eeSbellard if (e2 & DESC_CS_MASK) {
247020054ef0SBlue Swirl if (!(e2 & DESC_R_MASK)) {
2471eaa728eeSbellard goto fail;
247220054ef0SBlue Swirl }
2473eaa728eeSbellard if (!(e2 & DESC_C_MASK)) {
247420054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) {
2475eaa728eeSbellard goto fail;
2476eaa728eeSbellard }
247720054ef0SBlue Swirl }
2478eaa728eeSbellard } else {
2479eaa728eeSbellard if (dpl < cpl || dpl < rpl) {
2480eaa728eeSbellard fail:
2481abdcc5c8SPaolo Bonzini eflags &= ~CC_Z;
2482eaa728eeSbellard }
2483eaa728eeSbellard }
2484abdcc5c8SPaolo Bonzini CC_SRC = eflags;
2485abdcc5c8SPaolo Bonzini CC_OP = CC_OP_EFLAGS;
2486eaa728eeSbellard }
2487eaa728eeSbellard
24882999a0b2SBlue Swirl void helper_verw(CPUX86State *env, target_ulong selector1)
2489eaa728eeSbellard {
2490eaa728eeSbellard uint32_t e1, e2, eflags, selector;
2491eaa728eeSbellard int rpl, dpl, cpl;
2492eaa728eeSbellard
2493eaa728eeSbellard selector = selector1 & 0xffff;
2494abdcc5c8SPaolo Bonzini eflags = cpu_cc_compute_all(env) | CC_Z;
249520054ef0SBlue Swirl if ((selector & 0xfffc) == 0) {
2496eaa728eeSbellard goto fail;
249720054ef0SBlue Swirl }
2498100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2499eaa728eeSbellard goto fail;
250020054ef0SBlue Swirl }
250120054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) {
2502eaa728eeSbellard goto fail;
250320054ef0SBlue Swirl }
2504eaa728eeSbellard rpl = selector & 3;
2505eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2506eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK;
2507eaa728eeSbellard if (e2 & DESC_CS_MASK) {
2508eaa728eeSbellard goto fail;
2509eaa728eeSbellard } else {
251020054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) {
2511eaa728eeSbellard goto fail;
251220054ef0SBlue Swirl }
2513eaa728eeSbellard if (!(e2 & DESC_W_MASK)) {
2514eaa728eeSbellard fail:
2515abdcc5c8SPaolo Bonzini eflags &= ~CC_Z;
2516eaa728eeSbellard }
2517eaa728eeSbellard }
2518abdcc5c8SPaolo Bonzini CC_SRC = eflags;
2519abdcc5c8SPaolo Bonzini CC_OP = CC_OP_EFLAGS;
2520eaa728eeSbellard }
2521