/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;

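/*
 * Note: pushw/pushl/popw/popl below form their addresses as
 * ss_base + (sp & sp_mask), so the same code handles 16-bit and 32-bit
 * stack segments; the 64-bit pushq/popq (defined further down) use the
 * linear stack pointer directly.  All accesses go through the MMU index
 * and return address recorded in the StackAccess by the caller.
 */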
static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}

static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = PG_MODE_PG;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non-zero on error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

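/*
 * Task switch outline (source is one of the SWITCH_TSS_* values above):
 *  - resolve a task gate to its TSS descriptor and validate type,
 *    presence, limit and busy state;
 *  - save the outgoing context into the current TSS;
 *  - read the incoming context from the new TSS;
 *  - update the busy bits and, for CALL, the backlink and NT flag;
 *  - load CR3, EFLAGS, the general registers, LDTR and the segment
 *    registers, raising #TS/#GP/#NP in the new task's context if any
 *    descriptor check fails.
 */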
/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int mmu_index, index;
    target_ulong ptr;
    X86Access old, new;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* X86Access avoids memory exceptions during the task switch */
    mmu_index = cpu_mmu_index_kernel(env);
    access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
                       MMU_DATA_STORE, mmu_index, retaddr);

    if (source == SWITCH_TSS_CALL) {
        /* Probe for future write of parent task */
        probe_access(env, tss_base, 2, MMU_DATA_STORE,
                     mmu_index, retaddr);
    }
    /* While true tss_limit may be larger, we don't access the iopb here. */
    access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
                       MMU_DATA_LOAD, mmu_index, retaddr);

    /* save the current state in the old TSS */
    old_eflags = cpu_compute_eflags(env);
    if (old_type & 8) {
        /* 32 bit */
        access_stl(&old, env->tr.base + 0x20, next_eip);
        access_stl(&old, env->tr.base + 0x24, old_eflags);
        access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            access_stw(&old, env->tr.base + (0x48 + i * 4),
                       env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        access_stw(&old, env->tr.base + 0x0e, next_eip);
        access_stw(&old, env->tr.base + 0x10, old_eflags);
        access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            access_stw(&old, env->tr.base + (0x22 + i * 2),
                       env->segs[i].selector);
        }
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = access_ldl(&new, tss_base + 0x1c);
        new_eip = access_ldl(&new, tss_base + 0x20);
        new_eflags = access_ldl(&new, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
        }
        new_ldt = access_ldw(&new, tss_base + 0x60);
        new_trap = access_ldl(&new, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = access_ldw(&new, tss_base + 0x0e);
        new_eflags = access_ldw(&new, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
        }
        new_ldt = access_ldw(&new, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }

    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
        if (old_type & 8) {
            access_stl(&old, env->tr.base + 0x24, old_eflags);
        } else {
            access_stw(&old, env->tr.base + 0x10, old_eflags);
        }
    }

    if (source == SWITCH_TSS_CALL) {
        /*
         * Thanks to the probe_access above, we know the first two
         * bytes addressed by &new are writable too.
         */
        access_stw(&new, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */

    /* now if an exception occurs, it will occur in the next task context */

    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_is_fault(int intno)
{
    switch (intno) {
    /*
     * #DB can be both fault- and trap-like, but it never sets RF=1
     * in the RFLAGS value pushed on the stack.
     */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else, including reserved exceptions, is a fault. */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

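/*
 * Protected-mode interrupt/exception delivery builds the following frame
 * on the target stack (32-bit gates push 32-bit slots, 286 gates 16-bit
 * ones):
 *
 *    [SS, ESP]        only when switching to a more privileged stack
 *                     (preceded by GS/FS/DS/ES when coming from vm86 mode)
 *    EFLAGS, CS, EIP
 *    [error code]     only for exceptions that define one
 */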
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, eflags;
    int vm86 = env->eflags & VM_MASK;
    StackAccess sa;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    sa.env = env;
    sa.ra = 0;

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code on the destination stack */
            cpl = env->hflags & HF_CPL_MASK;
            sa.mmu_index = x86_mmu_index_pl(env, cpl);
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                sa.sp_mask = 0xffffffff;
            } else {
                sa.sp_mask = 0xffff;
            }
            sa.sp = env->regs[R_ESP];
            sa.ss_base = env->segs[R_SS].base;
            if (shift) {
                pushl(&sa, error_code);
            } else {
                pushw(&sa, error_code);
            }
            SET_ESP(sa.sp, sa.sp_mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    sa.mmu_index = x86_mmu_index_pl(env, dpl);
    if (dpl < cpl) {
        /* to inner privilege */
        uint32_t esp;
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sa.sp = esp;
        sa.sp_mask = get_sp_mask(ss_e2);
        sa.ss_base = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
        sa.ss_base = env->segs[R_SS].base;
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                pushl(&sa, env->segs[R_GS].selector);
                pushl(&sa, env->segs[R_FS].selector);
                pushl(&sa, env->segs[R_DS].selector);
                pushl(&sa, env->segs[R_ES].selector);
            }
            pushl(&sa, env->segs[R_SS].selector);
            pushl(&sa, env->regs[R_ESP]);
        }
        pushl(&sa, eflags);
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, old_eip);
        if (has_error_code) {
            pushl(&sa, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                pushw(&sa, env->segs[R_GS].selector);
                pushw(&sa, env->segs[R_FS].selector);
                pushw(&sa, env->segs[R_DS].selector);
                pushw(&sa, env->segs[R_ES].selector);
            }
            pushw(&sa, env->segs[R_SS].selector);
            pushw(&sa, env->regs[R_ESP]);
        }
        pushw(&sa, eflags);
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, old_eip);
        if (has_error_code) {
            pushw(&sa, error_code);
        }
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
                               get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(sa.sp, sa.sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

static void pushq(StackAccess *sa, uint64_t val)
{
    sa->sp -= 8;
    cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
}

static uint64_t popq(StackAccess *sa)
{
    uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
    sa->sp += 8;
    return ret;
}

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

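/*
 * 64-bit interrupt delivery differs from the 32-bit path above: IDT
 * entries are 16 bytes, the frame (SS, RSP, RFLAGS, CS, RIP and an
 * optional error code) is always pushed as 64-bit values, RSP is
 * aligned to 16 bytes, and a non-zero IST field selects an alternate
 * stack from the 64-bit TSS.
 */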
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, eflags;
    target_ulong old_eip, offset;
    bool set_rf;
    StackAccess sa;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = x86_mmu_index_pl(env, dpl);
    sa.sp_mask = -1;
    sa.ss_base = 0;
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
    }
    sa.sp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected. */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    pushq(&sa, env->segs[R_SS].selector);
    pushq(&sa, env->regs[R_ESP]);
    pushq(&sa, eflags);
    pushq(&sa, env->segs[R_CS].selector);
    pushq(&sa, old_eip);
    if (has_error_code) {
        pushq(&sa, error_code);
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = sa.sp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

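/*
 * SYSRET: the target CS and SS selectors are derived from MSR_STAR[63:48];
 * in long mode RIP comes from RCX and RFLAGS from R11, while the legacy
 * path simply returns to 32-bit code at ECX with IF set.
 */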
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int selector;
    uint32_t offset;
    uint32_t old_cs, old_eip;
    StackAccess sa;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);

    sa.env = env;
    sa.ra = 0;
    sa.sp = env->regs[R_ESP];
    sa.sp_mask = 0xffff;
    sa.ss_base = env->segs[R_SS].base;
    sa.mmu_index = x86_mmu_index_pl(env, 0);

    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    pushw(&sa, cpu_compute_eflags(env));
    pushw(&sa, old_cs);
    pushw(&sa, old_eip);

    /* update processor state */
    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
Iglesias env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 123820054ef0SBlue Swirl event_inj & ~SVM_EVTINJ_VALID); 12392ed51f5bSaliguori } 124000ea18d1Saliguori #endif 1241eaa728eeSbellard } 1242eaa728eeSbellard 12432999a0b2SBlue Swirl void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1244e694d4e2SBlue Swirl { 12456aa9e42fSRichard Henderson do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1246e694d4e2SBlue Swirl } 1247e694d4e2SBlue Swirl 12482999a0b2SBlue Swirl void helper_lldt(CPUX86State *env, int selector) 1249eaa728eeSbellard { 1250eaa728eeSbellard SegmentCache *dt; 1251eaa728eeSbellard uint32_t e1, e2; 1252eaa728eeSbellard int index, entry_limit; 1253eaa728eeSbellard target_ulong ptr; 1254eaa728eeSbellard 1255eaa728eeSbellard selector &= 0xffff; 1256eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1257eaa728eeSbellard /* XXX: NULL selector case: invalid LDT */ 1258eaa728eeSbellard env->ldt.base = 0; 1259eaa728eeSbellard env->ldt.limit = 0; 1260eaa728eeSbellard } else { 126120054ef0SBlue Swirl if (selector & 0x4) { 1262100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 126320054ef0SBlue Swirl } 1264eaa728eeSbellard dt = &env->gdt; 1265eaa728eeSbellard index = selector & ~7; 1266eaa728eeSbellard #ifdef TARGET_X86_64 126720054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1268eaa728eeSbellard entry_limit = 15; 126920054ef0SBlue Swirl } else 1270eaa728eeSbellard #endif 127120054ef0SBlue Swirl { 1272eaa728eeSbellard entry_limit = 7; 127320054ef0SBlue Swirl } 127420054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1275100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 127620054ef0SBlue Swirl } 1277eaa728eeSbellard ptr = dt->base + index; 1278100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1279100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 128020054ef0SBlue Swirl if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1281100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 128220054ef0SBlue Swirl } 128320054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1284100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 128520054ef0SBlue Swirl } 1286eaa728eeSbellard #ifdef TARGET_X86_64 1287eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1288eaa728eeSbellard uint32_t e3; 128920054ef0SBlue Swirl 1290100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1291eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1292eaa728eeSbellard env->ldt.base |= (target_ulong)e3 << 32; 1293eaa728eeSbellard } else 1294eaa728eeSbellard #endif 1295eaa728eeSbellard { 1296eaa728eeSbellard load_seg_cache_raw_dt(&env->ldt, e1, e2); 1297eaa728eeSbellard } 1298eaa728eeSbellard } 1299eaa728eeSbellard env->ldt.selector = selector; 1300eaa728eeSbellard } 1301eaa728eeSbellard 13022999a0b2SBlue Swirl void helper_ltr(CPUX86State *env, int selector) 1303eaa728eeSbellard { 1304eaa728eeSbellard SegmentCache *dt; 1305eaa728eeSbellard uint32_t e1, e2; 1306eaa728eeSbellard int index, type, entry_limit; 1307eaa728eeSbellard target_ulong ptr; 1308eaa728eeSbellard 1309eaa728eeSbellard selector &= 0xffff; 1310eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1311eaa728eeSbellard /* NULL selector case: invalid TR */ 1312eaa728eeSbellard env->tr.base = 0; 1313eaa728eeSbellard env->tr.limit = 0; 1314eaa728eeSbellard env->tr.flags = 0; 1315eaa728eeSbellard } 
else { 131620054ef0SBlue Swirl if (selector & 0x4) { 1317100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 131820054ef0SBlue Swirl } 1319eaa728eeSbellard dt = &env->gdt; 1320eaa728eeSbellard index = selector & ~7; 1321eaa728eeSbellard #ifdef TARGET_X86_64 132220054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 1323eaa728eeSbellard entry_limit = 15; 132420054ef0SBlue Swirl } else 1325eaa728eeSbellard #endif 132620054ef0SBlue Swirl { 1327eaa728eeSbellard entry_limit = 7; 132820054ef0SBlue Swirl } 132920054ef0SBlue Swirl if ((index + entry_limit) > dt->limit) { 1330100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 133120054ef0SBlue Swirl } 1332eaa728eeSbellard ptr = dt->base + index; 1333100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1334100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1335eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1336eaa728eeSbellard if ((e2 & DESC_S_MASK) || 133720054ef0SBlue Swirl (type != 1 && type != 9)) { 1338100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 133920054ef0SBlue Swirl } 134020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1341100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 134220054ef0SBlue Swirl } 1343eaa728eeSbellard #ifdef TARGET_X86_64 1344eaa728eeSbellard if (env->hflags & HF_LMA_MASK) { 1345eaa728eeSbellard uint32_t e3, e4; 134620054ef0SBlue Swirl 1347100ec099SPavel Dovgalyuk e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1348100ec099SPavel Dovgalyuk e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 134920054ef0SBlue Swirl if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1350100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 135120054ef0SBlue Swirl } 1352eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1353eaa728eeSbellard env->tr.base |= (target_ulong)e3 << 32; 1354eaa728eeSbellard } else 1355eaa728eeSbellard #endif 1356eaa728eeSbellard { 1357eaa728eeSbellard load_seg_cache_raw_dt(&env->tr, e1, e2); 1358eaa728eeSbellard } 1359eaa728eeSbellard e2 |= DESC_TSS_BUSY_MASK; 1360100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1361eaa728eeSbellard } 1362eaa728eeSbellard env->tr.selector = selector; 1363eaa728eeSbellard } 1364eaa728eeSbellard 1365eaa728eeSbellard /* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ 13662999a0b2SBlue Swirl void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1367eaa728eeSbellard { 1368eaa728eeSbellard uint32_t e1, e2; 1369eaa728eeSbellard int cpl, dpl, rpl; 1370eaa728eeSbellard SegmentCache *dt; 1371eaa728eeSbellard int index; 1372eaa728eeSbellard target_ulong ptr; 1373eaa728eeSbellard 1374eaa728eeSbellard selector &= 0xffff; 1375eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1376eaa728eeSbellard if ((selector & 0xfffc) == 0) { 1377eaa728eeSbellard /* null selector case */ 1378eaa728eeSbellard if (seg_reg == R_SS 1379eaa728eeSbellard #ifdef TARGET_X86_64 1380eaa728eeSbellard && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1381eaa728eeSbellard #endif 138220054ef0SBlue Swirl ) { 1383100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 138420054ef0SBlue Swirl } 1385eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1386eaa728eeSbellard } else { 1387eaa728eeSbellard 138820054ef0SBlue Swirl if (selector & 0x4) { 1389eaa728eeSbellard dt = &env->ldt; 139020054ef0SBlue Swirl } else { 1391eaa728eeSbellard dt = &env->gdt; 139220054ef0SBlue Swirl } 1393eaa728eeSbellard index = selector & ~7; 139420054ef0SBlue Swirl if ((index + 7) > dt->limit) { 1395100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 139620054ef0SBlue Swirl } 1397eaa728eeSbellard ptr = dt->base + index; 1398100ec099SPavel Dovgalyuk e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1399100ec099SPavel Dovgalyuk e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1400eaa728eeSbellard 140120054ef0SBlue Swirl if (!(e2 & DESC_S_MASK)) { 1402100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 140320054ef0SBlue Swirl } 1404eaa728eeSbellard rpl = selector & 3; 1405eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1406eaa728eeSbellard if (seg_reg == R_SS) { 1407eaa728eeSbellard /* must be writable segment */ 140820054ef0SBlue Swirl if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1409100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141020054ef0SBlue Swirl } 141120054ef0SBlue Swirl if (rpl != cpl || dpl != cpl) { 1412100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141320054ef0SBlue Swirl } 1414eaa728eeSbellard } else { 1415eaa728eeSbellard /* must be readable segment */ 141620054ef0SBlue Swirl if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1417100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 141820054ef0SBlue Swirl } 1419eaa728eeSbellard 1420eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1421eaa728eeSbellard /* if not conforming code, test rights */ 142220054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1423100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1424eaa728eeSbellard } 1425eaa728eeSbellard } 142620054ef0SBlue Swirl } 1427eaa728eeSbellard 1428eaa728eeSbellard if (!(e2 & DESC_P_MASK)) { 142920054ef0SBlue Swirl if (seg_reg == R_SS) { 1430100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 143120054ef0SBlue Swirl } else { 1432100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1433eaa728eeSbellard } 143420054ef0SBlue Swirl } 1435eaa728eeSbellard 1436eaa728eeSbellard /* set the access bit if not already set */ 1437eaa728eeSbellard if (!(e2 & DESC_A_MASK)) { 
1438eaa728eeSbellard e2 |= DESC_A_MASK; 1439100ec099SPavel Dovgalyuk cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1440eaa728eeSbellard } 1441eaa728eeSbellard 1442eaa728eeSbellard cpu_x86_load_seg_cache(env, seg_reg, selector, 1443eaa728eeSbellard get_seg_base(e1, e2), 1444eaa728eeSbellard get_seg_limit(e1, e2), 1445eaa728eeSbellard e2); 1446eaa728eeSbellard #if 0 144793fcfe39Saliguori qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1448eaa728eeSbellard selector, (unsigned long)sc->base, sc->limit, sc->flags); 1449eaa728eeSbellard #endif 1450eaa728eeSbellard } 1451eaa728eeSbellard } 1452eaa728eeSbellard 1453eaa728eeSbellard /* protected mode jump */ 14542999a0b2SBlue Swirl void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1455100ec099SPavel Dovgalyuk target_ulong next_eip) 1456eaa728eeSbellard { 1457eaa728eeSbellard int gate_cs, type; 1458eaa728eeSbellard uint32_t e1, e2, cpl, dpl, rpl, limit; 1459eaa728eeSbellard 146020054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1461100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 146220054ef0SBlue Swirl } 1463100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1464100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 146520054ef0SBlue Swirl } 1466eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1467eaa728eeSbellard if (e2 & DESC_S_MASK) { 146820054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1469100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 147020054ef0SBlue Swirl } 1471eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1472eaa728eeSbellard if (e2 & DESC_C_MASK) { 1473eaa728eeSbellard /* conforming code segment */ 147420054ef0SBlue Swirl if (dpl > cpl) { 1475100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 147620054ef0SBlue Swirl } 1477eaa728eeSbellard } else { 1478eaa728eeSbellard /* non conforming code segment */ 1479eaa728eeSbellard rpl = new_cs & 3; 148020054ef0SBlue Swirl if (rpl > cpl) { 1481100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1482eaa728eeSbellard } 148320054ef0SBlue Swirl if (dpl != cpl) { 1484100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 148520054ef0SBlue Swirl } 148620054ef0SBlue Swirl } 148720054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1488100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 148920054ef0SBlue Swirl } 1490eaa728eeSbellard limit = get_seg_limit(e1, e2); 1491eaa728eeSbellard if (new_eip > limit && 1492db7196dbSAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1493db7196dbSAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 149420054ef0SBlue Swirl } 1495eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1496eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1497a78d0eabSliguang env->eip = new_eip; 1498eaa728eeSbellard } else { 1499eaa728eeSbellard /* jump to call or task gate */ 1500eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1501eaa728eeSbellard rpl = new_cs & 3; 1502eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1503eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 15040aca0605SAndrew Oates 15050aca0605SAndrew Oates #ifdef TARGET_X86_64 15060aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15070aca0605SAndrew Oates if (type != 12) { 15080aca0605SAndrew Oates 
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 15090aca0605SAndrew Oates } 15100aca0605SAndrew Oates } 15110aca0605SAndrew Oates #endif 1512eaa728eeSbellard switch (type) { 1513eaa728eeSbellard case 1: /* 286 TSS */ 1514eaa728eeSbellard case 9: /* 386 TSS */ 1515eaa728eeSbellard case 5: /* task gate */ 151620054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1517100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 151820054ef0SBlue Swirl } 1519100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1520eaa728eeSbellard break; 1521eaa728eeSbellard case 4: /* 286 call gate */ 1522eaa728eeSbellard case 12: /* 386 call gate */ 152320054ef0SBlue Swirl if ((dpl < cpl) || (dpl < rpl)) { 1524100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 152520054ef0SBlue Swirl } 152620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1527100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 152820054ef0SBlue Swirl } 1529eaa728eeSbellard gate_cs = e1 >> 16; 1530eaa728eeSbellard new_eip = (e1 & 0xffff); 153120054ef0SBlue Swirl if (type == 12) { 1532eaa728eeSbellard new_eip |= (e2 & 0xffff0000); 153320054ef0SBlue Swirl } 15340aca0605SAndrew Oates 15350aca0605SAndrew Oates #ifdef TARGET_X86_64 15360aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15370aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 15380aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 15390aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15400aca0605SAndrew Oates GETPC()); 15410aca0605SAndrew Oates } 15420aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 15430aca0605SAndrew Oates if (type != 0) { 15440aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 15450aca0605SAndrew Oates GETPC()); 15460aca0605SAndrew Oates } 15470aca0605SAndrew Oates new_eip |= ((target_ulong)e1) << 32; 15480aca0605SAndrew Oates } 15490aca0605SAndrew Oates #endif 15500aca0605SAndrew Oates 1551100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1552100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 155320054ef0SBlue Swirl } 1554eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1555eaa728eeSbellard /* must be code segment */ 1556eaa728eeSbellard if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 155720054ef0SBlue Swirl (DESC_S_MASK | DESC_CS_MASK))) { 1558100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 155920054ef0SBlue Swirl } 1560eaa728eeSbellard if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 156120054ef0SBlue Swirl (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1562100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 156320054ef0SBlue Swirl } 15640aca0605SAndrew Oates #ifdef TARGET_X86_64 15650aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 15660aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 15670aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15680aca0605SAndrew Oates } 15690aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 15700aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 15710aca0605SAndrew Oates } 15720aca0605SAndrew Oates } 15730aca0605SAndrew Oates #endif 157420054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1575100ec099SPavel Dovgalyuk 
raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 157620054ef0SBlue Swirl } 1577eaa728eeSbellard limit = get_seg_limit(e1, e2); 15780aca0605SAndrew Oates if (new_eip > limit && 15790aca0605SAndrew Oates (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1580100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 158120054ef0SBlue Swirl } 1582eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1583eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1584a78d0eabSliguang env->eip = new_eip; 1585eaa728eeSbellard break; 1586eaa728eeSbellard default: 1587100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1588eaa728eeSbellard break; 1589eaa728eeSbellard } 1590eaa728eeSbellard } 1591eaa728eeSbellard } 1592eaa728eeSbellard 1593eaa728eeSbellard /* real mode call */ 15948c03ab9fSRichard Henderson void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, 15958c03ab9fSRichard Henderson int shift, uint32_t next_eip) 1596eaa728eeSbellard { 1597059368bcSRichard Henderson StackAccess sa; 1598eaa728eeSbellard 1599059368bcSRichard Henderson sa.env = env; 1600059368bcSRichard Henderson sa.ra = GETPC(); 1601059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1602059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1603059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1604e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1605059368bcSRichard Henderson 1606eaa728eeSbellard if (shift) { 1607059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1608059368bcSRichard Henderson pushl(&sa, next_eip); 1609eaa728eeSbellard } else { 1610059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1611059368bcSRichard Henderson pushw(&sa, next_eip); 1612eaa728eeSbellard } 1613eaa728eeSbellard 1614059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1615eaa728eeSbellard env->eip = new_eip; 1616eaa728eeSbellard env->segs[R_CS].selector = new_cs; 1617eaa728eeSbellard env->segs[R_CS].base = (new_cs << 4); 1618eaa728eeSbellard } 1619eaa728eeSbellard 1620eaa728eeSbellard /* protected mode call */ 16212999a0b2SBlue Swirl void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1622100ec099SPavel Dovgalyuk int shift, target_ulong next_eip) 1623eaa728eeSbellard { 1624eaa728eeSbellard int new_stack, i; 16250aca0605SAndrew Oates uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1626059368bcSRichard Henderson uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; 1627eaa728eeSbellard uint32_t val, limit, old_sp_mask; 1628059368bcSRichard Henderson target_ulong old_ssp, offset; 1629059368bcSRichard Henderson StackAccess sa; 1630eaa728eeSbellard 16310aca0605SAndrew Oates LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 16326aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 163320054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 1634100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 163520054ef0SBlue Swirl } 1636100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1637100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 163820054ef0SBlue Swirl } 1639eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 1640d12d51d5Saliguori LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1641059368bcSRichard Henderson 1642059368bcSRichard Henderson sa.env = env; 1643059368bcSRichard Henderson sa.ra = GETPC(); 
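    /*
     * Rough overview of what follows (no change in behaviour): two cases.
     * If the destination descriptor is a code segment (DESC_S_MASK set),
     * this is a plain far call and only CS:(E)IP is pushed on the current
     * stack.  If it is a call gate, the target CS and entry point come
     * from the gate itself and, when the target is more privileged, a new
     * SS:(E)SP is fetched from the TSS; the old stack pointer (plus up to
     * 31 copied parameters for 16/32-bit gates) is pushed there before
     * CS:(E)IP, e.g. for a 32-bit gate:
     *
     *     new stack: old SS, old ESP, param[n-1..0], old CS, next EIP
     */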
1644059368bcSRichard Henderson 1645eaa728eeSbellard if (e2 & DESC_S_MASK) { 1646e136648cSPaolo Bonzini /* "normal" far call, no stack switch possible */ 164720054ef0SBlue Swirl if (!(e2 & DESC_CS_MASK)) { 1648100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 164920054ef0SBlue Swirl } 1650eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1651eaa728eeSbellard if (e2 & DESC_C_MASK) { 1652eaa728eeSbellard /* conforming code segment */ 165320054ef0SBlue Swirl if (dpl > cpl) { 1654100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 165520054ef0SBlue Swirl } 1656eaa728eeSbellard } else { 1657eaa728eeSbellard /* non conforming code segment */ 1658eaa728eeSbellard rpl = new_cs & 3; 165920054ef0SBlue Swirl if (rpl > cpl) { 1660100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1661eaa728eeSbellard } 166220054ef0SBlue Swirl if (dpl != cpl) { 1663100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 166420054ef0SBlue Swirl } 166520054ef0SBlue Swirl } 166620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1667100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 166820054ef0SBlue Swirl } 1669eaa728eeSbellard 1670e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1671eaa728eeSbellard #ifdef TARGET_X86_64 1672eaa728eeSbellard /* XXX: check 16/32 bit cases in long mode */ 1673eaa728eeSbellard if (shift == 2) { 1674eaa728eeSbellard /* 64 bit case */ 1675059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1676059368bcSRichard Henderson sa.sp_mask = -1; 1677059368bcSRichard Henderson sa.ss_base = 0; 1678059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1679059368bcSRichard Henderson pushq(&sa, next_eip); 1680eaa728eeSbellard /* from this point, not restartable */ 1681059368bcSRichard Henderson env->regs[R_ESP] = sa.sp; 1682eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1683eaa728eeSbellard get_seg_base(e1, e2), 1684eaa728eeSbellard get_seg_limit(e1, e2), e2); 1685a78d0eabSliguang env->eip = new_eip; 1686eaa728eeSbellard } else 1687eaa728eeSbellard #endif 1688eaa728eeSbellard { 1689059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1690059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1691059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1692eaa728eeSbellard if (shift) { 1693059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1694059368bcSRichard Henderson pushl(&sa, next_eip); 1695eaa728eeSbellard } else { 1696059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1697059368bcSRichard Henderson pushw(&sa, next_eip); 1698eaa728eeSbellard } 1699eaa728eeSbellard 1700eaa728eeSbellard limit = get_seg_limit(e1, e2); 170120054ef0SBlue Swirl if (new_eip > limit) { 1702100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 170320054ef0SBlue Swirl } 1704eaa728eeSbellard /* from this point, not restartable */ 1705059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1706eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1707eaa728eeSbellard get_seg_base(e1, e2), limit, e2); 1708a78d0eabSliguang env->eip = new_eip; 1709eaa728eeSbellard } 1710eaa728eeSbellard } else { 1711eaa728eeSbellard /* check gate type */ 1712eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1713eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1714eaa728eeSbellard 
rpl = new_cs & 3; 17150aca0605SAndrew Oates 17160aca0605SAndrew Oates #ifdef TARGET_X86_64 17170aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17180aca0605SAndrew Oates if (type != 12) { 17190aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 17200aca0605SAndrew Oates } 17210aca0605SAndrew Oates } 17220aca0605SAndrew Oates #endif 17230aca0605SAndrew Oates 1724eaa728eeSbellard switch (type) { 1725eaa728eeSbellard case 1: /* available 286 TSS */ 1726eaa728eeSbellard case 9: /* available 386 TSS */ 1727eaa728eeSbellard case 5: /* task gate */ 172820054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1729100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 173020054ef0SBlue Swirl } 1731100ec099SPavel Dovgalyuk switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1732eaa728eeSbellard return; 1733eaa728eeSbellard case 4: /* 286 call gate */ 1734eaa728eeSbellard case 12: /* 386 call gate */ 1735eaa728eeSbellard break; 1736eaa728eeSbellard default: 1737100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1738eaa728eeSbellard break; 1739eaa728eeSbellard } 1740eaa728eeSbellard shift = type >> 3; 1741eaa728eeSbellard 174220054ef0SBlue Swirl if (dpl < cpl || dpl < rpl) { 1743100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 174420054ef0SBlue Swirl } 1745eaa728eeSbellard /* check valid bit */ 174620054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1747100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 174820054ef0SBlue Swirl } 1749eaa728eeSbellard selector = e1 >> 16; 1750eaa728eeSbellard param_count = e2 & 0x1f; 17510aca0605SAndrew Oates offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 17520aca0605SAndrew Oates #ifdef TARGET_X86_64 17530aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 17540aca0605SAndrew Oates /* load the upper 8 bytes of the 64-bit call gate */ 17550aca0605SAndrew Oates if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 17560aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17570aca0605SAndrew Oates GETPC()); 17580aca0605SAndrew Oates } 17590aca0605SAndrew Oates type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 17600aca0605SAndrew Oates if (type != 0) { 17610aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 17620aca0605SAndrew Oates GETPC()); 17630aca0605SAndrew Oates } 17640aca0605SAndrew Oates offset |= ((target_ulong)e1) << 32; 17650aca0605SAndrew Oates } 17660aca0605SAndrew Oates #endif 176720054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 1768100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 176920054ef0SBlue Swirl } 1770eaa728eeSbellard 1771100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1772100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 177320054ef0SBlue Swirl } 177420054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1775100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 177620054ef0SBlue Swirl } 1777eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 177820054ef0SBlue Swirl if (dpl > cpl) { 1779100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 178020054ef0SBlue Swirl } 17810aca0605SAndrew Oates #ifdef TARGET_X86_64 17820aca0605SAndrew Oates if (env->efer & MSR_EFER_LMA) { 
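            /*
             * Long-mode constraint checked below, in short: the code
             * segment referenced by a 64-bit call gate must itself be a
             * 64-bit code segment, i.e. L=1 and D/B=0 in its descriptor;
             * anything else faults with #GP(selector).
             */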
17830aca0605SAndrew Oates if (!(e2 & DESC_L_MASK)) { 17840aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 17850aca0605SAndrew Oates } 17860aca0605SAndrew Oates if (e2 & DESC_B_MASK) { 17870aca0605SAndrew Oates raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 17880aca0605SAndrew Oates } 17890aca0605SAndrew Oates shift++; 17900aca0605SAndrew Oates } 17910aca0605SAndrew Oates #endif 179220054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 1793100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 179420054ef0SBlue Swirl } 1795eaa728eeSbellard 1796eaa728eeSbellard if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1797eaa728eeSbellard /* to inner privilege */ 1798e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, dpl); 17990aca0605SAndrew Oates #ifdef TARGET_X86_64 18000aca0605SAndrew Oates if (shift == 2) { 18010aca0605SAndrew Oates ss = dpl; /* SS = NULL selector with RPL = new CPL */ 18020aca0605SAndrew Oates new_stack = 1; 1803059368bcSRichard Henderson sa.sp = get_rsp_from_tss(env, dpl); 1804059368bcSRichard Henderson sa.sp_mask = -1; 1805059368bcSRichard Henderson sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ 18060aca0605SAndrew Oates LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1807059368bcSRichard Henderson TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); 18080aca0605SAndrew Oates } else 18090aca0605SAndrew Oates #endif 18100aca0605SAndrew Oates { 18110aca0605SAndrew Oates uint32_t sp32; 18120aca0605SAndrew Oates get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 181390a2541bSliguang LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 18140aca0605SAndrew Oates TARGET_FMT_lx "\n", ss, sp32, param_count, 181590a2541bSliguang env->regs[R_ESP]); 181620054ef0SBlue Swirl if ((ss & 0xfffc) == 0) { 1817100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 181820054ef0SBlue Swirl } 181920054ef0SBlue Swirl if ((ss & 3) != dpl) { 1820100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 182120054ef0SBlue Swirl } 1822100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1823100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 182420054ef0SBlue Swirl } 1825eaa728eeSbellard ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 182620054ef0SBlue Swirl if (ss_dpl != dpl) { 1827100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 182820054ef0SBlue Swirl } 1829eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 1830eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 183120054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 1832100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 183320054ef0SBlue Swirl } 183420054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 1835100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 183620054ef0SBlue Swirl } 1837eaa728eeSbellard 1838059368bcSRichard Henderson sa.sp = sp32; 1839059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 1840059368bcSRichard Henderson sa.ss_base = get_seg_base(ss_e1, ss_e2); 18410aca0605SAndrew Oates } 18420aca0605SAndrew Oates 184320054ef0SBlue Swirl /* push_size = ((param_count * 2) + 8) << shift; */ 1844eaa728eeSbellard old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1845eaa728eeSbellard old_ssp = env->segs[R_SS].base; 1846059368bcSRichard Henderson 18470aca0605SAndrew Oates #ifdef TARGET_X86_64 
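        /*
         * For a 64-bit call gate (shift == 2) the old SS selector and RSP
         * are pushed as two 8-byte values on the new stack and no
         * parameters are copied; the 16/32-bit paths below copy
         * param_count words/dwords from the old stack instead.
         */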
18480aca0605SAndrew Oates if (shift == 2) { 18490aca0605SAndrew Oates /* XXX: verify if new stack address is canonical */ 1850059368bcSRichard Henderson pushq(&sa, env->segs[R_SS].selector); 1851059368bcSRichard Henderson pushq(&sa, env->regs[R_ESP]); 18520aca0605SAndrew Oates /* parameters aren't supported for 64-bit call gates */ 18530aca0605SAndrew Oates } else 18540aca0605SAndrew Oates #endif 18550aca0605SAndrew Oates if (shift == 1) { 1856059368bcSRichard Henderson pushl(&sa, env->segs[R_SS].selector); 1857059368bcSRichard Henderson pushl(&sa, env->regs[R_ESP]); 1858eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18590bd385e7SPaolo Bonzini val = cpu_ldl_data_ra(env, 18600bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), 18610bd385e7SPaolo Bonzini GETPC()); 1862059368bcSRichard Henderson pushl(&sa, val); 1863eaa728eeSbellard } 1864eaa728eeSbellard } else { 1865059368bcSRichard Henderson pushw(&sa, env->segs[R_SS].selector); 1866059368bcSRichard Henderson pushw(&sa, env->regs[R_ESP]); 1867eaa728eeSbellard for (i = param_count - 1; i >= 0; i--) { 18680bd385e7SPaolo Bonzini val = cpu_lduw_data_ra(env, 18690bd385e7SPaolo Bonzini old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), 18700bd385e7SPaolo Bonzini GETPC()); 1871059368bcSRichard Henderson pushw(&sa, val); 1872eaa728eeSbellard } 1873eaa728eeSbellard } 1874eaa728eeSbellard new_stack = 1; 1875eaa728eeSbellard } else { 1876eaa728eeSbellard /* to same privilege */ 1877e136648cSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 1878059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1879059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1880059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 188120054ef0SBlue Swirl /* push_size = (4 << shift); */ 1882eaa728eeSbellard new_stack = 0; 1883eaa728eeSbellard } 1884eaa728eeSbellard 18850aca0605SAndrew Oates #ifdef TARGET_X86_64 18860aca0605SAndrew Oates if (shift == 2) { 1887059368bcSRichard Henderson pushq(&sa, env->segs[R_CS].selector); 1888059368bcSRichard Henderson pushq(&sa, next_eip); 18890aca0605SAndrew Oates } else 18900aca0605SAndrew Oates #endif 18910aca0605SAndrew Oates if (shift == 1) { 1892059368bcSRichard Henderson pushl(&sa, env->segs[R_CS].selector); 1893059368bcSRichard Henderson pushl(&sa, next_eip); 1894eaa728eeSbellard } else { 1895059368bcSRichard Henderson pushw(&sa, env->segs[R_CS].selector); 1896059368bcSRichard Henderson pushw(&sa, next_eip); 1897eaa728eeSbellard } 1898eaa728eeSbellard 1899eaa728eeSbellard /* from this point, not restartable */ 1900eaa728eeSbellard 1901eaa728eeSbellard if (new_stack) { 19020aca0605SAndrew Oates #ifdef TARGET_X86_64 19030aca0605SAndrew Oates if (shift == 2) { 19040aca0605SAndrew Oates cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 19050aca0605SAndrew Oates } else 19060aca0605SAndrew Oates #endif 19070aca0605SAndrew Oates { 1908eaa728eeSbellard ss = (ss & ~3) | dpl; 1909eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, ss, 1910059368bcSRichard Henderson sa.ss_base, 1911eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 1912eaa728eeSbellard ss_e2); 1913eaa728eeSbellard } 19140aca0605SAndrew Oates } 1915eaa728eeSbellard 1916eaa728eeSbellard selector = (selector & ~3) | dpl; 1917eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, selector, 1918eaa728eeSbellard get_seg_base(e1, e2), 1919eaa728eeSbellard get_seg_limit(e1, e2), 1920eaa728eeSbellard e2); 1921059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1922a78d0eabSliguang env->eip = offset; 
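            /*
             * The far transfer through the gate is now complete: CS:EIP
             * come from the gate target and, when a stack switch occurred
             * above, SS:ESP already point at the inner-privilege stack
             * holding the old SS:ESP, any copied parameters and the
             * return CS:EIP.
             */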
1923eaa728eeSbellard } 1924eaa728eeSbellard } 1925eaa728eeSbellard 1926eaa728eeSbellard /* real and vm86 mode iret */ 19272999a0b2SBlue Swirl void helper_iret_real(CPUX86State *env, int shift) 1928eaa728eeSbellard { 1929059368bcSRichard Henderson uint32_t new_cs, new_eip, new_eflags; 1930eaa728eeSbellard int eflags_mask; 1931059368bcSRichard Henderson StackAccess sa; 1932eaa728eeSbellard 1933059368bcSRichard Henderson sa.env = env; 1934059368bcSRichard Henderson sa.ra = GETPC(); 19358053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, 0); 1936059368bcSRichard Henderson sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ 1937059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 1938059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 1939059368bcSRichard Henderson 1940eaa728eeSbellard if (shift == 1) { 1941eaa728eeSbellard /* 32 bits */ 1942059368bcSRichard Henderson new_eip = popl(&sa); 1943059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 1944059368bcSRichard Henderson new_eflags = popl(&sa); 1945eaa728eeSbellard } else { 1946eaa728eeSbellard /* 16 bits */ 1947059368bcSRichard Henderson new_eip = popw(&sa); 1948059368bcSRichard Henderson new_cs = popw(&sa); 1949059368bcSRichard Henderson new_eflags = popw(&sa); 1950eaa728eeSbellard } 1951059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 1952bdadc0b5Smalc env->segs[R_CS].selector = new_cs; 1953bdadc0b5Smalc env->segs[R_CS].base = (new_cs << 4); 1954eaa728eeSbellard env->eip = new_eip; 195520054ef0SBlue Swirl if (env->eflags & VM_MASK) { 195620054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 195720054ef0SBlue Swirl NT_MASK; 195820054ef0SBlue Swirl } else { 195920054ef0SBlue Swirl eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 196020054ef0SBlue Swirl RF_MASK | NT_MASK; 196120054ef0SBlue Swirl } 196220054ef0SBlue Swirl if (shift == 0) { 1963eaa728eeSbellard eflags_mask &= 0xffff; 196420054ef0SBlue Swirl } 1965997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 1966db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 1967eaa728eeSbellard } 1968eaa728eeSbellard 1969c117e5b1SPhilippe Mathieu-Daudé static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 1970eaa728eeSbellard { 1971eaa728eeSbellard int dpl; 1972eaa728eeSbellard uint32_t e2; 1973eaa728eeSbellard 1974eaa728eeSbellard /* XXX: on x86_64, we do not want to nullify FS and GS because 1975eaa728eeSbellard they may still contain a valid base. 
I would be interested to 1976eaa728eeSbellard know how a real x86_64 CPU behaves */ 1977eaa728eeSbellard if ((seg_reg == R_FS || seg_reg == R_GS) && 197820054ef0SBlue Swirl (env->segs[seg_reg].selector & 0xfffc) == 0) { 1979eaa728eeSbellard return; 198020054ef0SBlue Swirl } 1981eaa728eeSbellard 1982eaa728eeSbellard e2 = env->segs[seg_reg].flags; 1983eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1984eaa728eeSbellard if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1985eaa728eeSbellard /* data or non conforming code segment */ 1986eaa728eeSbellard if (dpl < cpl) { 1987c2ba0515SBin Meng cpu_x86_load_seg_cache(env, seg_reg, 0, 1988c2ba0515SBin Meng env->segs[seg_reg].base, 1989c2ba0515SBin Meng env->segs[seg_reg].limit, 1990c2ba0515SBin Meng env->segs[seg_reg].flags & ~DESC_P_MASK); 1991eaa728eeSbellard } 1992eaa728eeSbellard } 1993eaa728eeSbellard } 1994eaa728eeSbellard 1995eaa728eeSbellard /* protected mode iret */ 19962999a0b2SBlue Swirl static inline void helper_ret_protected(CPUX86State *env, int shift, 1997100ec099SPavel Dovgalyuk int is_iret, int addend, 1998100ec099SPavel Dovgalyuk uintptr_t retaddr) 1999eaa728eeSbellard { 2000eaa728eeSbellard uint32_t new_cs, new_eflags, new_ss; 2001eaa728eeSbellard uint32_t new_es, new_ds, new_fs, new_gs; 2002eaa728eeSbellard uint32_t e1, e2, ss_e1, ss_e2; 2003eaa728eeSbellard int cpl, dpl, rpl, eflags_mask, iopl; 2004059368bcSRichard Henderson target_ulong new_eip, new_esp; 2005059368bcSRichard Henderson StackAccess sa; 2006059368bcSRichard Henderson 20078053862aSPaolo Bonzini cpl = env->hflags & HF_CPL_MASK; 20088053862aSPaolo Bonzini 2009059368bcSRichard Henderson sa.env = env; 2010059368bcSRichard Henderson sa.ra = retaddr; 20118053862aSPaolo Bonzini sa.mmu_index = x86_mmu_index_pl(env, cpl); 2012eaa728eeSbellard 2013eaa728eeSbellard #ifdef TARGET_X86_64 201420054ef0SBlue Swirl if (shift == 2) { 2015059368bcSRichard Henderson sa.sp_mask = -1; 201620054ef0SBlue Swirl } else 2017eaa728eeSbellard #endif 201820054ef0SBlue Swirl { 2019059368bcSRichard Henderson sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 202020054ef0SBlue Swirl } 2021059368bcSRichard Henderson sa.sp = env->regs[R_ESP]; 2022059368bcSRichard Henderson sa.ss_base = env->segs[R_SS].base; 2023eaa728eeSbellard new_eflags = 0; /* avoid warning */ 2024eaa728eeSbellard #ifdef TARGET_X86_64 2025eaa728eeSbellard if (shift == 2) { 2026059368bcSRichard Henderson new_eip = popq(&sa); 2027059368bcSRichard Henderson new_cs = popq(&sa) & 0xffff; 2028eaa728eeSbellard if (is_iret) { 2029059368bcSRichard Henderson new_eflags = popq(&sa); 2030eaa728eeSbellard } 2031eaa728eeSbellard } else 2032eaa728eeSbellard #endif 203320054ef0SBlue Swirl { 2034eaa728eeSbellard if (shift == 1) { 2035eaa728eeSbellard /* 32 bits */ 2036059368bcSRichard Henderson new_eip = popl(&sa); 2037059368bcSRichard Henderson new_cs = popl(&sa) & 0xffff; 2038eaa728eeSbellard if (is_iret) { 2039059368bcSRichard Henderson new_eflags = popl(&sa); 204020054ef0SBlue Swirl if (new_eflags & VM_MASK) { 2041eaa728eeSbellard goto return_to_vm86; 2042eaa728eeSbellard } 204320054ef0SBlue Swirl } 2044eaa728eeSbellard } else { 2045eaa728eeSbellard /* 16 bits */ 2046059368bcSRichard Henderson new_eip = popw(&sa); 2047059368bcSRichard Henderson new_cs = popw(&sa); 204820054ef0SBlue Swirl if (is_iret) { 2049059368bcSRichard Henderson new_eflags = popw(&sa); 2050eaa728eeSbellard } 205120054ef0SBlue Swirl } 205220054ef0SBlue Swirl } 2053d12d51d5Saliguori LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2054eaa728eeSbellard 
new_cs, new_eip, shift, addend); 20556aa9e42fSRichard Henderson LOG_PCALL_STATE(env_cpu(env)); 205620054ef0SBlue Swirl if ((new_cs & 0xfffc) == 0) { 2057100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2058eaa728eeSbellard } 2059100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2060100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 206120054ef0SBlue Swirl } 206220054ef0SBlue Swirl if (!(e2 & DESC_S_MASK) || 206320054ef0SBlue Swirl !(e2 & DESC_CS_MASK)) { 2064100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 206520054ef0SBlue Swirl } 206620054ef0SBlue Swirl rpl = new_cs & 3; 206720054ef0SBlue Swirl if (rpl < cpl) { 2068100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 206920054ef0SBlue Swirl } 207020054ef0SBlue Swirl dpl = (e2 >> DESC_DPL_SHIFT) & 3; 207120054ef0SBlue Swirl if (e2 & DESC_C_MASK) { 207220054ef0SBlue Swirl if (dpl > rpl) { 2073100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 207420054ef0SBlue Swirl } 207520054ef0SBlue Swirl } else { 207620054ef0SBlue Swirl if (dpl != rpl) { 2077100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 207820054ef0SBlue Swirl } 207920054ef0SBlue Swirl } 208020054ef0SBlue Swirl if (!(e2 & DESC_P_MASK)) { 2081100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 208220054ef0SBlue Swirl } 2083eaa728eeSbellard 2084059368bcSRichard Henderson sa.sp += addend; 2085eaa728eeSbellard if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2086eaa728eeSbellard ((env->hflags & HF_CS64_MASK) && !is_iret))) { 20871235fc06Sths /* return to same privilege level */ 2088eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2089eaa728eeSbellard get_seg_base(e1, e2), 2090eaa728eeSbellard get_seg_limit(e1, e2), 2091eaa728eeSbellard e2); 2092eaa728eeSbellard } else { 2093eaa728eeSbellard /* return to different privilege level */ 2094eaa728eeSbellard #ifdef TARGET_X86_64 2095eaa728eeSbellard if (shift == 2) { 2096059368bcSRichard Henderson new_esp = popq(&sa); 2097059368bcSRichard Henderson new_ss = popq(&sa) & 0xffff; 2098eaa728eeSbellard } else 2099eaa728eeSbellard #endif 210020054ef0SBlue Swirl { 2101eaa728eeSbellard if (shift == 1) { 2102eaa728eeSbellard /* 32 bits */ 2103059368bcSRichard Henderson new_esp = popl(&sa); 2104059368bcSRichard Henderson new_ss = popl(&sa) & 0xffff; 2105eaa728eeSbellard } else { 2106eaa728eeSbellard /* 16 bits */ 2107059368bcSRichard Henderson new_esp = popw(&sa); 2108059368bcSRichard Henderson new_ss = popw(&sa); 2109eaa728eeSbellard } 211020054ef0SBlue Swirl } 2111d12d51d5Saliguori LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2112eaa728eeSbellard new_ss, new_esp); 2113eaa728eeSbellard if ((new_ss & 0xfffc) == 0) { 2114eaa728eeSbellard #ifdef TARGET_X86_64 2115eaa728eeSbellard /* NULL ss is allowed in long mode if cpl != 3 */ 2116eaa728eeSbellard /* XXX: test CS64? */ 2117eaa728eeSbellard if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2118eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2119eaa728eeSbellard 0, 0xffffffff, 2120eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2121eaa728eeSbellard DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2122eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 2123eaa728eeSbellard ss_e2 = DESC_B_MASK; /* XXX: should not be needed? 
*/ 2124eaa728eeSbellard } else 2125eaa728eeSbellard #endif 2126eaa728eeSbellard { 2127100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2128eaa728eeSbellard } 2129eaa728eeSbellard } else { 213020054ef0SBlue Swirl if ((new_ss & 3) != rpl) { 2131100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 213220054ef0SBlue Swirl } 2133100ec099SPavel Dovgalyuk if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2134100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 213520054ef0SBlue Swirl } 2136eaa728eeSbellard if (!(ss_e2 & DESC_S_MASK) || 2137eaa728eeSbellard (ss_e2 & DESC_CS_MASK) || 213820054ef0SBlue Swirl !(ss_e2 & DESC_W_MASK)) { 2139100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 214020054ef0SBlue Swirl } 2141eaa728eeSbellard dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 214220054ef0SBlue Swirl if (dpl != rpl) { 2143100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 214420054ef0SBlue Swirl } 214520054ef0SBlue Swirl if (!(ss_e2 & DESC_P_MASK)) { 2146100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 214720054ef0SBlue Swirl } 2148eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, new_ss, 2149eaa728eeSbellard get_seg_base(ss_e1, ss_e2), 2150eaa728eeSbellard get_seg_limit(ss_e1, ss_e2), 2151eaa728eeSbellard ss_e2); 2152eaa728eeSbellard } 2153eaa728eeSbellard 2154eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, new_cs, 2155eaa728eeSbellard get_seg_base(e1, e2), 2156eaa728eeSbellard get_seg_limit(e1, e2), 2157eaa728eeSbellard e2); 2158059368bcSRichard Henderson sa.sp = new_esp; 2159eaa728eeSbellard #ifdef TARGET_X86_64 216020054ef0SBlue Swirl if (env->hflags & HF_CS64_MASK) { 2161059368bcSRichard Henderson sa.sp_mask = -1; 216220054ef0SBlue Swirl } else 2163eaa728eeSbellard #endif 216420054ef0SBlue Swirl { 2165059368bcSRichard Henderson sa.sp_mask = get_sp_mask(ss_e2); 216620054ef0SBlue Swirl } 2167eaa728eeSbellard 2168eaa728eeSbellard /* validate data segments */ 21692999a0b2SBlue Swirl validate_seg(env, R_ES, rpl); 21702999a0b2SBlue Swirl validate_seg(env, R_DS, rpl); 21712999a0b2SBlue Swirl validate_seg(env, R_FS, rpl); 21722999a0b2SBlue Swirl validate_seg(env, R_GS, rpl); 2173eaa728eeSbellard 2174059368bcSRichard Henderson sa.sp += addend; 2175eaa728eeSbellard } 2176059368bcSRichard Henderson SET_ESP(sa.sp, sa.sp_mask); 2177eaa728eeSbellard env->eip = new_eip; 2178eaa728eeSbellard if (is_iret) { 2179eaa728eeSbellard /* NOTE: 'cpl' is the _old_ CPL */ 2180eaa728eeSbellard eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 218120054ef0SBlue Swirl if (cpl == 0) { 2182eaa728eeSbellard eflags_mask |= IOPL_MASK; 218320054ef0SBlue Swirl } 2184eaa728eeSbellard iopl = (env->eflags >> IOPL_SHIFT) & 3; 218520054ef0SBlue Swirl if (cpl <= iopl) { 2186eaa728eeSbellard eflags_mask |= IF_MASK; 218720054ef0SBlue Swirl } 218820054ef0SBlue Swirl if (shift == 0) { 2189eaa728eeSbellard eflags_mask &= 0xffff; 219020054ef0SBlue Swirl } 2191997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, eflags_mask); 2192eaa728eeSbellard } 2193eaa728eeSbellard return; 2194eaa728eeSbellard 2195eaa728eeSbellard return_to_vm86: 2196059368bcSRichard Henderson new_esp = popl(&sa); 2197059368bcSRichard Henderson new_ss = popl(&sa); 2198059368bcSRichard Henderson new_es = popl(&sa); 2199059368bcSRichard Henderson new_ds = popl(&sa); 2200059368bcSRichard Henderson new_fs = popl(&sa); 
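    /*
     * Together with the GS pop just below this completes the 9-dword
     * IRET-to-VM86 frame: EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS.
     */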
2201059368bcSRichard Henderson new_gs = popl(&sa); 2202eaa728eeSbellard 2203eaa728eeSbellard /* modify processor state */ 2204997ff0d9SBlue Swirl cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | 2205997ff0d9SBlue Swirl IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | 2206997ff0d9SBlue Swirl VIP_MASK); 22072999a0b2SBlue Swirl load_seg_vm(env, R_CS, new_cs & 0xffff); 22082999a0b2SBlue Swirl load_seg_vm(env, R_SS, new_ss & 0xffff); 22092999a0b2SBlue Swirl load_seg_vm(env, R_ES, new_es & 0xffff); 22102999a0b2SBlue Swirl load_seg_vm(env, R_DS, new_ds & 0xffff); 22112999a0b2SBlue Swirl load_seg_vm(env, R_FS, new_fs & 0xffff); 22122999a0b2SBlue Swirl load_seg_vm(env, R_GS, new_gs & 0xffff); 2213eaa728eeSbellard 2214eaa728eeSbellard env->eip = new_eip & 0xffff; 221508b3ded6Sliguang env->regs[R_ESP] = new_esp; 2216eaa728eeSbellard } 2217eaa728eeSbellard 22182999a0b2SBlue Swirl void helper_iret_protected(CPUX86State *env, int shift, int next_eip) 2219eaa728eeSbellard { 2220eaa728eeSbellard int tss_selector, type; 2221eaa728eeSbellard uint32_t e1, e2; 2222eaa728eeSbellard 2223eaa728eeSbellard /* specific case for TSS */ 2224eaa728eeSbellard if (env->eflags & NT_MASK) { 2225eaa728eeSbellard #ifdef TARGET_X86_64 222620054ef0SBlue Swirl if (env->hflags & HF_LMA_MASK) { 2227100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 222820054ef0SBlue Swirl } 2229eaa728eeSbellard #endif 2230100ec099SPavel Dovgalyuk tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); 223120054ef0SBlue Swirl if (tss_selector & 4) { 2232100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 223320054ef0SBlue Swirl } 2234100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { 2235100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 223620054ef0SBlue Swirl } 2237eaa728eeSbellard type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2238eaa728eeSbellard /* NOTE: we check both segment and busy TSS */ 223920054ef0SBlue Swirl if (type != 3) { 2240100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 224120054ef0SBlue Swirl } 2242100ec099SPavel Dovgalyuk switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); 2243eaa728eeSbellard } else { 2244100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 1, 0, GETPC()); 2245eaa728eeSbellard } 2246db620f46Sbellard env->hflags2 &= ~HF2_NMI_MASK; 2247eaa728eeSbellard } 2248eaa728eeSbellard 22492999a0b2SBlue Swirl void helper_lret_protected(CPUX86State *env, int shift, int addend) 2250eaa728eeSbellard { 2251100ec099SPavel Dovgalyuk helper_ret_protected(env, shift, 0, addend, GETPC()); 2252eaa728eeSbellard } 2253eaa728eeSbellard 22542999a0b2SBlue Swirl void helper_sysenter(CPUX86State *env) 2255eaa728eeSbellard { 2256eaa728eeSbellard if (env->sysenter_cs == 0) { 2257100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2258eaa728eeSbellard } 2259eaa728eeSbellard env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 22602436b61aSbalrog 22612436b61aSbalrog #ifdef TARGET_X86_64 22622436b61aSbalrog if (env->hflags & HF_LMA_MASK) { 22632436b61aSbalrog cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 22642436b61aSbalrog 0, 0xffffffff, 22652436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 22662436b61aSbalrog DESC_S_MASK | 226720054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 226820054ef0SBlue Swirl DESC_L_MASK); 
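        /*
         * SYSENTER, in short: CS is loaded from the SYSENTER_CS MSR (here
         * as a flat 64-bit code segment because we are in long mode), SS
         * from SYSENTER_CS + 8, and ESP/EIP from env->sysenter_esp/eip
         * further down.
         */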
22692436b61aSbalrog } else 22702436b61aSbalrog #endif 22712436b61aSbalrog { 2272eaa728eeSbellard cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2273eaa728eeSbellard 0, 0xffffffff, 2274eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2275eaa728eeSbellard DESC_S_MASK | 2276eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 22772436b61aSbalrog } 2278eaa728eeSbellard cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2279eaa728eeSbellard 0, 0xffffffff, 2280eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2281eaa728eeSbellard DESC_S_MASK | 2282eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 228308b3ded6Sliguang env->regs[R_ESP] = env->sysenter_esp; 2284a78d0eabSliguang env->eip = env->sysenter_eip; 2285eaa728eeSbellard } 2286eaa728eeSbellard 22872999a0b2SBlue Swirl void helper_sysexit(CPUX86State *env, int dflag) 2288eaa728eeSbellard { 2289eaa728eeSbellard int cpl; 2290eaa728eeSbellard 2291eaa728eeSbellard cpl = env->hflags & HF_CPL_MASK; 2292eaa728eeSbellard if (env->sysenter_cs == 0 || cpl != 0) { 2293100ec099SPavel Dovgalyuk raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2294eaa728eeSbellard } 22952436b61aSbalrog #ifdef TARGET_X86_64 22962436b61aSbalrog if (dflag == 2) { 229720054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 229820054ef0SBlue Swirl 3, 0, 0xffffffff, 22992436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23002436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 230120054ef0SBlue Swirl DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 230220054ef0SBlue Swirl DESC_L_MASK); 230320054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 230420054ef0SBlue Swirl 3, 0, 0xffffffff, 23052436b61aSbalrog DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 23062436b61aSbalrog DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 23072436b61aSbalrog DESC_W_MASK | DESC_A_MASK); 23082436b61aSbalrog } else 23092436b61aSbalrog #endif 23102436b61aSbalrog { 231120054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 231220054ef0SBlue Swirl 3, 0, 0xffffffff, 2313eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2314eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2315eaa728eeSbellard DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 231620054ef0SBlue Swirl cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 231720054ef0SBlue Swirl 3, 0, 0xffffffff, 2318eaa728eeSbellard DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2319eaa728eeSbellard DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2320eaa728eeSbellard DESC_W_MASK | DESC_A_MASK); 23212436b61aSbalrog } 232208b3ded6Sliguang env->regs[R_ESP] = env->regs[R_ECX]; 2323a78d0eabSliguang env->eip = env->regs[R_EDX]; 2324eaa728eeSbellard } 2325eaa728eeSbellard 23262999a0b2SBlue Swirl target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) 2327eaa728eeSbellard { 2328eaa728eeSbellard unsigned int limit; 2329ae541c0eSPaolo Bonzini uint32_t e1, e2, selector; 2330eaa728eeSbellard int rpl, dpl, cpl, type; 2331eaa728eeSbellard 2332eaa728eeSbellard selector = selector1 & 0xffff; 2333ae541c0eSPaolo Bonzini assert(CC_OP == CC_OP_EFLAGS); 233420054ef0SBlue Swirl if ((selector & 0xfffc) == 0) { 2335dc1ded53Saliguori goto fail; 233620054ef0SBlue Swirl } 2337100ec099SPavel Dovgalyuk if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2338eaa728eeSbellard goto fail; 233920054ef0SBlue Swirl } 2340eaa728eeSbellard rpl = selector & 3; 2341eaa728eeSbellard dpl = (e2 >> DESC_DPL_SHIFT) & 3; 
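    /*
     * LSL summary: the limit is returned (and ZF set) only if the
     * descriptor type is one LSL may inspect and, for non-conforming
     * segments and system types, DPL >= CPL and DPL >= RPL; otherwise ZF
     * is cleared and 0 is returned.  ZF is communicated through CC_SRC,
     * since CC_OP is asserted to be CC_OP_EFLAGS here.
     */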
2342eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2343eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2344eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2345eaa728eeSbellard             /* conforming */
2346eaa728eeSbellard         } else {
234720054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2348eaa728eeSbellard                 goto fail;
2349eaa728eeSbellard             }
235020054ef0SBlue Swirl         }
2351eaa728eeSbellard     } else {
2352eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2353eaa728eeSbellard         switch (type) {
2354eaa728eeSbellard         case 1:
2355eaa728eeSbellard         case 2:
2356eaa728eeSbellard         case 3:
2357eaa728eeSbellard         case 9:
2358eaa728eeSbellard         case 11:
2359eaa728eeSbellard             break;
2360eaa728eeSbellard         default:
2361eaa728eeSbellard             goto fail;
2362eaa728eeSbellard         }
2363eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2364eaa728eeSbellard         fail:
2365ae541c0eSPaolo Bonzini             CC_SRC &= ~CC_Z;
2366eaa728eeSbellard             return 0;
2367eaa728eeSbellard         }
2368eaa728eeSbellard     }
2369eaa728eeSbellard     limit = get_seg_limit(e1, e2);
2370ae541c0eSPaolo Bonzini     CC_SRC |= CC_Z;
2371eaa728eeSbellard     return limit;
2372eaa728eeSbellard }
2373eaa728eeSbellard
23742999a0b2SBlue Swirl target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2375eaa728eeSbellard {
2376ae541c0eSPaolo Bonzini     uint32_t e1, e2, selector;
2377eaa728eeSbellard     int rpl, dpl, cpl, type;
2378eaa728eeSbellard
2379eaa728eeSbellard     selector = selector1 & 0xffff;
2380ae541c0eSPaolo Bonzini     assert(CC_OP == CC_OP_EFLAGS);
238120054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2382eaa728eeSbellard         goto fail;
238320054ef0SBlue Swirl     }
2384100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2385eaa728eeSbellard         goto fail;
238620054ef0SBlue Swirl     }
2387eaa728eeSbellard     rpl = selector & 3;
2388eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2389eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2390eaa728eeSbellard     if (e2 & DESC_S_MASK) {
2391eaa728eeSbellard         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2392eaa728eeSbellard             /* conforming */
2393eaa728eeSbellard         } else {
239420054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2395eaa728eeSbellard                 goto fail;
2396eaa728eeSbellard             }
239720054ef0SBlue Swirl         }
2398eaa728eeSbellard     } else {
2399eaa728eeSbellard         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2400eaa728eeSbellard         switch (type) {
2401eaa728eeSbellard         case 1:
2402eaa728eeSbellard         case 2:
2403eaa728eeSbellard         case 3:
2404eaa728eeSbellard         case 4:
2405eaa728eeSbellard         case 5:
2406eaa728eeSbellard         case 9:
2407eaa728eeSbellard         case 11:
2408eaa728eeSbellard         case 12:
2409eaa728eeSbellard             break;
2410eaa728eeSbellard         default:
2411eaa728eeSbellard             goto fail;
2412eaa728eeSbellard         }
2413eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2414eaa728eeSbellard         fail:
2415ae541c0eSPaolo Bonzini             CC_SRC &= ~CC_Z;
2416eaa728eeSbellard             return 0;
2417eaa728eeSbellard         }
2418eaa728eeSbellard     }
2419ae541c0eSPaolo Bonzini     CC_SRC |= CC_Z;
2420eaa728eeSbellard     return e2 & 0x00f0ff00;
2421eaa728eeSbellard }
2422eaa728eeSbellard
24232999a0b2SBlue Swirl void helper_verr(CPUX86State *env, target_ulong selector1)
2424eaa728eeSbellard {
2425eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2426eaa728eeSbellard     int rpl, dpl, cpl;
2427eaa728eeSbellard
2428eaa728eeSbellard     selector = selector1 & 0xffff;
2429abdcc5c8SPaolo Bonzini     eflags = cpu_cc_compute_all(env) | CC_Z;
243020054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2431eaa728eeSbellard         goto fail;
243220054ef0SBlue Swirl     }
2433100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2434eaa728eeSbellard         goto fail;
243520054ef0SBlue Swirl     }
243620054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2437eaa728eeSbellard         goto fail;
243820054ef0SBlue Swirl     }
2439eaa728eeSbellard     rpl = selector & 3;
2440eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2441eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2442eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
244320054ef0SBlue Swirl         if (!(e2 & DESC_R_MASK)) {
2444eaa728eeSbellard             goto fail;
244520054ef0SBlue Swirl         }
2446eaa728eeSbellard         if (!(e2 & DESC_C_MASK)) {
244720054ef0SBlue Swirl             if (dpl < cpl || dpl < rpl) {
2448eaa728eeSbellard                 goto fail;
2449eaa728eeSbellard             }
245020054ef0SBlue Swirl         }
2451eaa728eeSbellard     } else {
2452eaa728eeSbellard         if (dpl < cpl || dpl < rpl) {
2453eaa728eeSbellard         fail:
2454abdcc5c8SPaolo Bonzini             eflags &= ~CC_Z;
2455eaa728eeSbellard         }
2456eaa728eeSbellard     }
2457abdcc5c8SPaolo Bonzini     CC_SRC = eflags;
2458abdcc5c8SPaolo Bonzini     CC_OP = CC_OP_EFLAGS;
2459eaa728eeSbellard }
2460eaa728eeSbellard
24612999a0b2SBlue Swirl void helper_verw(CPUX86State *env, target_ulong selector1)
2462eaa728eeSbellard {
2463eaa728eeSbellard     uint32_t e1, e2, eflags, selector;
2464eaa728eeSbellard     int rpl, dpl, cpl;
2465eaa728eeSbellard
2466eaa728eeSbellard     selector = selector1 & 0xffff;
2467abdcc5c8SPaolo Bonzini     eflags = cpu_cc_compute_all(env) | CC_Z;
246820054ef0SBlue Swirl     if ((selector & 0xfffc) == 0) {
2469eaa728eeSbellard         goto fail;
247020054ef0SBlue Swirl     }
2471100ec099SPavel Dovgalyuk     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2472eaa728eeSbellard         goto fail;
247320054ef0SBlue Swirl     }
247420054ef0SBlue Swirl     if (!(e2 & DESC_S_MASK)) {
2475eaa728eeSbellard         goto fail;
247620054ef0SBlue Swirl     }
2477eaa728eeSbellard     rpl = selector & 3;
2478eaa728eeSbellard     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2479eaa728eeSbellard     cpl = env->hflags & HF_CPL_MASK;
2480eaa728eeSbellard     if (e2 & DESC_CS_MASK) {
2481eaa728eeSbellard         goto fail;
2482eaa728eeSbellard     } else {
248320054ef0SBlue Swirl         if (dpl < cpl || dpl < rpl) {
2484eaa728eeSbellard             goto fail;
248520054ef0SBlue Swirl         }
2486eaa728eeSbellard         if (!(e2 & DESC_W_MASK)) {
2487eaa728eeSbellard         fail:
2488abdcc5c8SPaolo Bonzini             eflags &= ~CC_Z;
2489eaa728eeSbellard         }
2490eaa728eeSbellard     }
2491abdcc5c8SPaolo Bonzini     CC_SRC = eflags;
2492abdcc5c8SPaolo Bonzini     CC_OP = CC_OP_EFLAGS;
2493eaa728eeSbellard }
2494