/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
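
/*
 * A legacy segment descriptor is handled here as two 32-bit words: e1 is
 * the low word and e2 the high word.  Roughly:
 *
 *   e1[15:0]   limit[15:0]
 *   e1[31:16]  base[15:0]
 *   e2[7:0]    base[23:16]
 *   e2[15:8]   type, S, DPL, P
 *   e2[19:16]  limit[19:16]
 *   e2[23:20]  AVL, L, D/B, G
 *   e2[31:24]  base[31:24]
 *
 * which is why get_seg_base() and get_seg_limit() above splice the fields
 * back together, and why the segment cache simply keeps e2 as "flags".
 */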

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
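
/*
 * In the descriptor type field, bit 1 distinguishes an available TSS
 * (types 1 and 9) from a busy one (types 3 and 11); DESC_TSS_BUSY_MASK
 * covers that bit.  switch_tss_ra() below clears it in the outgoing
 * task's descriptor for JMP and IRET, and sets it in the incoming one
 * for JMP and CALL, so that a task cannot be entered recursively.
 */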

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (env->tr.selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
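
    /*
     * The fixed offsets used above (and for the stores below) come from
     * the architectural TSS layouts: in a 32-bit TSS, CR3 is at 0x1c,
     * EIP at 0x20, EFLAGS at 0x24, the eight GPRs at 0x28, the six
     * segment selectors at 0x48 (4 bytes apart), the LDT selector at
     * 0x60 and the debug trap (T) bit at 0x64; in a 16-bit TSS, IP is at
     * 0x0e, FLAGS at 0x10, the GPRs at 0x12, the four segment selectors
     * at 0x22 and the LDT selector at 0x2a.
     */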
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                         \
    {                                                               \
        sp -= 2;                                                    \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                                 \
    {                                                                       \
        sp -= 4;                                                            \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                               \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                    \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                                  \
    {                                                                       \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                            \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
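    /*
     * A protected-mode IDT entry is an 8-byte gate descriptor: e1 holds
     * offset[15:0] in its low word and the target code segment selector
     * in its high word, while e2 holds the type/DPL/P byte and
     * offset[31:16].  Task gates reuse the selector field for a TSS
     * selector and ignore the offset.
     */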
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;
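
    /*
     * The frame pushed on the target stack mirrors what IRET will pop:
     * optionally GS, FS, DS and ES when leaving vm86 mode, then SS:ESP
     * when switching to an inner stack, then EFLAGS, CS, EIP and finally
     * the error code if the vector has one.  Each slot is 4 bytes for a
     * 386 gate and 2 bytes for a 286 gate (shift selects which).
     */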
#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);
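
    /*
     * An RSP loaded from the TSS must be canonical before it is used:
     * with 4-level paging, bits 63..47 must be all zeros or all ones,
     * and with LA57 the same holds for bits 63..56, which is what the
     * sign-extension test below checks.
     */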
    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
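    /*
     * A 64-bit IDT entry is 16 bytes: e3 carries offset[63:32] and the
     * low three bits of e2 hold the IST index.  A non-zero IST always
     * forces a stack switch, picking IST1..IST7 from the 64-bit TSS
     * (levels 4..10 in get_rsp_from_tss() terms); otherwise RSP0..RSP2
     * is selected by the destination privilege level.
     */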
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
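
/*
 * In real mode the IDT register points at the interrupt vector table:
 * each vector is just 4 bytes, a 16-bit offset followed by a 16-bit
 * segment, and no descriptor or privilege checks apply.
 */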
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}
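
/*
 * LLDT and LTR only accept selectors that index the GDT (TI bit clear).
 * In long mode the LDT and TSS descriptors grow to 16 bytes, the upper
 * 8 bytes holding base[63:32], which is why entry_limit below is 15
 * instead of 7 and why the extra words are loaded under HF_LMA_MASK.
 */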
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
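
/*
 * Loading DS/ES/FS/GS only requires max(CPL, RPL) <= DPL for data and
 * non conforming code segments; conforming code segments can be loaded
 * from any privilege level.  SS is stricter: it must be a writable data
 * segment with DPL == RPL == CPL.
 */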
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
                       int shift, uint32_t next_eip)
{
    uint32_t esp, esp_mask;
    target_ulong ssp;

    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl; /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0; /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}
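
/*
 * Stack frame consumed by a protected mode far return or iret (operand
 * size selected by 'shift': 0 = 16 bit, 1 = 32 bit, 2 = 64 bit):
 *   EIP, CS [, EFLAGS for iret] [, ESP, SS when returning to an outer
 *   privilege level].  A return to vm86 mode additionally pops ES, DS,
 *   FS and GS.
 */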
/* protected mode ret (lret) and iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
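
/*
 * iret in protected mode: when the NT flag is set this is a return from a
 * nested task, performed as a task switch through the back link stored at
 * offset 0 of the current TSS; otherwise the frame pushed by the interrupt
 * or exception is popped by helper_ret_protected().
 */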
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}
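
/*
 * sysenter/sysexit: fast system call entry and exit.  The kernel and user
 * CS/SS are derived from the SYSENTER_CS MSR and loaded as flat segments;
 * #GP(0) is raised if SYSENTER_CS is zero (and, for sysexit, if the
 * current CPL is not 0).
 */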
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
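
/*
 * lsl, lar, verr and verw probe a descriptor on behalf of the guest.
 * They never fault on an unusable selector: ZF reports success or
 * failure, and lsl/lar additionally return the segment limit or the
 * access rights bytes.
 */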
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}