1 /* 2 * x86 segmentation related helpers: 3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors 4 * 5 * Copyright (c) 2003 Fabrice Bellard 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2.1 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "cpu.h" 23 #include "qemu/log.h" 24 #include "exec/helper-proto.h" 25 #include "exec/exec-all.h" 26 #include "exec/cpu_ldst.h" 27 #include "exec/log.h" 28 #include "helper-tcg.h" 29 #include "seg_helper.h" 30 #include "access.h" 31 32 #ifdef TARGET_X86_64 33 #define SET_ESP(val, sp_mask) \ 34 do { \ 35 if ((sp_mask) == 0xffff) { \ 36 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ 37 ((val) & 0xffff); \ 38 } else if ((sp_mask) == 0xffffffffLL) { \ 39 env->regs[R_ESP] = (uint32_t)(val); \ 40 } else { \ 41 env->regs[R_ESP] = (val); \ 42 } \ 43 } while (0) 44 #else 45 #define SET_ESP(val, sp_mask) \ 46 do { \ 47 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ 48 ((val) & (sp_mask)); \ 49 } while (0) 50 #endif 51 52 /* XXX: use mmu_index to have proper DPL support */ 53 typedef struct StackAccess 54 { 55 CPUX86State *env; 56 uintptr_t ra; 57 target_ulong ss_base; 58 target_ulong sp; 59 target_ulong sp_mask; 60 int mmu_index; 61 } StackAccess; 62 63 static void pushw(StackAccess *sa, uint16_t val) 64 { 65 sa->sp -= 2; 66 cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask), 67 val, sa->mmu_index, sa->ra); 68 } 69 70 static void pushl(StackAccess *sa, uint32_t val) 71 { 72 sa->sp -= 4; 73 cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask), 74 val, sa->mmu_index, sa->ra); 75 } 76 77 static uint16_t popw(StackAccess *sa) 78 { 79 uint16_t ret = cpu_lduw_mmuidx_ra(sa->env, 80 sa->ss_base + (sa->sp & sa->sp_mask), 81 sa->mmu_index, sa->ra); 82 sa->sp += 2; 83 return ret; 84 } 85 86 static uint32_t popl(StackAccess *sa) 87 { 88 uint32_t ret = cpu_ldl_mmuidx_ra(sa->env, 89 sa->ss_base + (sa->sp & sa->sp_mask), 90 sa->mmu_index, sa->ra); 91 sa->sp += 4; 92 return ret; 93 } 94 95 int get_pg_mode(CPUX86State *env) 96 { 97 int pg_mode = 0; 98 if (!(env->cr[0] & CR0_PG_MASK)) { 99 return 0; 100 } 101 if (env->cr[0] & CR0_WP_MASK) { 102 pg_mode |= PG_MODE_WP; 103 } 104 if (env->cr[4] & CR4_PAE_MASK) { 105 pg_mode |= PG_MODE_PAE; 106 if (env->efer & MSR_EFER_NXE) { 107 pg_mode |= PG_MODE_NXE; 108 } 109 } 110 if (env->cr[4] & CR4_PSE_MASK) { 111 pg_mode |= PG_MODE_PSE; 112 } 113 if (env->cr[4] & CR4_SMEP_MASK) { 114 pg_mode |= PG_MODE_SMEP; 115 } 116 if (env->hflags & HF_LMA_MASK) { 117 pg_mode |= PG_MODE_LMA; 118 if (env->cr[4] & CR4_PKE_MASK) { 119 pg_mode |= PG_MODE_PKE; 120 } 121 if (env->cr[4] & CR4_PKS_MASK) { 122 pg_mode |= PG_MODE_PKS; 123 } 124 if (env->cr[4] & CR4_LA57_MASK) { 125 pg_mode |= PG_MODE_LA57; 126 } 127 } 128 return pg_mode; 129 } 130 131 /* return non zero if error */ 132 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, 133 uint32_t *e2_ptr, int selector, 134 uintptr_t retaddr) 135 { 136 SegmentCache *dt; 137 int index; 138 target_ulong ptr; 139 140 if (selector & 0x4) { 141 dt = &env->ldt; 142 } else { 143 dt = &env->gdt; 144 } 145 index = selector & ~7; 146 if ((index + 7) > dt->limit) { 147 return -1; 148 } 149 ptr = dt->base + index; 150 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr); 151 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 152 return 0; 153 } 154 155 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, 156 uint32_t *e2_ptr, int selector) 157 { 158 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0); 159 } 160 161 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) 162 { 163 unsigned int limit; 164 165 limit = (e1 & 0xffff) | (e2 & 0x000f0000); 166 if (e2 & DESC_G_MASK) { 167 limit = (limit << 12) | 0xfff; 168 } 169 return limit; 170 } 171 172 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) 173 { 174 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000); 175 } 176 177 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, 178 uint32_t e2) 179 { 180 sc->base = get_seg_base(e1, e2); 181 sc->limit = get_seg_limit(e1, e2); 182 sc->flags = e2; 183 } 184 185 /* init the segment cache in vm86 mode. */ 186 static inline void load_seg_vm(CPUX86State *env, int seg, int selector) 187 { 188 selector &= 0xffff; 189 190 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, 191 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 192 DESC_A_MASK | (3 << DESC_DPL_SHIFT)); 193 } 194 195 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, 196 uint32_t *esp_ptr, int dpl, 197 uintptr_t retaddr) 198 { 199 X86CPU *cpu = env_archcpu(env); 200 int type, index, shift; 201 202 #if 0 203 { 204 int i; 205 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); 206 for (i = 0; i < env->tr.limit; i++) { 207 printf("%02x ", env->tr.base[i]); 208 if ((i & 7) == 7) { 209 printf("\n"); 210 } 211 } 212 printf("\n"); 213 } 214 #endif 215 216 if (!(env->tr.flags & DESC_P_MASK)) { 217 cpu_abort(CPU(cpu), "invalid tss"); 218 } 219 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 220 if ((type & 7) != 1) { 221 cpu_abort(CPU(cpu), "invalid tss type"); 222 } 223 shift = type >> 3; 224 index = (dpl * 4 + 2) << shift; 225 if (index + (4 << shift) - 1 > env->tr.limit) { 226 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr); 227 } 228 if (shift == 0) { 229 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr); 230 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr); 231 } else { 232 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr); 233 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr); 234 } 235 } 236 237 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector, 238 int cpl, uintptr_t retaddr) 239 { 240 uint32_t e1, e2; 241 int rpl, dpl; 242 243 if ((selector & 0xfffc) != 0) { 244 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) { 245 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 246 } 247 if (!(e2 & DESC_S_MASK)) { 248 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 249 } 250 rpl = selector & 3; 251 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 252 if (seg_reg == R_CS) { 253 if (!(e2 & DESC_CS_MASK)) { 254 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 255 } 256 if (dpl != rpl) { 257 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 258 } 259 } else if (seg_reg == R_SS) { 260 /* SS must be writable data */ 261 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 262 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 263 } 264 if (dpl != cpl || dpl != rpl) { 265 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 266 } 267 } else { 268 /* not readable code */ 269 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { 270 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 271 } 272 /* if data or non conforming code, checks the rights */ 273 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { 274 if (dpl < cpl || dpl < rpl) { 275 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 276 } 277 } 278 } 279 if (!(e2 & DESC_P_MASK)) { 280 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr); 281 } 282 cpu_x86_load_seg_cache(env, seg_reg, selector, 283 get_seg_base(e1, e2), 284 get_seg_limit(e1, e2), 285 e2); 286 } else { 287 if (seg_reg == R_SS || seg_reg == R_CS) { 288 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr); 289 } 290 } 291 } 292 293 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value, 294 uintptr_t retaddr) 295 { 296 target_ulong ptr = env->gdt.base + (tss_selector & ~7); 297 uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 298 299 if (value) { 300 e2 |= DESC_TSS_BUSY_MASK; 301 } else { 302 e2 &= ~DESC_TSS_BUSY_MASK; 303 } 304 305 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr); 306 } 307 308 #define SWITCH_TSS_JMP 0 309 #define SWITCH_TSS_IRET 1 310 #define SWITCH_TSS_CALL 2 311 312 /* return 0 if switching to a 16-bit selector */ 313 static int switch_tss_ra(CPUX86State *env, int tss_selector, 314 uint32_t e1, uint32_t e2, int source, 315 uint32_t next_eip, uintptr_t retaddr) 316 { 317 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i; 318 target_ulong tss_base; 319 uint32_t new_regs[8], new_segs[6]; 320 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; 321 uint32_t old_eflags, eflags_mask; 322 SegmentCache *dt; 323 int mmu_index, index; 324 target_ulong ptr; 325 X86Access old, new; 326 327 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 328 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, 329 source); 330 331 /* if task gate, we read the TSS segment and we load it */ 332 if (type == 5) { 333 if (!(e2 & DESC_P_MASK)) { 334 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 335 } 336 tss_selector = e1 >> 16; 337 if (tss_selector & 4) { 338 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 339 } 340 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) { 341 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 342 } 343 if (e2 & DESC_S_MASK) { 344 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 345 } 346 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 347 if ((type & 7) != 1) { 348 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr); 349 } 350 } 351 352 if (!(e2 & DESC_P_MASK)) { 353 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr); 354 } 355 356 if (type & 8) { 357 tss_limit_max = 103; 358 } else { 359 tss_limit_max = 43; 360 } 361 tss_limit = get_seg_limit(e1, e2); 362 tss_base = get_seg_base(e1, e2); 363 if ((tss_selector & 4) != 0 || 364 tss_limit < tss_limit_max) { 365 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 366 } 367 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; 368 if (old_type & 8) { 369 old_tss_limit_max = 103; 370 } else { 371 old_tss_limit_max = 43; 372 } 373 374 /* new TSS must be busy iff the source is an IRET instruction */ 375 if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) { 376 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr); 377 } 378 379 /* X86Access avoids memory exceptions during the task switch */ 380 mmu_index = cpu_mmu_index_kernel(env); 381 access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max, 382 MMU_DATA_STORE, mmu_index, retaddr); 383 384 if (source == SWITCH_TSS_CALL) { 385 /* Probe for future write of parent task */ 386 probe_access(env, tss_base, 2, MMU_DATA_STORE, 387 mmu_index, retaddr); 388 } 389 access_prepare_mmu(&new, env, tss_base, tss_limit, 390 MMU_DATA_LOAD, mmu_index, retaddr); 391 392 /* read all the registers from the new TSS */ 393 if (type & 8) { 394 /* 32 bit */ 395 new_cr3 = access_ldl(&new, tss_base + 0x1c); 396 new_eip = access_ldl(&new, tss_base + 0x20); 397 new_eflags = access_ldl(&new, tss_base + 0x24); 398 for (i = 0; i < 8; i++) { 399 new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4)); 400 } 401 for (i = 0; i < 6; i++) { 402 new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4)); 403 } 404 new_ldt = access_ldw(&new, tss_base + 0x60); 405 new_trap = access_ldl(&new, tss_base + 0x64); 406 } else { 407 /* 16 bit */ 408 new_cr3 = 0; 409 new_eip = access_ldw(&new, tss_base + 0x0e); 410 new_eflags = access_ldw(&new, tss_base + 0x10); 411 for (i = 0; i < 8; i++) { 412 new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2)); 413 } 414 for (i = 0; i < 4; i++) { 415 new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2)); 416 } 417 new_ldt = access_ldw(&new, tss_base + 0x2a); 418 new_segs[R_FS] = 0; 419 new_segs[R_GS] = 0; 420 new_trap = 0; 421 } 422 /* XXX: avoid a compiler warning, see 423 http://support.amd.com/us/Processor_TechDocs/24593.pdf 424 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ 425 (void)new_trap; 426 427 /* clear busy bit (it is restartable) */ 428 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { 429 tss_set_busy(env, env->tr.selector, 0, retaddr); 430 } 431 old_eflags = cpu_compute_eflags(env); 432 if (source == SWITCH_TSS_IRET) { 433 old_eflags &= ~NT_MASK; 434 } 435 436 /* save the current state in the old TSS */ 437 if (old_type & 8) { 438 /* 32 bit */ 439 access_stl(&old, env->tr.base + 0x20, next_eip); 440 access_stl(&old, env->tr.base + 0x24, old_eflags); 441 access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]); 442 access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]); 443 access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]); 444 access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]); 445 access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]); 446 access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]); 447 access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]); 448 access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]); 449 for (i = 0; i < 6; i++) { 450 access_stw(&old, env->tr.base + (0x48 + i * 4), 451 env->segs[i].selector); 452 } 453 } else { 454 /* 16 bit */ 455 access_stw(&old, env->tr.base + 0x0e, next_eip); 456 access_stw(&old, env->tr.base + 0x10, old_eflags); 457 access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]); 458 access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]); 459 access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]); 460 access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]); 461 access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]); 462 access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]); 463 access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]); 464 access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]); 465 for (i = 0; i < 4; i++) { 466 access_stw(&old, env->tr.base + (0x22 + i * 2), 467 env->segs[i].selector); 468 } 469 } 470 471 /* now if an exception occurs, it will occurs in the next task 472 context */ 473 474 if (source == SWITCH_TSS_CALL) { 475 /* 476 * Thanks to the probe_access above, we know the first two 477 * bytes addressed by &new are writable too. 478 */ 479 access_stw(&new, tss_base, env->tr.selector); 480 new_eflags |= NT_MASK; 481 } 482 483 /* set busy bit */ 484 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { 485 tss_set_busy(env, tss_selector, 1, retaddr); 486 } 487 488 /* set the new CPU state */ 489 /* from this point, any exception which occurs can give problems */ 490 env->cr[0] |= CR0_TS_MASK; 491 env->hflags |= HF_TS_MASK; 492 env->tr.selector = tss_selector; 493 env->tr.base = tss_base; 494 env->tr.limit = tss_limit; 495 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; 496 497 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { 498 cpu_x86_update_cr3(env, new_cr3); 499 } 500 501 /* load all registers without an exception, then reload them with 502 possible exception */ 503 env->eip = new_eip; 504 eflags_mask = TF_MASK | AC_MASK | ID_MASK | 505 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; 506 if (type & 8) { 507 cpu_load_eflags(env, new_eflags, eflags_mask); 508 for (i = 0; i < 8; i++) { 509 env->regs[i] = new_regs[i]; 510 } 511 } else { 512 cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff); 513 for (i = 0; i < 8; i++) { 514 env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i]; 515 } 516 } 517 if (new_eflags & VM_MASK) { 518 for (i = 0; i < 6; i++) { 519 load_seg_vm(env, i, new_segs[i]); 520 } 521 } else { 522 /* first just selectors as the rest may trigger exceptions */ 523 for (i = 0; i < 6; i++) { 524 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); 525 } 526 } 527 528 env->ldt.selector = new_ldt & ~4; 529 env->ldt.base = 0; 530 env->ldt.limit = 0; 531 env->ldt.flags = 0; 532 533 /* load the LDT */ 534 if (new_ldt & 4) { 535 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 536 } 537 538 if ((new_ldt & 0xfffc) != 0) { 539 dt = &env->gdt; 540 index = new_ldt & ~7; 541 if ((index + 7) > dt->limit) { 542 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 543 } 544 ptr = dt->base + index; 545 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr); 546 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr); 547 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 548 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 549 } 550 if (!(e2 & DESC_P_MASK)) { 551 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr); 552 } 553 load_seg_cache_raw_dt(&env->ldt, e1, e2); 554 } 555 556 /* load the segments */ 557 if (!(new_eflags & VM_MASK)) { 558 int cpl = new_segs[R_CS] & 3; 559 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr); 560 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr); 561 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr); 562 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr); 563 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr); 564 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr); 565 } 566 567 /* check that env->eip is in the CS segment limits */ 568 if (new_eip > env->segs[R_CS].limit) { 569 /* XXX: different exception if CALL? */ 570 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 571 } 572 573 #ifndef CONFIG_USER_ONLY 574 /* reset local breakpoints */ 575 if (env->dr[7] & DR7_LOCAL_BP_MASK) { 576 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); 577 } 578 #endif 579 return type >> 3; 580 } 581 582 static int switch_tss(CPUX86State *env, int tss_selector, 583 uint32_t e1, uint32_t e2, int source, 584 uint32_t next_eip) 585 { 586 return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); 587 } 588 589 static inline unsigned int get_sp_mask(unsigned int e2) 590 { 591 #ifdef TARGET_X86_64 592 if (e2 & DESC_L_MASK) { 593 return 0; 594 } else 595 #endif 596 if (e2 & DESC_B_MASK) { 597 return 0xffffffff; 598 } else { 599 return 0xffff; 600 } 601 } 602 603 static int exception_is_fault(int intno) 604 { 605 switch (intno) { 606 /* 607 * #DB can be both fault- and trap-like, but it never sets RF=1 608 * in the RFLAGS value pushed on the stack. 609 */ 610 case EXCP01_DB: 611 case EXCP03_INT3: 612 case EXCP04_INTO: 613 case EXCP08_DBLE: 614 case EXCP12_MCHK: 615 return 0; 616 } 617 /* Everything else including reserved exception is a fault. */ 618 return 1; 619 } 620 621 int exception_has_error_code(int intno) 622 { 623 switch (intno) { 624 case 8: 625 case 10: 626 case 11: 627 case 12: 628 case 13: 629 case 14: 630 case 17: 631 return 1; 632 } 633 return 0; 634 } 635 636 /* protected mode interrupt */ 637 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, 638 int error_code, unsigned int next_eip, 639 int is_hw) 640 { 641 SegmentCache *dt; 642 target_ulong ptr; 643 int type, dpl, selector, ss_dpl, cpl; 644 int has_error_code, new_stack, shift; 645 uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0; 646 uint32_t old_eip, eflags; 647 int vm86 = env->eflags & VM_MASK; 648 StackAccess sa; 649 bool set_rf; 650 651 has_error_code = 0; 652 if (!is_int && !is_hw) { 653 has_error_code = exception_has_error_code(intno); 654 } 655 if (is_int) { 656 old_eip = next_eip; 657 set_rf = false; 658 } else { 659 old_eip = env->eip; 660 set_rf = exception_is_fault(intno); 661 } 662 663 dt = &env->idt; 664 if (intno * 8 + 7 > dt->limit) { 665 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 666 } 667 ptr = dt->base + intno * 8; 668 e1 = cpu_ldl_kernel(env, ptr); 669 e2 = cpu_ldl_kernel(env, ptr + 4); 670 /* check gate type */ 671 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 672 switch (type) { 673 case 5: /* task gate */ 674 case 6: /* 286 interrupt gate */ 675 case 7: /* 286 trap gate */ 676 case 14: /* 386 interrupt gate */ 677 case 15: /* 386 trap gate */ 678 break; 679 default: 680 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 681 break; 682 } 683 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 684 cpl = env->hflags & HF_CPL_MASK; 685 /* check privilege if software int */ 686 if (is_int && dpl < cpl) { 687 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 688 } 689 690 sa.env = env; 691 sa.ra = 0; 692 sa.mmu_index = cpu_mmu_index_kernel(env); 693 694 if (type == 5) { 695 /* task gate */ 696 /* must do that check here to return the correct error code */ 697 if (!(e2 & DESC_P_MASK)) { 698 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 699 } 700 shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); 701 if (has_error_code) { 702 /* push the error code */ 703 if (env->segs[R_SS].flags & DESC_B_MASK) { 704 sa.sp_mask = 0xffffffff; 705 } else { 706 sa.sp_mask = 0xffff; 707 } 708 sa.sp = env->regs[R_ESP]; 709 sa.ss_base = env->segs[R_SS].base; 710 if (shift) { 711 pushl(&sa, error_code); 712 } else { 713 pushw(&sa, error_code); 714 } 715 SET_ESP(sa.sp, sa.sp_mask); 716 } 717 return; 718 } 719 720 /* Otherwise, trap or interrupt gate */ 721 722 /* check valid bit */ 723 if (!(e2 & DESC_P_MASK)) { 724 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 725 } 726 selector = e1 >> 16; 727 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 728 if ((selector & 0xfffc) == 0) { 729 raise_exception_err(env, EXCP0D_GPF, 0); 730 } 731 if (load_segment(env, &e1, &e2, selector) != 0) { 732 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 733 } 734 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 735 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 736 } 737 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 738 if (dpl > cpl) { 739 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 740 } 741 if (!(e2 & DESC_P_MASK)) { 742 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 743 } 744 if (e2 & DESC_C_MASK) { 745 dpl = cpl; 746 } 747 if (dpl < cpl) { 748 /* to inner privilege */ 749 uint32_t esp; 750 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0); 751 if ((ss & 0xfffc) == 0) { 752 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 753 } 754 if ((ss & 3) != dpl) { 755 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 756 } 757 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { 758 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 759 } 760 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 761 if (ss_dpl != dpl) { 762 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 763 } 764 if (!(ss_e2 & DESC_S_MASK) || 765 (ss_e2 & DESC_CS_MASK) || 766 !(ss_e2 & DESC_W_MASK)) { 767 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 768 } 769 if (!(ss_e2 & DESC_P_MASK)) { 770 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); 771 } 772 new_stack = 1; 773 sa.sp = esp; 774 sa.sp_mask = get_sp_mask(ss_e2); 775 sa.ss_base = get_seg_base(ss_e1, ss_e2); 776 } else { 777 /* to same privilege */ 778 if (vm86) { 779 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 780 } 781 new_stack = 0; 782 sa.sp = env->regs[R_ESP]; 783 sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 784 sa.ss_base = env->segs[R_SS].base; 785 } 786 787 shift = type >> 3; 788 789 #if 0 790 /* XXX: check that enough room is available */ 791 push_size = 6 + (new_stack << 2) + (has_error_code << 1); 792 if (vm86) { 793 push_size += 8; 794 } 795 push_size <<= shift; 796 #endif 797 eflags = cpu_compute_eflags(env); 798 /* 799 * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it 800 * as is. AMD behavior could be implemented in check_hw_breakpoints(). 801 */ 802 if (set_rf) { 803 eflags |= RF_MASK; 804 } 805 806 if (shift == 1) { 807 if (new_stack) { 808 if (vm86) { 809 pushl(&sa, env->segs[R_GS].selector); 810 pushl(&sa, env->segs[R_FS].selector); 811 pushl(&sa, env->segs[R_DS].selector); 812 pushl(&sa, env->segs[R_ES].selector); 813 } 814 pushl(&sa, env->segs[R_SS].selector); 815 pushl(&sa, env->regs[R_ESP]); 816 } 817 pushl(&sa, eflags); 818 pushl(&sa, env->segs[R_CS].selector); 819 pushl(&sa, old_eip); 820 if (has_error_code) { 821 pushl(&sa, error_code); 822 } 823 } else { 824 if (new_stack) { 825 if (vm86) { 826 pushw(&sa, env->segs[R_GS].selector); 827 pushw(&sa, env->segs[R_FS].selector); 828 pushw(&sa, env->segs[R_DS].selector); 829 pushw(&sa, env->segs[R_ES].selector); 830 } 831 pushw(&sa, env->segs[R_SS].selector); 832 pushw(&sa, env->regs[R_ESP]); 833 } 834 pushw(&sa, eflags); 835 pushw(&sa, env->segs[R_CS].selector); 836 pushw(&sa, old_eip); 837 if (has_error_code) { 838 pushw(&sa, error_code); 839 } 840 } 841 842 /* interrupt gate clear IF mask */ 843 if ((type & 1) == 0) { 844 env->eflags &= ~IF_MASK; 845 } 846 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 847 848 if (new_stack) { 849 if (vm86) { 850 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); 851 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); 852 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); 853 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); 854 } 855 ss = (ss & ~3) | dpl; 856 cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base, 857 get_seg_limit(ss_e1, ss_e2), ss_e2); 858 } 859 SET_ESP(sa.sp, sa.sp_mask); 860 861 selector = (selector & ~3) | dpl; 862 cpu_x86_load_seg_cache(env, R_CS, selector, 863 get_seg_base(e1, e2), 864 get_seg_limit(e1, e2), 865 e2); 866 env->eip = offset; 867 } 868 869 #ifdef TARGET_X86_64 870 871 static void pushq(StackAccess *sa, uint64_t val) 872 { 873 sa->sp -= 8; 874 cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra); 875 } 876 877 static uint64_t popq(StackAccess *sa) 878 { 879 uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra); 880 sa->sp += 8; 881 return ret; 882 } 883 884 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) 885 { 886 X86CPU *cpu = env_archcpu(env); 887 int index, pg_mode; 888 target_ulong rsp; 889 int32_t sext; 890 891 #if 0 892 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", 893 env->tr.base, env->tr.limit); 894 #endif 895 896 if (!(env->tr.flags & DESC_P_MASK)) { 897 cpu_abort(CPU(cpu), "invalid tss"); 898 } 899 index = 8 * level + 4; 900 if ((index + 7) > env->tr.limit) { 901 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); 902 } 903 904 rsp = cpu_ldq_kernel(env, env->tr.base + index); 905 906 /* test virtual address sign extension */ 907 pg_mode = get_pg_mode(env); 908 sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47); 909 if (sext != 0 && sext != -1) { 910 raise_exception_err(env, EXCP0C_STACK, 0); 911 } 912 913 return rsp; 914 } 915 916 /* 64 bit interrupt */ 917 static void do_interrupt64(CPUX86State *env, int intno, int is_int, 918 int error_code, target_ulong next_eip, int is_hw) 919 { 920 SegmentCache *dt; 921 target_ulong ptr; 922 int type, dpl, selector, cpl, ist; 923 int has_error_code, new_stack; 924 uint32_t e1, e2, e3, ss, eflags; 925 target_ulong old_eip, offset; 926 bool set_rf; 927 StackAccess sa; 928 929 has_error_code = 0; 930 if (!is_int && !is_hw) { 931 has_error_code = exception_has_error_code(intno); 932 } 933 if (is_int) { 934 old_eip = next_eip; 935 set_rf = false; 936 } else { 937 old_eip = env->eip; 938 set_rf = exception_is_fault(intno); 939 } 940 941 dt = &env->idt; 942 if (intno * 16 + 15 > dt->limit) { 943 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 944 } 945 ptr = dt->base + intno * 16; 946 e1 = cpu_ldl_kernel(env, ptr); 947 e2 = cpu_ldl_kernel(env, ptr + 4); 948 e3 = cpu_ldl_kernel(env, ptr + 8); 949 /* check gate type */ 950 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 951 switch (type) { 952 case 14: /* 386 interrupt gate */ 953 case 15: /* 386 trap gate */ 954 break; 955 default: 956 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 957 break; 958 } 959 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 960 cpl = env->hflags & HF_CPL_MASK; 961 /* check privilege if software int */ 962 if (is_int && dpl < cpl) { 963 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 964 } 965 /* check valid bit */ 966 if (!(e2 & DESC_P_MASK)) { 967 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); 968 } 969 selector = e1 >> 16; 970 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); 971 ist = e2 & 7; 972 if ((selector & 0xfffc) == 0) { 973 raise_exception_err(env, EXCP0D_GPF, 0); 974 } 975 976 if (load_segment(env, &e1, &e2, selector) != 0) { 977 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 978 } 979 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 980 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 981 } 982 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 983 if (dpl > cpl) { 984 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 985 } 986 if (!(e2 & DESC_P_MASK)) { 987 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); 988 } 989 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { 990 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 991 } 992 if (e2 & DESC_C_MASK) { 993 dpl = cpl; 994 } 995 996 sa.env = env; 997 sa.ra = 0; 998 sa.mmu_index = cpu_mmu_index_kernel(env); 999 sa.sp_mask = -1; 1000 sa.ss_base = 0; 1001 if (dpl < cpl || ist != 0) { 1002 /* to inner privilege */ 1003 new_stack = 1; 1004 sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl); 1005 ss = 0; 1006 } else { 1007 /* to same privilege */ 1008 if (env->eflags & VM_MASK) { 1009 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); 1010 } 1011 new_stack = 0; 1012 sa.sp = env->regs[R_ESP]; 1013 } 1014 sa.sp &= ~0xfLL; /* align stack */ 1015 1016 /* See do_interrupt_protected. */ 1017 eflags = cpu_compute_eflags(env); 1018 if (set_rf) { 1019 eflags |= RF_MASK; 1020 } 1021 1022 pushq(&sa, env->segs[R_SS].selector); 1023 pushq(&sa, env->regs[R_ESP]); 1024 pushq(&sa, eflags); 1025 pushq(&sa, env->segs[R_CS].selector); 1026 pushq(&sa, old_eip); 1027 if (has_error_code) { 1028 pushq(&sa, error_code); 1029 } 1030 1031 /* interrupt gate clear IF mask */ 1032 if ((type & 1) == 0) { 1033 env->eflags &= ~IF_MASK; 1034 } 1035 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); 1036 1037 if (new_stack) { 1038 ss = 0 | dpl; 1039 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT); 1040 } 1041 env->regs[R_ESP] = sa.sp; 1042 1043 selector = (selector & ~3) | dpl; 1044 cpu_x86_load_seg_cache(env, R_CS, selector, 1045 get_seg_base(e1, e2), 1046 get_seg_limit(e1, e2), 1047 e2); 1048 env->eip = offset; 1049 } 1050 #endif /* TARGET_X86_64 */ 1051 1052 void helper_sysret(CPUX86State *env, int dflag) 1053 { 1054 int cpl, selector; 1055 1056 if (!(env->efer & MSR_EFER_SCE)) { 1057 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC()); 1058 } 1059 cpl = env->hflags & HF_CPL_MASK; 1060 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { 1061 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1062 } 1063 selector = (env->star >> 48) & 0xffff; 1064 #ifdef TARGET_X86_64 1065 if (env->hflags & HF_LMA_MASK) { 1066 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK 1067 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | 1068 NT_MASK); 1069 if (dflag == 2) { 1070 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 1071 0, 0xffffffff, 1072 DESC_G_MASK | DESC_P_MASK | 1073 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1074 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 1075 DESC_L_MASK); 1076 env->eip = env->regs[R_ECX]; 1077 } else { 1078 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1079 0, 0xffffffff, 1080 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1081 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1082 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1083 env->eip = (uint32_t)env->regs[R_ECX]; 1084 } 1085 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1086 0, 0xffffffff, 1087 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1088 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1089 DESC_W_MASK | DESC_A_MASK); 1090 } else 1091 #endif 1092 { 1093 env->eflags |= IF_MASK; 1094 cpu_x86_load_seg_cache(env, R_CS, selector | 3, 1095 0, 0xffffffff, 1096 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1097 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1098 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 1099 env->eip = (uint32_t)env->regs[R_ECX]; 1100 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3, 1101 0, 0xffffffff, 1102 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 1103 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 1104 DESC_W_MASK | DESC_A_MASK); 1105 } 1106 } 1107 1108 /* real mode interrupt */ 1109 static void do_interrupt_real(CPUX86State *env, int intno, int is_int, 1110 int error_code, unsigned int next_eip) 1111 { 1112 SegmentCache *dt; 1113 target_ulong ptr; 1114 int selector; 1115 uint32_t offset; 1116 uint32_t old_cs, old_eip; 1117 StackAccess sa; 1118 1119 /* real mode (simpler!) */ 1120 dt = &env->idt; 1121 if (intno * 4 + 3 > dt->limit) { 1122 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); 1123 } 1124 ptr = dt->base + intno * 4; 1125 offset = cpu_lduw_kernel(env, ptr); 1126 selector = cpu_lduw_kernel(env, ptr + 2); 1127 1128 sa.env = env; 1129 sa.ra = 0; 1130 sa.sp = env->regs[R_ESP]; 1131 sa.sp_mask = 0xffff; 1132 sa.ss_base = env->segs[R_SS].base; 1133 sa.mmu_index = cpu_mmu_index_kernel(env); 1134 1135 if (is_int) { 1136 old_eip = next_eip; 1137 } else { 1138 old_eip = env->eip; 1139 } 1140 old_cs = env->segs[R_CS].selector; 1141 /* XXX: use SS segment size? */ 1142 pushw(&sa, cpu_compute_eflags(env)); 1143 pushw(&sa, old_cs); 1144 pushw(&sa, old_eip); 1145 1146 /* update processor state */ 1147 SET_ESP(sa.sp, sa.sp_mask); 1148 env->eip = offset; 1149 env->segs[R_CS].selector = selector; 1150 env->segs[R_CS].base = (selector << 4); 1151 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); 1152 } 1153 1154 /* 1155 * Begin execution of an interruption. is_int is TRUE if coming from 1156 * the int instruction. next_eip is the env->eip value AFTER the interrupt 1157 * instruction. It is only relevant if is_int is TRUE. 1158 */ 1159 void do_interrupt_all(X86CPU *cpu, int intno, int is_int, 1160 int error_code, target_ulong next_eip, int is_hw) 1161 { 1162 CPUX86State *env = &cpu->env; 1163 1164 if (qemu_loglevel_mask(CPU_LOG_INT)) { 1165 if ((env->cr[0] & CR0_PE_MASK)) { 1166 static int count; 1167 1168 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx 1169 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, 1170 count, intno, error_code, is_int, 1171 env->hflags & HF_CPL_MASK, 1172 env->segs[R_CS].selector, env->eip, 1173 (int)env->segs[R_CS].base + env->eip, 1174 env->segs[R_SS].selector, env->regs[R_ESP]); 1175 if (intno == 0x0e) { 1176 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); 1177 } else { 1178 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); 1179 } 1180 qemu_log("\n"); 1181 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); 1182 #if 0 1183 { 1184 int i; 1185 target_ulong ptr; 1186 1187 qemu_log(" code="); 1188 ptr = env->segs[R_CS].base + env->eip; 1189 for (i = 0; i < 16; i++) { 1190 qemu_log(" %02x", ldub(ptr + i)); 1191 } 1192 qemu_log("\n"); 1193 } 1194 #endif 1195 count++; 1196 } 1197 } 1198 if (env->cr[0] & CR0_PE_MASK) { 1199 #if !defined(CONFIG_USER_ONLY) 1200 if (env->hflags & HF_GUEST_MASK) { 1201 handle_even_inj(env, intno, is_int, error_code, is_hw, 0); 1202 } 1203 #endif 1204 #ifdef TARGET_X86_64 1205 if (env->hflags & HF_LMA_MASK) { 1206 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); 1207 } else 1208 #endif 1209 { 1210 do_interrupt_protected(env, intno, is_int, error_code, next_eip, 1211 is_hw); 1212 } 1213 } else { 1214 #if !defined(CONFIG_USER_ONLY) 1215 if (env->hflags & HF_GUEST_MASK) { 1216 handle_even_inj(env, intno, is_int, error_code, is_hw, 1); 1217 } 1218 #endif 1219 do_interrupt_real(env, intno, is_int, error_code, next_eip); 1220 } 1221 1222 #if !defined(CONFIG_USER_ONLY) 1223 if (env->hflags & HF_GUEST_MASK) { 1224 CPUState *cs = CPU(cpu); 1225 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + 1226 offsetof(struct vmcb, 1227 control.event_inj)); 1228 1229 x86_stl_phys(cs, 1230 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 1231 event_inj & ~SVM_EVTINJ_VALID); 1232 } 1233 #endif 1234 } 1235 1236 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1237 { 1238 do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1239 } 1240 1241 void helper_lldt(CPUX86State *env, int selector) 1242 { 1243 SegmentCache *dt; 1244 uint32_t e1, e2; 1245 int index, entry_limit; 1246 target_ulong ptr; 1247 1248 selector &= 0xffff; 1249 if ((selector & 0xfffc) == 0) { 1250 /* XXX: NULL selector case: invalid LDT */ 1251 env->ldt.base = 0; 1252 env->ldt.limit = 0; 1253 } else { 1254 if (selector & 0x4) { 1255 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1256 } 1257 dt = &env->gdt; 1258 index = selector & ~7; 1259 #ifdef TARGET_X86_64 1260 if (env->hflags & HF_LMA_MASK) { 1261 entry_limit = 15; 1262 } else 1263 #endif 1264 { 1265 entry_limit = 7; 1266 } 1267 if ((index + entry_limit) > dt->limit) { 1268 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1269 } 1270 ptr = dt->base + index; 1271 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1272 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1273 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { 1274 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1275 } 1276 if (!(e2 & DESC_P_MASK)) { 1277 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1278 } 1279 #ifdef TARGET_X86_64 1280 if (env->hflags & HF_LMA_MASK) { 1281 uint32_t e3; 1282 1283 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1284 load_seg_cache_raw_dt(&env->ldt, e1, e2); 1285 env->ldt.base |= (target_ulong)e3 << 32; 1286 } else 1287 #endif 1288 { 1289 load_seg_cache_raw_dt(&env->ldt, e1, e2); 1290 } 1291 } 1292 env->ldt.selector = selector; 1293 } 1294 1295 void helper_ltr(CPUX86State *env, int selector) 1296 { 1297 SegmentCache *dt; 1298 uint32_t e1, e2; 1299 int index, type, entry_limit; 1300 target_ulong ptr; 1301 1302 selector &= 0xffff; 1303 if ((selector & 0xfffc) == 0) { 1304 /* NULL selector case: invalid TR */ 1305 env->tr.base = 0; 1306 env->tr.limit = 0; 1307 env->tr.flags = 0; 1308 } else { 1309 if (selector & 0x4) { 1310 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1311 } 1312 dt = &env->gdt; 1313 index = selector & ~7; 1314 #ifdef TARGET_X86_64 1315 if (env->hflags & HF_LMA_MASK) { 1316 entry_limit = 15; 1317 } else 1318 #endif 1319 { 1320 entry_limit = 7; 1321 } 1322 if ((index + entry_limit) > dt->limit) { 1323 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1324 } 1325 ptr = dt->base + index; 1326 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1327 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1328 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1329 if ((e2 & DESC_S_MASK) || 1330 (type != 1 && type != 9)) { 1331 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1332 } 1333 if (!(e2 & DESC_P_MASK)) { 1334 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1335 } 1336 #ifdef TARGET_X86_64 1337 if (env->hflags & HF_LMA_MASK) { 1338 uint32_t e3, e4; 1339 1340 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC()); 1341 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC()); 1342 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { 1343 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1344 } 1345 load_seg_cache_raw_dt(&env->tr, e1, e2); 1346 env->tr.base |= (target_ulong)e3 << 32; 1347 } else 1348 #endif 1349 { 1350 load_seg_cache_raw_dt(&env->tr, e1, e2); 1351 } 1352 e2 |= DESC_TSS_BUSY_MASK; 1353 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1354 } 1355 env->tr.selector = selector; 1356 } 1357 1358 /* only works if protected mode and not VM86. seg_reg must be != R_CS */ 1359 void helper_load_seg(CPUX86State *env, int seg_reg, int selector) 1360 { 1361 uint32_t e1, e2; 1362 int cpl, dpl, rpl; 1363 SegmentCache *dt; 1364 int index; 1365 target_ulong ptr; 1366 1367 selector &= 0xffff; 1368 cpl = env->hflags & HF_CPL_MASK; 1369 if ((selector & 0xfffc) == 0) { 1370 /* null selector case */ 1371 if (seg_reg == R_SS 1372 #ifdef TARGET_X86_64 1373 && (!(env->hflags & HF_CS64_MASK) || cpl == 3) 1374 #endif 1375 ) { 1376 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1377 } 1378 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); 1379 } else { 1380 1381 if (selector & 0x4) { 1382 dt = &env->ldt; 1383 } else { 1384 dt = &env->gdt; 1385 } 1386 index = selector & ~7; 1387 if ((index + 7) > dt->limit) { 1388 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1389 } 1390 ptr = dt->base + index; 1391 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC()); 1392 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC()); 1393 1394 if (!(e2 & DESC_S_MASK)) { 1395 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1396 } 1397 rpl = selector & 3; 1398 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1399 if (seg_reg == R_SS) { 1400 /* must be writable segment */ 1401 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { 1402 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1403 } 1404 if (rpl != cpl || dpl != cpl) { 1405 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1406 } 1407 } else { 1408 /* must be readable segment */ 1409 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { 1410 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1411 } 1412 1413 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1414 /* if not conforming code, test rights */ 1415 if (dpl < cpl || dpl < rpl) { 1416 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1417 } 1418 } 1419 } 1420 1421 if (!(e2 & DESC_P_MASK)) { 1422 if (seg_reg == R_SS) { 1423 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC()); 1424 } else { 1425 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1426 } 1427 } 1428 1429 /* set the access bit if not already set */ 1430 if (!(e2 & DESC_A_MASK)) { 1431 e2 |= DESC_A_MASK; 1432 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC()); 1433 } 1434 1435 cpu_x86_load_seg_cache(env, seg_reg, selector, 1436 get_seg_base(e1, e2), 1437 get_seg_limit(e1, e2), 1438 e2); 1439 #if 0 1440 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", 1441 selector, (unsigned long)sc->base, sc->limit, sc->flags); 1442 #endif 1443 } 1444 } 1445 1446 /* protected mode jump */ 1447 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1448 target_ulong next_eip) 1449 { 1450 int gate_cs, type; 1451 uint32_t e1, e2, cpl, dpl, rpl, limit; 1452 1453 if ((new_cs & 0xfffc) == 0) { 1454 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1455 } 1456 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1457 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1458 } 1459 cpl = env->hflags & HF_CPL_MASK; 1460 if (e2 & DESC_S_MASK) { 1461 if (!(e2 & DESC_CS_MASK)) { 1462 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1463 } 1464 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1465 if (e2 & DESC_C_MASK) { 1466 /* conforming code segment */ 1467 if (dpl > cpl) { 1468 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1469 } 1470 } else { 1471 /* non conforming code segment */ 1472 rpl = new_cs & 3; 1473 if (rpl > cpl) { 1474 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1475 } 1476 if (dpl != cpl) { 1477 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1478 } 1479 } 1480 if (!(e2 & DESC_P_MASK)) { 1481 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1482 } 1483 limit = get_seg_limit(e1, e2); 1484 if (new_eip > limit && 1485 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1486 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1487 } 1488 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1489 get_seg_base(e1, e2), limit, e2); 1490 env->eip = new_eip; 1491 } else { 1492 /* jump to call or task gate */ 1493 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1494 rpl = new_cs & 3; 1495 cpl = env->hflags & HF_CPL_MASK; 1496 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 1497 1498 #ifdef TARGET_X86_64 1499 if (env->efer & MSR_EFER_LMA) { 1500 if (type != 12) { 1501 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1502 } 1503 } 1504 #endif 1505 switch (type) { 1506 case 1: /* 286 TSS */ 1507 case 9: /* 386 TSS */ 1508 case 5: /* task gate */ 1509 if (dpl < cpl || dpl < rpl) { 1510 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1511 } 1512 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); 1513 break; 1514 case 4: /* 286 call gate */ 1515 case 12: /* 386 call gate */ 1516 if ((dpl < cpl) || (dpl < rpl)) { 1517 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1518 } 1519 if (!(e2 & DESC_P_MASK)) { 1520 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1521 } 1522 gate_cs = e1 >> 16; 1523 new_eip = (e1 & 0xffff); 1524 if (type == 12) { 1525 new_eip |= (e2 & 0xffff0000); 1526 } 1527 1528 #ifdef TARGET_X86_64 1529 if (env->efer & MSR_EFER_LMA) { 1530 /* load the upper 8 bytes of the 64-bit call gate */ 1531 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 1532 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1533 GETPC()); 1534 } 1535 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1536 if (type != 0) { 1537 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1538 GETPC()); 1539 } 1540 new_eip |= ((target_ulong)e1) << 32; 1541 } 1542 #endif 1543 1544 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) { 1545 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1546 } 1547 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1548 /* must be code segment */ 1549 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != 1550 (DESC_S_MASK | DESC_CS_MASK))) { 1551 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1552 } 1553 if (((e2 & DESC_C_MASK) && (dpl > cpl)) || 1554 (!(e2 & DESC_C_MASK) && (dpl != cpl))) { 1555 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1556 } 1557 #ifdef TARGET_X86_64 1558 if (env->efer & MSR_EFER_LMA) { 1559 if (!(e2 & DESC_L_MASK)) { 1560 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1561 } 1562 if (e2 & DESC_B_MASK) { 1563 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1564 } 1565 } 1566 #endif 1567 if (!(e2 & DESC_P_MASK)) { 1568 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC()); 1569 } 1570 limit = get_seg_limit(e1, e2); 1571 if (new_eip > limit && 1572 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) { 1573 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1574 } 1575 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, 1576 get_seg_base(e1, e2), limit, e2); 1577 env->eip = new_eip; 1578 break; 1579 default: 1580 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1581 break; 1582 } 1583 } 1584 } 1585 1586 /* real mode call */ 1587 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, 1588 int shift, uint32_t next_eip) 1589 { 1590 StackAccess sa; 1591 1592 sa.env = env; 1593 sa.ra = GETPC(); 1594 sa.sp = env->regs[R_ESP]; 1595 sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1596 sa.ss_base = env->segs[R_SS].base; 1597 sa.mmu_index = cpu_mmu_index_kernel(env); 1598 1599 if (shift) { 1600 pushl(&sa, env->segs[R_CS].selector); 1601 pushl(&sa, next_eip); 1602 } else { 1603 pushw(&sa, env->segs[R_CS].selector); 1604 pushw(&sa, next_eip); 1605 } 1606 1607 SET_ESP(sa.sp, sa.sp_mask); 1608 env->eip = new_eip; 1609 env->segs[R_CS].selector = new_cs; 1610 env->segs[R_CS].base = (new_cs << 4); 1611 } 1612 1613 /* protected mode call */ 1614 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, 1615 int shift, target_ulong next_eip) 1616 { 1617 int new_stack, i; 1618 uint32_t e1, e2, cpl, dpl, rpl, selector, param_count; 1619 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl; 1620 uint32_t val, limit, old_sp_mask; 1621 target_ulong old_ssp, offset; 1622 StackAccess sa; 1623 1624 LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 1625 LOG_PCALL_STATE(env_cpu(env)); 1626 if ((new_cs & 0xfffc) == 0) { 1627 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1628 } 1629 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) { 1630 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1631 } 1632 cpl = env->hflags & HF_CPL_MASK; 1633 LOG_PCALL("desc=%08x:%08x\n", e1, e2); 1634 1635 sa.env = env; 1636 sa.ra = GETPC(); 1637 sa.mmu_index = cpu_mmu_index_kernel(env); 1638 1639 if (e2 & DESC_S_MASK) { 1640 if (!(e2 & DESC_CS_MASK)) { 1641 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1642 } 1643 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1644 if (e2 & DESC_C_MASK) { 1645 /* conforming code segment */ 1646 if (dpl > cpl) { 1647 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1648 } 1649 } else { 1650 /* non conforming code segment */ 1651 rpl = new_cs & 3; 1652 if (rpl > cpl) { 1653 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1654 } 1655 if (dpl != cpl) { 1656 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1657 } 1658 } 1659 if (!(e2 & DESC_P_MASK)) { 1660 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1661 } 1662 1663 #ifdef TARGET_X86_64 1664 /* XXX: check 16/32 bit cases in long mode */ 1665 if (shift == 2) { 1666 /* 64 bit case */ 1667 sa.sp = env->regs[R_ESP]; 1668 sa.sp_mask = -1; 1669 sa.ss_base = 0; 1670 pushq(&sa, env->segs[R_CS].selector); 1671 pushq(&sa, next_eip); 1672 /* from this point, not restartable */ 1673 env->regs[R_ESP] = sa.sp; 1674 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1675 get_seg_base(e1, e2), 1676 get_seg_limit(e1, e2), e2); 1677 env->eip = new_eip; 1678 } else 1679 #endif 1680 { 1681 sa.sp = env->regs[R_ESP]; 1682 sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1683 sa.ss_base = env->segs[R_SS].base; 1684 if (shift) { 1685 pushl(&sa, env->segs[R_CS].selector); 1686 pushl(&sa, next_eip); 1687 } else { 1688 pushw(&sa, env->segs[R_CS].selector); 1689 pushw(&sa, next_eip); 1690 } 1691 1692 limit = get_seg_limit(e1, e2); 1693 if (new_eip > limit) { 1694 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1695 } 1696 /* from this point, not restartable */ 1697 SET_ESP(sa.sp, sa.sp_mask); 1698 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, 1699 get_seg_base(e1, e2), limit, e2); 1700 env->eip = new_eip; 1701 } 1702 } else { 1703 /* check gate type */ 1704 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1705 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1706 rpl = new_cs & 3; 1707 1708 #ifdef TARGET_X86_64 1709 if (env->efer & MSR_EFER_LMA) { 1710 if (type != 12) { 1711 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1712 } 1713 } 1714 #endif 1715 1716 switch (type) { 1717 case 1: /* available 286 TSS */ 1718 case 9: /* available 386 TSS */ 1719 case 5: /* task gate */ 1720 if (dpl < cpl || dpl < rpl) { 1721 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1722 } 1723 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); 1724 return; 1725 case 4: /* 286 call gate */ 1726 case 12: /* 386 call gate */ 1727 break; 1728 default: 1729 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1730 break; 1731 } 1732 shift = type >> 3; 1733 1734 if (dpl < cpl || dpl < rpl) { 1735 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); 1736 } 1737 /* check valid bit */ 1738 if (!(e2 & DESC_P_MASK)) { 1739 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); 1740 } 1741 selector = e1 >> 16; 1742 param_count = e2 & 0x1f; 1743 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); 1744 #ifdef TARGET_X86_64 1745 if (env->efer & MSR_EFER_LMA) { 1746 /* load the upper 8 bytes of the 64-bit call gate */ 1747 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) { 1748 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1749 GETPC()); 1750 } 1751 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; 1752 if (type != 0) { 1753 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, 1754 GETPC()); 1755 } 1756 offset |= ((target_ulong)e1) << 32; 1757 } 1758 #endif 1759 if ((selector & 0xfffc) == 0) { 1760 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1761 } 1762 1763 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 1764 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1765 } 1766 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { 1767 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1768 } 1769 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1770 if (dpl > cpl) { 1771 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1772 } 1773 #ifdef TARGET_X86_64 1774 if (env->efer & MSR_EFER_LMA) { 1775 if (!(e2 & DESC_L_MASK)) { 1776 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1777 } 1778 if (e2 & DESC_B_MASK) { 1779 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC()); 1780 } 1781 shift++; 1782 } 1783 #endif 1784 if (!(e2 & DESC_P_MASK)) { 1785 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC()); 1786 } 1787 1788 if (!(e2 & DESC_C_MASK) && dpl < cpl) { 1789 /* to inner privilege */ 1790 #ifdef TARGET_X86_64 1791 if (shift == 2) { 1792 ss = dpl; /* SS = NULL selector with RPL = new CPL */ 1793 new_stack = 1; 1794 sa.sp = get_rsp_from_tss(env, dpl); 1795 sa.sp_mask = -1; 1796 sa.ss_base = 0; /* SS base is always zero in IA-32e mode */ 1797 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]=" 1798 TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]); 1799 } else 1800 #endif 1801 { 1802 uint32_t sp32; 1803 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC()); 1804 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" 1805 TARGET_FMT_lx "\n", ss, sp32, param_count, 1806 env->regs[R_ESP]); 1807 if ((ss & 0xfffc) == 0) { 1808 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1809 } 1810 if ((ss & 3) != dpl) { 1811 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1812 } 1813 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) { 1814 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1815 } 1816 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 1817 if (ss_dpl != dpl) { 1818 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1819 } 1820 if (!(ss_e2 & DESC_S_MASK) || 1821 (ss_e2 & DESC_CS_MASK) || 1822 !(ss_e2 & DESC_W_MASK)) { 1823 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1824 } 1825 if (!(ss_e2 & DESC_P_MASK)) { 1826 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC()); 1827 } 1828 1829 sa.sp = sp32; 1830 sa.sp_mask = get_sp_mask(ss_e2); 1831 sa.ss_base = get_seg_base(ss_e1, ss_e2); 1832 } 1833 1834 /* push_size = ((param_count * 2) + 8) << shift; */ 1835 old_sp_mask = get_sp_mask(env->segs[R_SS].flags); 1836 old_ssp = env->segs[R_SS].base; 1837 1838 #ifdef TARGET_X86_64 1839 if (shift == 2) { 1840 /* XXX: verify if new stack address is canonical */ 1841 pushq(&sa, env->segs[R_SS].selector); 1842 pushq(&sa, env->regs[R_ESP]); 1843 /* parameters aren't supported for 64-bit call gates */ 1844 } else 1845 #endif 1846 if (shift == 1) { 1847 pushl(&sa, env->segs[R_SS].selector); 1848 pushl(&sa, env->regs[R_ESP]); 1849 for (i = param_count - 1; i >= 0; i--) { 1850 val = cpu_ldl_data_ra(env, 1851 old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask), 1852 GETPC()); 1853 pushl(&sa, val); 1854 } 1855 } else { 1856 pushw(&sa, env->segs[R_SS].selector); 1857 pushw(&sa, env->regs[R_ESP]); 1858 for (i = param_count - 1; i >= 0; i--) { 1859 val = cpu_lduw_data_ra(env, 1860 old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask), 1861 GETPC()); 1862 pushw(&sa, val); 1863 } 1864 } 1865 new_stack = 1; 1866 } else { 1867 /* to same privilege */ 1868 sa.sp = env->regs[R_ESP]; 1869 sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 1870 sa.ss_base = env->segs[R_SS].base; 1871 /* push_size = (4 << shift); */ 1872 new_stack = 0; 1873 } 1874 1875 #ifdef TARGET_X86_64 1876 if (shift == 2) { 1877 pushq(&sa, env->segs[R_CS].selector); 1878 pushq(&sa, next_eip); 1879 } else 1880 #endif 1881 if (shift == 1) { 1882 pushl(&sa, env->segs[R_CS].selector); 1883 pushl(&sa, next_eip); 1884 } else { 1885 pushw(&sa, env->segs[R_CS].selector); 1886 pushw(&sa, next_eip); 1887 } 1888 1889 /* from this point, not restartable */ 1890 1891 if (new_stack) { 1892 #ifdef TARGET_X86_64 1893 if (shift == 2) { 1894 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); 1895 } else 1896 #endif 1897 { 1898 ss = (ss & ~3) | dpl; 1899 cpu_x86_load_seg_cache(env, R_SS, ss, 1900 sa.ss_base, 1901 get_seg_limit(ss_e1, ss_e2), 1902 ss_e2); 1903 } 1904 } 1905 1906 selector = (selector & ~3) | dpl; 1907 cpu_x86_load_seg_cache(env, R_CS, selector, 1908 get_seg_base(e1, e2), 1909 get_seg_limit(e1, e2), 1910 e2); 1911 SET_ESP(sa.sp, sa.sp_mask); 1912 env->eip = offset; 1913 } 1914 } 1915 1916 /* real and vm86 mode iret */ 1917 void helper_iret_real(CPUX86State *env, int shift) 1918 { 1919 uint32_t new_cs, new_eip, new_eflags; 1920 int eflags_mask; 1921 StackAccess sa; 1922 1923 sa.env = env; 1924 sa.ra = GETPC(); 1925 sa.mmu_index = x86_mmu_index_pl(env, 0); 1926 sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */ 1927 sa.sp = env->regs[R_ESP]; 1928 sa.ss_base = env->segs[R_SS].base; 1929 1930 if (shift == 1) { 1931 /* 32 bits */ 1932 new_eip = popl(&sa); 1933 new_cs = popl(&sa) & 0xffff; 1934 new_eflags = popl(&sa); 1935 } else { 1936 /* 16 bits */ 1937 new_eip = popw(&sa); 1938 new_cs = popw(&sa); 1939 new_eflags = popw(&sa); 1940 } 1941 SET_ESP(sa.sp, sa.sp_mask); 1942 env->segs[R_CS].selector = new_cs; 1943 env->segs[R_CS].base = (new_cs << 4); 1944 env->eip = new_eip; 1945 if (env->eflags & VM_MASK) { 1946 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | 1947 NT_MASK; 1948 } else { 1949 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | 1950 RF_MASK | NT_MASK; 1951 } 1952 if (shift == 0) { 1953 eflags_mask &= 0xffff; 1954 } 1955 cpu_load_eflags(env, new_eflags, eflags_mask); 1956 env->hflags2 &= ~HF2_NMI_MASK; 1957 } 1958 1959 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl) 1960 { 1961 int dpl; 1962 uint32_t e2; 1963 1964 /* XXX: on x86_64, we do not want to nullify FS and GS because 1965 they may still contain a valid base. I would be interested to 1966 know how a real x86_64 CPU behaves */ 1967 if ((seg_reg == R_FS || seg_reg == R_GS) && 1968 (env->segs[seg_reg].selector & 0xfffc) == 0) { 1969 return; 1970 } 1971 1972 e2 = env->segs[seg_reg].flags; 1973 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 1974 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { 1975 /* data or non conforming code segment */ 1976 if (dpl < cpl) { 1977 cpu_x86_load_seg_cache(env, seg_reg, 0, 1978 env->segs[seg_reg].base, 1979 env->segs[seg_reg].limit, 1980 env->segs[seg_reg].flags & ~DESC_P_MASK); 1981 } 1982 } 1983 } 1984 1985 /* protected mode iret */ 1986 static inline void helper_ret_protected(CPUX86State *env, int shift, 1987 int is_iret, int addend, 1988 uintptr_t retaddr) 1989 { 1990 uint32_t new_cs, new_eflags, new_ss; 1991 uint32_t new_es, new_ds, new_fs, new_gs; 1992 uint32_t e1, e2, ss_e1, ss_e2; 1993 int cpl, dpl, rpl, eflags_mask, iopl; 1994 target_ulong new_eip, new_esp; 1995 StackAccess sa; 1996 1997 cpl = env->hflags & HF_CPL_MASK; 1998 1999 sa.env = env; 2000 sa.ra = retaddr; 2001 sa.mmu_index = x86_mmu_index_pl(env, cpl); 2002 2003 #ifdef TARGET_X86_64 2004 if (shift == 2) { 2005 sa.sp_mask = -1; 2006 } else 2007 #endif 2008 { 2009 sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); 2010 } 2011 sa.sp = env->regs[R_ESP]; 2012 sa.ss_base = env->segs[R_SS].base; 2013 new_eflags = 0; /* avoid warning */ 2014 #ifdef TARGET_X86_64 2015 if (shift == 2) { 2016 new_eip = popq(&sa); 2017 new_cs = popq(&sa) & 0xffff; 2018 if (is_iret) { 2019 new_eflags = popq(&sa); 2020 } 2021 } else 2022 #endif 2023 { 2024 if (shift == 1) { 2025 /* 32 bits */ 2026 new_eip = popl(&sa); 2027 new_cs = popl(&sa) & 0xffff; 2028 if (is_iret) { 2029 new_eflags = popl(&sa); 2030 if (new_eflags & VM_MASK) { 2031 goto return_to_vm86; 2032 } 2033 } 2034 } else { 2035 /* 16 bits */ 2036 new_eip = popw(&sa); 2037 new_cs = popw(&sa); 2038 if (is_iret) { 2039 new_eflags = popw(&sa); 2040 } 2041 } 2042 } 2043 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2044 new_cs, new_eip, shift, addend); 2045 LOG_PCALL_STATE(env_cpu(env)); 2046 if ((new_cs & 0xfffc) == 0) { 2047 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2048 } 2049 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) { 2050 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2051 } 2052 if (!(e2 & DESC_S_MASK) || 2053 !(e2 & DESC_CS_MASK)) { 2054 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2055 } 2056 rpl = new_cs & 3; 2057 if (rpl < cpl) { 2058 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2059 } 2060 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2061 if (e2 & DESC_C_MASK) { 2062 if (dpl > rpl) { 2063 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2064 } 2065 } else { 2066 if (dpl != rpl) { 2067 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2068 } 2069 } 2070 if (!(e2 & DESC_P_MASK)) { 2071 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr); 2072 } 2073 2074 sa.sp += addend; 2075 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || 2076 ((env->hflags & HF_CS64_MASK) && !is_iret))) { 2077 /* return to same privilege level */ 2078 cpu_x86_load_seg_cache(env, R_CS, new_cs, 2079 get_seg_base(e1, e2), 2080 get_seg_limit(e1, e2), 2081 e2); 2082 } else { 2083 /* return to different privilege level */ 2084 #ifdef TARGET_X86_64 2085 if (shift == 2) { 2086 new_esp = popq(&sa); 2087 new_ss = popq(&sa) & 0xffff; 2088 } else 2089 #endif 2090 { 2091 if (shift == 1) { 2092 /* 32 bits */ 2093 new_esp = popl(&sa); 2094 new_ss = popl(&sa) & 0xffff; 2095 } else { 2096 /* 16 bits */ 2097 new_esp = popw(&sa); 2098 new_ss = popw(&sa); 2099 } 2100 } 2101 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", 2102 new_ss, new_esp); 2103 if ((new_ss & 0xfffc) == 0) { 2104 #ifdef TARGET_X86_64 2105 /* NULL ss is allowed in long mode if cpl != 3 */ 2106 /* XXX: test CS64? */ 2107 if ((env->hflags & HF_LMA_MASK) && rpl != 3) { 2108 cpu_x86_load_seg_cache(env, R_SS, new_ss, 2109 0, 0xffffffff, 2110 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2111 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | 2112 DESC_W_MASK | DESC_A_MASK); 2113 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */ 2114 } else 2115 #endif 2116 { 2117 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr); 2118 } 2119 } else { 2120 if ((new_ss & 3) != rpl) { 2121 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 2122 } 2123 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) { 2124 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 2125 } 2126 if (!(ss_e2 & DESC_S_MASK) || 2127 (ss_e2 & DESC_CS_MASK) || 2128 !(ss_e2 & DESC_W_MASK)) { 2129 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 2130 } 2131 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; 2132 if (dpl != rpl) { 2133 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr); 2134 } 2135 if (!(ss_e2 & DESC_P_MASK)) { 2136 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr); 2137 } 2138 cpu_x86_load_seg_cache(env, R_SS, new_ss, 2139 get_seg_base(ss_e1, ss_e2), 2140 get_seg_limit(ss_e1, ss_e2), 2141 ss_e2); 2142 } 2143 2144 cpu_x86_load_seg_cache(env, R_CS, new_cs, 2145 get_seg_base(e1, e2), 2146 get_seg_limit(e1, e2), 2147 e2); 2148 sa.sp = new_esp; 2149 #ifdef TARGET_X86_64 2150 if (env->hflags & HF_CS64_MASK) { 2151 sa.sp_mask = -1; 2152 } else 2153 #endif 2154 { 2155 sa.sp_mask = get_sp_mask(ss_e2); 2156 } 2157 2158 /* validate data segments */ 2159 validate_seg(env, R_ES, rpl); 2160 validate_seg(env, R_DS, rpl); 2161 validate_seg(env, R_FS, rpl); 2162 validate_seg(env, R_GS, rpl); 2163 2164 sa.sp += addend; 2165 } 2166 SET_ESP(sa.sp, sa.sp_mask); 2167 env->eip = new_eip; 2168 if (is_iret) { 2169 /* NOTE: 'cpl' is the _old_ CPL */ 2170 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; 2171 if (cpl == 0) { 2172 eflags_mask |= IOPL_MASK; 2173 } 2174 iopl = (env->eflags >> IOPL_SHIFT) & 3; 2175 if (cpl <= iopl) { 2176 eflags_mask |= IF_MASK; 2177 } 2178 if (shift == 0) { 2179 eflags_mask &= 0xffff; 2180 } 2181 cpu_load_eflags(env, new_eflags, eflags_mask); 2182 } 2183 return; 2184 2185 return_to_vm86: 2186 new_esp = popl(&sa); 2187 new_ss = popl(&sa); 2188 new_es = popl(&sa); 2189 new_ds = popl(&sa); 2190 new_fs = popl(&sa); 2191 new_gs = popl(&sa); 2192 2193 /* modify processor state */ 2194 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | 2195 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | 2196 VIP_MASK); 2197 load_seg_vm(env, R_CS, new_cs & 0xffff); 2198 load_seg_vm(env, R_SS, new_ss & 0xffff); 2199 load_seg_vm(env, R_ES, new_es & 0xffff); 2200 load_seg_vm(env, R_DS, new_ds & 0xffff); 2201 load_seg_vm(env, R_FS, new_fs & 0xffff); 2202 load_seg_vm(env, R_GS, new_gs & 0xffff); 2203 2204 env->eip = new_eip & 0xffff; 2205 env->regs[R_ESP] = new_esp; 2206 } 2207 2208 void helper_iret_protected(CPUX86State *env, int shift, int next_eip) 2209 { 2210 int tss_selector, type; 2211 uint32_t e1, e2; 2212 2213 /* specific case for TSS */ 2214 if (env->eflags & NT_MASK) { 2215 #ifdef TARGET_X86_64 2216 if (env->hflags & HF_LMA_MASK) { 2217 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2218 } 2219 #endif 2220 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC()); 2221 if (tss_selector & 4) { 2222 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 2223 } 2224 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) { 2225 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 2226 } 2227 type = (e2 >> DESC_TYPE_SHIFT) & 0x17; 2228 /* NOTE: we check both segment and busy TSS */ 2229 if (type != 3) { 2230 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); 2231 } 2232 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); 2233 } else { 2234 helper_ret_protected(env, shift, 1, 0, GETPC()); 2235 } 2236 env->hflags2 &= ~HF2_NMI_MASK; 2237 } 2238 2239 void helper_lret_protected(CPUX86State *env, int shift, int addend) 2240 { 2241 helper_ret_protected(env, shift, 0, addend, GETPC()); 2242 } 2243 2244 void helper_sysenter(CPUX86State *env) 2245 { 2246 if (env->sysenter_cs == 0) { 2247 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2248 } 2249 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); 2250 2251 #ifdef TARGET_X86_64 2252 if (env->hflags & HF_LMA_MASK) { 2253 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2254 0, 0xffffffff, 2255 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2256 DESC_S_MASK | 2257 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 2258 DESC_L_MASK); 2259 } else 2260 #endif 2261 { 2262 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, 2263 0, 0xffffffff, 2264 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2265 DESC_S_MASK | 2266 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 2267 } 2268 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, 2269 0, 0xffffffff, 2270 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2271 DESC_S_MASK | 2272 DESC_W_MASK | DESC_A_MASK); 2273 env->regs[R_ESP] = env->sysenter_esp; 2274 env->eip = env->sysenter_eip; 2275 } 2276 2277 void helper_sysexit(CPUX86State *env, int dflag) 2278 { 2279 int cpl; 2280 2281 cpl = env->hflags & HF_CPL_MASK; 2282 if (env->sysenter_cs == 0 || cpl != 0) { 2283 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 2284 } 2285 #ifdef TARGET_X86_64 2286 if (dflag == 2) { 2287 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 2288 3, 0, 0xffffffff, 2289 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2290 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2291 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | 2292 DESC_L_MASK); 2293 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 2294 3, 0, 0xffffffff, 2295 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2296 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2297 DESC_W_MASK | DESC_A_MASK); 2298 } else 2299 #endif 2300 { 2301 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 2302 3, 0, 0xffffffff, 2303 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2304 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2305 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); 2306 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 2307 3, 0, 0xffffffff, 2308 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | 2309 DESC_S_MASK | (3 << DESC_DPL_SHIFT) | 2310 DESC_W_MASK | DESC_A_MASK); 2311 } 2312 env->regs[R_ESP] = env->regs[R_ECX]; 2313 env->eip = env->regs[R_EDX]; 2314 } 2315 2316 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) 2317 { 2318 unsigned int limit; 2319 uint32_t e1, e2, selector; 2320 int rpl, dpl, cpl, type; 2321 2322 selector = selector1 & 0xffff; 2323 assert(CC_OP == CC_OP_EFLAGS); 2324 if ((selector & 0xfffc) == 0) { 2325 goto fail; 2326 } 2327 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2328 goto fail; 2329 } 2330 rpl = selector & 3; 2331 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2332 cpl = env->hflags & HF_CPL_MASK; 2333 if (e2 & DESC_S_MASK) { 2334 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { 2335 /* conforming */ 2336 } else { 2337 if (dpl < cpl || dpl < rpl) { 2338 goto fail; 2339 } 2340 } 2341 } else { 2342 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 2343 switch (type) { 2344 case 1: 2345 case 2: 2346 case 3: 2347 case 9: 2348 case 11: 2349 break; 2350 default: 2351 goto fail; 2352 } 2353 if (dpl < cpl || dpl < rpl) { 2354 fail: 2355 CC_SRC &= ~CC_Z; 2356 return 0; 2357 } 2358 } 2359 limit = get_seg_limit(e1, e2); 2360 CC_SRC |= CC_Z; 2361 return limit; 2362 } 2363 2364 target_ulong helper_lar(CPUX86State *env, target_ulong selector1) 2365 { 2366 uint32_t e1, e2, selector; 2367 int rpl, dpl, cpl, type; 2368 2369 selector = selector1 & 0xffff; 2370 assert(CC_OP == CC_OP_EFLAGS); 2371 if ((selector & 0xfffc) == 0) { 2372 goto fail; 2373 } 2374 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2375 goto fail; 2376 } 2377 rpl = selector & 3; 2378 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2379 cpl = env->hflags & HF_CPL_MASK; 2380 if (e2 & DESC_S_MASK) { 2381 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { 2382 /* conforming */ 2383 } else { 2384 if (dpl < cpl || dpl < rpl) { 2385 goto fail; 2386 } 2387 } 2388 } else { 2389 type = (e2 >> DESC_TYPE_SHIFT) & 0xf; 2390 switch (type) { 2391 case 1: 2392 case 2: 2393 case 3: 2394 case 4: 2395 case 5: 2396 case 9: 2397 case 11: 2398 case 12: 2399 break; 2400 default: 2401 goto fail; 2402 } 2403 if (dpl < cpl || dpl < rpl) { 2404 fail: 2405 CC_SRC &= ~CC_Z; 2406 return 0; 2407 } 2408 } 2409 CC_SRC |= CC_Z; 2410 return e2 & 0x00f0ff00; 2411 } 2412 2413 void helper_verr(CPUX86State *env, target_ulong selector1) 2414 { 2415 uint32_t e1, e2, eflags, selector; 2416 int rpl, dpl, cpl; 2417 2418 selector = selector1 & 0xffff; 2419 eflags = cpu_cc_compute_all(env) | CC_Z; 2420 if ((selector & 0xfffc) == 0) { 2421 goto fail; 2422 } 2423 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2424 goto fail; 2425 } 2426 if (!(e2 & DESC_S_MASK)) { 2427 goto fail; 2428 } 2429 rpl = selector & 3; 2430 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2431 cpl = env->hflags & HF_CPL_MASK; 2432 if (e2 & DESC_CS_MASK) { 2433 if (!(e2 & DESC_R_MASK)) { 2434 goto fail; 2435 } 2436 if (!(e2 & DESC_C_MASK)) { 2437 if (dpl < cpl || dpl < rpl) { 2438 goto fail; 2439 } 2440 } 2441 } else { 2442 if (dpl < cpl || dpl < rpl) { 2443 fail: 2444 eflags &= ~CC_Z; 2445 } 2446 } 2447 CC_SRC = eflags; 2448 CC_OP = CC_OP_EFLAGS; 2449 } 2450 2451 void helper_verw(CPUX86State *env, target_ulong selector1) 2452 { 2453 uint32_t e1, e2, eflags, selector; 2454 int rpl, dpl, cpl; 2455 2456 selector = selector1 & 0xffff; 2457 eflags = cpu_cc_compute_all(env) | CC_Z; 2458 if ((selector & 0xfffc) == 0) { 2459 goto fail; 2460 } 2461 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) { 2462 goto fail; 2463 } 2464 if (!(e2 & DESC_S_MASK)) { 2465 goto fail; 2466 } 2467 rpl = selector & 3; 2468 dpl = (e2 >> DESC_DPL_SHIFT) & 3; 2469 cpl = env->hflags & HF_CPL_MASK; 2470 if (e2 & DESC_CS_MASK) { 2471 goto fail; 2472 } else { 2473 if (dpl < cpl || dpl < rpl) { 2474 goto fail; 2475 } 2476 if (!(e2 & DESC_W_MASK)) { 2477 fail: 2478 eflags &= ~CC_Z; 2479 } 2480 } 2481 CC_SRC = eflags; 2482 CC_OP = CC_OP_EFLAGS; 2483 } 2484