/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
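/*
 * Note: e1 and e2 above are the low and high 32-bit words of a legacy
 * segment descriptor; base and limit are scattered across both words,
 * and the granularity bit scales the limit to 4K units.
 */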
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
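/*
 * Set or clear the Busy bit (bit 1 of the descriptor type field) of the
 * TSS descriptor stored in the GDT.
 */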
static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
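    /*
     * Commit the new task register; the Busy bit is tracked only in the
     * in-memory GDT copy, not in the cached descriptor flags.
     */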
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else, including reserved exceptions, is a fault. */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                         \
    {                                                               \
        sp -= 2;                                                    \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                         \
    {                                                               \
        sp -= 4;                                                    \
        cpu_stl_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                               \
        val = cpu_lduw_data_ra(env, (ssp) + (sp & (sp_mask)), ra);  \
        sp += 2;                                                    \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                               \
        val = (uint32_t)cpu_ldl_data_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 4;                                                    \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask, eflags;
    int vm86 = env->eflags & VM_MASK;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    if (type == 5) {
        /* task gate */
        /*
         * A task gate does not point at a handler directly: it triggers a
         * hardware task switch to the TSS referenced by the gate, and any
         * error code is then pushed on the incoming task's stack.
         */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                sp_mask = 0xffffffff;
            } else {
                sp_mask = 0xffff;
            }
            esp = env->regs[R_ESP];
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, esp, sp_mask, error_code);
            } else {
                PUSHW(ssp, esp, sp_mask, error_code);
            }
            SET_ESP(esp, sp_mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif

    /*
     * Build the stack frame: optionally the old SS:ESP (plus the data
     * segment selectors when coming from vm86 mode), then EFLAGS, CS,
     * EIP and finally the error code if the exception defines one.
     */
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF, Intel leaves it
     * as is. AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, eflags);
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, eflags);
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_data_ra(env, sp, ra);     \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    /* 64-bit TSS layout: RSPn at 8 * n + 4, ISTn at 8 * (n + 3) + 4 */
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss, eflags;
    target_ulong old_eip, esp, offset;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */
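    /*
     * In 64-bit mode the CPU unconditionally pushes SS:RSP, RFLAGS, CS
     * and RIP (plus an optional error code) on the 16-byte aligned stack.
     */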
    /* See do_interrupt_protected. */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, eflags);
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
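    /* Each IVT entry is 4 bytes: handler offset at +0, CS selector at +2. */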
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
/* only works in protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
                       int shift, uint32_t next_eip)
{
    uint32_t esp, esp_mask;
    target_ulong ssp;

    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
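                /*
                 * The SS selector read from the TSS must be a present,
                 * writable data segment whose RPL and DPL equal the new
                 * privilege level.
                 */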
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_data_ra(env,
                                          old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
                                          GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_data_ra(env,
                                           old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
                                           GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
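    /*
     * Validate the return CS: it must be a non-null, present code segment
     * whose RPL is not lower than the current CPL.  A conforming segment
     * may have DPL <= RPL, a non-conforming one must have DPL == RPL.
     */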
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
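        /*
         * Validate the stack segment popped from the old stack: a NULL SS
         * is only tolerated in long mode when returning to a CPL other
         * than 3; otherwise SS must be a present, writable data segment
         * whose RPL and DPL equal the new CPL.
         */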
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

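/*
 * Protected mode IRET.  When the NT flag is set, this is a task return:
 * the previous task's selector is read from the back-link field at the
 * start of the current TSS and a task switch is performed (not allowed
 * in long mode).  Otherwise the ordinary far-return path above is used.
 */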
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

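/*
 * LSL, LAR, VERR and VERW report their outcome through ZF only: ZF is
 * set when the selector passes the access checks and cleared otherwise,
 * which is why the helpers below update CC_SRC/CC_OP instead of raising
 * exceptions on failure.
 */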
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC |= CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    CC_SRC |= CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}