/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

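/*
 * Descriptor handling note: e1 is the low 32 bits of an 8-byte segment
 * descriptor (limit 15..0, base 15..0) and e2 the high 32 bits (base 23..16,
 * access byte, limit 19..16, flags, base 31..24).  get_seg_base() and
 * get_seg_limit() above just reassemble those split fields; with the
 * granularity bit set the limit is in 4K units, hence "(limit << 12) | 0xfff".
 */
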
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

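/*
 * The busy flag is bit 1 of the TSS descriptor's type field (an available
 * 32-bit TSS has type 9, a busy one type 11; 1 and 3 for the 16-bit forms),
 * so toggling DESC_TSS_BUSY_MASK in the second descriptor word is all that
 * is needed to mark the task as (not) busy.
 */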
static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
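    /*
     * The offsets above follow the architected TSS layouts: a 32-bit TSS
     * keeps CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the eight GPRs from
     * 0x28, the six segment selectors from 0x48, the LDT selector at 0x60
     * and the T bit / I/O map base word at 0x64; the 16-bit layout is the
     * packed equivalent starting with IP at 0x0e.
     */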
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

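    /*
     * A task switch sets CR0.TS so that the first FPU/SSE instruction
     * executed by the incoming task raises #NM, allowing lazy FPU context
     * switching; HF_TS_MASK mirrors the bit into hflags for the translator.
     */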
    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

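/*
 * A zero mask from get_sp_mask() means "do not truncate": it is returned for
 * 64-bit code segments, and the SET_ESP() macro below then writes the full
 * RSP.  Otherwise the descriptor's B bit selects a 32-bit or 16-bit stack
 * pointer.
 */
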
static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else, including reserved exceptions, is a fault. */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 2;                                                        \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra);    \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra);    \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = cpu_lduw_data_ra(env, (ssp) + (sp & (sp_mask)), ra);      \
        sp += 2;                                                        \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_data_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask, eflags;
    int vm86 = env->eflags & VM_MASK;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

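    /*
     * Architecturally the gate DPL check above only applies to software
     * interrupts (INT n, INT3, INTO); hardware interrupts and CPU exceptions
     * ignore the gate DPL, which is why it is conditional on is_int here.
     */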
    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            uint32_t mask;

            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
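    /*
     * The frame pushed below is, from high to low addresses: [GS, FS, DS, ES
     * when coming from vm86,] [SS, ESP when switching stacks,] EFLAGS, CS,
     * EIP and finally the error code when the exception defines one, each
     * slot being 4 or 2 bytes wide depending on the gate size.
     */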
    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, eflags);
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, eflags);
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_data_ra(env, sp, ra);     \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

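    /*
     * The 64-bit TSS stores full stack pointers at fixed offsets: RSP0/1/2
     * at offsets 4, 12 and 20, then IST1..IST7 from offset 36, which is why
     * "8 * level + 4" works and why callers pass "ist + 3" for IST stacks.
     */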
    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss, eflags;
    target_ulong old_eip, esp, offset;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

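    /*
     * Unlike the 32-bit case, a 64-bit interrupt always pushes SS:RSP (even
     * without a privilege change) and the frame is built on a 16-byte
     * aligned stack, hence the masking of esp just above.
     */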
    /* See do_interrupt_protected.  */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, eflags);
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */

void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

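/*
 * In real mode the IDT is a plain interrupt vector table of 4-byte entries,
 * each holding the handler's IP in the low word and CS in the high word,
 * which is why do_interrupt_real() below indexes it with "intno * 4".
 */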
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

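/*
 * LLDT and LTR only accept GDT selectors, and in long mode the LDT/TSS
 * descriptors they reference grow to 16 bytes (the extra doubleword holds
 * base bits 63..32), which is why entry_limit is 15 rather than 7 below.
 */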
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
                       int shift, uint32_t next_eip)
{
    uint32_t esp, esp_mask;
    target_ulong ssp;

    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl; /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0; /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_data_ra(env,
                                          old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
                                          GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_data_ra(env,
                                           old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
                                           GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}

/* protected mode iret */
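/*
 * Common code for protected-mode lret and iret.  The frame popped from the
 * current stack is (one slot being 2, 4 or 8 bytes for shift == 0, 1, 2):
 *
 *      EIP
 *      CS
 *      EFLAGS          (iret only)
 *      <addend bytes>  (lret $imm only)
 *      ESP             (when returning to an outer privilege level,
 *      SS               and always for a 64-bit iret)
 *
 * When the privilege level changes, the addend is applied a second time to
 * the stack pointer being returned to, releasing any parameters that a call
 * gate copied there.
 */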
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

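/*
 * IRET with EFLAGS.NT set returns to the previous task instead: the
 * selector of that task's TSS is taken from the back link field at offset 0
 * of the current TSS and must refer to a busy TSS descriptor in the GDT.
 * This form does not exist in long mode and raises #GP(0) there.  Otherwise
 * the common protected-mode return path above is used.
 */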
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

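/*
 * SYSENTER and SYSEXIT derive every selector from IA32_SYSENTER_CS:
 *
 *   SYSENTER:  CS = MSR,      SS = MSR + 8     (RPL forced to 0)
 *   SYSEXIT:   CS = MSR + 16, SS = MSR + 24    (32-bit return, RPL 3)
 *              CS = MSR + 32, SS = MSR + 40    (64-bit return, RPL 3)
 *
 * The descriptor tables are not consulted: flat segments are loaded
 * directly.  The entry point and stack come from IA32_SYSENTER_EIP/ESP,
 * while SYSEXIT jumps to EDX/RDX with the stack taken from ECX/RCX.
 */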
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

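/*
 * LSL, LAR, VERR and VERW report success through ZF.  LSL and LAR are
 * translated with CC_OP already set to CC_OP_EFLAGS (asserted below) and
 * update CC_Z in CC_SRC in place; VERR and VERW recompute the flags first
 * and then switch to CC_OP_EFLAGS.  On any failure (null selector, failed
 * privilege check or unsuitable descriptor type) ZF is cleared and, for
 * LSL/LAR, zero is returned.
 */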
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC |= CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    CC_SRC |= CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}