/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001-2012 The Bochs Project
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2.1 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#include "qemu/osdep.h"
#include "panic.h"
#include "x86_decode.h"
#include "x86.h"
#include "x86_emu.h"
#include "x86_mmu.h"
#include "x86_flags.h"
#include "vmcs.h"
#include "vmx.h"
#include "hvf-i386.h"

#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res)      \
{                                                                       \
    fetch_operands(env, decode, 2, true, true, false);                  \
    switch (decode->operand_size) {                                     \
    case 1:                                                             \
    {                                                                   \
        uint8_t v1 = (uint8_t)decode->op[0].val;                        \
        uint8_t v2 = (uint8_t)decode->op[1].val;                        \
        uint8_t diff = v1 cmd v2;                                       \
        if (save_res) {                                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 1);             \
        }                                                               \
        FLAGS_FUNC##8(env, v1, v2, diff);                               \
        break;                                                          \
    }                                                                   \
    case 2:                                                             \
    {                                                                   \
        uint16_t v1 = (uint16_t)decode->op[0].val;                      \
        uint16_t v2 = (uint16_t)decode->op[1].val;                      \
        uint16_t diff = v1 cmd v2;                                      \
        if (save_res) {                                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 2);             \
        }                                                               \
        FLAGS_FUNC##16(env, v1, v2, diff);                              \
        break;                                                          \
    }                                                                   \
    case 4:                                                             \
    {                                                                   \
        uint32_t v1 = (uint32_t)decode->op[0].val;                      \
        uint32_t v2 = (uint32_t)decode->op[1].val;                      \
        uint32_t diff = v1 cmd v2;                                      \
        if (save_res) {                                                 \
            write_val_ext(env, decode->op[0].ptr, diff, 4);             \
        }                                                               \
        FLAGS_FUNC##32(env, v1, v2, diff);                              \
        break;                                                          \
    }                                                                   \
    default:                                                            \
        VM_PANIC("bad size\n");                                         \
    }                                                                   \
}                                                                       \

target_ulong read_reg(CPUX86State *env, int reg, int size)
{
    switch (size) {
    case 1:
        return x86_reg(env, reg)->lx;
    case 2:
        return x86_reg(env, reg)->rx;
    case 4:
        return x86_reg(env, reg)->erx;
    case 8:
        return x86_reg(env, reg)->rrx;
    default:
        abort();
    }
    return 0;
}

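/*
 * The register accessors here select a sub-register view via 'size':
 * lx (low 8 bits), rx (low 16 bits), erx (low 32 bits) and rrx (the full
 * 64-bit register).  Note that 32-bit writes go through rrx with a
 * uint32_t cast, i.e. they zero-extend into the upper half of the
 * register, while 8- and 16-bit writes leave the upper bits intact,
 * matching x86-64 register write semantics.
 */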
void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
{
    switch (size) {
    case 1:
        x86_reg(env, reg)->lx = val;
        break;
    case 2:
        x86_reg(env, reg)->rx = val;
        break;
    case 4:
        x86_reg(env, reg)->rrx = (uint32_t)val;
        break;
    case 8:
        x86_reg(env, reg)->rrx = val;
        break;
    default:
        abort();
    }
}

target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
{
    target_ulong val;

    switch (size) {
    case 1:
        val = *(uint8_t *)reg_ptr;
        break;
    case 2:
        val = *(uint16_t *)reg_ptr;
        break;
    case 4:
        val = *(uint32_t *)reg_ptr;
        break;
    case 8:
        val = *(uint64_t *)reg_ptr;
        break;
    default:
        abort();
    }
    return val;
}

void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
{
    switch (size) {
    case 1:
        *(uint8_t *)reg_ptr = val;
        break;
    case 2:
        *(uint16_t *)reg_ptr = val;
        break;
    case 4:
        *(uint64_t *)reg_ptr = (uint32_t)val;
        break;
    case 8:
        *(uint64_t *)reg_ptr = val;
        break;
    default:
        abort();
    }
}

static bool is_host_reg(CPUX86State *env, target_ulong ptr)
{
    return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
}

void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
{
    if (is_host_reg(env, ptr)) {
        write_val_to_reg(ptr, val, size);
        return;
    }
    vmx_write_mem(env_cpu(env), ptr, &val, size);
}

uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
{
    vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes);
    return env->hvf_mmio_buf;
}


target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size)
{
    target_ulong val;
    uint8_t *mmio_ptr;

    if (is_host_reg(env, ptr)) {
        return read_val_from_reg(ptr, size);
    }

    mmio_ptr = read_mmio(env, ptr, size);
    switch (size) {
    case 1:
        val = *(uint8_t *)mmio_ptr;
        break;
    case 2:
        val = *(uint16_t *)mmio_ptr;
        break;
    case 4:
        val = *(uint32_t *)mmio_ptr;
        break;
    case 8:
        val = *(uint64_t *)mmio_ptr;
        break;
    default:
        VM_PANIC("bad size\n");
        break;
    }
    return val;
}

static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
                           int n, bool val_op0, bool val_op1, bool val_op2)
{
    int i;
    bool calc_val[3] = {val_op0, val_op1, val_op2};

    for (i = 0; i < n; i++) {
        switch (decode->op[i].type) {
        case X86_VAR_IMMEDIATE:
            break;
        case X86_VAR_REG:
            VM_PANIC_ON(!decode->op[i].ptr);
            if (calc_val[i]) {
                decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
                                                      decode->operand_size);
            }
            break;
        case X86_VAR_RM:
            calc_modrm_operand(env, decode, &decode->op[i]);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        case X86_VAR_OFFSET:
            decode->op[i].ptr = decode_linear_addr(env, decode,
                                                   decode->op[i].ptr,
                                                   R_DS);
            if (calc_val[i]) {
                decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
                                                 decode->operand_size);
            }
            break;
        default:
            break;
        }
    }
}

static void exec_mov(CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, false, true, false);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);

    env->eip += decode->len;
}

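/*
 * The ALU-style handlers below are thin wrappers around
 * EXEC_2OP_FLAGS_CMD().  The 'cmd' argument is spliced textually between
 * the two operand values, so e.g. exec_adc()'s "+get_CF(env)+" expands,
 * for a 16-bit operand size, to:
 *
 *     uint16_t diff = v1 + get_CF(env) + v2;
 *     write_val_ext(env, decode->op[0].ptr, diff, 2);
 *     SET_FLAGS_OSZAPC_ADD16(env, v1, v2, diff);
 *
 * The 'save_res' argument controls whether the result is written back;
 * CMP and TST pass false and only update the flags.
 */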
static void exec_add(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    env->eip += decode->len;
}

static void exec_or(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
    env->eip += decode->len;
}

static void exec_adc(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
    env->eip += decode->len;
}

static void exec_sbb(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
    env->eip += decode->len;
}

static void exec_and(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
    env->eip += decode->len;
}

static void exec_sub(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
    env->eip += decode->len;
}

static void exec_xor(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
    env->eip += decode->len;
}

static void exec_neg(CPUX86State *env, struct x86_decode *decode)
{
    /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
    int32_t val;
    fetch_operands(env, decode, 2, true, true, false);

    val = 0 - sign(decode->op[1].val, decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);

    if (4 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
    } else if (2 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
    } else if (1 == decode->operand_size) {
        SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
    } else {
        VM_PANIC("bad op size\n");
    }

    /*lflags_to_rflags(env);*/
    env->eip += decode->len;
}

static void exec_cmp(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    env->eip += decode->len;
}

static void exec_inc(CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);

    env->eip += decode->len;
}

static void exec_dec(CPUX86State *env, struct x86_decode *decode)
{
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = 0;

    EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
    env->eip += decode->len;
}

static void exec_tst(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
    env->eip += decode->len;
}

static void exec_not(CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 1, true, false, false);

    write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
                  decode->operand_size);
    env->eip += decode->len;
}

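/*
 * MOVZX (and MOVSX further down) temporarily narrows decode->operand_size
 * to the source width (1 byte for opcode 0x0f 0xb6, or 0xbe for MOVSX;
 * 2 bytes otherwise) so that calc_modrm_operand()/read_val_ext() fetch a
 * correctly sized source operand, then writes the zero- or sign-extended
 * value back using the original destination operand size.
 */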
void exec_movzx(CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 1, false, false, false);

    if (0xb6 == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }
    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    env->eip += decode->len;
}

static void exec_out(CPUX86State *env, struct x86_decode *decode)
{
    switch (decode->opcode[0]) {
    case 0xe6:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1);
        break;
    case 0xe7:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    case 0xee:
        hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1);
        break;
    case 0xef:
        hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1,
                      decode->operand_size, 1);
        break;
    default:
        VM_PANIC("Bad out opcode\n");
        break;
    }
    env->eip += decode->len;
}

static void exec_in(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong val = 0;
    switch (decode->opcode[0]) {
    case 0xe4:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1);
        break;
    case 0xe5:
        hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0,
                      decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }
        break;
    case 0xec:
        hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1);
        break;
    case 0xed:
        hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1);
        if (decode->operand_size == 2) {
            AX(env) = val;
        } else {
            RAX(env) = (uint32_t)val;
        }

        break;
    default:
        VM_PANIC("Bad in opcode\n");
        break;
    }

    env->eip += decode->len;
}

static inline void string_increment_reg(CPUX86State *env, int reg,
                                        struct x86_decode *decode)
{
    target_ulong val = read_reg(env, reg, decode->addressing_size);
    if (env->eflags & DF_MASK) {
        val -= decode->operand_size;
    } else {
        val += decode->operand_size;
    }
    write_reg(env, reg, val, decode->addressing_size);
}

static inline void string_rep(CPUX86State *env, struct x86_decode *decode,
                              void (*func)(CPUX86State *env,
                                           struct x86_decode *ins), int rep)
{
    target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
    while (rcx--) {
        func(env, decode);
        write_reg(env, R_ECX, rcx, decode->addressing_size);
        if ((PREFIX_REP == rep) && !get_ZF(env)) {
            break;
        }
        if ((PREFIX_REPN == rep) && get_ZF(env)) {
            break;
        }
    }
}

static void exec_ins_single(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
                                         decode->addressing_size, R_ES);

    hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0,
                  decode->operand_size, 1);
    vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf,
                  decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}

static void exec_ins(CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_ins_single, 0);
    } else {
        exec_ins_single(env, decode);
    }

    env->eip += decode->len;
}

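/*
 * OUTS is the mirror image of INS above: the data is staged through the
 * per-vCPU hvf_mmio_buf bounce buffer.  Here vmx_read_mem() copies
 * decode->operand_size bytes from guest memory at DS:rSI into the buffer,
 * which hvf_handle_io() then forwards to port DX with the direction
 * argument set to 1 (out).
 */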
static void exec_outs_single(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);

    vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr,
                 decode->operand_size);
    hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1,
                  decode->operand_size, 1);

    string_increment_reg(env, R_ESI, decode);
}

static void exec_outs(CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_outs_single, 0);
    } else {
        exec_outs_single(env, decode);
    }

    env->eip += decode->len;
}

static void exec_movs_single(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;
    target_ulong val;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(env_cpu(env), RDI(env),
                                decode->addressing_size, R_ES);

    val = read_val_ext(env, src_addr, decode->operand_size);
    write_val_ext(env, dst_addr, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_movs(CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_movs_single, 0);
    } else {
        exec_movs_single(env, decode);
    }

    env->eip += decode->len;
}

static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong src_addr;
    target_ulong dst_addr;

    src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    dst_addr = linear_addr_size(env_cpu(env), RDI(env),
                                decode->addressing_size, R_ES);

    decode->op[0].type = X86_VAR_IMMEDIATE;
    decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);

    string_increment_reg(env, R_ESI, decode);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_cmps(CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_cmps_single, decode->rep);
    } else {
        exec_cmps_single(env, decode);
    }
    env->eip += decode->len;
}


static void exec_stos_single(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val;

    addr = linear_addr_size(env_cpu(env), RDI(env),
                            decode->addressing_size, R_ES);
    val = read_reg(env, R_EAX, decode->operand_size);
    vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size);

    string_increment_reg(env, R_EDI, decode);
}


static void exec_stos(CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_stos_single, 0);
    } else {
        exec_stos_single(env, decode);
    }

    env->eip += decode->len;
}

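/*
 * SCAS compares the accumulator with the byte/word/dword at ES:rDI.
 * exec_scas() pre-loads op[0] with the R_EAX register operand, and
 * exec_scas_single() reads the memory operand into op[1] as an immediate,
 * so the generic SUB flags macro can be reused without writing a result
 * back.
 */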
static void exec_scas_single(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;

    addr = linear_addr_size(env_cpu(env), RDI(env),
                            decode->addressing_size, R_ES);
    decode->op[1].type = X86_VAR_IMMEDIATE;
    vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size);

    EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
    string_increment_reg(env, R_EDI, decode);
}

static void exec_scas(CPUX86State *env, struct x86_decode *decode)
{
    decode->op[0].type = X86_VAR_REG;
    decode->op[0].reg = R_EAX;
    if (decode->rep) {
        string_rep(env, decode, exec_scas_single, decode->rep);
    } else {
        exec_scas_single(env, decode);
    }

    env->eip += decode->len;
}

static void exec_lods_single(CPUX86State *env, struct x86_decode *decode)
{
    target_ulong addr;
    target_ulong val = 0;

    addr = decode_linear_addr(env, decode, RSI(env), R_DS);
    vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size);
    write_reg(env, R_EAX, val, decode->operand_size);

    string_increment_reg(env, R_ESI, decode);
}

static void exec_lods(CPUX86State *env, struct x86_decode *decode)
{
    if (decode->rep) {
        string_rep(env, decode, exec_lods_single, 0);
    } else {
        exec_lods_single(env, decode);
    }

    env->eip += decode->len;
}

static void raise_exception(CPUX86State *env, int exception_index,
                            int error_code)
{
    env->exception_nr = exception_index;
    env->error_code = error_code;
    env->has_error_code = true;
    env->exception_injected = 1;
}

void simulate_rdmsr(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t msr = ECX(env);
    uint64_t val = 0;

    switch (msr) {
    case MSR_IA32_TSC:
        val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(cpu->apic_state);
        break;
    case MSR_APIC_START ... MSR_APIC_END: {
        int ret;
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;

        ret = apic_msr_read(index, &val);
        if (ret < 0) {
            raise_exception(env, EXCP0D_GPF, 0);
        }

        break;
    }
    case MSR_IA32_UCODE_REV:
        val = cpu->ucode_rev;
        break;
    case MSR_EFER:
        val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
        break;
    case MSR_FSBASE:
        val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
        break;
    case MSR_GSBASE:
        val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
        break;
    case MSR_KERNELGSBASE:
        val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_CORE_THREAD_COUNT:
        val = cpu_x86_get_msr_core_thread_count(cpu);
        break;
    default:
        /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
        val = 0;
        break;
    }

    RAX(env) = (uint32_t)val;
    RDX(env) = (uint32_t)(val >> 32);
}

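/*
 * Note that simulate_rdmsr() returns 0 for MSRs it does not recognize
 * rather than injecting #GP, and that the 64-bit result is split across
 * RAX (low 32 bits) and RDX (high 32 bits), clearing the upper halves of
 * both registers.
 */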
static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode)
{
    simulate_rdmsr(env);
    env->eip += decode->len;
}

void simulate_wrmsr(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t msr = ECX(env);
    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);

    switch (msr) {
    case MSR_IA32_TSC:
        break;
    case MSR_IA32_APICBASE: {
        int r;

        r = cpu_set_apic_base(cpu->apic_state, data);
        if (r < 0) {
            raise_exception(env, EXCP0D_GPF, 0);
        }

        break;
    }
    case MSR_APIC_START ... MSR_APIC_END: {
        int ret;
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;

        ret = apic_msr_write(index, data);
        if (ret < 0) {
            raise_exception(env, EXCP0D_GPF, 0);
        }

        break;
    }
    case MSR_FSBASE:
        wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
        break;
    case MSR_GSBASE:
        wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
        break;
    case MSR_KERNELGSBASE:
        wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
        break;
    case MSR_STAR:
        abort();
        break;
    case MSR_LSTAR:
        abort();
        break;
    case MSR_CSTAR:
        abort();
        break;
    case MSR_EFER:
        /*printf("new efer %llx\n", EFER(cs));*/
        wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
        if (data & MSR_EFER_NXE) {
            hv_vcpu_invalidate_tlb(cs->accel->fd);
        }
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = data;
        break;
    default:
        break;
    }

    /* Related to support known hypervisor interface */
    /* if (g_hypervisor_iface)
         g_hypervisor_iface->wrmsr_handler(cs, msr, data);

    printf("write msr %llx\n", RCX(cs));*/
}

static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode)
{
    simulate_wrmsr(env);
    env->eip += decode->len;
}

/*
 * flag:
 * 0 - bt, 1 - btc, 2 - bts, 3 - btr
 */
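/*
 * For a memory destination, the bit offset in op[1] may reach outside the
 * addressed word/dword, so do_bt() first displaces op[0].ptr by the signed
 * quotient (bit offset / operand bits) elements and then uses only
 * (bit offset % operand bits) as the bit index.  Only CF is updated here;
 * the other arithmetic flags are left untouched.
 */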
static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
{
    int32_t displacement;
    uint8_t index;
    bool cf;
    int mask = (4 == decode->operand_size) ? 0x1f : 0xf;

    VM_PANIC_ON(decode->rex.rex);

    fetch_operands(env, decode, 2, false, true, false);
    index = decode->op[1].val & mask;

    if (decode->op[0].type != X86_VAR_REG) {
        if (4 == decode->operand_size) {
            displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
            decode->op[0].ptr += 4 * displacement;
        } else if (2 == decode->operand_size) {
            displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
            decode->op[0].ptr += 2 * displacement;
        } else {
            VM_PANIC("bt 64bit\n");
        }
    }
    decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
                                     decode->operand_size);
    cf = (decode->op[0].val >> index) & 0x01;

    switch (flag) {
    case 0:
        set_CF(env, cf);
        return;
    case 1:
        decode->op[0].val ^= (1u << index);
        break;
    case 2:
        decode->op[0].val |= (1u << index);
        break;
    case 3:
        decode->op[0].val &= ~(1u << index);
        break;
    }
    write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
                  decode->operand_size);
    set_CF(env, cf);
}

static void exec_bt(CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 0);
    env->eip += decode->len;
}

static void exec_btc(CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 1);
    env->eip += decode->len;
}

static void exec_btr(CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 3);
    env->eip += decode->len;
}

static void exec_bts(CPUX86State *env, struct x86_decode *decode)
{
    do_bt(env, decode, 2);
    env->eip += decode->len;
}

void exec_shl(CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);

    count = decode->op[1].val;
    count &= 0x1f; /* count is masked to 5 bits */
    if (!count) {
        goto exit;
    }

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t res = 0;
        if (count <= 8) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (8 - count)) & 0x1;
            of = cf ^ (res >> 7);
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);
        SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res = 0;

        /* from bochs */
        if (count <= 16) {
            res = (decode->op[0].val << count);
            cf = (decode->op[0].val >> (16 - count)) & 0x1;
            of = cf ^ (res >> 15); /* of = cf ^ result15 */
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);
        SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res = decode->op[0].val << count;

        write_val_ext(env, decode->op[0].ptr, res, 4);
        SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
        cf = (decode->op[0].val >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    default:
        abort();
    }

exit:
    /* lflags_to_rflags(env); */
    env->eip += decode->len;
}

void exec_movsx(CPUX86State *env, struct x86_decode *decode)
{
    int src_op_size;
    int op_size = decode->operand_size;

    fetch_operands(env, decode, 2, false, false, false);

    if (0xbe == decode->opcode[1]) {
        src_op_size = 1;
    } else {
        src_op_size = 2;
    }

    decode->operand_size = src_op_size;
    calc_modrm_operand(env, decode, &decode->op[1]);
    decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
                             src_op_size);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);

    env->eip += decode->len;
}

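/*
 * The ROR/ROL handlers below update only CF and OF (via SET_FLAGS_OxxxxC);
 * SF/ZF/AF/PF are not affected.  The rotate count is reduced modulo the
 * operand width, and a count that reduces to zero leaves the destination
 * unchanged (although CF/OF may still be refreshed for nonzero counts
 * that are a multiple of the width).
 */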
void exec_ror(CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit6, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
                bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
                SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
            }
        } else {
            count &= 0x7; /* use only bottom 3 bits */
            res = ((uint8_t)decode->op[0].val >> count) |
                   ((uint8_t)decode->op[0].val << (8 - count));
            write_val_ext(env, decode->op[0].ptr, res, 1);
            bit6 = (res >> 6) & 1;
            bit7 = (res >> 7) & 1;
            /* set eflags: ROR count affects the following flags: C, O */
            SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
        }
        break;
    }
    case 2:
    {
        uint32_t bit14, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
                bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
                /* of = result14 ^ result15 */
                SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
            }
        } else {
            count &= 0x0f; /* use only 4 LSB's */
            res = ((uint16_t)decode->op[0].val >> count) |
                   ((uint16_t)decode->op[0].val << (16 - count));
            write_val_ext(env, decode->op[0].ptr, res, 2);

            bit14 = (res >> 14) & 1;
            bit15 = (res >> 15) & 1;
            /* of = result14 ^ result15 */
            SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
        }
        break;
    }
    case 4:
    {
        uint32_t bit31, bit30;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val >> count) |
                   ((uint32_t)decode->op[0].val << (32 - count));
            write_val_ext(env, decode->op[0].ptr, res, 4);

            bit31 = (res >> 31) & 1;
            bit30 = (res >> 30) & 1;
            /* of = result30 ^ result31 */
            SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
        }
        break;
    }
    }
    env->eip += decode->len;
}

void exec_rol(CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val;

    switch (decode->operand_size) {
    case 1:
    {
        uint32_t bit0, bit7;
        uint8_t res;

        if ((count & 0x07) == 0) {
            if (count & 0x18) {
                bit0 = ((uint8_t)decode->op[0].val & 1);
                bit7 = ((uint8_t)decode->op[0].val >> 7);
                SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
            }
        } else {
            count &= 0x7; /* use only lowest 3 bits */
            res = ((uint8_t)decode->op[0].val << count) |
                   ((uint8_t)decode->op[0].val >> (8 - count));

            write_val_ext(env, decode->op[0].ptr, res, 1);
            /* set eflags:
             * ROL count affects the following flags: C, O
             */
            bit0 = (res & 1);
            bit7 = (res >> 7);
            SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
        }
        break;
    }
    case 2:
    {
        uint32_t bit0, bit15;
        uint16_t res;

        if ((count & 0x0f) == 0) {
            if (count & 0x10) {
                bit0 = ((uint16_t)decode->op[0].val & 0x1);
                bit15 = ((uint16_t)decode->op[0].val >> 15);
                /* of = cf ^ result15 */
                SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
            }
        } else {
            count &= 0x0f; /* only use bottom 4 bits */
            res = ((uint16_t)decode->op[0].val << count) |
                   ((uint16_t)decode->op[0].val >> (16 - count));

            write_val_ext(env, decode->op[0].ptr, res, 2);
            bit0 = (res & 0x1);
            bit15 = (res >> 15);
            /* of = cf ^ result15 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
        }
        break;
    }
    case 4:
    {
        uint32_t bit0, bit31;
        uint32_t res;

        count &= 0x1f;
        if (count) {
            res = ((uint32_t)decode->op[0].val << count) |
                   ((uint32_t)decode->op[0].val >> (32 - count));

            write_val_ext(env, decode->op[0].ptr, res, 4);
            bit0 = (res & 0x1);
            bit31 = (res >> 31);
            /* of = cf ^ result31 */
            SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
        }
        break;
    }
    }
    env->eip += decode->len;
}


void exec_rcl(CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;
        count %= 9;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_8 << 1) | get_CF(env);
        } else {
            res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
                  (op1_8 >> (9 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (8 - count)) & 0x01;
        of = cf ^ (res >> 7); /* of = cf ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t res;
        uint16_t op1_16 = decode->op[0].val;

        count %= 17;
        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_16 << 1) | get_CF(env);
        } else if (count == 16) {
            res = (get_CF(env) << 15) | (op1_16 >> 1);
        } else { /* 2..15 */
            res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
                  (op1_16 >> (17 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (16 - count)) & 0x1;
        of = cf ^ (res >> 15); /* of = cf ^ result15 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 << 1) | get_CF(env);
        } else {
            res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
                  (op1_32 >> (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (32 - count)) & 0x1;
        of = cf ^ (res >> 31); /* of = cf ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    env->eip += decode->len;
}

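/*
 * RCR, like RCL above, rotates through the carry flag, i.e. it is
 * effectively a rotate over operand_width + 1 bits.  That is why the
 * count is reduced modulo 9 or 17 for 8- and 16-bit operands; for 32-bit
 * operands the 0x1f mask applied to the count is already sufficient.
 */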
void exec_rcr(CPUX86State *env, struct x86_decode *decode)
{
    uint8_t count;
    int of = 0, cf = 0;

    fetch_operands(env, decode, 2, true, true, false);
    count = decode->op[1].val & 0x1f;

    switch (decode->operand_size) {
    case 1:
    {
        uint8_t op1_8 = decode->op[0].val;
        uint8_t res;

        count %= 9;
        if (!count) {
            break;
        }
        res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
              (op1_8 << (9 - count));

        write_val_ext(env, decode->op[0].ptr, res, 1);

        cf = (op1_8 >> (count - 1)) & 0x1;
        of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 2:
    {
        uint16_t op1_16 = decode->op[0].val;
        uint16_t res;

        count %= 17;
        if (!count) {
            break;
        }
        res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
              (op1_16 << (17 - count));

        write_val_ext(env, decode->op[0].ptr, res, 2);

        cf = (op1_16 >> (count - 1)) & 0x1;
        of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
                                                            result14 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    case 4:
    {
        uint32_t res;
        uint32_t op1_32 = decode->op[0].val;

        if (!count) {
            break;
        }

        if (1 == count) {
            res = (op1_32 >> 1) | (get_CF(env) << 31);
        } else {
            res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
                  (op1_32 << (33 - count));
        }

        write_val_ext(env, decode->op[0].ptr, res, 4);

        cf = (op1_32 >> (count - 1)) & 0x1;
        of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
        SET_FLAGS_OxxxxC(env, of, cf);
        break;
    }
    }
    env->eip += decode->len;
}

static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
{
    fetch_operands(env, decode, 2, true, true, false);

    write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
                  decode->operand_size);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    env->eip += decode->len;
}

static void exec_xadd(CPUX86State *env, struct x86_decode *decode)
{
    EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
    write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
                  decode->operand_size);

    env->eip += decode->len;
}

static struct cmd_handler {
    enum x86_decode_cmd cmd;
    void (*handler)(CPUX86State *env, struct x86_decode *ins);
} handlers[] = {
    {X86_DECODE_CMD_INVL, NULL,},
    {X86_DECODE_CMD_MOV, exec_mov},
    {X86_DECODE_CMD_ADD, exec_add},
    {X86_DECODE_CMD_OR, exec_or},
    {X86_DECODE_CMD_ADC, exec_adc},
    {X86_DECODE_CMD_SBB, exec_sbb},
    {X86_DECODE_CMD_AND, exec_and},
    {X86_DECODE_CMD_SUB, exec_sub},
    {X86_DECODE_CMD_NEG, exec_neg},
    {X86_DECODE_CMD_XOR, exec_xor},
    {X86_DECODE_CMD_CMP, exec_cmp},
    {X86_DECODE_CMD_INC, exec_inc},
    {X86_DECODE_CMD_DEC, exec_dec},
    {X86_DECODE_CMD_TST, exec_tst},
    {X86_DECODE_CMD_NOT, exec_not},
    {X86_DECODE_CMD_MOVZX, exec_movzx},
    {X86_DECODE_CMD_OUT, exec_out},
    {X86_DECODE_CMD_IN, exec_in},
    {X86_DECODE_CMD_INS, exec_ins},
    {X86_DECODE_CMD_OUTS, exec_outs},
    {X86_DECODE_CMD_RDMSR, exec_rdmsr},
    {X86_DECODE_CMD_WRMSR, exec_wrmsr},
    {X86_DECODE_CMD_BT, exec_bt},
    {X86_DECODE_CMD_BTR, exec_btr},
    {X86_DECODE_CMD_BTC, exec_btc},
    {X86_DECODE_CMD_BTS, exec_bts},
    {X86_DECODE_CMD_SHL, exec_shl},
    {X86_DECODE_CMD_ROL, exec_rol},
    {X86_DECODE_CMD_ROR, exec_ror},
    {X86_DECODE_CMD_RCR, exec_rcr},
    {X86_DECODE_CMD_RCL, exec_rcl},
    /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
    {X86_DECODE_CMD_MOVS, exec_movs},
    {X86_DECODE_CMD_CMPS, exec_cmps},
    {X86_DECODE_CMD_STOS, exec_stos},
    {X86_DECODE_CMD_SCAS, exec_scas},
    {X86_DECODE_CMD_LODS, exec_lods},
    {X86_DECODE_CMD_MOVSX, exec_movsx},
    {X86_DECODE_CMD_XCHG, exec_xchg},
    {X86_DECODE_CMD_XADD, exec_xadd},
};

static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];

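/*
 * handlers[] above is a compact list; init_cmd_handler() scatters it into
 * _cmd_handler[], which is indexed directly by enum x86_decode_cmd so that
 * exec_instruction() can dispatch with a single array lookup.  Commands
 * without a registered handler hit the "Unimplemented handler" path in
 * exec_instruction() and are simply skipped over.
 */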
static void init_cmd_handler(void)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        _cmd_handler[handlers[i].cmd] = handlers[i];
    }
}

void load_regs(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    int i = 0;
    RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
    RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
    RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
    RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
    RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
    RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
    RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
    RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
    }

    env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
    rflags_to_lflags(env);
    env->eip = rreg(cs->accel->fd, HV_X86_RIP);
}

void store_regs(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    int i = 0;
    wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
    wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
    wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
    wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
    wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
    wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
    wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
    wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
    for (i = 8; i < 16; i++) {
        wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
    }

    lflags_to_rflags(env);
    wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
    macvm_set_rip(cs, env->eip);
}

bool exec_instruction(CPUX86State *env, struct x86_decode *ins)
{
    /*if (hvf_vcpu_id(cs))
    printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cs), env->eip,
          decode_cmd_to_string(ins->cmd));*/

    if (!_cmd_handler[ins->cmd].handler) {
        printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
                ins->cmd, ins->opcode[0],
                ins->opcode_len > 1 ? ins->opcode[1] : 0);
        env->eip += ins->len;
        return true;
    }

    _cmd_handler[ins->cmd].handler(env, ins);
    return true;
}

void init_emu(void)
{
    init_cmd_handler();
}