//
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#![allow(non_camel_case_types)]

//
// MOV-Move
// SDM Volume 1, Chapter 4.3
// Copies the second operand (source operand) to the first operand (destination operand).
//

use crate::arch::x86::emulator::instructions::*;

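// Each handler below is generated by one of these macros. They expand to an
// `emulate` implementation in which operand 0 of the decoded instruction is the
// destination and operand 1 is the source; the macro's type parameter fixes the
// operand width passed to get_op()/set_op().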
macro_rules! mov_rm_r {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let src_reg_value = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$bound>(),
                state,
                platform,
                src_reg_value,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}

macro_rules! mov_rm_imm {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let imm = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$bound>(),
                state,
                platform,
                imm,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}

macro_rules! movzx {
    ($dest_op_size:ty, $src_op_size:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let src_value = get_op(
                &insn,
                1,
                std::mem::size_of::<$src_op_size>(),
                state,
                platform,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$dest_op_size>(),
                state,
                platform,
                src_value,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}

// MOV r/rm is a special case of MOVZX, where both operands have the same size.
macro_rules! mov_r_rm {
    ($op_size:ty) => {
        movzx!($op_size, $op_size);
    };
}

macro_rules! mov_r_imm {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let imm = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$bound>(),
                state,
                platform,
                imm,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}

pub struct Mov_r8_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r8_rm8 {
    mov_r_rm!(u8);
}

pub struct Mov_r8_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r8_imm8 {
    mov_r_imm!(u8);
}

pub struct Mov_r16_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r16_rm16 {
    mov_r_rm!(u16);
}

pub struct Mov_r16_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r16_imm16 {
    mov_r_imm!(u16);
}

pub struct Mov_r32_rm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r32_rm32 {
    mov_r_rm!(u32);
}

pub struct Mov_r32_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r32_imm32 {
    mov_r_imm!(u32);
}

pub struct Mov_r64_rm64;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r64_rm64 {
    mov_r_rm!(u64);
}

pub struct Mov_r64_imm64;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r64_imm64 {
    mov_r_imm!(u64);
}

pub struct Mov_rm8_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm8_imm8 {
    mov_rm_imm!(u8);
}

pub struct Mov_rm8_r8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm8_r8 {
    mov_rm_r!(u8);
}

pub struct Mov_rm16_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm16_imm16 {
    mov_rm_imm!(u16);
}

pub struct Mov_rm16_r16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm16_r16 {
    mov_rm_r!(u16);
}

pub struct Mov_rm32_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm32_imm32 {
    mov_rm_imm!(u32);
}

pub struct Mov_rm32_r32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm32_r32 {
    mov_rm_r!(u32);
}

pub struct Mov_rm64_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm64_imm32 {
    mov_rm_imm!(u32);
}

pub struct Mov_rm64_r64;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm64_r64 {
    mov_rm_r!(u64);
}

// MOVZX
pub struct Movzx_r16_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r16_rm8 {
    movzx!(u16, u8);
}

pub struct Movzx_r32_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r32_rm8 {
    movzx!(u32, u8);
}

pub struct Movzx_r64_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r64_rm8 {
    movzx!(u64, u8);
}

pub struct Movzx_r32_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r32_rm16 {
    movzx!(u32, u16);
}

pub struct Movzx_r64_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r64_rm16 {
    movzx!(u64, u16);
}

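// MOV to/from a memory offset (moffs forms, AX/EAX/RAX only). These are plain
// same-size moves, so they reuse the movzx! macro with identical source and
// destination widths.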
pub struct Mov_moffs16_AX;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_moffs16_AX {
    movzx!(u16, u16);
}

pub struct Mov_AX_moffs16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_AX_moffs16 {
    movzx!(u16, u16);
}

pub struct Mov_moffs32_EAX;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_moffs32_EAX {
    movzx!(u32, u32);
}

pub struct Mov_EAX_moffs32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_EAX_moffs32 {
    movzx!(u32, u32);
}

pub struct Mov_moffs64_RAX;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_moffs64_RAX {
    movzx!(u64, u64);
}

pub struct Mov_RAX_moffs64;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_RAX_moffs64 {
    movzx!(u64, u64);
}

#[cfg(test)]
mod tests {
    #![allow(unused_mut)]
    use super::*;
    use crate::arch::x86::emulator::mock_vmm::*;

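    // The tests below drive the emulator through MockVmm: it is constructed with
    // an initial instruction pointer, a list of (Register, value) pairs and an
    // optional (address, bytes) memory mapping, then asked to emulate raw
    // instruction bytes and expose the resulting CPU state and memory.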
    #[test]
    // mov rax,rbx
    fn test_mov_r64_r64() {
        let rbx: u64 = 0x8899aabbccddeeff;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0x89, 0xd8];
        let mut vmm = MockVmm::new(ip, vec![(Register::RBX, rbx)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, rbx);
    }

    #[test]
    // mov rax,0x1122334411223344
    fn test_mov_r64_imm64() {
        let imm64: u64 = 0x1122334411223344;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0xb8, 0x44, 0x33, 0x22, 0x11, 0x44, 0x33, 0x22, 0x11];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, imm64);
    }

    #[test]
    // mov rax, [rax+rax]
    fn test_mov_r64_m64() {
        let target_rax: u64 = 0x1234567812345678;
        let mut rax: u64 = 0x100;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [0x48, 0x8b, 0x04, 0x00];
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], Some((rax + rax, &memory)));
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        rax = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, target_rax);
    }

    #[test]
    // mov al,0x11
    fn test_mov_r8_imm8() {
        let imm8: u8 = 0x11;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0xb0, 0x11];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let al = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::AL)
            .unwrap();
        assert_eq!(al as u8, imm8);
    }

    #[test]
    // mov eax,0x11
    fn test_mov_r32_imm8() {
        let imm8: u8 = 0x11;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0xb8, 0x11, 0x00, 0x00, 0x00];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax as u8, imm8);
    }

    #[test]
    // mov rax,0x11223344
    fn test_mov_r64_imm32() {
        let imm32: u32 = 0x11223344;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0xc7, 0xc0, 0x44, 0x33, 0x22, 0x11];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, imm32 as u64);
    }

    #[test]
    // mov byte ptr [rax],dh
    fn test_mov_m8_r8() {
        let rax: u64 = 0x100;
        let dh: u8 = 0x99;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x88, 0x30];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::DH, dh.into())],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 1] = [0; 1];
        vmm.read_memory(rax, &mut memory).unwrap();

        assert_eq!(u8::from_le_bytes(memory), dh);
    }

    #[test]
    // mov dword ptr [rax],esi
    fn test_mov_m32_r32() {
        let rax: u64 = 0x100;
        let esi: u32 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x89, 0x30];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::ESI, esi.into())],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 4] = [0; 4];
        vmm.read_memory(rax, &mut memory).unwrap();

        assert_eq!(u32::from_le_bytes(memory), esi);
    }

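    // Illustrative addition, not part of the original test suite: the 16-bit
    // register-to-memory form (Mov_rm16_r16) is otherwise untested here. The
    // encoding mirrors test_mov_m32_r32 with a 0x66 operand-size prefix.
    #[test]
    // mov word ptr [rax],bx
    fn test_mov_m16_r16() {
        let rax: u64 = 0x100;
        let bx: u16 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x66, 0x89, 0x18];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::BX, bx.into())],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 2] = [0; 2];
        vmm.read_memory(rax, &mut memory).unwrap();

        assert_eq!(u16::from_le_bytes(memory), bx);
    }
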
    #[test]
    // mov dword ptr [rax+0x00000001],edi
    fn test_mov_m32imm32_r32() {
        let rax: u64 = 0x100;
        let displacement: u64 = 0x1;
        let edi: u32 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x89, 0x3c, 0x05, 0x01, 0x00, 0x00, 0x00];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::EDI, edi.into())],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 4] = [0; 4];
        vmm.read_memory(rax + displacement, &mut memory).unwrap();

        assert_eq!(u32::from_le_bytes(memory), edi);
    }

    #[test]
    // mov eax,dword ptr [rax+10h]
    fn test_mov_r32_m32imm32() {
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let eax: u32 = 0xaabbccdd;
        let memory: [u8; 4] = eax.to_le_bytes();
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x8b, 0x40, 0x10];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax)],
            Some((rax + displacement, &memory)),
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let new_eax = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(new_eax, eax as u64);
    }

    #[test]
    // mov al,byte ptr [rax+10h]
    fn test_mov_r8_m32imm32() {
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let al: u8 = 0xaa;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x8a, 0x40, 0x10];
        let memory: [u8; 1] = al.to_le_bytes();
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax)],
            Some((rax + displacement, &memory)),
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let new_al = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::AL)
            .unwrap();
        assert_eq!(new_al, al as u64);
    }

    #[test]
    // mov rax, 0x100
    // mov rbx, qword ptr [rax+10h]
    fn test_mov_r64_imm64_and_r64_m64() {
        let target_rax: u64 = 0x1234567812345678;
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [
            0x48, 0xc7, 0xc0, 0x00, 0x01, 0x00, 0x00, // mov rax, 0x100
            0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
        ];
        let mut vmm = MockVmm::new(ip, vec![], Some((rax + displacement, &memory)));
        assert!(vmm.emulate_insn(cpu_id, &insn, Some(2)).is_ok());

        let rbx: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RBX)
            .unwrap();
        assert_eq!(rbx, target_rax);
    }

    #[test]
    // mov rax, 0x100
    // mov rbx, qword ptr [rax+10h]
    fn test_mov_r64_imm64_and_r64_m64_first_insn() {
        let target_rax: u64 = 0x1234567812345678;
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [
            0x48, 0xc7, 0xc0, 0x00, 0x01, 0x00, 0x00, // mov rax, 0x100
            0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
        ];

        let mut vmm = MockVmm::new(ip, vec![], Some((rax + displacement, &memory)));
        // Only run the first instruction.
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        assert_eq!(ip + 7, vmm.cpu_state(cpu_id).unwrap().ip());

        let new_rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, new_rax);
    }

    #[test]
    // mov rax, 0x100
    // mov rbx, qword ptr [rax+10h]
    // mov rax, 0x200
    fn test_mov_r64_imm64_and_r64_m64_two_insns() {
        let target_rax: u64 = 0x1234567812345678;
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [
            0x48, 0xc7, 0xc0, 0x00, 0x01, 0x00, 0x00, // mov rax, 0x100
            0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
            0x48, 0xc7, 0xc0, 0x00, 0x02, 0x00, 0x00, // mov rax, 0x200
        ];

        let mut vmm = MockVmm::new(ip, vec![], Some((rax + displacement, &memory)));
        // Run the first 2 instructions.
        assert!(vmm.emulate_insn(cpu_id, &insn, Some(2)).is_ok());

        assert_eq!(ip + 7 + 4, vmm.cpu_state(cpu_id).unwrap().ip());

        let rbx: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RBX)
            .unwrap();
        assert_eq!(rbx, target_rax);

        // Check that rax is still at 0x100
        let new_rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, new_rax);
    }

    #[test]
    // movzx eax, bl
    fn test_movzx_r32_r8l() {
        let bx: u16 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x0f, 0xb6, 0xc3];
        let mut vmm = MockVmm::new(ip, vec![(Register::BX, bx as u64)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax, (bx & 0xff) as u64);
    }

    #[test]
    // movzx eax, bh
    fn test_movzx_r32_r8h() {
        let bx: u16 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x0f, 0xb6, 0xc7];
        let mut vmm = MockVmm::new(ip, vec![(Register::BX, bx as u64)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax, (bx >> 8) as u64);
    }

    #[test]
    // movzx eax, byte ptr [rbx]
    fn test_movzx_r32_m8() {
        let rbx: u64 = 0x100;
        let value: u8 = 0xaa;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        // 0x0f 0xb6 is MOVZX r32, r/m8; the byte-sized source matches the
        // one-byte backing memory set up below.
        let insn = [0x0f, 0xb6, 0x03];
        let memory: [u8; 1] = value.to_le_bytes();
        let mut vmm = MockVmm::new(ip, vec![(Register::RBX, rbx)], Some((rbx, &memory)));
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax, value as u64);
    }

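    // Illustrative addition, not part of the original test suite: the 16-bit
    // memory source form (Movzx_r32_rm16, opcode 0x0f 0xb7) with two bytes of
    // backing memory.
    #[test]
    // movzx eax, word ptr [rbx]
    fn test_movzx_r32_m16() {
        let rbx: u64 = 0x100;
        let value: u16 = 0xaabb;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x0f, 0xb7, 0x03];
        let memory: [u8; 2] = value.to_le_bytes();
        let mut vmm = MockVmm::new(ip, vec![(Register::RBX, rbx)], Some((rbx, &memory)));
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax, value as u64);
    }
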
    #[test]
    // movabs ax, ds:0x1337
    // movabs eax, ds:0x1337
    // movabs rax, ds:0x1337
    fn test_mov_memoff_ax() {
        let test_inputs: [(Register, &[u8]); 3] = [
            (Register::AX, &[0x66, 0xa1]),
            (Register::EAX, &[0xa1]),
            (Register::RAX, &[0x48, 0xa1]),
        ];

        // Constructs the instruction with the provided inputs and emulates it.
        fn helper(register: Register, instruction_prefix: &[u8]) {
            let mem_addr: u64 = 0x1337;
            let mem_value: u64 = 0x13371337deadbeef;
            let ip: u64 = 0x1000;
            let cpu_id = 0;

            let mut instruction_bytes = Vec::new();
            // instruction prefix with specified register
            instruction_bytes.extend(instruction_prefix);
            // 64-bit memory operand
            instruction_bytes.extend([
                mem_addr.to_le_bytes()[0],
                mem_addr.to_le_bytes()[1],
                0,
                0,
                0,
                0,
                0,
                0,
            ]);

            let memory: [u8; 8] = mem_value.to_le_bytes();
            let mut vmm = MockVmm::new(ip, vec![], Some((mem_addr, &memory)));
            assert!(vmm.emulate_first_insn(cpu_id, &instruction_bytes).is_ok());

            let ax: u64 = vmm.cpu_state(cpu_id).unwrap().read_reg(register).unwrap();

            match register {
                Register::AX => {
                    assert_eq!(ax as u16, mem_value as u16);
                }
                Register::EAX => {
                    assert_eq!(ax as u32, mem_value as u32);
                }
                Register::RAX => {
                    assert_eq!(ax, mem_value);
                }
                _ => panic!(),
            }
        }

        for (register, instruction_prefix) in test_inputs {
            helper(register, instruction_prefix)
        }
    }

    #[test]
    // movabs ds:0x1337, ax
    // movabs ds:0x1337, eax
    // movabs ds:0x1337, rax
    fn test_mov_ax_memoff() {
        let test_inputs: [(Register, &[u8]); 3] = [
            (Register::AX, &[0x66, 0xa3]),
            (Register::EAX, &[0xa3]),
            (Register::RAX, &[0x48, 0xa3]),
        ];

        // Constructs the instruction with the provided inputs and emulates it.
        fn helper(register: Register, instruction_prefix: &[u8]) {
            let mem_addr: u64 = 0x1337;
            let ax: u64 = 0x13371337deadbeef;
            let ip: u64 = 0x1000;
            let cpu_id = 0;

            let mut instruction_bytes = Vec::new();
            // instruction prefix with specified register
            instruction_bytes.extend(instruction_prefix);
            // 64-bit memory operand
            instruction_bytes.extend([
                mem_addr.to_le_bytes()[0],
                mem_addr.to_le_bytes()[1],
                0,
                0,
                0,
                0,
                0,
                0,
            ]);

            let mut vmm = MockVmm::new(ip, vec![(Register::RAX, ax)], None);
            assert!(vmm.emulate_first_insn(cpu_id, &instruction_bytes).is_ok());

            match register {
                Register::AX => {
                    let mut memory: [u8; 2] = [0; 2];
                    vmm.read_memory(mem_addr, &mut memory).unwrap();
                    assert_eq!(u16::from_le_bytes(memory), ax as u16);
                }
                Register::EAX => {
                    let mut memory: [u8; 4] = [0; 4];
                    vmm.read_memory(mem_addr, &mut memory).unwrap();
                    assert_eq!(u32::from_le_bytes(memory), ax as u32);
                }
                Register::RAX => {
                    let mut memory: [u8; 8] = [0; 8];
                    vmm.read_memory(mem_addr, &mut memory).unwrap();
                    assert_eq!(u64::from_le_bytes(memory), ax);
                }
                _ => panic!(),
            }
        }

        for (register, instruction_prefix) in test_inputs {
            helper(register, instruction_prefix)
        }
    }
}