//
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#![allow(non_camel_case_types)]

//
// MOV-Move
// SDM Volume 1, Chapter 4.3
// Copies the second operand (source operand) to the first operand (destination operand).
//

use crate::arch::emulator::{EmulationError, PlatformEmulator};
use crate::arch::x86::emulator::instructions::*;
use crate::arch::x86::Exception;

macro_rules! mov_rm_r {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let src_reg_value = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$bound>(),
                state,
                platform,
                src_reg_value,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}

macro_rules! mov_rm_imm {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let imm = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$bound>(),
                state,
                platform,
                imm,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}
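
// Zero extension is implicit here: get_op is expected to return the narrower
// source operand already zero-filled to the emulator's native integer width,
// so writing it back with the wider destination size clears the upper bytes.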
macro_rules! movzx {
    ($dest_op_size:ty, $src_op_size:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let src_value = get_op(
                &insn,
                1,
                std::mem::size_of::<$src_op_size>(),
                state,
                platform,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$dest_op_size>(),
                state,
                platform,
                src_value,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}

// MOV r/rm is a special case of MOVZX, where both operands have the same size.
macro_rules! mov_r_rm {
    ($op_size:ty) => {
        movzx!($op_size, $op_size);
    };
}

macro_rules! mov_r_imm {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let imm = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            set_op(
                &insn,
                0,
                std::mem::size_of::<$bound>(),
                state,
                platform,
                imm,
            )
            .map_err(EmulationError::PlatformEmulationError)?;

            Ok(())
        }
    };
}

pub struct Mov_r8_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r8_rm8 {
    mov_r_rm!(u8);
}

pub struct Mov_r8_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r8_imm8 {
    mov_r_imm!(u8);
}

pub struct Mov_r16_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r16_rm16 {
    mov_r_rm!(u16);
}

pub struct Mov_r16_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r16_imm16 {
    mov_r_imm!(u16);
}

pub struct Mov_r32_rm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r32_rm32 {
    mov_r_rm!(u32);
}

pub struct Mov_r32_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r32_imm32 {
    mov_r_imm!(u32);
}

pub struct Mov_r64_rm64;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r64_rm64 {
    mov_r_rm!(u64);
}

pub struct Mov_r64_imm64;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_r64_imm64 {
    mov_r_imm!(u64);
}

pub struct Mov_rm8_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm8_imm8 {
    mov_rm_imm!(u8);
}

pub struct Mov_rm8_r8;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm8_r8 {
    mov_rm_r!(u8);
}

pub struct Mov_rm16_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm16_imm16 {
    mov_rm_imm!(u16);
}

pub struct Mov_rm16_r16;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm16_r16 {
    mov_rm_r!(u16);
}

pub struct Mov_rm32_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm32_imm32 {
    mov_rm_imm!(u32);
}

pub struct Mov_rm32_r32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm32_r32 {
    mov_rm_r!(u32);
}

pub struct Mov_rm64_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm64_imm32 {
    mov_rm_imm!(u32);
}

pub struct Mov_rm64_r64;
impl<T: CpuStateManager> InstructionHandler<T> for Mov_rm64_r64 {
    mov_rm_r!(u64);
}

// MOVZX
pub struct Movzx_r16_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r16_rm8 {
    movzx!(u16, u8);
}

pub struct Movzx_r32_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r32_rm8 {
    movzx!(u32, u8);
}

pub struct Movzx_r64_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r64_rm8 {
    movzx!(u64, u8);
}

pub struct Movzx_r32_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r32_rm16 {
    movzx!(u32, u16);
}

pub struct Movzx_r64_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Movzx_r64_rm16 {
    movzx!(u64, u16);
}
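
// The tests below drive each handler through MockVmm: registers and an optional
// memory region are seeded up front, the raw instruction bytes are emulated, and
// the resulting CPU or memory state is then checked against the expected values.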
#[cfg(test)]
mod tests {
    #![allow(unused_mut)]
    use super::*;
    use crate::arch::x86::emulator::mock_vmm::*;

    #[test]
    // mov rax,rbx
    fn test_mov_r64_r64() {
        let rbx: u64 = 0x8899aabbccddeeff;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0x89, 0xd8];
        let mut vmm = MockVmm::new(ip, vec![(Register::RBX, rbx)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, rbx);
    }

    #[test]
    // mov rax,0x1122334411223344
    fn test_mov_r64_imm64() {
        let imm64: u64 = 0x1122334411223344;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0xb8, 0x44, 0x33, 0x22, 0x11, 0x44, 0x33, 0x22, 0x11];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, imm64);
    }

    #[test]
    // mov rax, [rax+rax]
    fn test_mov_r64_m64() {
        let target_rax: u64 = 0x1234567812345678;
        let mut rax: u64 = 0x100;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [0x48, 0x8b, 0x04, 0x00];
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], Some((rax + rax, &memory)));
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        rax = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, target_rax);
    }

    #[test]
    // mov al,0x11
    fn test_mov_r8_imm8() {
        let imm8: u8 = 0x11;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0xb0, 0x11];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let al = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::AL)
            .unwrap();
        assert_eq!(al as u8, imm8);
    }

    #[test]
    // mov eax,0x11
    fn test_mov_r32_imm8() {
        let imm8: u8 = 0x11;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0xb8, 0x11, 0x00, 0x00, 0x00];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax as u8, imm8);
    }

    #[test]
    // mov rax,0x11223344
    fn test_mov_r64_imm32() {
        let imm32: u32 = 0x11223344;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0xc7, 0xc0, 0x44, 0x33, 0x22, 0x11];
        let mut vmm = MockVmm::new(ip, vec![], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, imm32 as u64);
    }

    #[test]
    // mov byte ptr [rax],dh
    fn test_mov_m8_r8() {
        let rax: u64 = 0x100;
        let dh: u8 = 0x99;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x88, 0x30];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::DH, dh.into())],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 1] = [0; 1];
        vmm.read_memory(rax, &mut memory).unwrap();

        assert_eq!(u8::from_le_bytes(memory), dh);
    }

    #[test]
    // mov dword ptr [rax],esi
    fn test_mov_m32_r32() {
        let rax: u64 = 0x100;
        let esi: u32 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x89, 0x30];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::ESI, esi.into())],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 4] = [0; 4];
        vmm.read_memory(rax, &mut memory).unwrap();

        assert_eq!(u32::from_le_bytes(memory), esi);
    }
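
    // Illustrative 64-bit store counterpart to the 8-bit and 32-bit store tests
    // above, assuming Mov_rm64_r64 is wired into the instruction map like its
    // siblings.
    #[test]
    // mov qword ptr [rax],rbx
    fn test_mov_m64_r64() {
        let rax: u64 = 0x100;
        let rbx: u64 = 0x1122334455667788;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0x89, 0x18];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::RBX, rbx)],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 8] = [0; 8];
        vmm.read_memory(rax, &mut memory).unwrap();

        assert_eq!(u64::from_le_bytes(memory), rbx);
    }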

    #[test]
    // mov dword ptr [rax+0x00000001],edi
    fn test_mov_m32imm32_r32() {
        let rax: u64 = 0x100;
        let displacement: u64 = 0x1;
        let edi: u32 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x89, 0x3c, 0x05, 0x01, 0x00, 0x00, 0x00];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax), (Register::EDI, edi.into())],
            None,
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let mut memory: [u8; 4] = [0; 4];
        vmm.read_memory(rax + displacement, &mut memory).unwrap();

        assert_eq!(u32::from_le_bytes(memory), edi);
    }

    #[test]
    // mov eax,dword ptr [rax+10h]
    fn test_mov_r32_m32imm32() {
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let eax: u32 = 0xaabbccdd;
        let memory: [u8; 4] = eax.to_le_bytes();
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x8b, 0x40, 0x10];
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax)],
            Some((rax + displacement, &memory)),
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let new_eax = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(new_eax, eax as u64);
    }

    #[test]
    // mov al,byte ptr [rax+10h]
    fn test_mov_r8_m32imm32() {
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let al: u8 = 0xaa;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x8a, 0x40, 0x10];
        let memory: [u8; 1] = al.to_le_bytes();
        let mut vmm = MockVmm::new(
            ip,
            vec![(Register::RAX, rax)],
            Some((rax + displacement, &memory)),
        );
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let new_al = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::AL)
            .unwrap();
        assert_eq!(new_al, al as u64);
    }

    #[test]
    // mov rax, 0x100
    // mov rbx, qword ptr [rax+10h]
    fn test_mov_r64_imm64_and_r64_m64() {
        let target_rax: u64 = 0x1234567812345678;
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [
            0x48, 0xc7, 0xc0, 0x00, 0x01, 0x00, 0x00, // mov rax, 0x100
            0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
        ];
        let mut vmm = MockVmm::new(ip, vec![], Some((rax + displacement, &memory)));
        assert!(vmm.emulate_insn(cpu_id, &insn, Some(2)).is_ok());

        let rbx: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RBX)
            .unwrap();
        assert_eq!(rbx, target_rax);
    }

    #[test]
    // mov rax, 0x100
    // mov rbx, qword ptr [rax+10h]
    fn test_mov_r64_imm64_and_r64_m64_first_insn() {
        let target_rax: u64 = 0x1234567812345678;
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [
            0x48, 0xc7, 0xc0, 0x00, 0x01, 0x00, 0x00, // mov rax, 0x100
            0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
        ];

        let mut vmm = MockVmm::new(ip, vec![], Some((rax + displacement, &memory)));
        // Only run the first instruction.
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        assert_eq!(ip + 7, vmm.cpu_state(cpu_id).unwrap().ip());

        let new_rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, new_rax);
    }

    #[test]
    // mov rax, 0x100
    // mov rbx, qword ptr [rax+10h]
    // mov rax, 0x200
    fn test_mov_r64_imm64_and_r64_m64_two_insns() {
        let target_rax: u64 = 0x1234567812345678;
        let rax: u64 = 0x100;
        let displacement: u64 = 0x10;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let memory: [u8; 8] = target_rax.to_le_bytes();
        let insn = [
            0x48, 0xc7, 0xc0, 0x00, 0x01, 0x00, 0x00, // mov rax, 0x100
            0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
            0x48, 0xc7, 0xc0, 0x00, 0x02, 0x00, 0x00, // mov rax, 0x200
        ];

        let mut vmm = MockVmm::new(ip, vec![], Some((rax + displacement, &memory)));
        // Run the first 2 instructions.
        assert!(vmm.emulate_insn(cpu_id, &insn, Some(2)).is_ok());

        assert_eq!(ip + 7 + 4, vmm.cpu_state(cpu_id).unwrap().ip());

        let rbx: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RBX)
            .unwrap();
        assert_eq!(rbx, target_rax);

        // Check that rax is still at 0x100
        let new_rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, new_rax);
    }

    #[test]
    // movzx eax, bl
    fn test_movzx_r32_r8l() {
        let bx: u16 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x0f, 0xb6, 0xc3];
        let mut vmm = MockVmm::new(ip, vec![(Register::BX, bx as u64)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax, (bx & 0xff) as u64);
    }

    #[test]
    // movzx eax, bh
    fn test_movzx_r32_r8h() {
        let bx: u16 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x0f, 0xb6, 0xc7];
        let mut vmm = MockVmm::new(ip, vec![(Register::BX, bx as u64)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax, (bx >> 8) as u64);
    }
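
    // Illustrative 64-bit destination variant of the register MOVZX tests above,
    // assuming Movzx_r64_rm16 is wired into the instruction map like its siblings.
    #[test]
    // movzx rax, bx
    fn test_movzx_r64_r16() {
        let bx: u16 = 0x8899;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0x0f, 0xb7, 0xc3];
        let mut vmm = MockVmm::new(ip, vec![(Register::BX, bx as u64)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::RAX)
            .unwrap();
        assert_eq!(rax, bx as u64);
    }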

    #[test]
    // movzx eax, byte ptr [rbx]
    fn test_movzx_r32_m8() {
        let rbx: u64 = 0x100;
        let value: u8 = 0xaa;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x0f, 0xb6, 0x03];
        let memory: [u8; 1] = value.to_le_bytes();
        let mut vmm = MockVmm::new(ip, vec![(Register::RBX, rbx)], Some((rbx, &memory)));
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let eax: u64 = vmm
            .cpu_state(cpu_id)
            .unwrap()
            .read_reg(Register::EAX)
            .unwrap();
        assert_eq!(eax, value as u64);
    }
}