//
// Copyright © 2020 Microsoft
//
// SPDX-License-Identifier: Apache-2.0
//

#![allow(non_camel_case_types, clippy::upper_case_acronyms)]

//
// CMP-Compare Two Operands
//

use crate::arch::x86::emulator::instructions::*;
use crate::arch::x86::regs::*;

// CMP affects OF, SF, ZF, AF, PF and CF
const FLAGS_MASK: u64 = CF | PF | AF | ZF | SF | OF;

// TODO: Switch to inline asm when that's stable. Executing CMP (or any arithmetic instructions)
// natively and extracting RFLAGS will be much faster and make the code simpler.
fn calc_rflags_cpazso(op0: u64, op1: u64, op_size: usize) -> u64 {
    let op_bits = op_size * 8;
    let msb_shift = op_bits - 1;
    // CMP performs the same operation as SUB; only the flags are kept.
    let result = op0.wrapping_sub(op1);

    // Carry-out vector for SUB: bit i is the borrow out of bit i of op0 - op1.
    let cout = (!op0 & op1) | ((!op0 ^ op1) & result);

    let cf = ((cout >> msb_shift) & 0x1) << CF_SHIFT;

    // PF only depends on the least significant byte of the result. Fold its high nibble into its
    // low nibble with XOR, then use that 4-bit value to index 0x9669, a 16-entry table whose bit n
    // is 1 exactly when n has an even number of set bits.
    let pf = ((0x9669 >> ((result ^ (result >> 4)) & 0xf)) & 0x1) << PF_SHIFT;

    // AF only cares about the lowest nibble: it is the borrow out of bit 3, so the shift is 3
    // instead of msb_shift.
    let af = ((cout >> 3) & 0x1) << AF_SHIFT;

    let zf = u64::from(result & (!0u64 >> (63 - msb_shift)) == 0) << ZF_SHIFT;

    let sf = ((result >> msb_shift) & 0x1) << SF_SHIFT;

    // For SUB, overflow happens when the operands have different signs and the result's sign
    // differs from op0's.
    let of = ((((op0 ^ op1) & (op0 ^ result)) >> msb_shift) & 0x1) << OF_SHIFT;

    cf | pf | af | zf | sf | of
}
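
// Worked example, mirroring the (0xabcdef, 0x1234, 0b100) row of test_cmp_64 below:
// 0xabcdef - 0x1234 = 0xabbbbb, so there is no borrow (CF = 0, AF = 0), the result is
// non-zero (ZF = 0) with a clear sign bit (SF = 0, OF = 0), and the low byte 0xbb has
// an even number of set bits, so only PF is set and the function returns 0b100.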

macro_rules! cmp_rm_r {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let op0_value = get_op(&insn, 0, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;
            let op1_value = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            let cpazso = calc_rflags_cpazso(op0_value, op1_value, std::mem::size_of::<$bound>());

            state.set_flags((state.flags() & !FLAGS_MASK) | cpazso);

            Ok(())
        }
    };
}

macro_rules! cmp_r_rm {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let op0_value = get_op(&insn, 0, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;
            let op1_value = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            let cpazso = calc_rflags_cpazso(op0_value, op1_value, std::mem::size_of::<$bound>());

            state.set_flags((state.flags() & !FLAGS_MASK) | cpazso);

            Ok(())
        }
    };
}

macro_rules! cmp_rm_imm {
    ($imm:ty, $bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let op0_value = get_op(&insn, 0, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;
            let op1_value = get_op(&insn, 1, std::mem::size_of::<$imm>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            let cpazso = calc_rflags_cpazso(op0_value, op1_value, std::mem::size_of::<$bound>());

            state.set_flags((state.flags() & !FLAGS_MASK) | cpazso);

            Ok(())
        }
    };
}

pub struct Cmp_rm64_r64;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm64_r64 {
    cmp_rm_r!(u64);
}

pub struct Cmp_rm32_r32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm32_r32 {
    cmp_rm_r!(u32);
}

pub struct Cmp_rm16_r16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm16_r16 {
    cmp_rm_r!(u16);
}

pub struct Cmp_rm8_r8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm8_r8 {
    cmp_rm_r!(u8);
}

pub struct Cmp_r64_rm64;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r64_rm64 {
    cmp_r_rm!(u64);
}

pub struct Cmp_r32_rm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r32_rm32 {
    cmp_r_rm!(u32);
}

pub struct Cmp_r16_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r16_rm16 {
    cmp_r_rm!(u16);
}

pub struct Cmp_r8_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r8_rm8 {
    cmp_r_rm!(u8);
}

pub struct Cmp_AL_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_AL_imm8 {
    cmp_rm_imm!(u8, u8);
}

pub struct Cmp_AX_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_AX_imm16 {
    cmp_rm_imm!(u16, u16);
}

pub struct Cmp_EAX_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_EAX_imm32 {
    cmp_rm_imm!(u32, u32);
}

pub struct Cmp_RAX_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_RAX_imm32 {
    cmp_rm_imm!(u32, u64);
}

pub struct Cmp_rm8_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm8_imm8 {
    cmp_rm_imm!(u8, u8);
}

pub struct Cmp_rm16_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm16_imm16 {
    cmp_rm_imm!(u16, u16);
}

pub struct Cmp_rm32_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm32_imm32 {
    cmp_rm_imm!(u32, u32);
}

pub struct Cmp_rm64_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm64_imm32 {
    cmp_rm_imm!(u32, u64);
}

pub struct Cmp_rm16_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm16_imm8 {
    cmp_rm_imm!(u8, u16);
}

pub struct Cmp_rm32_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm32_imm8 {
    cmp_rm_imm!(u8, u32);
}

pub struct Cmp_rm64_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm64_imm8 {
    cmp_rm_imm!(u8, u64);
}

#[cfg(test)]
mod tests {
    #![allow(unused_mut)]

    use super::*;
    use crate::arch::x86::emulator::mock_vmm::*;

    #[test]
    // cmp ah,al
    fn test_cmp_rm8_r8_1() {
        let rax: u64 = 0x0;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x38, 0xc4]; // cmp ah,al
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b1000100, rflags);
    }

    #[test]
    // cmp eax,100
    fn test_cmp_rm32_imm8_1() {
        let rax: u64 = 0xabcdef;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x83, 0xf8, 0x64]; // cmp eax,100
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b100, rflags);
    }

    #[test]
    // cmp eax,-1
    fn test_cmp_rm32_imm8_2() {
        let rax: u64 = 0xabcdef;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x83, 0xf8, 0xff]; // cmp eax,-1
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b101, rflags);
    }

    #[test]
    // cmp rax,rbx
    fn test_cmp_rm64_r64() {
        let rax: u64 = 0xabcdef;
        let rbx: u64 = 0x1234;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0x39, 0xd8, 0x00, 0xc3]; // cmp rax,rbx + two bytes garbage
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax), (Register::RBX, rbx)], None);
        assert!(vmm.emulate_first_insn(cpu_id, &insn).is_ok());

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b100, rflags);
    }

    #[test]
    fn test_cmp_64() {
        let data = [
            (0xabcdef, 0x1234, 0b100),
            (0x0, 0x101, 0b1001_0101),
            (0x0, 0x8000_0000_0000_0000, 0b1000_1000_0101),
            (0x1234abcd, 0x1234abcd, 0b100_0100),
            (0x1234abcd, 0xdeadbeef, 0b1001_0101),
            (0xffff_ffff_ffff_ffff, 0xdeadbeef, 0b1000_0000),
            (0xffff_ffff_ffff_ffff, 0x0, 0b1000_0100),
        ];

        for d in data.iter() {
            let rax = d.0;
            let rbx = d.1;
            let insn = [0x48, 0x39, 0xd8]; // cmp rax,rbx
            let mut vmm = MockVmm::new(
                0x1000,
                vec![(Register::RAX, rax), (Register::RBX, rbx)],
                None,
            );
            assert!(vmm.emulate_first_insn(0, &insn).is_ok());

            let rflags: u64 = vmm.cpu_state(0).unwrap().flags() & FLAGS_MASK;
            assert_eq!(d.2, rflags);
        }
    }
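
    // A direct sanity check of calc_rflags_cpazso(), added here as an illustration: it bypasses
    // the mock VMM and the instruction decoder entirely. The first two expected masks reuse rows
    // from the test_cmp_64 data above; the 16-bit equal-operand case follows the same pattern as
    // the other equal-operand rows (zero result, so ZF and PF). The test name itself is new.
    #[test]
    fn test_calc_rflags_cpazso_direct() {
        // 64-bit: 0xabcdef - 0x1234 leaves only PF set.
        assert_eq!(0b100, calc_rflags_cpazso(0xabcdef, 0x1234, 8));
        // 64-bit: 0x0 - 0x101 borrows, setting CF, PF, AF and SF.
        assert_eq!(0b1001_0101, calc_rflags_cpazso(0x0, 0x101, 8));
        // 16-bit: equal operands give a zero result, so ZF and PF are set.
        assert_eq!(0b100_0100, calc_rflags_cpazso(0xabcd, 0xabcd, 2));
    }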

    #[test]
    fn test_cmp_32() {
        let data = [
            (0xabcdef, 0x1234, 0b100),
            (0x0, 0x101, 0b1001_0101),
            (0x0, 0x8000_0000_0000_0000, 0b100_0100), // Same as cmp 0,0 due to truncation
            (0x1234abcd, 0x1234abcd, 0b100_0100),
            (0x1234abcd, 0xdeadbeef, 0b1_0101),
            (0xffff_ffff_ffff_ffff, 0xdeadbeef, 0b0), // Same as cmp 0xffffffff,0xdeadbeef
            (0xffff_ffff, 0x0, 0b1000_0100),
        ];

        for d in data.iter() {
            let rax = d.0;
            let rbx = d.1;
            let insn = [0x39, 0xd8]; // cmp eax,ebx
            let mut vmm = MockVmm::new(
                0x1000,
                vec![(Register::RAX, rax), (Register::RBX, rbx)],
                None,
            );
            assert!(vmm.emulate_first_insn(0, &insn).is_ok());

            let rflags: u64 = vmm.cpu_state(0).unwrap().flags() & FLAGS_MASK;
            assert_eq!(d.2, rflags);
        }
    }
}