//
// Copyright © 2020 Microsoft
//
// SPDX-License-Identifier: Apache-2.0
//

#![allow(non_camel_case_types, clippy::upper_case_acronyms)]

//
// CMP-Compare Two Operands
//

use crate::arch::x86::emulator::instructions::*;
use crate::arch::x86::regs::*;

// CMP affects OF, SF, ZF, AF, PF and CF
const FLAGS_MASK: u64 = CF | PF | AF | ZF | SF | OF;

// TODO: Switch to inline asm when that's stable. Executing CMP (or any arithmetic instruction)
// natively and extracting RFLAGS will be much faster and make the code simpler.
fn calc_rflags_cpazso(op0: u64, op1: u64, op_size: usize) -> u64 {
    let op_bits = op_size * 8;
    let msb_shift = op_bits - 1;
    // CMP is the same as SUB.
    let result = op0.wrapping_sub(op1);

    // Carry-out (borrow) vector for SUB: bit i is the borrow produced at bit position i.
    let cout = (!op0 & op1) | ((!op0 ^ op1) & result);

    let cf = ((cout >> msb_shift) & 0x1) << CF_SHIFT;

    // PF only needs the least significant byte. XOR its higher 4 bits with its lower 4 bits, then
    // use the resulting nibble as an index into the 0x9669 parity lookup table.
    let pf = ((0x9669 >> ((result ^ (result >> 4)) & 0xf)) & 0x1) << PF_SHIFT;

    // AF cares about the lowest 4 bits (nibble), i.e. the borrow out of bit 3.
    let af = ((cout >> 3) & 0x1) << AF_SHIFT;

    let zf = u64::from(result & (!0u64 >> (63 - msb_shift)) == 0) << ZF_SHIFT;

    let sf = ((result >> msb_shift) & 0x1) << SF_SHIFT;

    // For subtraction, overflow happens when the operands have different signs and the result's
    // sign differs from op0's.
    let of = ((((op0 ^ op1) & (op0 ^ result)) >> msb_shift) & 0x1) << OF_SHIFT;

    cf | pf | af | zf | sf | of
}
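
// The 0x9669 constant used for PF above is a 16-entry lookup table: bit n is set when n contains
// an even number of set bits. As an illustrative check, the test below brute-forces every byte
// value and verifies that folding the high nibble into the low nibble and indexing the table
// matches a direct popcount-based definition of even parity.
#[cfg(test)]
#[test]
fn test_parity_lookup_matches_popcount() {
    for byte in 0u64..=0xff {
        // Same nibble folding as the PF computation in calc_rflags_cpazso().
        let from_lookup = (0x9669u64 >> ((byte ^ (byte >> 4)) & 0xf)) & 0x1;
        // PF is defined as: the least significant byte of the result has even parity.
        let from_popcount = u64::from(byte.count_ones() % 2 == 0);
        assert_eq!(from_lookup, from_popcount, "byte {:#x}", byte);
    }
}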

macro_rules! cmp_rm_r {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let op0_value = get_op(&insn, 0, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;
            let op1_value = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            let cpazso = calc_rflags_cpazso(op0_value, op1_value, std::mem::size_of::<$bound>());

            state.set_flags((state.flags() & !FLAGS_MASK) | cpazso);

            Ok(())
        }
    };
}

macro_rules! cmp_r_rm {
    ($bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let op0_value = get_op(&insn, 0, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;
            let op1_value = get_op(&insn, 1, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            let cpazso = calc_rflags_cpazso(op0_value, op1_value, std::mem::size_of::<$bound>());

            state.set_flags((state.flags() & !FLAGS_MASK) | cpazso);

            Ok(())
        }
    };
}

macro_rules! cmp_rm_imm {
    ($imm:ty, $bound:ty) => {
        fn emulate(
            &self,
            insn: &Instruction,
            state: &mut T,
            platform: &mut dyn PlatformEmulator<CpuState = T>,
        ) -> Result<(), EmulationError<Exception>> {
            let op0_value = get_op(&insn, 0, std::mem::size_of::<$bound>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;
            let op1_value = get_op(&insn, 1, std::mem::size_of::<$imm>(), state, platform)
                .map_err(EmulationError::PlatformEmulationError)?;

            let cpazso = calc_rflags_cpazso(op0_value, op1_value, std::mem::size_of::<$bound>());

            state.set_flags((state.flags() & !FLAGS_MASK) | cpazso);

            Ok(())
        }
    };
}

pub struct Cmp_rm64_r64;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm64_r64 {
    cmp_rm_r!(u64);
}

pub struct Cmp_rm32_r32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm32_r32 {
    cmp_rm_r!(u32);
}

pub struct Cmp_rm16_r16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm16_r16 {
    cmp_rm_r!(u16);
}

pub struct Cmp_rm8_r8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm8_r8 {
    cmp_rm_r!(u8);
}

pub struct Cmp_r64_rm64;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r64_rm64 {
    cmp_r_rm!(u64);
}

pub struct Cmp_r32_rm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r32_rm32 {
    cmp_r_rm!(u32);
}

pub struct Cmp_r16_rm16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r16_rm16 {
    cmp_r_rm!(u16);
}

pub struct Cmp_r8_rm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_r8_rm8 {
    cmp_r_rm!(u8);
}

pub struct Cmp_AL_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_AL_imm8 {
    cmp_rm_imm!(u8, u8);
}

pub struct Cmp_AX_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_AX_imm16 {
    cmp_rm_imm!(u16, u16);
}

pub struct Cmp_EAX_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_EAX_imm32 {
    cmp_rm_imm!(u32, u32);
}

pub struct Cmp_RAX_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_RAX_imm32 {
    cmp_rm_imm!(u32, u64);
}

pub struct Cmp_rm8_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm8_imm8 {
    cmp_rm_imm!(u8, u8);
}

pub struct Cmp_rm16_imm16;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm16_imm16 {
    cmp_rm_imm!(u16, u16);
}

pub struct Cmp_rm32_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm32_imm32 {
    cmp_rm_imm!(u32, u32);
}

pub struct Cmp_rm64_imm32;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm64_imm32 {
    cmp_rm_imm!(u32, u64);
}

pub struct Cmp_rm16_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm16_imm8 {
    cmp_rm_imm!(u8, u16);
}

pub struct Cmp_rm32_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm32_imm8 {
    cmp_rm_imm!(u8, u32);
}

pub struct Cmp_rm64_imm8;
impl<T: CpuStateManager> InstructionHandler<T> for Cmp_rm64_imm8 {
    cmp_rm_imm!(u8, u64);
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::arch::x86::emulator::mock_vmm::*;
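
    // Illustrative direct check of the flag arithmetic, independent of instruction fetch and
    // decode: a 64-bit `cmp 0, 0x101` borrows out of both the low nibble and the most
    // significant bit, and produces a negative, non-zero result whose low byte (0xff) has even
    // parity, so CF, AF, SF and PF are set while ZF and OF stay clear. This mirrors the
    // (0x0, 0x101, 0b1001_0101) entry in test_cmp_64 below.
    #[test]
    fn test_calc_rflags_cpazso_64() {
        assert_eq!(0b1001_0101, calc_rflags_cpazso(0x0, 0x101, 8));
    }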

    #[test]
    // cmp ah,al
    fn test_cmp_rm8_r8_1() {
        let rax: u64 = 0x0;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x38, 0xc4]; // cmp ah,al
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], None);
        vmm.emulate_first_insn(cpu_id, &insn).unwrap();

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b1000100, rflags);
    }

    #[test]
    // cmp eax,100
    fn test_cmp_rm32_imm8_1() {
        let rax: u64 = 0xabcdef;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x83, 0xf8, 0x64]; // cmp eax,100
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], None);
        vmm.emulate_first_insn(cpu_id, &insn).unwrap();

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b100, rflags);
    }

    #[test]
    // cmp eax,-1
    fn test_cmp_rm32_imm8_2() {
        let rax: u64 = 0xabcdef;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x83, 0xf8, 0xff]; // cmp eax,-1
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax)], None);
        vmm.emulate_first_insn(cpu_id, &insn).unwrap();

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b101, rflags);
    }

    #[test]
    // cmp rax,rbx
    fn test_cmp_rm64_r64() {
        let rax: u64 = 0xabcdef;
        let rbx: u64 = 0x1234;
        let ip: u64 = 0x1000;
        let cpu_id = 0;
        let insn = [0x48, 0x39, 0xd8, 0x00, 0xc3]; // cmp rax,rbx + two bytes of garbage
        let mut vmm = MockVmm::new(ip, vec![(Register::RAX, rax), (Register::RBX, rbx)], None);
        vmm.emulate_first_insn(cpu_id, &insn).unwrap();

        let rflags: u64 = vmm.cpu_state(cpu_id).unwrap().flags() & FLAGS_MASK;
        assert_eq!(0b100, rflags);
    }

    #[test]
    fn test_cmp_64() {
        let data = [
            (0xabcdef, 0x1234, 0b100),
            (0x0, 0x101, 0b1001_0101),
            (0x0, 0x8000_0000_0000_0000, 0b1000_1000_0101),
            (0x1234abcd, 0x1234abcd, 0b100_0100),
            (0x1234abcd, 0xdeadbeef, 0b1001_0101),
            (0xffff_ffff_ffff_ffff, 0xdeadbeef, 0b1000_0000),
            (0xffff_ffff_ffff_ffff, 0x0, 0b1000_0100),
        ];

        for d in data.iter() {
            let rax = d.0;
            let rbx = d.1;
            let insn = [0x48, 0x39, 0xd8]; // cmp rax,rbx
            let mut vmm = MockVmm::new(
                0x1000,
                vec![(Register::RAX, rax), (Register::RBX, rbx)],
                None,
            );
            vmm.emulate_first_insn(0, &insn).unwrap();

            let rflags: u64 = vmm.cpu_state(0).unwrap().flags() & FLAGS_MASK;
            assert_eq!(d.2, rflags);
        }
    }

    #[test]
    fn test_cmp_32() {
        let data = [
            (0xabcdef, 0x1234, 0b100),
            (0x0, 0x101, 0b1001_0101),
            (0x0, 0x8000_0000_0000_0000, 0b100_0100), // Same as cmp 0,0 due to truncation
            (0x1234abcd, 0x1234abcd, 0b100_0100),
            (0x1234abcd, 0xdeadbeef, 0b1_0101),
            (0xffff_ffff_ffff_ffff, 0xdeadbeef, 0b0), // Same as cmp 0xffffffff,0xdeadbeef
            (0xffff_ffff, 0x0, 0b1000_0100),
        ];

        for d in data.iter() {
            let rax = d.0;
            let rbx = d.1;
            let insn = [0x39, 0xd8]; // cmp eax,ebx
            let mut vmm = MockVmm::new(
                0x1000,
                vec![(Register::RAX, rax), (Register::RBX, rbx)],
                None,
            );
            vmm.emulate_first_insn(0, &insn).unwrap();

            let rflags: u64 = vmm.cpu_state(0).unwrap().flags() & FLAGS_MASK;
            assert_eq!(d.2, rflags);
        }
    }
}