xref: /cloud-hypervisor/hypervisor/src/arch/x86/emulator/mod.rs (revision 2571e59438597f53aa4993cd70d6462fe1364ba7)
1 //
2 // Copyright © 2020 Intel Corporation
3 //
4 // SPDX-License-Identifier: Apache-2.0
5 //
6 
7 use crate::arch::emulator::{EmulationError, EmulationResult, PlatformEmulator, PlatformError};
8 use crate::arch::x86::emulator::instructions::*;
9 use crate::arch::x86::regs::{CR0_PE, EFER_LMA};
10 use crate::arch::x86::{
11     segment_type_expand_down, segment_type_ro, Exception, SegmentRegister, SpecialRegisters,
12     StandardRegisters,
13 };
14 use anyhow::Context;
15 use iced_x86::*;
16 
17 #[macro_use]
18 mod instructions;
19 
20 /// x86 CPU modes
21 #[derive(Debug, PartialEq, Eq)]
22 pub enum CpuMode {
23     /// Real mode
24     Real,
25 
26     /// Virtual 8086 mode
27     Virtual8086,
28 
29     /// 16-bit protected mode
30     Protected16,
31 
32     /// 32-bit protected mode
33     Protected,
34 
35     /// 64-bit mode, a.k.a. long mode
36     Long,
37 }
38 
39 /// CpuStateManager manages an x86 CPU state.
40 ///
41 /// Instruction emulation handlers get a mutable reference to a
42 /// `CpuStateManager` implementation, representing the current state of the
43 /// CPU against which they emulate an instruction stream. Handlers typically
44 /// modify the CPU state in place, and it is up to the handler caller to
45 /// commit those changes back by invoking the `PlatformEmulator`
46 /// implementation's `set_cpu_state()` method.
47 ///
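/// # Example
///
/// A minimal sketch of that flow, assuming `platform` implements
/// `PlatformEmulator` and `cpu_id` identifies the vCPU being emulated (both
/// are provided by the caller):
///
/// ```ignore
/// // Snapshot the vCPU state from the platform.
/// let mut state = platform.cpu_state(cpu_id)?;
/// // An instruction handler mutates the snapshot, e.g. loading a register.
/// state.write_reg(Register::EAX, 0x1234)?;
/// // The caller commits the modified state back to the platform.
/// platform.set_cpu_state(cpu_id, state)?;
/// ```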
48 pub trait CpuStateManager: Clone {
49     /// Reads a CPU register.
50     ///
51     /// # Arguments
52     ///
53     /// * `reg` - A general purpose, control or debug register.
54     fn read_reg(&self, reg: Register) -> Result<u64, PlatformError>;
55 
56     /// Writes to a CPU register.
57     ///
58     /// # Arguments
59     ///
60     /// * `reg` - A general purpose, control or debug register.
61     /// * `val` - The value to load.
62     fn write_reg(&mut self, reg: Register, val: u64) -> Result<(), PlatformError>;
63 
64     /// Reads a segment register.
65     ///
66     /// # Arguments
67     ///
68     /// * `reg` - A segment register.
69     fn read_segment(&self, reg: Register) -> Result<SegmentRegister, PlatformError>;
70 
71     /// Writes to a segment register.
72     ///
73     /// # Arguments
74     ///
75     /// * `reg` - A segment register.
76     /// * `segment_reg` - The segment register value to load.
77     fn write_segment(
78         &mut self,
79         reg: Register,
80         segment_reg: SegmentRegister,
81     ) -> Result<(), PlatformError>;
82 
83     /// Get the CPU instruction pointer.
84     fn ip(&self) -> u64;
85 
86     /// Set the CPU instruction pointer.
87     ///
88     /// # Arguments
89     ///
90     /// * `ip` - The CPU instruction pointer.
91     fn set_ip(&mut self, ip: u64);
92 
93     /// Get the CPU Extended Feature Enable Register.
94     fn efer(&self) -> u64;
95 
96     /// Set the CPU Extended Feature Enable Register.
97     ///
98     /// # Arguments
99     ///
100     /// * `efer` - The CPU EFER value.
101     fn set_efer(&mut self, efer: u64);
102 
103     /// Get the CPU flags.
104     fn flags(&self) -> u64;
105 
106     /// Set the CPU flags.
107     ///
108     /// # Arguments
109     ///
110     /// * `flags` - The CPU flags
111     fn set_flags(&mut self, flags: u64);
112 
113     /// Get the CPU mode.
114     fn mode(&self) -> Result<CpuMode, PlatformError>;
115 
116     /// Translate a logical (segmented) address into a linear (virtual) one.
117     ///
118     /// # Arguments
119     ///
120     /// * `segment` - Which segment to use for linearization
121     /// * `logical_addr` - The logical address to be translated
    /// * `write` - Whether the linearized address will be used for a write access
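    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `state` is a `CpuStateManager`
    /// implementation whose DS segment is flat (base 0, 4 GiB limit), as set
    /// up by the `mock_vmm` tests below; linearization is then the identity
    /// mapping:
    ///
    /// ```ignore
    /// let linear = state.linearize(Register::DS, 0x1000, false)?;
    /// assert_eq!(linear, 0x1000);
    /// ```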
122     fn linearize(
123         &self,
124         segment: Register,
125         logical_addr: u64,
126         write: bool,
127     ) -> Result<u64, PlatformError> {
128         let segment_register = self.read_segment(segment)?;
129         let mode = self.mode()?;
130 
131         match mode {
132             CpuMode::Long => {
133                 // TODO Check that we got a canonical address.
134                 Ok(logical_addr
135                     .checked_add(segment_register.base)
136                     .ok_or_else(|| {
137                         PlatformError::InvalidAddress(anyhow!(
138                             "Logical address {:#x} cannot be linearized with segment {:#x?}",
139                             logical_addr,
140                             segment_register
141                         ))
142                     })?)
143             }
144 
145             CpuMode::Protected | CpuMode::Real => {
146                 let segment_type = segment_register.segment_type();
147 
148                 // Must not write to a read-only segment.
149                 if segment_type_ro(segment_type) && write {
150                     return Err(PlatformError::InvalidAddress(anyhow!(
151                         "Cannot write to a read-only segment"
152                     )));
153                 }
154 
155                 let logical_addr = logical_addr & 0xffff_ffffu64;
156                 let mut segment_limit: u32 = if segment_register.granularity() != 0 {
157                     (segment_register.limit << 12) | 0xfff
158                 } else {
159                     segment_register.limit
160                 };
161 
162                 // Expand-down segment
163                 if segment_type_expand_down(segment_type) {
164                     if logical_addr >= segment_limit.into() {
165                         return Err(PlatformError::InvalidAddress(anyhow!(
166                             "{:#x} is off limits {:#x} (expand down)",
167                             logical_addr,
168                             segment_limit
169                         )));
170                     }
171 
172                     if segment_register.db() != 0 {
173                         segment_limit = 0xffffffff
174                     } else {
175                         segment_limit = 0xffff
176                     }
177                 }
178 
179                 if logical_addr > segment_limit.into() {
180                     return Err(PlatformError::InvalidAddress(anyhow!(
181                         "{:#x} is off limits {:#x}",
182                         logical_addr,
183                         segment_limit
184                     )));
185                 }
186 
187                 Ok(logical_addr + segment_register.base)
188             }
189 
190             _ => Err(PlatformError::UnsupportedCpuMode(anyhow!("{:?}", mode))),
191         }
192     }
193 }
194 
195 const REGISTER_MASK_64: u64 = 0xffff_ffff_ffff_ffffu64;
196 const REGISTER_MASK_32: u64 = 0xffff_ffffu64;
197 const REGISTER_MASK_16: u64 = 0xffffu64;
198 const REGISTER_MASK_8: u64 = 0xffu64;
199 
200 macro_rules! set_reg {
201     ($reg:expr, $mask:expr, $value:expr) => {
202         $reg = ($reg & $mask) | $value
203     };
204 }
205 
206 #[derive(Clone, Default, Debug)]
207 /// A minimal, emulated CPU state.
208 ///
209 /// Hypervisors needing x86 emulation can either use their own CPU state
210 /// structures and implement the `CpuStateManager` trait for them, or use
211 /// `EmulatorCpuState`. The latter implies creating a new `EmulatorCpuState`
212 /// instance for each platform `cpu_state()` call, which might be less
213 /// efficient.
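///
/// # Example
///
/// A minimal sketch of using `EmulatorCpuState` directly; it also illustrates
/// the SDM partial-register semantics implemented by `write_reg()` and
/// `read_reg()` (writing `AH` only touches bits 15:8 of `RAX`):
///
/// ```ignore
/// let mut state = EmulatorCpuState::default();
/// state.write_reg(Register::RAX, 0x1122_3344_5566_7788)?;
/// state.write_reg(Register::AH, 0xaa)?;
/// assert_eq!(state.read_reg(Register::RAX)?, 0x1122_3344_5566_aa88);
/// assert_eq!(state.read_reg(Register::AH)?, 0xaa);
/// ```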
214 pub struct EmulatorCpuState {
215     pub regs: StandardRegisters,
216     pub sregs: SpecialRegisters,
217 }
218 
219 impl CpuStateManager for EmulatorCpuState {
220     fn read_reg(&self, reg: Register) -> Result<u64, PlatformError> {
221         let mut reg_value: u64 = match reg {
222             Register::RAX | Register::EAX | Register::AX | Register::AL | Register::AH => {
223                 self.regs.rax
224             }
225             Register::RBX | Register::EBX | Register::BX | Register::BL | Register::BH => {
226                 self.regs.rbx
227             }
228             Register::RCX | Register::ECX | Register::CX | Register::CL | Register::CH => {
229                 self.regs.rcx
230             }
231             Register::RDX | Register::EDX | Register::DX | Register::DL | Register::DH => {
232                 self.regs.rdx
233             }
234             Register::RSP | Register::ESP | Register::SP => self.regs.rsp,
235             Register::RBP | Register::EBP | Register::BP => self.regs.rbp,
236             Register::RSI | Register::ESI | Register::SI | Register::SIL => self.regs.rsi,
237             Register::RDI | Register::EDI | Register::DI | Register::DIL => self.regs.rdi,
238             Register::R8 | Register::R8D | Register::R8W | Register::R8L => self.regs.r8,
239             Register::R9 | Register::R9D | Register::R9W | Register::R9L => self.regs.r9,
240             Register::R10 | Register::R10D | Register::R10W | Register::R10L => self.regs.r10,
241             Register::R11 | Register::R11D | Register::R11W | Register::R11L => self.regs.r11,
242             Register::R12 | Register::R12D | Register::R12W | Register::R12L => self.regs.r12,
243             Register::R13 | Register::R13D | Register::R13W | Register::R13L => self.regs.r13,
244             Register::R14 | Register::R14D | Register::R14W | Register::R14L => self.regs.r14,
245             Register::R15 | Register::R15D | Register::R15W | Register::R15L => self.regs.r15,
246             Register::CR0 => self.sregs.cr0,
247             Register::CR2 => self.sregs.cr2,
248             Register::CR3 => self.sregs.cr3,
249             Register::CR4 => self.sregs.cr4,
250             Register::CR8 => self.sregs.cr8,
251 
252             r => {
253                 return Err(PlatformError::InvalidRegister(anyhow!(
254                     "read_reg invalid GPR {:?}",
255                     r
256                 )))
257             }
258         };
259 
260         reg_value = if reg.is_gpr64() || reg.is_cr() {
261             reg_value
262         } else if reg.is_gpr32() {
263             reg_value & REGISTER_MASK_32
264         } else if reg.is_gpr16() {
265             reg_value & REGISTER_MASK_16
266         } else if reg.is_gpr8() {
267             if reg == Register::AH
268                 || reg == Register::BH
269                 || reg == Register::CH
270                 || reg == Register::DH
271             {
272                 (reg_value >> 8) & REGISTER_MASK_8
273             } else {
274                 reg_value & REGISTER_MASK_8
275             }
276         } else {
277             return Err(PlatformError::InvalidRegister(anyhow!(
278                 "read_reg invalid GPR {:?}",
279                 reg
280             )));
281         };
282 
283         debug!("Register read: {:#x} from {:?}", reg_value, reg);
284 
285         Ok(reg_value)
286     }
287 
288     fn write_reg(&mut self, reg: Register, val: u64) -> Result<(), PlatformError> {
289         debug!("Register write: {:#x} to {:?}", val, reg);
290 
291         // SDM Vol 1 - 3.4.1.1
292         //
293         // 8-bit and 16-bit operands generate an 8-bit or 16-bit result; the upper
294         // 56 or 48 bits (respectively) of the destination register are not modified.
295         // 32-bit operands generate a 32-bit result, zero-extended to 64 bits.
296         let (reg_value, mask): (u64, u64) = if reg.is_gpr64() || reg.is_cr() {
297             (val, !REGISTER_MASK_64)
298         } else if reg.is_gpr32() {
299             (val & REGISTER_MASK_32, !REGISTER_MASK_64)
300         } else if reg.is_gpr16() {
301             (val & REGISTER_MASK_16, !REGISTER_MASK_16)
302         } else if reg.is_gpr8() {
303             if reg == Register::AH
304                 || reg == Register::BH
305                 || reg == Register::CH
306                 || reg == Register::DH
307             {
308                 ((val & REGISTER_MASK_8) << 8, !(REGISTER_MASK_8 << 8))
309             } else {
310                 (val & REGISTER_MASK_8, !REGISTER_MASK_8)
311             }
312         } else {
313             return Err(PlatformError::InvalidRegister(anyhow!(
314                 "write_reg invalid register {:?}",
315                 reg
316             )));
317         };
318 
319         match reg {
320             Register::RAX | Register::EAX | Register::AX | Register::AL | Register::AH => {
321                 set_reg!(self.regs.rax, mask, reg_value);
322             }
323             Register::RBX | Register::EBX | Register::BX | Register::BL | Register::BH => {
324                 set_reg!(self.regs.rbx, mask, reg_value);
325             }
326             Register::RCX | Register::ECX | Register::CX | Register::CL | Register::CH => {
327                 set_reg!(self.regs.rcx, mask, reg_value);
328             }
329             Register::RDX | Register::EDX | Register::DX | Register::DL | Register::DH => {
330                 set_reg!(self.regs.rdx, mask, reg_value);
331             }
332             Register::RSP | Register::ESP | Register::SP => {
333                 set_reg!(self.regs.rsp, mask, reg_value)
334             }
335             Register::RBP | Register::EBP | Register::BP => {
336                 set_reg!(self.regs.rbp, mask, reg_value)
337             }
338             Register::RSI | Register::ESI | Register::SI | Register::SIL => {
339                 set_reg!(self.regs.rsi, mask, reg_value)
340             }
341             Register::RDI | Register::EDI | Register::DI | Register::DIL => {
342                 set_reg!(self.regs.rdi, mask, reg_value)
343             }
344             Register::R8 | Register::R8D | Register::R8W | Register::R8L => {
345                 set_reg!(self.regs.r8, mask, reg_value)
346             }
347             Register::R9 | Register::R9D | Register::R9W | Register::R9L => {
348                 set_reg!(self.regs.r9, mask, reg_value)
349             }
350             Register::R10 | Register::R10D | Register::R10W | Register::R10L => {
351                 set_reg!(self.regs.r10, mask, reg_value)
352             }
353             Register::R11 | Register::R11D | Register::R11W | Register::R11L => {
354                 set_reg!(self.regs.r11, mask, reg_value)
355             }
356             Register::R12 | Register::R12D | Register::R12W | Register::R12L => {
357                 set_reg!(self.regs.r12, mask, reg_value)
358             }
359             Register::R13 | Register::R13D | Register::R13W | Register::R13L => {
360                 set_reg!(self.regs.r13, mask, reg_value)
361             }
362             Register::R14 | Register::R14D | Register::R14W | Register::R14L => {
363                 set_reg!(self.regs.r14, mask, reg_value)
364             }
365             Register::R15 | Register::R15D | Register::R15W | Register::R15L => {
366                 set_reg!(self.regs.r15, mask, reg_value)
367             }
368             Register::CR0 => set_reg!(self.sregs.cr0, mask, reg_value),
369             Register::CR2 => set_reg!(self.sregs.cr2, mask, reg_value),
370             Register::CR3 => set_reg!(self.sregs.cr3, mask, reg_value),
371             Register::CR4 => set_reg!(self.sregs.cr4, mask, reg_value),
372             Register::CR8 => set_reg!(self.sregs.cr8, mask, reg_value),
373             _ => {
374                 return Err(PlatformError::InvalidRegister(anyhow!(
375                     "write_reg invalid register {:?}",
376                     reg
377                 )))
378             }
379         }
380 
381         Ok(())
382     }
383 
384     fn read_segment(&self, reg: Register) -> Result<SegmentRegister, PlatformError> {
385         if !reg.is_segment_register() {
386             return Err(PlatformError::InvalidRegister(anyhow!(
387                 "read_segment {:?} is not a segment register",
388                 reg
389             )));
390         }
391 
392         match reg {
393             Register::CS => Ok(self.sregs.cs),
394             Register::DS => Ok(self.sregs.ds),
395             Register::ES => Ok(self.sregs.es),
396             Register::FS => Ok(self.sregs.fs),
397             Register::GS => Ok(self.sregs.gs),
398             Register::SS => Ok(self.sregs.ss),
399             r => Err(PlatformError::InvalidRegister(anyhow!(
400                 "read_segment invalid register {:?}",
401                 r
402             ))),
403         }
404     }
405 
406     fn write_segment(
407         &mut self,
408         reg: Register,
409         segment_register: SegmentRegister,
410     ) -> Result<(), PlatformError> {
411         if !reg.is_segment_register() {
412             return Err(PlatformError::InvalidRegister(anyhow!("{:?}", reg)));
413         }
414 
415         match reg {
416             Register::CS => self.sregs.cs = segment_register,
417             Register::DS => self.sregs.ds = segment_register,
418             Register::ES => self.sregs.es = segment_register,
419             Register::FS => self.sregs.fs = segment_register,
420             Register::GS => self.sregs.gs = segment_register,
421             Register::SS => self.sregs.ss = segment_register,
422             r => return Err(PlatformError::InvalidRegister(anyhow!("{:?}", r))),
423         }
424 
425         Ok(())
426     }
427 
428     fn ip(&self) -> u64 {
429         self.regs.rip
430     }
431 
432     fn set_ip(&mut self, ip: u64) {
433         self.regs.rip = ip;
434     }
435 
436     fn efer(&self) -> u64 {
437         self.sregs.efer
438     }
439 
440     fn set_efer(&mut self, efer: u64) {
441         self.sregs.efer = efer
442     }
443 
444     fn flags(&self) -> u64 {
445         self.regs.rflags
446     }
447 
448     fn set_flags(&mut self, flags: u64) {
449         self.regs.rflags = flags;
450     }
451 
452     fn mode(&self) -> Result<CpuMode, PlatformError> {
453         let efer = self.efer();
454         let cr0 = self.read_reg(Register::CR0)?;
455         let mut mode = CpuMode::Real;
456 
457         if (cr0 & CR0_PE) == CR0_PE {
458             mode = CpuMode::Protected;
459         }
460 
461         if (efer & EFER_LMA) == EFER_LMA {
462             if mode != CpuMode::Protected {
463                 return Err(PlatformError::InvalidState(anyhow!(
464                     "Protection must be enabled in long mode"
465                 )));
466             }
467 
468             mode = CpuMode::Long;
469         }
470 
471         Ok(mode)
472     }
473 }
474 
475 pub struct Emulator<'a, T: CpuStateManager> {
476     platform: &'a mut dyn PlatformEmulator<CpuState = T>,
477 }
478 
479 // Reduces repetition; see its invocation in get_handler().
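// For reference, a tuple such as `(cmp, Cmp_rm32_r32)` in that invocation
// expands to roughly the following match arm (illustrative sketch):
//
//     Code::Cmp_rm32_r32 => Some(Box::new(cmp::Cmp_rm32_r32)),
//
// and the generated `match` ends with a final `_ => None` arm.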
480 macro_rules! gen_handler_match {
481     ($value: ident, $( ($module:ident, $code:ident) ),* ) => {
482         match $value {
483             $(
484                 Code::$code => Some(Box::new($module::$code)),
485             )*
486             _ => None,
487         }
488     };
489 }
490 
491 impl<'a, T: CpuStateManager> Emulator<'a, T> {
492     pub fn new(platform: &mut dyn PlatformEmulator<CpuState = T>) -> Emulator<T> {
493         Emulator { platform }
494     }
495 
496     fn get_handler(code: Code) -> Option<Box<dyn InstructionHandler<T>>> {
497         let handler: Option<Box<dyn InstructionHandler<T>>> = gen_handler_match!(
498             code,
499             // CMP
500             (cmp, Cmp_rm32_r32),
501             (cmp, Cmp_rm8_r8),
502             (cmp, Cmp_rm32_imm8),
503             (cmp, Cmp_rm64_r64),
504             // MOV
505             (mov, Mov_r8_rm8),
506             (mov, Mov_r8_imm8),
507             (mov, Mov_r16_imm16),
508             (mov, Mov_r16_rm16),
509             (mov, Mov_r32_imm32),
510             (mov, Mov_r32_rm32),
511             (mov, Mov_r64_imm64),
512             (mov, Mov_r64_rm64),
513             (mov, Mov_rm8_imm8),
514             (mov, Mov_rm8_r8),
515             (mov, Mov_rm16_imm16),
516             (mov, Mov_rm16_r16),
517             (mov, Mov_rm32_imm32),
518             (mov, Mov_rm32_r32),
519             (mov, Mov_rm64_imm32),
520             (mov, Mov_rm64_r64),
521             // MOVZX
522             (mov, Movzx_r16_rm8),
523             (mov, Movzx_r32_rm8),
524             (mov, Movzx_r64_rm8),
525             (mov, Movzx_r32_rm16),
526             (mov, Movzx_r64_rm16),
527             // MOV MOFFS
528             (mov, Mov_moffs16_AX),
529             (mov, Mov_AX_moffs16),
530             (mov, Mov_moffs32_EAX),
531             (mov, Mov_EAX_moffs32),
532             (mov, Mov_moffs64_RAX),
533             (mov, Mov_RAX_moffs64),
534             // MOVS
535             (movs, Movsd_m32_m32),
536             (movs, Movsw_m16_m16),
537             (movs, Movsb_m8_m8),
538             // OR
539             (or, Or_rm8_r8)
540         );
541 
542         handler
543     }
544 
545     fn emulate_insn_stream(
546         &mut self,
547         cpu_id: usize,
548         insn_stream: &[u8],
549         num_insn: Option<usize>,
550     ) -> EmulationResult<T, Exception> {
551         let mut state = self
552             .platform
553             .cpu_state(cpu_id)
554             .map_err(EmulationError::PlatformEmulationError)?;
555         let mut decoder = Decoder::new(64, insn_stream, DecoderOptions::NONE);
556         let mut insn = Instruction::default();
557         let mut num_insn_emulated: usize = 0;
558         let mut fetched_insn_stream: [u8; 16] = [0; 16];
559         let mut last_decoded_ip: u64 = state.ip();
560         let mut stop_emulation: bool = false;
561 
562         decoder.set_ip(state.ip());
563 
564         while decoder.can_decode() && !stop_emulation {
565             decoder.decode_out(&mut insn);
566 
567             if decoder.last_error() == DecoderError::NoMoreBytes {
568                 // The decoder is missing some bytes to decode the current
569                 // instruction, for example because the instruction stream
570                 // crosses a page boundary.
571                 // We fetch 16 more bytes from the instruction segment,
572                 // decode and emulate the failing instruction and terminate
573                 // the emulation loop.
574                 debug!(
575                     "Fetching {} bytes from {:#x}",
576                     fetched_insn_stream.len(),
577                     last_decoded_ip
578                 );
579 
580                 // fetched_insn_stream is 16 bytes long, enough to contain
581                 // any complete x86 instruction.
582                 self.platform
583                     .fetch(last_decoded_ip, &mut fetched_insn_stream)
584                     .map_err(EmulationError::PlatformEmulationError)?;
585 
586                 debug!("Fetched {:x?}", fetched_insn_stream);
587 
588                 // Once we have the new stream, we must create a new decoder
589                 // and emulate one last instruction from the last decoded IP.
590                 decoder = Decoder::new(64, &fetched_insn_stream, DecoderOptions::NONE);
591                 decoder.decode_out(&mut insn);
592                 if decoder.last_error() != DecoderError::None {
593                     return Err(EmulationError::InstructionFetchingError(anyhow!(
594                         "{:#x?}",
595                         insn_format!(insn)
596                     )));
597                 }
598 
599                 stop_emulation = true;
600             }
601 
602             // Emulate the decoded instruction
603             Emulator::get_handler(insn.code())
604                 .ok_or_else(|| {
605                     EmulationError::UnsupportedInstruction(anyhow!(
606                         "{:#x?} {:?} {:?}",
607                         insn_format!(insn),
608                         insn.mnemonic(),
609                         insn.code()
610                     ))
611                 })?
612                 .emulate(&insn, &mut state, self.platform)
613                 .context(anyhow!("Failed to emulate {:#x?}", insn_format!(insn)))?;
614 
615             last_decoded_ip = decoder.ip();
616             num_insn_emulated += 1;
617 
618             if let Some(num_insn) = num_insn {
619                 if num_insn_emulated >= num_insn {
620                     // Exit the decoding loop, do not decode the next instruction.
621                     stop_emulation = true;
622                 }
623             }
624         }
625 
626         state.set_ip(decoder.ip());
627         Ok(state)
628     }
629 
630     /// Emulate all instructions from the instruction stream.
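    ///
    /// # Example
    ///
    /// A minimal sketch of how a platform might drive the emulator, e.g. from
    /// an MMIO exit handler; `vmm` (a `PlatformEmulator` implementation),
    /// `cpu_id` and `insn_bytes` are assumed to be provided by the caller:
    ///
    /// ```ignore
    /// let mut emulator = Emulator::new(&mut vmm);
    /// let new_state = emulator.emulate(cpu_id, &insn_bytes)?;
    /// // Commit the resulting CPU state back to the platform.
    /// vmm.set_cpu_state(cpu_id, new_state)?;
    /// ```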
631     pub fn emulate(&mut self, cpu_id: usize, insn_stream: &[u8]) -> EmulationResult<T, Exception> {
632         self.emulate_insn_stream(cpu_id, insn_stream, None)
633     }
634 
635     /// Only emulate the first instruction from the stream.
636     ///
637     /// This is useful for cases where we get a read-ahead instruction stream
638     /// but must only emulate the first instruction before returning control
639     /// to the guest.
640     pub fn emulate_first_insn(
641         &mut self,
642         cpu_id: usize,
643         insn_stream: &[u8],
644     ) -> EmulationResult<T, Exception> {
645         self.emulate_insn_stream(cpu_id, insn_stream, Some(1))
646     }
647 }
648 
649 #[cfg(test)]
650 mod mock_vmm {
651     #![allow(unused_mut)]
652 
653     use super::*;
654     use crate::arch::emulator::{EmulationError, PlatformEmulator};
655     use crate::arch::x86::emulator::{Emulator, EmulatorCpuState as CpuState};
656     use crate::arch::x86::gdt::{gdt_entry, segment_from_gdt};
657     use crate::arch::x86::Exception;
658     use std::sync::{Arc, Mutex};
659 
660     #[derive(Debug, Clone)]
661     pub struct MockVmm {
662         memory: Vec<u8>,
663         state: Arc<Mutex<CpuState>>,
664     }
665 
666     pub type MockResult = Result<(), EmulationError<Exception>>;
667 
668     impl MockVmm {
669         pub fn new(ip: u64, regs: Vec<(Register, u64)>, memory: Option<(u64, &[u8])>) -> MockVmm {
670             let _ = env_logger::try_init();
671             let cs_reg = segment_from_gdt(gdt_entry(0xc09b, 0, 0xffffffff), 1);
672             let ds_reg = segment_from_gdt(gdt_entry(0xc093, 0, 0xffffffff), 2);
673             let es_reg = segment_from_gdt(gdt_entry(0xc093, 0, 0xffffffff), 3);
674             let mut initial_state = CpuState::default();
675             initial_state.set_ip(ip);
676             initial_state.write_segment(Register::CS, cs_reg).unwrap();
677             initial_state.write_segment(Register::DS, ds_reg).unwrap();
678             initial_state.write_segment(Register::ES, es_reg).unwrap();
679             for (reg, value) in regs {
680                 initial_state.write_reg(reg, value).unwrap();
681             }
682 
683             let mut vmm = MockVmm {
684                 memory: vec![0; 8192],
685                 state: Arc::new(Mutex::new(initial_state)),
686             };
687 
688             if let Some(mem) = memory {
689                 vmm.write_memory(mem.0, mem.1).unwrap();
690             }
691 
692             vmm
693         }
694 
695         pub fn emulate_insn(
696             &mut self,
697             cpu_id: usize,
698             insn: &[u8],
699             num_insn: Option<usize>,
700         ) -> MockResult {
701             let ip = self.cpu_state(cpu_id).unwrap().ip();
702             let mut emulator = Emulator::new(self);
703 
704             let new_state = emulator.emulate_insn_stream(cpu_id, insn, num_insn)?;
705             if num_insn.is_none() {
706                 assert_eq!(ip + insn.len() as u64, new_state.ip());
707             }
708 
709             self.set_cpu_state(cpu_id, new_state).unwrap();
710 
711             Ok(())
712         }
713 
714         pub fn emulate_first_insn(&mut self, cpu_id: usize, insn: &[u8]) -> MockResult {
715             self.emulate_insn(cpu_id, insn, Some(1))
716         }
717     }
718 
719     impl PlatformEmulator for MockVmm {
720         type CpuState = CpuState;
721 
722         fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
723             debug!(
724                 "Memory read {} bytes from [{:#x} -> {:#x}]",
725                 data.len(),
726                 gva,
727                 gva + data.len() as u64 - 1
728             );
729             data.copy_from_slice(&self.memory[gva as usize..gva as usize + data.len()]);
730             Ok(())
731         }
732 
733         fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
734             debug!(
735                 "Memory write {} bytes at [{:#x} -> {:#x}]",
736                 data.len(),
737                 gva,
738                 gva + data.len() as u64 - 1
739             );
740             self.memory[gva as usize..gva as usize + data.len()].copy_from_slice(data);
741 
742             Ok(())
743         }
744 
745         fn cpu_state(&self, _cpu_id: usize) -> Result<CpuState, PlatformError> {
746             Ok(self.state.lock().unwrap().clone())
747         }
748 
749         fn set_cpu_state(
750             &self,
751             _cpu_id: usize,
752             state: Self::CpuState,
753         ) -> Result<(), PlatformError> {
754             *self.state.lock().unwrap() = state;
755             Ok(())
756         }
757 
758         fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
759             Ok(gva)
760         }
761 
762         fn fetch(&self, ip: u64, instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
763             let rip = self
764                 .state
765                 .lock()
766                 .unwrap()
767                 .linearize(Register::CS, ip, false)?;
768             self.read_memory(rip, instruction_bytes)
769         }
770     }
771 }
772 
773 #[cfg(test)]
774 mod tests {
775     #![allow(unused_mut)]
776     use super::*;
777     use crate::arch::x86::emulator::mock_vmm::*;
778 
779     #[test]
780     // Emulate a truncated instruction stream, which should cause a fetch.
781     //
782     // mov rax, 0x1000
783     // Test with the first instruction truncated.
784     fn test_fetch_first_instruction() {
785         let ip: u64 = 0x1000;
786         let cpu_id = 0;
787         let memory = [
788             // Code at IP
789             0x48, 0xc7, 0xc0, 0x00, 0x10, 0x00, 0x00, // mov rax, 0x1000
790             0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
791             // Padding
792             0x00, 0x00, 0x00, 0x00, 0x00, // Padding is all zeroes
793             // Data at IP + 0x10 (0x1234567812345678 in LE)
794             0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12,
795         ];
796         let insn = [
797             // First instruction is truncated
798             0x48, 0xc7, 0xc0, 0x00, // mov rax, 0x1000 -- Missing bytes: 0x00, 0x10, 0x00, 0x00,
799         ];
800 
801         let mut vmm = MockVmm::new(ip, vec![], Some((ip, &memory)));
802         assert!(vmm.emulate_insn(cpu_id, &insn, Some(2)).is_ok());
803 
804         let rax: u64 = vmm
805             .cpu_state(cpu_id)
806             .unwrap()
807             .read_reg(Register::RAX)
808             .unwrap();
809         assert_eq!(rax, ip);
810     }
811 
812     #[test]
813     // Emulate a truncated instruction stream, which should cause a fetch.
814     //
815     // mov rax, 0x1000
816     // mov rbx, qword ptr [rax+10h]
817     // Test with the second instruction truncated.
818     fn test_fetch_second_instruction() {
819         let target_rax: u64 = 0x1234567812345678;
820         let ip: u64 = 0x1000;
821         let cpu_id = 0;
822         let memory = [
823             // Code at IP
824             0x48, 0xc7, 0xc0, 0x00, 0x10, 0x00, 0x00, // mov rax, 0x1000
825             0x48, 0x8b, 0x58, 0x10, // mov rbx, qword ptr [rax+10h]
826             // Padding
827             0x00, 0x00, 0x00, 0x00, 0x00, // Padding is all zeroes
828             // Data at IP + 0x10 (0x1234567812345678 in LE)
829             0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12,
830         ];
831         let insn = [
832             0x48, 0xc7, 0xc0, 0x00, 0x10, 0x00, 0x00, // mov rax, 0x1000
833             0x48, 0x8b, // Truncated mov rbx, qword ptr [rax+10h] -- missing [0x58, 0x10]
834         ];
835 
836         let mut vmm = MockVmm::new(ip, vec![], Some((ip, &memory)));
837         assert!(vmm.emulate_insn(cpu_id, &insn, Some(2)).is_ok());
838 
839         let rbx: u64 = vmm
840             .cpu_state(cpu_id)
841             .unwrap()
842             .read_reg(Register::RBX)
843             .unwrap();
844         assert_eq!(rbx, target_rax);
845     }
846 
847     #[test]
848     // Emulate a truncated instruction stream, which should cause a fetch.
849     //
850     // mov rax, 0x1000
851     // Test with the first instruction truncated and a bad fetched instruction.
852     // Verify that the instruction emulation returns an error.
853     fn test_fetch_bad_insn() {
854         let ip: u64 = 0x1000;
855         let cpu_id = 0;
856         let memory = [
857             // Code at IP
858             0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
859             0xff, 0xff,
860         ];
861         let insn = [
862             // First instruction is truncated
863             0x48, 0xc7, 0xc0, 0x00, // mov rax, 0x1000 -- Missing bytes: 0x00, 0x10, 0x00, 0x00,
864         ];
865 
866         let mut vmm = MockVmm::new(ip, vec![], Some((ip, &memory)));
867         assert!(vmm.emulate_first_insn(cpu_id, &insn).is_err());
868     }
869 }
870