// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use crate::arch::emulator::{PlatformEmulator, PlatformError};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::{Emulator, EmulatorCpuState};
use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::HypervisorType;
pub use mshv_bindings::*;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
use crate::{
    ClockData, CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
};
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::VcpuMshvState;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};

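// Flags passed to VmFd::get_dirty_log(): CLEAR_DIRTY clears the dirty bits as
// the bitmap is harvested (see get_dirty_log() below), while SET_DIRTY marks
// the pages dirty, which stop_dirty_log() relies on before disabling tracking.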
const DIRTY_BITMAP_CLEAR_DIRTY: u64 = 0x4;
const DIRTY_BITMAP_SET_DIRTY: u64 = 0x8;

///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls::DeviceFd,
};

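/// Guest pages are 4 KiB, so a guest frame number (PFN) is a guest physical
/// address shifted right by this amount.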
pub const PAGE_SHIFT: usize = 12;

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = 0;
        if region.flags & HV_MAP_GPA_READABLE != 0 {
            flags |= USER_MEMORY_REGION_READ;
        }
        if region.flags & HV_MAP_GPA_WRITABLE != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & HV_MAP_GPA_EXECUTABLE != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }

        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u32 = 0;
        if region.flags & USER_MEMORY_REGION_READ != 0 {
            flags |= HV_MAP_GPA_READABLE;
        }
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= HV_MAP_GPA_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= HV_MAP_GPA_EXECUTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
        }
    }
}
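
// A minimal sketch exercising the two conversions above with hypothetical
// values (the test name and numbers are illustrative, not from the original
// source); it walks through the PFN/offset arithmetic and the flag mapping.
#[cfg(test)]
mod user_memory_region_tests {
    use super::*;

    #[test]
    fn mem_region_round_trip() {
        let mshv_region = mshv_user_mem_region {
            guest_pfn: 0x100,
            size: 0x2000,
            userspace_addr: 0x7f00_1000,
            flags: HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE,
        };

        let generic: UserMemoryRegion = mshv_region.into();
        // guest_phys_addr = (0x100 << 12) + (0x7f00_1000 & 0xfff) = 0x10_0000
        assert_eq!(generic.guest_phys_addr, 0x10_0000);
        assert_eq!(
            generic.flags,
            USER_MEMORY_REGION_READ | USER_MEMORY_REGION_WRITE
        );

        // Converting back recovers the original PFN and HV_MAP_GPA_* flags.
        let back: mshv_user_mem_region = generic.into();
        assert_eq!(back.guest_pfn, 0x100);
        assert_eq!(back.flags, HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE);
    }
}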

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_msi_routing_entry> for IrqRoutingEntry {
    fn from(s: mshv_msi_routing_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_msi_routing_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

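/// Bookkeeping for a memory slot while dirty-page logging is enabled: the base
/// guest PFN of the region and its size in bytes.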
struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }
    /// Check if the hypervisor is available
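    ///
    /// # Examples
    ///
    /// ```
    /// # use hypervisor::mshv::MshvHypervisor;
    /// // A sketch of the intended use: this returns Ok(false), not an error,
    /// // when /dev/mshv does not exist on the host.
    /// let available = MshvHypervisor::is_available().unwrap();
    /// ```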
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}
/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the error is EINTR, the ioctl was interrupted and
                        // we have to retry, as this can't be considered a
                        // regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // The default Microsoft Hypervisor behavior for an unimplemented MSR
        // is to send a fault to the guest when it tries to access it. Override
        // this with a more suitable option: ignore writes from the guest and
        // return zero when the guest attempts to read an unimplemented MSR.
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        let msr_list = self.get_msr_list()?;
        let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
        let mut msrs: Vec<MsrEntry> = vec![
            MsrEntry {
                ..Default::default()
            };
            num_msrs
        ];
        let indices = msr_list.as_slice();
        for (pos, index) in indices.iter().enumerate() {
            msrs[pos].index = *index;
        }
        let vm_fd = Arc::new(fd);

        Ok(Arc::new(MshvVm {
            fd: vm_fd,
            msrs,
            dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
        }))
    }

    /// Create an MSHV VM object and return it as a Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        Ok(Vec::new())
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }
}

/// Vcpu struct for Microsoft Hypervisor
pub struct MshvVcpu {
    fd: VcpuFd,
    vp_index: u8,
    cpuid: Vec<CpuIdEntry>,
    msrs: Vec<MsrEntry>,
    vm_ops: Option<Arc<dyn vm::VmOps>>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::arch::x86::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::arch::x86::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Set up the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* We always have SynIC enabled on MSHV */
        Ok(())
    }
    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        let hv_message: hv_message = hv_message::default();
        match self.fd.run(hv_message) {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support 0x402
                     * under the "fwdebug" feature flag, but that feature is not enabled by default
                     * and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of implementing string IO support now, which would not accomplish
                     * much anyway, skip those ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to translate
                     * guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            let insn_len = info.header.instruction_length() as u64;

                            /* Advance RIP and update RAX */
                            let arr_reg_name_value = [
                                (
                                    hv_register_name_HV_X64_REGISTER_RIP,
                                    info.header.rip + insn_len,
                                ),
                                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                            ];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve the unread high bytes of EAX, but clear the high 32 bits of RAX */
                        let mask = 0xffffffff >> (32 - len * 8);
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
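                        // Worked example (illustrative values): a 1-byte IN
                        // with rax = 0xdead_beef reading v = 0x12 gives
                        // mask = 0xff, eax = 0xdead_be12, and
                        // ret_rax = 0x0000_0000_dead_be12.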
                    }

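                    // The intercept leaves the guest's RIP pointing at the
                    // IN/OUT instruction, so step past it manually here.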
                    let insn_len = info.header.instruction_length() as u64;

                    /* Advance RIP and update RAX */
                    let arr_reg_name_value = [
                        (
                            hv_register_name_HV_X64_REGISTER_RIP,
                            info.header.rip + insn_len,
                        ),
                        (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                    ];
                    set_registers_64!(self.fd, arr_reg_name_value)
                        .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    assert!(insn_len > 0 && insn_len <= 16);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (info.guest_virtual_address, info.guest_physical_address),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(self.vp_index as usize, &info.instruction_bytes)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    //TODO: Handler for VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check that the VP index is the same as the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only supports 256 vectors.
                    // There is no good way to recover from this if the hypervisor messes around.
                    // Just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "Unhandled VCPU exit {:?}",
                    exit
                ))),
            },

            Err(e) => match e.errno() {
                libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
                _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "VCPU error {:?}",
                    e
                ))),
            },
        }
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to setup the CPUID registers.
    ///
    fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
        let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
        let mshv_cpuid = <CpuId>::from_entries(&cpuid)
            .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;

        self.fd
            .register_intercept_result_cpuid(&mshv_cpuid)
            .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to retrieve the CPUID registers.
    ///
    fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
        Ok(self.cpuid.clone())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
        Ok(self
            .fd
            .get_lapic()
            .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
        let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
        self.fd
            .set_lapic(&lapic)
            .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
    }
    ///
    /// Returns the vcpu's current "multiprocessing state".
    ///
    fn get_mp_state(&self) -> cpu::Result<MpState> {
        Ok(MpState::Mshv)
    }
    ///
    /// Sets the vcpu's current "multiprocessing state".
    ///
    fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
        Ok(())
    }
    ///
    /// Set CPU state
    ///
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        let state: VcpuMshvState = state.clone().into();
        self.set_msrs(&state.msrs)?;
        self.set_vcpu_events(&state.vcpu_events)?;
        self.set_regs(&state.regs.into())?;
        self.set_sregs(&state.sregs.into())?;
        self.set_fpu(&state.fpu)?;
        self.set_xcrs(&state.xcrs)?;
        self.set_lapic(&state.lapic)?;
        self.set_xsave(&state.xsave)?;
        // These registers are global and need to be set only for the first
        // vCPU, as the Microsoft Hypervisor allows setting them for only one
        // vCPU.
        if self.vp_index == 0 {
            self.fd
                .set_misc_regs(&state.misc)
                .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
        }
        self.fd
            .set_debug_regs(&state.dbg)
            .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
        Ok(())
    }
    ///
    /// Get CPU State
    ///
    fn state(&self) -> cpu::Result<CpuState> {
        let regs = self.get_regs()?;
        let sregs = self.get_sregs()?;
        let xcrs = self.get_xcrs()?;
        let fpu = self.get_fpu()?;
        let vcpu_events = self.get_vcpu_events()?;
        let mut msrs = self.msrs.clone();
        self.get_msrs(&mut msrs)?;
        let lapic = self.get_lapic()?;
        let xsave = self.get_xsave()?;
        let misc = self
            .fd
            .get_misc_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
        let dbg = self
            .fd
            .get_debug_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;

        Ok(VcpuMshvState {
            msrs,
            vcpu_events,
            regs: regs.into(),
            sregs: sregs.into(),
            fpu,
            xcrs,
            lapic,
            dbg,
            xsave,
            misc,
        }
        .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Translate guest virtual address to guest physical address
    ///
    fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
        let r = self
            .fd
            .translate_gva(gva, flags)
            .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;

        let gpa = r.0;
        // SAFETY: r is valid, otherwise this function will have returned
        let result_code = unsafe { r.1.__bindgen_anon_1.result_code };

        Ok((gpa, result_code))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Return the list of initial MSR entries for a VCPU
    ///
    fn boot_msr_entries(&self) -> Vec<MsrEntry> {
        use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};

        [
            msr!(msr_index::MSR_IA32_SYSENTER_CS),
            msr!(msr_index::MSR_IA32_SYSENTER_ESP),
            msr!(msr_index::MSR_IA32_SYSENTER_EIP),
            msr!(msr_index::MSR_STAR),
            msr!(msr_index::MSR_CSTAR),
            msr!(msr_index::MSR_LSTAR),
            msr!(msr_index::MSR_KERNEL_GS_BASE),
            msr!(msr_index::MSR_SYSCALL_MASK),
            msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
        ]
        .to_vec()
    }
}

impl MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xsave struct".
    ///
    fn get_xsave(&self) -> cpu::Result<Xsave> {
        self.fd
            .get_xsave()
            .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xsave struct".
    ///
    fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
        self.fd
            .set_xsave(xsave)
            .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xcrs".
    ///
    fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
        self.fd
            .get_xcrs()
            .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xcrs".
    ///
    fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
        self.fd
            .set_xcrs(xcrs)
            .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns currently pending exceptions, interrupts, and NMIs as well as related
    /// states of the vcpu.
    ///
    fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
        self.fd
            .get_vcpu_events()
            .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets pending exceptions, interrupts, and NMIs as well as related states
    /// of the vcpu.
    ///
    fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
        self.fd
            .set_vcpu_events(events)
            .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
    }
}

struct MshvEmulatorContext<'a> {
    vcpu: &'a MshvVcpu,
    map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
}

impl<'a> MshvEmulatorContext<'a> {
    // Do the actual gva -> gpa translation
    #[allow(non_upper_case_globals)]
    fn translate(&self, gva: u64) -> Result<u64, PlatformError> {
        if self.map.0 == gva {
            return Ok(self.map.1);
        }

        // TODO: More fine-grained control for the flags
        let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;

        let (gpa, result_code) = self
            .vcpu
            .translate_gva(gva, flags.into())
            .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;

        match result_code {
            hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
            _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
        }
    }
}

/// Platform emulation for Hyper-V
impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
    type CpuState = EmulatorCpuState;

    fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

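        // Try plain guest RAM first; if the GPA is not backed by RAM,
        // fall back to treating the access as MMIO.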
        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_read(gpa, data).is_err() {
                vm_ops
                    .mmio_read(gpa, data)
                    .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_write(gpa, data).is_err() {
                vm_ops
                    .mmio_write(gpa, data)
                    .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::GetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        let regs = self
            .vcpu
            .get_regs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
        let sregs = self
            .vcpu
            .get_sregs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;

        debug!("mshv emulator: Getting new CPU state");
        debug!("mshv emulator: {:#x?}", regs);

        Ok(EmulatorCpuState { regs, sregs })
    }

    fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::SetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        debug!("mshv emulator: Setting new CPU state");
        debug!("mshv emulator: {:#x?}", state.regs);

        self.vcpu
            .set_regs(&state.regs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
        self.vcpu
            .set_sregs(&state.sregs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
    }

    fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
        self.translate(gva)
    }

    fn fetch(&self, _ip: u64, _instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
        Err(PlatformError::MemoryReadFailure(anyhow!("unimplemented")))
    }
}

/// Wrapper over Mshv VM ioctls.
pub struct MshvVm {
    fd: Arc<VmFd>,
    msrs: Vec<MsrEntry>,
    dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
}

impl MshvVm {
    ///
    /// Creates an in-kernel device.
    ///
    /// See the documentation for `MSHV_CREATE_DEVICE`.
    fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
        let device_fd = self
            .fd
            .create_device(device)
            .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
        Ok(VfioDeviceFd::new_from_mshv(device_fd))
    }
}

///
/// Implementation of Vm trait for Mshv
///
/// # Examples
///
/// ```
/// # extern crate hypervisor;
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl vm::Vm for MshvVm {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the one-page region in the VM's address space.
    ///
    fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
        Ok(())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the three-page region in the VM's address space.
    ///
    fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
        Ok(())
    }
    ///
    /// Creates an in-kernel interrupt controller.
    ///
    fn create_irq_chip(&self) -> vm::Result<()> {
        Ok(())
    }
    ///
    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .register_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;

        Ok(())
    }
    ///
    /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .unregister_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;

        Ok(())
    }
    ///
    /// Creates a new vCPU for this VM, identified by `id`, and returns it as
    /// a Vcpu trait object.
    ///
    fn create_vcpu(
        &self,
        id: u8,
        vm_ops: Option<Arc<dyn VmOps>>,
    ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
        let vcpu_fd = self
            .fd
            .create_vcpu(id)
            .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
        let vcpu = MshvVcpu {
            fd: vcpu_fd,
            vp_index: id,
            cpuid: Vec::new(),
            msrs: self.msrs.clone(),
            vm_ops,
        };
        Ok(Arc::new(vcpu))
    }
    #[cfg(target_arch = "x86_64")]
    fn enable_split_irq(&self) -> vm::Result<()> {
        Ok(())
    }
    #[cfg(target_arch = "x86_64")]
    fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
        Ok(())
    }
    fn register_ioevent(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: Option<DataMatch>,
    ) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!(
            "register_ioevent fd {} addr {:x?} datamatch {:?}",
            fd.as_raw_fd(),
            addr,
            datamatch
        );
        if let Some(dm) = datamatch {
            match dm {
                vm::DataMatch::DataMatch32(mshv_dm32) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm32)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
                vm::DataMatch::DataMatch64(mshv_dm64) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm64)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
            }
        } else {
            self.fd
                .register_ioevent(fd, addr, NoDatamatch)
                .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
        }
    }
    /// Unregister an event from a certain address it has been previously registered to.
    fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);

        self.fd
            .unregister_ioevent(fd, addr, NoDatamatch)
            .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
    }

    /// Creates a guest physical memory region.
    fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Keep track of every slot, read-only or not: even for a read-only
        // region the hypervisor can enable the dirty bits, but a VM exit
        // happens before the dirty bits are actually set.
        self.dirty_log_slots.write().unwrap().insert(
            user_memory_region.guest_pfn,
            MshvDirtyLogSlot {
                guest_pfn: user_memory_region.guest_pfn,
                memory_size: user_memory_region.size,
            },
        );

        self.fd
            .map_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
        Ok(())
    }

    /// Removes a guest physical memory region.
    fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Remove the corresponding entry from "self.dirty_log_slots" if needed
        self.dirty_log_slots
            .write()
            .unwrap()
            .remove(&user_memory_region.guest_pfn);

        self.fd
            .unmap_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
        Ok(())
    }

    fn make_user_memory_region(
        &self,
        _slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        readonly: bool,
        _log_dirty_pages: bool,
    ) -> UserMemoryRegion {
        let mut flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE;
        if !readonly {
            flags |= HV_MAP_GPA_WRITABLE;
        }

        mshv_user_mem_region {
            flags,
            guest_pfn: guest_phys_addr >> PAGE_SHIFT,
            size: memory_size,
            userspace_addr,
        }
        .into()
    }

    fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
        let mut vfio_dev = mshv_create_device {
            type_: mshv_device_type_MSHV_DEV_TYPE_VFIO,
            fd: 0,
            flags: 0,
        };

        self.create_device(&mut vfio_dev)
            .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
    }

    ///
    /// Constructs a routing entry
    ///
    fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
        match config {
            InterruptSourceConfig::MsiIrq(cfg) => mshv_msi_routing_entry {
                gsi,
                address_lo: cfg.low_addr,
                address_hi: cfg.high_addr,
                data: cfg.data,
            }
            .into(),
            _ => {
                unreachable!()
            }
        }
    }

    fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
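        // mshv_msi_routing is a header (nr) followed by a flexible array
        // member, so one contiguous allocation has to be sized for
        // entries.len() trailing mshv_msi_routing_entry elements.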
        let mut msi_routing =
            vec_with_array_field::<mshv_msi_routing, mshv_msi_routing_entry>(entries.len());
        msi_routing[0].nr = entries.len() as u32;

        let entries: Vec<mshv_msi_routing_entry> = entries
            .iter()
            .map(|entry| match entry {
                IrqRoutingEntry::Mshv(e) => *e,
                #[allow(unreachable_patterns)]
                _ => panic!("IrqRoutingEntry type is wrong"),
            })
            .collect();

        // SAFETY: msi_routing was allocated with room for entries.len() array
        // elements, and entries_slice is created with that same length, so it
        // is guaranteed to be large enough to hold everything from entries.
        unsafe {
            let entries_slice: &mut [mshv_msi_routing_entry] =
                msi_routing[0].entries.as_mut_slice(entries.len());
            entries_slice.copy_from_slice(&entries);
        }

        self.fd
            .set_msi_routing(&msi_routing[0])
            .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
    }
    ///
    /// Start logging dirty pages
    ///
    fn start_dirty_log(&self) -> vm::Result<()> {
        self.fd
            .enable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
    }
    ///
    /// Stop logging dirty pages
    ///
    fn stop_dirty_log(&self) -> vm::Result<()> {
        let dirty_log_slots = self.dirty_log_slots.read().unwrap();
        // Before disabling dirty page tracking we need to set the dirty bits
        // in the hypervisor. This is a requirement from the Microsoft
        // Hypervisor.
        for (_, s) in dirty_log_slots.iter() {
            self.fd
                .get_dirty_log(s.guest_pfn, s.memory_size as usize, DIRTY_BITMAP_SET_DIRTY)
                .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
        }
        self.fd
            .disable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
        Ok(())
    }
    ///
    /// Get dirty pages bitmap (one bit per page)
    ///
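    /// For example, a 1 GiB region spans 262,144 4-KiB pages, so the returned
    /// bitmap packs 262,144 bits into 4,096 u64 words.
    ///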
    fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
        self.fd
            .get_dirty_log(
                base_gpa >> PAGE_SHIFT,
                memory_size as usize,
                DIRTY_BITMAP_CLEAR_DIRTY,
            )
            .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
    }
    /// Retrieve guest clock.
    #[cfg(target_arch = "x86_64")]
    fn get_clock(&self) -> vm::Result<ClockData> {
        Ok(ClockData::Mshv)
    }
    /// Set guest clock.
    #[cfg(target_arch = "x86_64")]
    fn set_clock(&self, _data: &ClockData) -> vm::Result<()> {
        Ok(())
    }
    /// Downcast to the underlying MshvVm type
    fn as_any(&self) -> &dyn Any {
        self
    }
}