xref: /cloud-hypervisor/hypervisor/src/mshv/mod.rs (revision 274f1aa2e738d579ffff9d4cfd7ed7c45293af31)
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use crate::arch::emulator::{PlatformEmulator, PlatformError};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::{Emulator, EmulatorCpuState};
use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::HypervisorType;
pub use mshv_bindings::*;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
use crate::{
    ClockData, CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
};
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::VcpuMshvState;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};

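// Flags passed to VmFd::get_dirty_log() below: CLEAR_DIRTY retrieves the
// bitmap and clears the dirty bits, SET_DIRTY forces the bits on (used when
// stopping dirty-page tracking, see stop_dirty_log()).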
const DIRTY_BITMAP_CLEAR_DIRTY: u64 = 0x4;
const DIRTY_BITMAP_SET_DIRTY: u64 = 0x8;

///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls::DeviceFd,
};

pub const PAGE_SHIFT: usize = 12;
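// 4 KiB pages: a guest physical address converts to a page frame number by
// shifting, e.g. GPA 0x12345 >> PAGE_SHIFT == PFN 0x12.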

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = 0;
        if region.flags & HV_MAP_GPA_READABLE != 0 {
            flags |= USER_MEMORY_REGION_READ;
        }
        if region.flags & HV_MAP_GPA_WRITABLE != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & HV_MAP_GPA_EXECUTABLE != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }

        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u32 = 0;
        if region.flags & USER_MEMORY_REGION_READ != 0 {
            flags |= HV_MAP_GPA_READABLE;
        }
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= HV_MAP_GPA_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= HV_MAP_GPA_EXECUTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
        }
    }
}
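
// Illustrative sketch of the conversions above (not exercised anywhere in
// this file): addresses, sizes and flags survive a generic -> MSHV trip.
//
//     let generic = UserMemoryRegion {
//         guest_phys_addr: 0x10_0000,
//         memory_size: 0x20_0000,
//         userspace_addr: 0x7f00_0000_0000,
//         flags: USER_MEMORY_REGION_READ | USER_MEMORY_REGION_WRITE,
//         ..Default::default()
//     };
//     let mshv: mshv_user_mem_region = generic.into();
//     assert_eq!(mshv.guest_pfn, 0x10_0000 >> PAGE_SHIFT);
//     assert_eq!(mshv.flags, HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE);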

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_msi_routing_entry> for IrqRoutingEntry {
    fn from(s: mshv_msi_routing_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_msi_routing_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

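/// Bookkeeping for a memory slot that participates in dirty-page tracking:
/// the base guest PFN and the slot size in bytes.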
struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }
    /// Check if the hypervisor is available
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}
/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // EINTR means the ioctl was interrupted; we have to
                        // retry, as this cannot be treated as a regular error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // Set additional partition property for SEV-SNP partition.
        if mshv_vm_type == VmType::Snp {
            let snp_policy = snp::get_default_snp_guest_policy();
            let vmgexit_offloads = snp::get_default_vmgexit_offload_features();
            // SAFETY: access union fields
            unsafe {
                debug!(
                    "Setting the partition isolation policy as: 0x{:x}",
                    snp_policy.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_POLICY,
                    snp_policy.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
                debug!(
                    "Setting the partition property to enable VMGEXIT offloads as: 0x{:x}",
                    vmgexit_offloads.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS,
                    vmgexit_offloads.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
            }
        }

        // The default Microsoft Hypervisor behavior for an unimplemented MSR
        // is to send a fault to the guest if it tries to access it. It is
        // possible to override this behavior with a more suitable option,
        // i.e. ignore writes from the guest and return zero on attempts to
        // read an unimplemented MSR.
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        let msr_list = self.get_msr_list()?;
        let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
        let mut msrs: Vec<MsrEntry> = vec![
            MsrEntry {
                ..Default::default()
            };
            num_msrs
        ];
        let indices = msr_list.as_slice();
        for (pos, index) in indices.iter().enumerate() {
            msrs[pos].index = *index;
        }
        let vm_fd = Arc::new(fd);

        Ok(Arc::new(MshvVm {
            fd: vm_fd,
            msrs,
            dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
        }))
    }

    /// Create an MSHV VM object and return it as a Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        Ok(Vec::new())
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }
}
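
// Illustrative only: a confidential (SEV-SNP) partition can be requested by
// routing a non-default type through create_vm_with_type(), assuming the host
// kernel and /dev/mshv support SNP.
//
//     let hv = MshvHypervisor::new().unwrap();
//     let vm = hv.create_vm_with_type(VmType::Snp as u64).unwrap();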

/// Vcpu struct for Microsoft Hypervisor
pub struct MshvVcpu {
    fd: VcpuFd,
    vp_index: u8,
    cpuid: Vec<CpuIdEntry>,
    msrs: Vec<MsrEntry>,
    vm_ops: Option<Arc<dyn vm::VmOps>>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::arch::x86::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::arch::x86::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Setup the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* We always have SynIC enabled on MSHV */
        Ok(())
    }
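    // Illustrative only: callers typically drive run() in a loop, e.g.
    //
    //     loop {
    //         match vcpu.run()? {
    //             cpu::VmExit::Ignore => continue,
    //             exit => { /* handle reset, shutdown, EOI, ... */ }
    //         }
    //     }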
    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        let hv_message: hv_message = hv_message::default();
        match self.fd.run(hv_message) {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support
                     * 0x402 under the "fwdebug" feature flag, but that feature is not enabled
                     * by default and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of implementing string IO support, which would not accomplish
                     * much at this point, skip these ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to
                     * translate guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            let insn_len = info.header.instruction_length() as u64;

                            /* Advance RIP and update RAX */
                            let arr_reg_name_value = [
                                (
                                    hv_register_name_HV_X64_REGISTER_RIP,
                                    info.header.rip + insn_len,
                                ),
                                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                            ];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve high bits in EAX but clear out high bits in RAX */
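                        // Example: a 1-byte IN with rax = 0xdead_beef reading
                        // v = 0x42 gives mask = 0xff and eax = 0xdead_be42;
                        // `eax as u64` then zero-extends, clearing RAX[63:32].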
                        let mask = 0xffffffff >> (32 - len * 8);
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
                    }

                    let insn_len = info.header.instruction_length() as u64;

                    /* Advance RIP and update RAX */
                    let arr_reg_name_value = [
                        (
                            hv_register_name_HV_X64_REGISTER_RIP,
                            info.header.rip + insn_len,
                        ),
                        (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                    ];
                    set_registers_64!(self.fd, arr_reg_name_value)
                        .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    assert!(insn_len > 0 && insn_len <= 16);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (info.guest_virtual_address, info.guest_physical_address),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(self.vp_index as usize, &info.instruction_bytes)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    // TODO: handle VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check the VP index is the same as the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only
                    // supports 256 vectors. There is no good way to recover
                    // if the hypervisor hands back an out-of-range value, so
                    // just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "Unhandled VCPU exit {:?}",
                    exit
                ))),
            },

            Err(e) => match e.errno() {
                libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
                _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "VCPU error {:?}",
                    e
                ))),
            },
        }
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to setup the CPUID registers.
    ///
    fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
        let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
        let mshv_cpuid = <CpuId>::from_entries(&cpuid)
            .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;

        self.fd
            .register_intercept_result_cpuid(&mshv_cpuid)
            .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to retrieve the CPUID registers.
    ///
    fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
        Ok(self.cpuid.clone())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
        Ok(self
            .fd
            .get_lapic()
            .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
            .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
        let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
        self.fd
            .set_lapic(&lapic)
            .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
    }
    ///
    /// Returns the vcpu's current "multiprocessing state".
    ///
    fn get_mp_state(&self) -> cpu::Result<MpState> {
        Ok(MpState::Mshv)
    }
    ///
    /// Sets the vcpu's current "multiprocessing state".
    ///
    fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
        Ok(())
    }
    ///
    /// Set CPU state
    ///
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        let state: VcpuMshvState = state.clone().into();
        self.set_msrs(&state.msrs)?;
        self.set_vcpu_events(&state.vcpu_events)?;
        self.set_regs(&state.regs.into())?;
        self.set_sregs(&state.sregs.into())?;
        self.set_fpu(&state.fpu)?;
        self.set_xcrs(&state.xcrs)?;
        self.set_lapic(&state.lapic)?;
        self.set_xsave(&state.xsave)?;
        // These registers are global and need to be set only for the first
        // vCPU, as the Microsoft Hypervisor allows setting them on only one
        // vCPU.
        if self.vp_index == 0 {
            self.fd
                .set_misc_regs(&state.misc)
                .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
        }
        self.fd
            .set_debug_regs(&state.dbg)
            .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
        Ok(())
    }
    ///
    /// Get CPU State
    ///
    fn state(&self) -> cpu::Result<CpuState> {
        let regs = self.get_regs()?;
        let sregs = self.get_sregs()?;
        let xcrs = self.get_xcrs()?;
        let fpu = self.get_fpu()?;
        let vcpu_events = self.get_vcpu_events()?;
        let mut msrs = self.msrs.clone();
        self.get_msrs(&mut msrs)?;
        let lapic = self.get_lapic()?;
        let xsave = self.get_xsave()?;
        let misc = self
            .fd
            .get_misc_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
        let dbg = self
            .fd
            .get_debug_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;

        Ok(VcpuMshvState {
            msrs,
            vcpu_events,
            regs: regs.into(),
            sregs: sregs.into(),
            fpu,
            xcrs,
            lapic,
            dbg,
            xsave,
            misc,
        }
        .into())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Translate guest virtual address to guest physical address
    ///
    fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
        let r = self
            .fd
            .translate_gva(gva, flags)
            .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;

        let gpa = r.0;
        // SAFETY: r is valid, otherwise this function will have returned
        let result_code = unsafe { r.1.__bindgen_anon_1.result_code };

        Ok((gpa, result_code))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Return the list of initial MSR entries for a VCPU
    ///
    fn boot_msr_entries(&self) -> Vec<MsrEntry> {
        use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};

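        // msr!() expands to an MsrEntry with zero data; msr_data!() seeds a
        // value, here enabling MTRRs with write-back as the default memory
        // type.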
        [
            msr!(msr_index::MSR_IA32_SYSENTER_CS),
            msr!(msr_index::MSR_IA32_SYSENTER_ESP),
            msr!(msr_index::MSR_IA32_SYSENTER_EIP),
            msr!(msr_index::MSR_STAR),
            msr!(msr_index::MSR_CSTAR),
            msr!(msr_index::MSR_LSTAR),
            msr!(msr_index::MSR_KERNEL_GS_BASE),
            msr!(msr_index::MSR_SYSCALL_MASK),
            msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
        ]
        .to_vec()
    }
}

impl MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xsave struct".
    ///
    fn get_xsave(&self) -> cpu::Result<Xsave> {
        self.fd
            .get_xsave()
            .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xsave struct".
    ///
    fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
        self.fd
            .set_xsave(xsave)
            .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xcrs".
    ///
    fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
        self.fd
            .get_xcrs()
            .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xcrs".
    ///
    fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
        self.fd
            .set_xcrs(xcrs)
            .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns currently pending exceptions, interrupts, and NMIs as well as related
    /// states of the vcpu.
    ///
    fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
        self.fd
            .get_vcpu_events()
            .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets pending exceptions, interrupts, and NMIs as well as related states
    /// of the vcpu.
    ///
    fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
        self.fd
            .set_vcpu_events(events)
            .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
    }
}

struct MshvEmulatorContext<'a> {
    vcpu: &'a MshvVcpu,
    map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
}

impl<'a> MshvEmulatorContext<'a> {
    // Do the actual gva -> gpa translation
    #[allow(non_upper_case_globals)]
    fn translate(&self, gva: u64) -> Result<u64, PlatformError> {
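        // Fast path: the initial GVA -> GPA mapping for the faulting access
        // is handed to us by the hypervisor along with the intercept message.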
        if self.map.0 == gva {
            return Ok(self.map.1);
        }

        // TODO: More fine-grained control for the flags
        let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;

        let (gpa, result_code) = self
            .vcpu
            .translate_gva(gva, flags.into())
            .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;

        match result_code {
            hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
            _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
        }
    }
}

/// Platform emulation for Hyper-V
impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
    type CpuState = EmulatorCpuState;

    fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_read(gpa, data).is_err() {
                vm_ops
                    .mmio_read(gpa, data)
                    .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_write(gpa, data).is_err() {
                vm_ops
                    .mmio_write(gpa, data)
                    .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::GetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        let regs = self
            .vcpu
            .get_regs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
        let sregs = self
            .vcpu
            .get_sregs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;

        debug!("mshv emulator: Getting new CPU state");
        debug!("mshv emulator: {:#x?}", regs);

        Ok(EmulatorCpuState { regs, sregs })
    }

    fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::SetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        debug!("mshv emulator: Setting new CPU state");
        debug!("mshv emulator: {:#x?}", state.regs);

        self.vcpu
            .set_regs(&state.regs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
        self.vcpu
            .set_sregs(&state.sregs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
    }

    fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
        self.translate(gva)
    }

    fn fetch(&self, _ip: u64, _instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
        Err(PlatformError::MemoryReadFailure(anyhow!("unimplemented")))
    }
}

/// Wrapper over Mshv VM ioctls.
pub struct MshvVm {
    fd: Arc<VmFd>,
    msrs: Vec<MsrEntry>,
    dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
}

impl MshvVm {
    ///
    /// Creates an in-kernel device.
    ///
    /// See the documentation for `MSHV_CREATE_DEVICE`.
    fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
        let device_fd = self
            .fd
            .create_device(device)
            .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
        Ok(VfioDeviceFd::new_from_mshv(device_fd))
    }
}

///
/// Implementation of Vm trait for Mshv
///
/// # Examples
///
/// ```
/// # extern crate hypervisor;
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl vm::Vm for MshvVm {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the one-page region in the VM's address space.
    ///
    fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
        Ok(())
    }
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the three-page region in the VM's address space.
    ///
    fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
        Ok(())
    }
    ///
    /// Creates an in-kernel interrupt controller.
    ///
    fn create_irq_chip(&self) -> vm::Result<()> {
        Ok(())
    }
    ///
    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .register_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;

        Ok(())
    }
    ///
    /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .unregister_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;

        Ok(())
    }
    ///
    /// Creates a vCPU for the given id, returned as a Vcpu trait object.
    ///
    fn create_vcpu(
        &self,
        id: u8,
        vm_ops: Option<Arc<dyn VmOps>>,
    ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
        let vcpu_fd = self
            .fd
            .create_vcpu(id)
            .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
        let vcpu = MshvVcpu {
            fd: vcpu_fd,
            vp_index: id,
            cpuid: Vec::new(),
            msrs: self.msrs.clone(),
            vm_ops,
        };
        Ok(Arc::new(vcpu))
    }
    #[cfg(target_arch = "x86_64")]
    fn enable_split_irq(&self) -> vm::Result<()> {
        Ok(())
    }
    #[cfg(target_arch = "x86_64")]
    fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
        Ok(())
    }
    fn register_ioevent(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: Option<DataMatch>,
    ) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!(
            "register_ioevent fd {} addr {:x?} datamatch {:?}",
            fd.as_raw_fd(),
            addr,
            datamatch
        );
        if let Some(dm) = datamatch {
            match dm {
                vm::DataMatch::DataMatch32(mshv_dm32) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm32)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
                vm::DataMatch::DataMatch64(mshv_dm64) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm64)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
            }
        } else {
            self.fd
                .register_ioevent(fd, addr, NoDatamatch)
                .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
        }
    }
    /// Unregisters an event from the address it was previously registered to.
    fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);

        self.fd
            .unregister_ioevent(fd, addr, NoDatamatch)
            .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
    }

    /// Creates a guest physical memory region.
    fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Keep track of the slots whether they are read-only or not. Even for
        // a read-only slot the hypervisor can enable the dirty bits, but a VM
        // exit happens before the dirty bits are set.
        self.dirty_log_slots.write().unwrap().insert(
            user_memory_region.guest_pfn,
            MshvDirtyLogSlot {
                guest_pfn: user_memory_region.guest_pfn,
                memory_size: user_memory_region.size,
            },
        );

        self.fd
            .map_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
        Ok(())
    }

    /// Removes a guest physical memory region.
    fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Remove the corresponding entry from "self.dirty_log_slots" if needed
        self.dirty_log_slots
            .write()
            .unwrap()
            .remove(&user_memory_region.guest_pfn);

        self.fd
            .unmap_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
        Ok(())
    }

    fn make_user_memory_region(
        &self,
        _slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        readonly: bool,
        _log_dirty_pages: bool,
    ) -> UserMemoryRegion {
        let mut flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE;
        if !readonly {
            flags |= HV_MAP_GPA_WRITABLE;
        }

        mshv_user_mem_region {
            flags,
            guest_pfn: guest_phys_addr >> PAGE_SHIFT,
            size: memory_size,
            userspace_addr,
        }
        .into()
    }

    fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
        let mut vfio_dev = mshv_create_device {
            type_: mshv_device_type_MSHV_DEV_TYPE_VFIO,
            fd: 0,
            flags: 0,
        };

        self.create_device(&mut vfio_dev)
            .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
    }

    ///
    /// Constructs a routing entry
    ///
    fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
        match config {
            InterruptSourceConfig::MsiIrq(cfg) => mshv_msi_routing_entry {
                gsi,
                address_lo: cfg.low_addr,
                address_hi: cfg.high_addr,
                data: cfg.data,
            }
            .into(),
            _ => {
                unreachable!()
            }
        }
    }

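    // mshv_msi_routing ends in a flexible array member, so the routing table
    // is allocated via vec_with_array_field() and the entries are copied in
    // right behind the header (see the SAFETY comment below).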
    fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
        let mut msi_routing =
            vec_with_array_field::<mshv_msi_routing, mshv_msi_routing_entry>(entries.len());
        msi_routing[0].nr = entries.len() as u32;

        let entries: Vec<mshv_msi_routing_entry> = entries
            .iter()
            .map(|entry| match entry {
                IrqRoutingEntry::Mshv(e) => *e,
                #[allow(unreachable_patterns)]
                _ => panic!("IrqRoutingEntry type is wrong"),
            })
            .collect();

        // SAFETY: msi_routing initialized with entries.len() and now it is being turned into
        // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
        // everything from entries.
        unsafe {
            let entries_slice: &mut [mshv_msi_routing_entry] =
                msi_routing[0].entries.as_mut_slice(entries.len());
            entries_slice.copy_from_slice(&entries);
        }

        self.fd
            .set_msi_routing(&msi_routing[0])
            .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
    }
    ///
    /// Start logging dirty pages
    ///
    fn start_dirty_log(&self) -> vm::Result<()> {
        self.fd
            .enable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
    }
    ///
    /// Stop logging dirty pages
    ///
    fn stop_dirty_log(&self) -> vm::Result<()> {
        let dirty_log_slots = self.dirty_log_slots.read().unwrap();
        // Before disabling dirty page tracking we need to set the dirty bits
        // in the hypervisor; this is a requirement of the Microsoft
        // Hypervisor.
        for (_, s) in dirty_log_slots.iter() {
            self.fd
                .get_dirty_log(s.guest_pfn, s.memory_size as usize, DIRTY_BITMAP_SET_DIRTY)
                .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
        }
        self.fd
            .disable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
        Ok(())
    }
    ///
    /// Get dirty pages bitmap (one bit per page)
    ///
    fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
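        // The bitmap has one bit per 4 KiB page: a 2 MiB region, for example,
        // yields 512 bits packed into eight u64 words.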
        self.fd
            .get_dirty_log(
                base_gpa >> PAGE_SHIFT,
                memory_size as usize,
                DIRTY_BITMAP_CLEAR_DIRTY,
            )
            .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
    }
    /// Retrieve guest clock.
    #[cfg(target_arch = "x86_64")]
    fn get_clock(&self) -> vm::Result<ClockData> {
        Ok(ClockData::Mshv)
    }
    /// Set guest clock.
    #[cfg(target_arch = "x86_64")]
    fn set_clock(&self, _data: &ClockData) -> vm::Result<()> {
        Ok(())
    }
    /// Downcast to the underlying MshvVm type
    fn as_any(&self) -> &dyn Any {
        self
    }
    /// Initialize the SEV-SNP VM
    #[cfg(feature = "sev_snp")]
    fn sev_snp_init(&self) -> vm::Result<()> {
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_STATE,
                hv_partition_isolation_state_HV_PARTITION_ISOLATION_SECURE as u64,
            )
            .map_err(|e| vm::HypervisorVmError::InitializeSevSnp(e.into()))
    }
}
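
// A minimal smoke-test sketch for the availability probe; it assumes only
// that /dev/mshv either exists or is absent, not that MSHV is usable.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mshv_availability_probe_does_not_error() {
        // is_available() maps a missing /dev/mshv to Ok(false), so this
        // should pass on hosts with or without the Microsoft Hypervisor.
        assert!(MshvHypervisor::is_available().is_ok());
    }
}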