xref: /cloud-hypervisor/hypervisor/src/mshv/mod.rs (revision 3ce0fef7fd546467398c914dbc74d8542e45cf6f)
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
//
// Copyright © 2020, Microsoft Corporation
//

use crate::arch::emulator::{PlatformEmulator, PlatformError};

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::emulator::{Emulator, EmulatorCpuState};
use crate::cpu;
use crate::cpu::Vcpu;
use crate::hypervisor;
use crate::vec_with_array_field;
use crate::vm::{self, InterruptSourceConfig, VmOps};
use crate::HypervisorType;
pub use mshv_bindings::*;
use mshv_ioctls::{set_registers_64, Mshv, NoDatamatch, VcpuFd, VmFd, VmType};
use std::any::Any;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use vfio_ioctls::VfioDeviceFd;
use vm::DataMatch;

#[cfg(feature = "sev_snp")]
mod snp_constants;
// x86_64 dependencies
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(feature = "sev_snp")]
use snp_constants::*;

use crate::{
    ClockData, CpuState, IoEventAddress, IrqRoutingEntry, MpState, UserMemoryRegion,
    USER_MEMORY_REGION_EXECUTE, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE,
};
#[cfg(feature = "sev_snp")]
use igvm_defs::IGVM_VHS_SNP_ID_BLOCK;
use vmm_sys_util::eventfd::EventFd;
#[cfg(target_arch = "x86_64")]
pub use x86_64::VcpuMshvState;
#[cfg(target_arch = "x86_64")]
pub use x86_64::*;

#[cfg(target_arch = "x86_64")]
use std::fs::File;
use std::os::unix::io::AsRawFd;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86::{CpuIdEntry, FpuState, MsrEntry};

const DIRTY_BITMAP_CLEAR_DIRTY: u64 = 0x4;
const DIRTY_BITMAP_SET_DIRTY: u64 = 0x8;

///
/// Export generically-named wrappers of mshv-bindings for Unix-based platforms
///
pub use {
    mshv_bindings::mshv_create_device as CreateDevice,
    mshv_bindings::mshv_device_attr as DeviceAttr, mshv_ioctls::DeviceFd,
};

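// Shift between a guest physical address and its page frame number, assuming
// 4 KiB pages; e.g. GPA 0x3000 <-> PFN 0x3 in the conversions below.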
pub const PAGE_SHIFT: usize = 12;

impl From<mshv_user_mem_region> for UserMemoryRegion {
    fn from(region: mshv_user_mem_region) -> Self {
        let mut flags: u32 = 0;
        if region.flags & HV_MAP_GPA_READABLE != 0 {
            flags |= USER_MEMORY_REGION_READ;
        }
        if region.flags & HV_MAP_GPA_WRITABLE != 0 {
            flags |= USER_MEMORY_REGION_WRITE;
        }
        if region.flags & HV_MAP_GPA_EXECUTABLE != 0 {
            flags |= USER_MEMORY_REGION_EXECUTE;
        }

        UserMemoryRegion {
            guest_phys_addr: (region.guest_pfn << PAGE_SHIFT as u64)
                + (region.userspace_addr & ((1 << PAGE_SHIFT) - 1)),
            memory_size: region.size,
            userspace_addr: region.userspace_addr,
            flags,
            ..Default::default()
        }
    }
}

impl From<UserMemoryRegion> for mshv_user_mem_region {
    fn from(region: UserMemoryRegion) -> Self {
        let mut flags: u32 = 0;
        if region.flags & USER_MEMORY_REGION_READ != 0 {
            flags |= HV_MAP_GPA_READABLE;
        }
        if region.flags & USER_MEMORY_REGION_WRITE != 0 {
            flags |= HV_MAP_GPA_WRITABLE;
        }
        if region.flags & USER_MEMORY_REGION_EXECUTE != 0 {
            flags |= HV_MAP_GPA_EXECUTABLE;
        }

        mshv_user_mem_region {
            guest_pfn: region.guest_phys_addr >> PAGE_SHIFT,
            size: region.memory_size,
            userspace_addr: region.userspace_addr,
            flags,
        }
    }
}
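
// A minimal, illustrative round-trip check of the conversions above. This
// test module is a sketch (not part of the original source); it re-imports
// the crate-level types used by the `From` impls.
#[cfg(test)]
mod mem_region_conversion_tests {
    use super::*;
    use crate::{UserMemoryRegion, USER_MEMORY_REGION_READ, USER_MEMORY_REGION_WRITE};

    #[test]
    fn user_memory_region_round_trip() {
        let region = UserMemoryRegion {
            guest_phys_addr: 0x1000,
            memory_size: 0x2000,
            userspace_addr: 0x7f00_dead_0000,
            flags: USER_MEMORY_REGION_READ | USER_MEMORY_REGION_WRITE,
            ..Default::default()
        };

        // Into the MSHV representation: the GPA becomes a PFN and the generic
        // flags map to HV_MAP_GPA_* bits.
        let mshv_region: mshv_user_mem_region = region.into();
        assert_eq!(mshv_region.guest_pfn, 0x1);
        assert_ne!(mshv_region.flags & HV_MAP_GPA_READABLE, 0);
        assert_ne!(mshv_region.flags & HV_MAP_GPA_WRITABLE, 0);

        // And back: the PFN is shifted up and the page offset of the
        // userspace address (zero here) is re-applied.
        let back: UserMemoryRegion = mshv_region.into();
        assert_eq!(back.guest_phys_addr, 0x1000);
        assert_eq!(back.memory_size, 0x2000);
    }
}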

impl From<mshv_ioctls::IoEventAddress> for IoEventAddress {
    fn from(a: mshv_ioctls::IoEventAddress) -> Self {
        match a {
            mshv_ioctls::IoEventAddress::Pio(x) => Self::Pio(x),
            mshv_ioctls::IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<IoEventAddress> for mshv_ioctls::IoEventAddress {
    fn from(a: IoEventAddress) -> Self {
        match a {
            IoEventAddress::Pio(x) => Self::Pio(x),
            IoEventAddress::Mmio(x) => Self::Mmio(x),
        }
    }
}

impl From<VcpuMshvState> for CpuState {
    fn from(s: VcpuMshvState) -> Self {
        CpuState::Mshv(s)
    }
}

impl From<CpuState> for VcpuMshvState {
    fn from(s: CpuState) -> Self {
        match s {
            CpuState::Mshv(s) => s,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("CpuState is not valid"),
        }
    }
}

impl From<mshv_msi_routing_entry> for IrqRoutingEntry {
    fn from(s: mshv_msi_routing_entry) -> Self {
        IrqRoutingEntry::Mshv(s)
    }
}

impl From<IrqRoutingEntry> for mshv_msi_routing_entry {
    fn from(e: IrqRoutingEntry) -> Self {
        match e {
            IrqRoutingEntry::Mshv(e) => e,
            /* Needed in case other hypervisors are enabled */
            #[allow(unreachable_patterns)]
            _ => panic!("IrqRoutingEntry is not valid"),
        }
    }
}

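/// Bookkeeping for a guest memory slot registered for dirty page logging:
/// the slot's guest page frame number and its size in bytes.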
struct MshvDirtyLogSlot {
    guest_pfn: u64,
    memory_size: u64,
}

/// Wrapper over mshv system ioctls.
pub struct MshvHypervisor {
    mshv: Mshv,
}

impl MshvHypervisor {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Retrieve the list of MSRs supported by MSHV.
    ///
    fn get_msr_list(&self) -> hypervisor::Result<MsrList> {
        self.mshv
            .get_msr_index_list()
            .map_err(|e| hypervisor::HypervisorError::GetMsrList(e.into()))
    }
}

impl MshvHypervisor {
    /// Create a hypervisor based on Mshv
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> hypervisor::Result<Arc<dyn hypervisor::Hypervisor>> {
        let mshv_obj =
            Mshv::new().map_err(|e| hypervisor::HypervisorError::HypervisorCreate(e.into()))?;
        Ok(Arc::new(MshvHypervisor { mshv: mshv_obj }))
    }
    /// Check if the hypervisor is available
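    ///
    /// # Examples
    ///
    /// ```
    /// # use hypervisor::mshv::MshvHypervisor;
    /// // Illustrative sketch: the result simply reflects whether /dev/mshv
    /// // exists on the host running this example.
    /// let available = MshvHypervisor::is_available().unwrap();
    /// ```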
    pub fn is_available() -> hypervisor::Result<bool> {
        match std::fs::metadata("/dev/mshv") {
            Ok(_) => Ok(true),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(err) => Err(hypervisor::HypervisorError::HypervisorAvailableCheck(
                err.into(),
            )),
        }
    }
}

/// Implementation of Hypervisor trait for Mshv
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl hypervisor::Hypervisor for MshvHypervisor {
    ///
    /// Returns the type of the hypervisor
    ///
    fn hypervisor_type(&self) -> HypervisorType {
        HypervisorType::Mshv
    }

    fn create_vm_with_type(&self, vm_type: u64) -> hypervisor::Result<Arc<dyn crate::Vm>> {
        let mshv_vm_type: VmType = match VmType::try_from(vm_type) {
            Ok(vm_type) => vm_type,
            Err(_) => return Err(hypervisor::HypervisorError::UnsupportedVmType()),
        };
        let fd: VmFd;
        loop {
            match self.mshv.create_vm_with_type(mshv_vm_type) {
                Ok(res) => fd = res,
                Err(e) => {
                    if e.errno() == libc::EINTR {
                        // If the error is EINTR, the ioctl was interrupted;
                        // retry, since this can't be considered a regular
                        // error.
                        continue;
                    } else {
                        return Err(hypervisor::HypervisorError::VmCreate(e.into()));
                    }
                }
            }
            break;
        }

        // Set additional partition property for SEV-SNP partition.
        if mshv_vm_type == VmType::Snp {
            let snp_policy = snp::get_default_snp_guest_policy();
            let vmgexit_offloads = snp::get_default_vmgexit_offload_features();
            // SAFETY: access union fields
            unsafe {
                debug!(
                    "Setting the partition isolation policy as: 0x{:x}",
                    snp_policy.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_POLICY,
                    snp_policy.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
                debug!(
                    "Setting the partition property to enable VMGEXIT offloads as: 0x{:x}",
                    vmgexit_offloads.as_uint64
                );
                fd.set_partition_property(
                    hv_partition_property_code_HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS,
                    vmgexit_offloads.as_uint64,
                )
                .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;
            }
        }

        // The default Microsoft Hypervisor behavior for an unimplemented MSR
        // is to inject a fault into the guest when it is accessed. It is
        // possible to override this behavior with a more suitable option,
        // i.e., ignore writes from the guest and return zero when the guest
        // attempts to read an unimplemented MSR.
        fd.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .map_err(|e| hypervisor::HypervisorError::SetPartitionProperty(e.into()))?;

        let msr_list = self.get_msr_list()?;
        let num_msrs = msr_list.as_fam_struct_ref().nmsrs as usize;
        let mut msrs: Vec<MsrEntry> = vec![MsrEntry::default(); num_msrs];
        let indices = msr_list.as_slice();
        for (pos, index) in indices.iter().enumerate() {
            msrs[pos].index = *index;
        }
        let vm_fd = Arc::new(fd);

        Ok(Arc::new(MshvVm {
            fd: vm_fd,
            msrs,
            dirty_log_slots: Arc::new(RwLock::new(HashMap::new())),
        }))
    }

    /// Create an MSHV VM object and return it as a Vm trait object
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate hypervisor;
    /// # use hypervisor::mshv::MshvHypervisor;
    /// use hypervisor::mshv::MshvVm;
    /// let hypervisor = MshvHypervisor::new().unwrap();
    /// let vm = hypervisor.create_vm().unwrap();
    /// ```
    fn create_vm(&self) -> hypervisor::Result<Arc<dyn vm::Vm>> {
        let vm_type = 0;
        self.create_vm_with_type(vm_type)
    }
    ///
    /// Get the supported CpuID
    ///
    fn get_supported_cpuid(&self) -> hypervisor::Result<Vec<CpuIdEntry>> {
        Ok(Vec::new())
    }

    /// Get maximum number of vCPUs
    fn get_max_vcpus(&self) -> u32 {
        // TODO: Using HV_MAXIMUM_PROCESSORS would be better
        // but the ioctl API is limited to u8
        256
    }
}

/// Vcpu struct for Microsoft Hypervisor
pub struct MshvVcpu {
    fd: VcpuFd,
    vp_index: u8,
    cpuid: Vec<CpuIdEntry>,
    msrs: Vec<MsrEntry>,
    vm_ops: Option<Arc<dyn vm::VmOps>>,
    #[cfg(feature = "sev_snp")]
    vm_fd: Arc<VmFd>,
}

/// Implementation of Vcpu trait for Microsoft Hypervisor
///
/// # Examples
///
/// ```
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// let vcpu = vm.create_vcpu(0, None).unwrap();
/// ```
impl cpu::Vcpu for MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU general purpose registers.
    ///
    fn get_regs(&self) -> cpu::Result<crate::arch::x86::StandardRegisters> {
        Ok(self
            .fd
            .get_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetStandardRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU general purpose registers.
    ///
    fn set_regs(&self, regs: &crate::arch::x86::StandardRegisters) -> cpu::Result<()> {
        let regs = (*regs).into();
        self.fd
            .set_regs(&regs)
            .map_err(|e| cpu::HypervisorCpuError::SetStandardRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the vCPU special registers.
    ///
    fn get_sregs(&self) -> cpu::Result<crate::arch::x86::SpecialRegisters> {
        Ok(self
            .fd
            .get_sregs()
            .map_err(|e| cpu::HypervisorCpuError::GetSpecialRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the vCPU special registers.
    ///
    fn set_sregs(&self, sregs: &crate::arch::x86::SpecialRegisters) -> cpu::Result<()> {
        let sregs = (*sregs).into();
        self.fd
            .set_sregs(&sregs)
            .map_err(|e| cpu::HypervisorCpuError::SetSpecialRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the floating point state (FPU) from the vCPU.
    ///
    fn get_fpu(&self) -> cpu::Result<FpuState> {
        Ok(self
            .fd
            .get_fpu()
            .map_err(|e| cpu::HypervisorCpuError::GetFloatingPointRegs(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set the floating point state (FPU) of a vCPU.
    ///
    fn set_fpu(&self, fpu: &FpuState) -> cpu::Result<()> {
        let fpu: mshv_bindings::FloatingPointUnit = (*fpu).clone().into();
        self.fd
            .set_fpu(&fpu)
            .map_err(|e| cpu::HypervisorCpuError::SetFloatingPointRegs(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the model-specific registers (MSR) for this vCPU.
    ///
    fn get_msrs(&self, msrs: &mut Vec<MsrEntry>) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mut mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        let succ = self
            .fd
            .get_msrs(&mut mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::GetMsrEntries(e.into()))?;

        msrs[..succ].copy_from_slice(
            &mshv_msrs.as_slice()[..succ]
                .iter()
                .map(|e| (*e).into())
                .collect::<Vec<MsrEntry>>(),
        );

        Ok(succ)
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Set up the model-specific registers (MSR) for this vCPU.
    /// Returns the number of MSR entries actually written.
    ///
    fn set_msrs(&self, msrs: &[MsrEntry]) -> cpu::Result<usize> {
        let mshv_msrs: Vec<msr_entry> = msrs.iter().map(|e| (*e).into()).collect();
        let mshv_msrs = MsrEntries::from_entries(&mshv_msrs).unwrap();
        self.fd
            .set_msrs(&mshv_msrs)
            .map_err(|e| cpu::HypervisorCpuError::SetMsrEntries(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to enable HyperV SynIC
    ///
    fn enable_hyperv_synic(&self) -> cpu::Result<()> {
        /* We always have SynIC enabled on MSHV */
        Ok(())
    }

    #[allow(non_upper_case_globals)]
    fn run(&self) -> std::result::Result<cpu::VmExit, cpu::HypervisorCpuError> {
        let hv_message: hv_message = hv_message::default();
        match self.fd.run(hv_message) {
            Ok(x) => match x.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    debug!("HALT");
                    Ok(cpu::VmExit::Reset)
                }
                hv_message_type_HVMSG_UNRECOVERABLE_EXCEPTION => {
                    warn!("TRIPLE FAULT");
                    Ok(cpu::VmExit::Shutdown)
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let info = x.to_ioport_info().unwrap();
                    let access_info = info.access_info;
                    // SAFETY: access_info is valid, otherwise we won't be here
                    let len = unsafe { access_info.__bindgen_anon_1.access_size() } as usize;
                    let is_write = info.header.intercept_access_type == 1;
                    let port = info.port_number;
                    let mut data: [u8; 4] = [0; 4];
                    let mut ret_rax = info.rax;

                    /*
                     * XXX: Ignore QEMU fw_cfg (0x5xx) and debug console (0x402) ports.
                     *
                     * Cloud Hypervisor doesn't support fw_cfg at the moment. It does support 0x402
                     * under the "fwdebug" feature flag. But that feature is not enabled by default
                     * and is considered legacy.
                     *
                     * OVMF unconditionally pokes these IO ports with string IO.
                     *
                     * Instead of trying to implement string IO support, which would not accomplish
                     * much at this point, skip those ports explicitly to avoid panicking.
                     *
                     * Proper string IO support can be added once we gain the ability to translate
                     * guest virtual addresses to guest physical addresses on MSHV.
                     */
                    match port {
                        0x402 | 0x510 | 0x511 | 0x514 => {
                            let insn_len = info.header.instruction_length() as u64;

                            /* Advance RIP and update RAX */
                            let arr_reg_name_value = [
                                (
                                    hv_register_name_HV_X64_REGISTER_RIP,
                                    info.header.rip + insn_len,
                                ),
                                (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                            ];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                            return Ok(cpu::VmExit::Ignore);
                        }
                        _ => {}
                    }

                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.string_op() } != 1),
                        "String IN/OUT not supported"
                    );
                    assert!(
                        // SAFETY: access_info is valid, otherwise we won't be here
                        (unsafe { access_info.__bindgen_anon_1.rep_prefix() } != 1),
                        "Rep IN/OUT not supported"
                    );

                    if is_write {
                        let data = (info.rax as u32).to_le_bytes();
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_write(port.into(), &data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }
                    } else {
                        if let Some(vm_ops) = &self.vm_ops {
                            vm_ops
                                .pio_read(port.into(), &mut data[0..len])
                                .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;
                        }

                        let v = u32::from_le_bytes(data);
                        /* Preserve high bits in EAX but clear out high bits in RAX */
                        let mask = 0xffffffff >> (32 - len * 8);
                        let eax = (info.rax as u32 & !mask) | (v & mask);
                        ret_rax = eax as u64;
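                        // Illustrative walk-through (hypothetical values): a
                        // 1-byte IN with info.rax == 0xdead_beef and v == 0x42
                        // yields mask == 0xff, eax == 0xdead_be42, and the
                        // `as u64` widening clears the upper 32 bits of RAX.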
                    }

                    let insn_len = info.header.instruction_length() as u64;

                    /* Advance RIP and update RAX */
                    let arr_reg_name_value = [
                        (
                            hv_register_name_HV_X64_REGISTER_RIP,
                            info.header.rip + insn_len,
                        ),
                        (hv_register_name_HV_X64_REGISTER_RAX, ret_rax),
                    ];
                    set_registers_64!(self.fd, arr_reg_name_value)
                        .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_UNMAPPED_GPA => {
                    let info = x.to_memory_info().unwrap();
                    let insn_len = info.instruction_byte_count as usize;
                    assert!(insn_len > 0 && insn_len <= 16);

                    let mut context = MshvEmulatorContext {
                        vcpu: self,
                        map: (info.guest_virtual_address, info.guest_physical_address),
                    };

                    // Create a new emulator.
                    let mut emul = Emulator::new(&mut context);

                    // Emulate the trapped instruction, and only the first one.
                    let new_state = emul
                        .emulate_first_insn(self.vp_index as usize, &info.instruction_bytes)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    // Set CPU state back.
                    context
                        .set_cpu_state(self.vp_index as usize, new_state)
                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_CPUID_INTERCEPT => {
                    let info = x.to_cpuid_info().unwrap();
                    debug!("cpuid eax: {:x}", { info.rax });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_MSR_INTERCEPT => {
                    let info = x.to_msr_info().unwrap();
                    if info.header.intercept_access_type == 0 {
                        debug!("msr read: {:x}", { info.msr_number });
                    } else {
                        debug!("msr write: {:x}", { info.msr_number });
                    }
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT => {
                    //TODO: Handler for VMCALL here.
                    let info = x.to_exception_info().unwrap();
                    debug!("Exception Info {:?}", { info.exception_vector });
                    Ok(cpu::VmExit::Ignore)
                }
                hv_message_type_HVMSG_X64_APIC_EOI => {
                    let info = x.to_apic_eoi_info().unwrap();
                    // The kernel should dispatch the EOI to the correct thread.
                    // Check that the VP index matches the one we have.
                    assert!(info.vp_index == self.vp_index as u32);
                    // The interrupt vector in info is u32, but x86 only supports 256 vectors.
                    // There is no good way to recover if the hypervisor reports a vector that
                    // does not fit in a u8, so just unwrap.
                    Ok(cpu::VmExit::IoapicEoi(
                        info.interrupt_vector.try_into().unwrap(),
                    ))
                }
                #[cfg(feature = "sev_snp")]
                hv_message_type_HVMSG_X64_SEV_VMGEXIT_INTERCEPT => {
                    let info = x.to_vmg_intercept_info().unwrap();
                    let ghcb_data = info.ghcb_msr >> GHCB_INFO_BIT_WIDTH;
                    let ghcb_msr = svm_ghcb_msr {
                        as_uint64: info.ghcb_msr,
                    };
                    // SAFETY: Accessing a union element from bindgen generated bindings.
                    let ghcb_op = unsafe { ghcb_msr.__bindgen_anon_2.ghcb_info() as u32 };
                    // Sanity check on the header fields before handling other operations.
                    assert!(info.header.intercept_access_type == HV_INTERCEPT_ACCESS_EXECUTE as u8);

                    match ghcb_op {
                        GHCB_INFO_HYP_FEATURE_REQUEST => {
                            // Pre-condition: GHCB data must be zero
                            assert!(ghcb_data == 0);
                            let mut ghcb_response = GHCB_INFO_HYP_FEATURE_RESPONSE as u64;
                            // Indicate support for basic SEV-SNP features
                            ghcb_response |=
                                (GHCB_HYP_FEATURE_SEV_SNP << GHCB_INFO_BIT_WIDTH) as u64;
                            // Indicate support for SEV-SNP AP creation
                            ghcb_response |= (GHCB_HYP_FEATURE_SEV_SNP_AP_CREATION
                                << GHCB_INFO_BIT_WIDTH)
                                as u64;
                            debug!(
                                "GHCB_INFO_HYP_FEATURE_REQUEST: Supported features: {:0x}",
                                ghcb_response
                            );
                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_REGISTER_REQUEST => {
                            let mut ghcb_gpa = hv_x64_register_sev_ghcb::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                ghcb_gpa.__bindgen_anon_1.set_enabled(1);
                                ghcb_gpa
                                    .__bindgen_anon_1
                                    .set_page_number(ghcb_msr.__bindgen_anon_2.gpa_page_number());
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_SEV_GHCB_GPA,
                                    ghcb_gpa.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;

                            let mut resp_ghcb_msr = svm_ghcb_msr::default();
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            unsafe {
                                resp_ghcb_msr
                                    .__bindgen_anon_2
                                    .set_ghcb_info(GHCB_INFO_REGISTER_RESPONSE as u64);
                                resp_ghcb_msr.__bindgen_anon_2.set_gpa_page_number(
                                    ghcb_msr.__bindgen_anon_2.gpa_page_number(),
                                );
                            }
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let reg_name_value = unsafe {
                                [(
                                    hv_register_name_HV_X64_REGISTER_GHCB,
                                    resp_ghcb_msr.as_uint64,
                                )]
                            };

                            set_registers_64!(self.fd, reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_SEV_INFO_REQUEST => {
                            let sev_cpuid_function = 0x8000_001F;
                            let cpu_leaf = self
                                .fd
                                .get_cpuid_values(sev_cpuid_function, 0, 0, 0)
                                .unwrap();
                            let ebx = cpu_leaf[1];
                            // The lowest 6 bits of EBX hold the page table encryption bit number
                            let pbit_encryption = (ebx & 0x3f) as u8;
                            let mut ghcb_response = GHCB_INFO_SEV_INFO_RESPONSE as u64;

                            // GHCBData[63:48] specifies the maximum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MAX as u64) << 48;
                            // GHCBData[47:32] specifies the minimum GHCB protocol version supported
                            ghcb_response |= (GHCB_PROTOCOL_VERSION_MIN as u64) << 32;
                            // GHCBData[31:24] specifies the SEV page table encryption bit number.
                            ghcb_response |= (pbit_encryption as u64) << 24;
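                            // Illustrative composition (hypothetical values): if
                            // the max/min protocol versions were 2/1 and the
                            // encryption bit were 51, the register would hold
                            // (2 << 48) | (1 << 32) | (51 << 24) | GHCB_INFO_SEV_INFO_RESPONSE.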

                            let arr_reg_name_value =
                                [(hv_register_name_HV_X64_REGISTER_GHCB, ghcb_response)];
                            set_registers_64!(self.fd, arr_reg_name_value)
                                .map_err(|e| cpu::HypervisorCpuError::SetRegister(e.into()))?;
                        }
                        GHCB_INFO_NORMAL => {
                            let exit_code =
                                info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_code as u32;
                            // SAFETY: Accessing a union element from bindgen generated bindings.
                            let pfn = unsafe { ghcb_msr.__bindgen_anon_2.gpa_page_number() };
                            let ghcb_gpa = pfn << GHCB_INFO_BIT_WIDTH;
                            match exit_code {
                                SVM_EXITCODE_HV_DOORBELL_PAGE => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    match exit_info1 {
                                        SVM_NAE_HV_DOORBELL_PAGE_GET_PREFERRED => {
                                            // Hypervisor does not have any preference for doorbell GPA.
                                            let preferred_doorbell_gpa: u64 = u64::MAX;
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data.copy_from_slice(
                                                &preferred_doorbell_gpa.to_le_bytes(),
                                            );
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_SET => {
                                            let exit_info2 = info
                                                .__bindgen_anon_2
                                                .__bindgen_anon_1
                                                .sw_exit_info2;
                                            let mut ghcb_doorbell_gpa =
                                                hv_x64_register_sev_hv_doorbell::default();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            unsafe {
                                                ghcb_doorbell_gpa.__bindgen_anon_1.set_enabled(1);
                                                ghcb_doorbell_gpa
                                                    .__bindgen_anon_1
                                                    .set_page_number(exit_info2 >> PAGE_SHIFT);
                                            }
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let reg_names = unsafe {
                                                [(
                                                    hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                    ghcb_doorbell_gpa.as_uint64,
                                                )]
                                            };
                                            set_registers_64!(self.fd, reg_names).map_err(|e| {
                                                cpu::HypervisorCpuError::SetRegister(e.into())
                                            })?;

                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg.data[0..8]
                                                .copy_from_slice(&exit_info2.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;

                                            // Clear the SW_EXIT_INFO1 register to indicate no error
                                            let mut swei1_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei1_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_QUERY => {
                                            let mut reg_assocs = [hv_register_assoc {
                                                name: hv_register_name_HV_X64_REGISTER_SEV_DOORBELL_GPA,
                                                ..Default::default()
                                            }];
                                            self.fd.get_reg(&mut reg_assocs).unwrap();
                                            // SAFETY: Accessing a union element from bindgen generated bindings.
                                            let doorbell_gpa = unsafe { reg_assocs[0].value.reg64 };
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            swei2_rw_gpa_arg
                                                .data
                                                .copy_from_slice(&doorbell_gpa.to_le_bytes());
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        SVM_NAE_HV_DOORBELL_PAGE_CLEAR => {
                                            let mut swei2_rw_gpa_arg =
                                                mshv_bindings::mshv_read_write_gpa {
                                                    base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                                    byte_count: std::mem::size_of::<u64>() as u32,
                                                    ..Default::default()
                                                };
                                            self.fd.gpa_write(&mut swei2_rw_gpa_arg).map_err(
                                                |e| cpu::HypervisorCpuError::GpaWrite(e.into()),
                                            )?;
                                        }
                                        _ => {
                                            panic!(
                                                "SVM_EXITCODE_HV_DOORBELL_PAGE: Unhandled exit code: {:0x}",
                                                exit_info1
                                            );
                                        }
                                    }
                                }
                                SVM_EXITCODE_SNP_EXTENDED_GUEST_REQUEST => {
                                    warn!("Fetching extended guest request is not supported");
                                    // Extended guest requests are not supported by the
                                    // hypervisor, so return an error to the guest.
                                    // 0x6 means `The NAE event was not valid`.
                                    // Reference: GHCB Spec, page 42.
                                    let value: u64 = 0x6;
                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    swei2_rw_gpa_arg.data.copy_from_slice(&value.to_le_bytes());
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_IOIO_PROT => {
                                    let exit_info1 =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 as u32;
                                    let port_info = hv_sev_vmgexit_port_info {
                                        as_uint32: exit_info1,
                                    };

                                    let port =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.intercepted_port() };
                                    let mut len = 4;
                                    // SAFETY: Accessing a union element from bindgen generated bindings.
                                    unsafe {
                                        if port_info.__bindgen_anon_1.operand_size_16bit() == 1 {
                                            len = 2;
                                        } else if port_info.__bindgen_anon_1.operand_size_8bit()
                                            == 1
                                        {
                                            len = 1;
                                        }
                                    }
                                    let is_write =
                                        // SAFETY: Accessing a union element from bindgen generated bindings.
                                        unsafe { port_info.__bindgen_anon_1.access_type() == 0 };
                                    let mut rax_rw_gpa_arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: ghcb_gpa + GHCB_RAX_OFFSET,
                                            byte_count: std::mem::size_of::<u64>() as u32,
                                            ..Default::default()
                                        };
                                    self.fd
                                        .gpa_read(&mut rax_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if is_write {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_write(
                                                    port.into(),
                                                    &rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }
                                    } else {
                                        if let Some(vm_ops) = &self.vm_ops {
                                            vm_ops
                                                .pio_read(
                                                    port.into(),
                                                    &mut rax_rw_gpa_arg.data[0..len],
                                                )
                                                .map_err(|e| {
                                                    cpu::HypervisorCpuError::RunVcpu(e.into())
                                                })?;
                                        }

                                        self.fd.gpa_write(&mut rax_rw_gpa_arg).map_err(|e| {
                                            cpu::HypervisorCpuError::GpaWrite(e.into())
                                        })?;
                                    }

                                    // Clear the SW_EXIT_INFO1 register to indicate no error
                                    let mut swei1_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO1_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei1_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_READ => {
                                    let src_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let dst_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);

                                    let mut data: Vec<u8> = vec![0; data_len];
                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops.mmio_read(src_gpa, &mut data[0..data_len]).map_err(
                                            |e| cpu::HypervisorCpuError::RunVcpu(e.into()),
                                        )?;
                                    }
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: dst_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };
                                    arg.data[0..data_len].copy_from_slice(&data);

                                    self.fd
                                        .gpa_write(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_MMIO_WRITE => {
                                    let dst_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let src_gpa = info.__bindgen_anon_2.__bindgen_anon_1.sw_scratch;
                                    let data_len =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2
                                            as usize;
                                    // Sanity check to make sure data len is within supported range.
                                    assert!(data_len <= 0x8);
                                    let mut arg: mshv_read_write_gpa =
                                        mshv_bindings::mshv_read_write_gpa {
                                            base_gpa: src_gpa,
                                            byte_count: data_len as u32,
                                            ..Default::default()
                                        };

                                    self.fd
                                        .gpa_read(&mut arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaRead(e.into()))?;

                                    if let Some(vm_ops) = &self.vm_ops {
                                        vm_ops
                                            .mmio_write(dst_gpa, &arg.data[0..data_len])
                                            .map_err(|e| {
                                                cpu::HypervisorCpuError::RunVcpu(e.into())
                                            })?;
                                    }
                                }
                                SVM_EXITCODE_SNP_GUEST_REQUEST => {
                                    let req_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1;
                                    let rsp_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;

                                    let mshv_psp_req =
                                        mshv_issue_psp_guest_request { req_gpa, rsp_gpa };
                                    self.vm_fd
                                        .psp_issue_guest_request(&mshv_psp_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    debug!(
                                        "SNP guest request: req_gpa {:0x} rsp_gpa {:0x}",
                                        req_gpa, rsp_gpa
                                    );

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                SVM_EXITCODE_SNP_AP_CREATION => {
                                    let vmsa_gpa =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info2;
                                    let apic_id =
                                        info.__bindgen_anon_2.__bindgen_anon_1.sw_exit_info1 >> 32;
                                    debug!(
                                        "SNP AP CREATE REQUEST with VMSA GPA {:0x}, and APIC ID {:?}",
                                        vmsa_gpa, apic_id
                                    );

                                    let mshv_ap_create_req = mshv_sev_snp_ap_create {
                                        vp_id: apic_id,
                                        vmsa_gpa,
                                    };
                                    self.vm_fd
                                        .sev_snp_ap_create(&mshv_ap_create_req)
                                        .map_err(|e| cpu::HypervisorCpuError::RunVcpu(e.into()))?;

                                    let mut swei2_rw_gpa_arg = mshv_bindings::mshv_read_write_gpa {
                                        base_gpa: ghcb_gpa + GHCB_SW_EXITINFO2_OFFSET,
                                        byte_count: std::mem::size_of::<u64>() as u32,
                                        ..Default::default()
                                    };
                                    self.fd
                                        .gpa_write(&mut swei2_rw_gpa_arg)
                                        .map_err(|e| cpu::HypervisorCpuError::GpaWrite(e.into()))?;
                                }
                                _ => panic!(
                                    "GHCB_INFO_NORMAL: Unhandled exit code: {:0x}",
                                    exit_code
                                ),
                            }
                        }
                        _ => panic!("Unsupported VMGEXIT operation: {:0x}", ghcb_op),
                    }

                    Ok(cpu::VmExit::Ignore)
                }
                exit => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "Unhandled VCPU exit {:?}",
                    exit
                ))),
            },

            Err(e) => match e.errno() {
                libc::EAGAIN | libc::EINTR => Ok(cpu::VmExit::Ignore),
                _ => Err(cpu::HypervisorCpuError::RunVcpu(anyhow!(
                    "VCPU error {:?}",
                    e
                ))),
            },
        }
    }

1069     #[cfg(target_arch = "x86_64")]
1070     ///
1071     /// X86 specific call to setup the CPUID registers.
1072     ///
    fn set_cpuid2(&self, cpuid: &[CpuIdEntry]) -> cpu::Result<()> {
        let cpuid: Vec<mshv_bindings::hv_cpuid_entry> = cpuid.iter().map(|e| (*e).into()).collect();
        let mshv_cpuid = <CpuId>::from_entries(&cpuid)
            .map_err(|_| cpu::HypervisorCpuError::SetCpuid(anyhow!("failed to create CpuId")))?;

        self.fd
            .register_intercept_result_cpuid(&mshv_cpuid)
            .map_err(|e| cpu::HypervisorCpuError::SetCpuid(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to retrieve the CPUID registers.
    ///
    fn get_cpuid2(&self, _num_entries: usize) -> cpu::Result<Vec<CpuIdEntry>> {
        Ok(self.cpuid.clone())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call to retrieve a CPUID leaf.
    ///
    fn get_cpuid_values(
        &self,
        function: u32,
        index: u32,
        xfem: u64,
        xss: u64,
    ) -> cpu::Result<[u32; 4]> {
        self.fd
            .get_cpuid_values(function, index, xfem, xss)
            .map_err(|e| cpu::HypervisorCpuError::GetCpuidVales(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
    fn get_lapic(&self) -> cpu::Result<crate::arch::x86::LapicState> {
        Ok(self
            .fd
            .get_lapic()
            .map_err(|e| cpu::HypervisorCpuError::GetlapicState(e.into()))?
            .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller).
    ///
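    /// # Examples
    ///
    /// A sketch of a read-modify-write round trip, assuming `vcpu` is an
    /// existing vCPU handle (not run as a doc-test):
    ///
    /// ```ignore
    /// let lapic = vcpu.get_lapic().unwrap();
    /// // ... modify the LAPIC state here if needed ...
    /// vcpu.set_lapic(&lapic).unwrap();
    /// ```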
    fn set_lapic(&self, lapic: &crate::arch::x86::LapicState) -> cpu::Result<()> {
        let lapic: mshv_bindings::LapicState = (*lapic).clone().into();
        self.fd
            .set_lapic(&lapic)
            .map_err(|e| cpu::HypervisorCpuError::SetLapicState(e.into()))
    }

    ///
    /// Returns the vcpu's current "multiprocessing state".
    ///
    fn get_mp_state(&self) -> cpu::Result<MpState> {
        Ok(MpState::Mshv)
    }

    ///
    /// Sets the vcpu's current "multiprocessing state".
    ///
    fn set_mp_state(&self, _mp_state: MpState) -> cpu::Result<()> {
        Ok(())
    }

    ///
    /// Sets the CPU state.
    ///
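    /// # Examples
    ///
    /// A sketch of a snapshot/restore round trip (e.g. across a pause/resume
    /// cycle), assuming `vcpu` is an existing vCPU handle (not run as a doc-test):
    ///
    /// ```ignore
    /// let state = vcpu.state().unwrap();
    /// vcpu.set_state(&state).unwrap();
    /// ```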
    fn set_state(&self, state: &CpuState) -> cpu::Result<()> {
        let state: VcpuMshvState = state.clone().into();
        self.set_msrs(&state.msrs)?;
        self.set_vcpu_events(&state.vcpu_events)?;
        self.set_regs(&state.regs.into())?;
        self.set_sregs(&state.sregs.into())?;
        self.set_fpu(&state.fpu)?;
        self.set_xcrs(&state.xcrs)?;
        self.set_lapic(&state.lapic)?;
        self.set_xsave(&state.xsave)?;
        // These registers are global and need to be set only for the first VCPU,
        // as the Microsoft Hypervisor allows setting this register for only one VCPU.
        if self.vp_index == 0 {
            self.fd
                .set_misc_regs(&state.misc)
                .map_err(|e| cpu::HypervisorCpuError::SetMiscRegs(e.into()))?
        }
        self.fd
            .set_debug_regs(&state.dbg)
            .map_err(|e| cpu::HypervisorCpuError::SetDebugRegs(e.into()))?;
        Ok(())
    }

    ///
    /// Gets the CPU state.
    ///
    fn state(&self) -> cpu::Result<CpuState> {
        let regs = self.get_regs()?;
        let sregs = self.get_sregs()?;
        let xcrs = self.get_xcrs()?;
        let fpu = self.get_fpu()?;
        let vcpu_events = self.get_vcpu_events()?;
        let mut msrs = self.msrs.clone();
        self.get_msrs(&mut msrs)?;
        let lapic = self.get_lapic()?;
        let xsave = self.get_xsave()?;
        let misc = self
            .fd
            .get_misc_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetMiscRegs(e.into()))?;
        let dbg = self
            .fd
            .get_debug_regs()
            .map_err(|e| cpu::HypervisorCpuError::GetDebugRegs(e.into()))?;

        Ok(VcpuMshvState {
            msrs,
            vcpu_events,
            regs: regs.into(),
            sregs: sregs.into(),
            fpu,
            xcrs,
            lapic,
            dbg,
            xsave,
            misc,
        }
        .into())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Translates a guest virtual address to a guest physical address.
    ///
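    /// # Examples
    ///
    /// A sketch, assuming `vcpu` is an existing vCPU handle; the GVA is
    /// illustrative (not run as a doc-test):
    ///
    /// ```ignore
    /// let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;
    /// let (gpa, result_code) = vcpu.translate_gva(0x1000, flags.into()).unwrap();
    /// assert_eq!(
    ///     result_code,
    ///     hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS
    /// );
    /// ```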
    fn translate_gva(&self, gva: u64, flags: u64) -> cpu::Result<(u64, u32)> {
        let r = self
            .fd
            .translate_gva(gva, flags)
            .map_err(|e| cpu::HypervisorCpuError::TranslateVirtualAddress(e.into()))?;

        let gpa = r.0;
        // SAFETY: r is valid, otherwise this function would have returned earlier
        let result_code = unsafe { r.1.__bindgen_anon_1.result_code };

        Ok((gpa, result_code))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns the list of initial MSR entries for a VCPU.
    ///
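    /// # Examples
    ///
    /// A sketch of the typical boot-time use, assuming `vcpu` is an existing
    /// vCPU handle (not run as a doc-test):
    ///
    /// ```ignore
    /// let msrs = vcpu.boot_msr_entries();
    /// vcpu.set_msrs(&msrs).unwrap();
    /// ```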
    fn boot_msr_entries(&self) -> Vec<MsrEntry> {
        use crate::arch::x86::{msr_index, MTRR_ENABLE, MTRR_MEM_TYPE_WB};

        [
            msr!(msr_index::MSR_IA32_SYSENTER_CS),
            msr!(msr_index::MSR_IA32_SYSENTER_ESP),
            msr!(msr_index::MSR_IA32_SYSENTER_EIP),
            msr!(msr_index::MSR_STAR),
            msr!(msr_index::MSR_CSTAR),
            msr!(msr_index::MSR_LSTAR),
            msr!(msr_index::MSR_KERNEL_GS_BASE),
            msr!(msr_index::MSR_SYSCALL_MASK),
            msr_data!(msr_index::MSR_MTRRdefType, MTRR_ENABLE | MTRR_MEM_TYPE_WB),
        ]
        .to_vec()
    }

    ///
    /// Sets the AMD-specific vCPU's SEV control register.
    ///
    #[cfg(feature = "sev_snp")]
    fn set_sev_control_register(&self, vmsa_pfn: u64) -> cpu::Result<()> {
        let sev_control_reg = snp::get_sev_control_register(vmsa_pfn);

        self.fd
            .set_sev_control_register(sev_control_reg)
            .map_err(|e| cpu::HypervisorCpuError::SetSevControlRegister(e.into()))
    }
}

impl MshvVcpu {
    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xsave struct".
    ///
    fn get_xsave(&self) -> cpu::Result<Xsave> {
        self.fd
            .get_xsave()
            .map_err(|e| cpu::HypervisorCpuError::GetXsaveState(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xsave struct".
    ///
    fn set_xsave(&self, xsave: &Xsave) -> cpu::Result<()> {
        self.fd
            .set_xsave(xsave)
            .map_err(|e| cpu::HypervisorCpuError::SetXsaveState(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that returns the vcpu's current "xcrs".
    ///
    fn get_xcrs(&self) -> cpu::Result<ExtendedControlRegisters> {
        self.fd
            .get_xcrs()
            .map_err(|e| cpu::HypervisorCpuError::GetXcsr(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// X86 specific call that sets the vcpu's current "xcrs".
    ///
    fn set_xcrs(&self, xcrs: &ExtendedControlRegisters) -> cpu::Result<()> {
        self.fd
            .set_xcrs(xcrs)
            .map_err(|e| cpu::HypervisorCpuError::SetXcsr(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Returns currently pending exceptions, interrupts, and NMIs as well as related
    /// states of the vcpu.
    ///
    fn get_vcpu_events(&self) -> cpu::Result<VcpuEvents> {
        self.fd
            .get_vcpu_events()
            .map_err(|e| cpu::HypervisorCpuError::GetVcpuEvents(e.into()))
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets pending exceptions, interrupts, and NMIs as well as related states
    /// of the vcpu.
    ///
    fn set_vcpu_events(&self, events: &VcpuEvents) -> cpu::Result<()> {
        self.fd
            .set_vcpu_events(events)
            .map_err(|e| cpu::HypervisorCpuError::SetVcpuEvents(e.into()))
    }
}

struct MshvEmulatorContext<'a> {
    vcpu: &'a MshvVcpu,
    map: (u64, u64), // Initial GVA to GPA mapping provided by the hypervisor
}

impl<'a> MshvEmulatorContext<'a> {
    // Do the actual gva -> gpa translation
    #[allow(non_upper_case_globals)]
    fn translate(&self, gva: u64) -> Result<u64, PlatformError> {
        if self.map.0 == gva {
            return Ok(self.map.1);
        }

        // TODO: More fine-grained control for the flags
        let flags = HV_TRANSLATE_GVA_VALIDATE_READ | HV_TRANSLATE_GVA_VALIDATE_WRITE;

        let (gpa, result_code) = self
            .vcpu
            .translate_gva(gva, flags.into())
            .map_err(|e| PlatformError::TranslateVirtualAddress(anyhow!(e)))?;

        match result_code {
            hv_translate_gva_result_code_HV_TRANSLATE_GVA_SUCCESS => Ok(gpa),
            _ => Err(PlatformError::TranslateVirtualAddress(anyhow!(result_code))),
        }
    }
}

/// Platform emulation for Hyper-V
impl<'a> PlatformEmulator for MshvEmulatorContext<'a> {
    type CpuState = EmulatorCpuState;

    fn read_memory(&self, gva: u64, data: &mut [u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory read {} bytes from [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_read(gpa, data).is_err() {
                vm_ops
                    .mmio_read(gpa, data)
                    .map_err(|e| PlatformError::MemoryReadFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn write_memory(&mut self, gva: u64, data: &[u8]) -> Result<(), PlatformError> {
        let gpa = self.translate(gva)?;
        debug!(
            "mshv emulator: memory write {} bytes at [{:#x} -> {:#x}]",
            data.len(),
            gva,
            gpa
        );

        if let Some(vm_ops) = &self.vcpu.vm_ops {
            if vm_ops.guest_mem_write(gpa, data).is_err() {
                vm_ops
                    .mmio_write(gpa, data)
                    .map_err(|e| PlatformError::MemoryWriteFailure(e.into()))?;
            }
        }

        Ok(())
    }

    fn cpu_state(&self, cpu_id: usize) -> Result<Self::CpuState, PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::GetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        let regs = self
            .vcpu
            .get_regs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;
        let sregs = self
            .vcpu
            .get_sregs()
            .map_err(|e| PlatformError::GetCpuStateFailure(e.into()))?;

        debug!("mshv emulator: Getting new CPU state");
        debug!("mshv emulator: {:#x?}", regs);

        Ok(EmulatorCpuState { regs, sregs })
    }

    fn set_cpu_state(&self, cpu_id: usize, state: Self::CpuState) -> Result<(), PlatformError> {
        if cpu_id != self.vcpu.vp_index as usize {
            return Err(PlatformError::SetCpuStateFailure(anyhow!(
                "CPU id mismatch {:?} {:?}",
                cpu_id,
                self.vcpu.vp_index
            )));
        }

        debug!("mshv emulator: Setting new CPU state");
        debug!("mshv emulator: {:#x?}", state.regs);

        self.vcpu
            .set_regs(&state.regs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))?;
        self.vcpu
            .set_sregs(&state.sregs)
            .map_err(|e| PlatformError::SetCpuStateFailure(e.into()))
    }

    fn gva_to_gpa(&self, gva: u64) -> Result<u64, PlatformError> {
        self.translate(gva)
    }

    fn fetch(&self, _ip: u64, _instruction_bytes: &mut [u8]) -> Result<(), PlatformError> {
        Err(PlatformError::MemoryReadFailure(anyhow!("unimplemented")))
    }
}

/// Wrapper over Mshv VM ioctls.
pub struct MshvVm {
    fd: Arc<VmFd>,
    msrs: Vec<MsrEntry>,
    dirty_log_slots: Arc<RwLock<HashMap<u64, MshvDirtyLogSlot>>>,
}

impl MshvVm {
    ///
    /// Creates an in-kernel device.
    ///
    /// See the documentation for `MSHV_CREATE_DEVICE`.
    fn create_device(&self, device: &mut CreateDevice) -> vm::Result<VfioDeviceFd> {
        let device_fd = self
            .fd
            .create_device(device)
            .map_err(|e| vm::HypervisorVmError::CreateDevice(e.into()))?;
        Ok(VfioDeviceFd::new_from_mshv(device_fd))
    }
}

///
/// Implementation of Vm trait for Mshv
///
/// # Examples
///
/// ```
/// # extern crate hypervisor;
/// # use hypervisor::mshv::MshvHypervisor;
/// # use std::sync::Arc;
/// let mshv = MshvHypervisor::new().unwrap();
/// let hypervisor = Arc::new(mshv);
/// let vm = hypervisor.create_vm().expect("new VM fd creation failed");
/// ```
impl vm::Vm for MshvVm {
    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the one-page region in the VM's address space.
    ///
    fn set_identity_map_address(&self, _address: u64) -> vm::Result<()> {
        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    ///
    /// Sets the address of the three-page region in the VM's address space.
    ///
    fn set_tss_address(&self, _offset: usize) -> vm::Result<()> {
        Ok(())
    }

    ///
    /// Creates an in-kernel interrupt controller.
    ///
    fn create_irq_chip(&self) -> vm::Result<()> {
        Ok(())
    }

    ///
    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    ///
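    /// # Examples
    ///
    /// A sketch wiring an eventfd to a GSI; the GSI value is illustrative
    /// (not run as a doc-test):
    ///
    /// ```ignore
    /// let fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    /// vm.register_irqfd(&fd, 30).unwrap();
    /// ```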
    fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("register_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .register_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::RegisterIrqFd(e.into()))?;

        Ok(())
    }

    ///
    /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> vm::Result<()> {
        debug!("unregister_irqfd fd {} gsi {}", fd.as_raw_fd(), gsi);

        self.fd
            .unregister_irqfd(fd, gsi)
            .map_err(|e| vm::HypervisorVmError::UnregisterIrqFd(e.into()))?;

        Ok(())
    }

    ///
    /// Creates a vCPU with the given id, wrapping the underlying MSHV `VcpuFd`.
    ///
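    /// # Examples
    ///
    /// Building on the `MshvVm` example above (not run as a doc-test):
    ///
    /// ```ignore
    /// let vcpu = vm.create_vcpu(0, None).unwrap();
    /// ```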
    fn create_vcpu(
        &self,
        id: u8,
        vm_ops: Option<Arc<dyn VmOps>>,
    ) -> vm::Result<Arc<dyn cpu::Vcpu>> {
        let vcpu_fd = self
            .fd
            .create_vcpu(id)
            .map_err(|e| vm::HypervisorVmError::CreateVcpu(e.into()))?;
        let vcpu = MshvVcpu {
            fd: vcpu_fd,
            vp_index: id,
            cpuid: Vec::new(),
            msrs: self.msrs.clone(),
            vm_ops,
            #[cfg(feature = "sev_snp")]
            vm_fd: self.fd.clone(),
        };
        Ok(Arc::new(vcpu))
    }

    #[cfg(target_arch = "x86_64")]
    fn enable_split_irq(&self) -> vm::Result<()> {
        Ok(())
    }

    #[cfg(target_arch = "x86_64")]
    fn enable_sgx_attribute(&self, _file: File) -> vm::Result<()> {
        Ok(())
    }

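    /// Registers an event to be signaled whenever the guest writes to the given address.
    ///
    /// # Examples
    ///
    /// A sketch; the MMIO address and datamatch value are illustrative
    /// (not run as a doc-test):
    ///
    /// ```ignore
    /// let fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
    /// vm.register_ioevent(
    ///     &fd,
    ///     &IoEventAddress::Mmio(0xe000_0000),
    ///     Some(vm::DataMatch::DataMatch32(0x1)),
    /// )
    /// .unwrap();
    /// ```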
    fn register_ioevent(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: Option<DataMatch>,
    ) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!(
            "register_ioevent fd {} addr {:x?} datamatch {:?}",
            fd.as_raw_fd(),
            addr,
            datamatch
        );
        if let Some(dm) = datamatch {
            match dm {
                vm::DataMatch::DataMatch32(mshv_dm32) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm32)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
                vm::DataMatch::DataMatch64(mshv_dm64) => self
                    .fd
                    .register_ioevent(fd, addr, mshv_dm64)
                    .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into())),
            }
        } else {
            self.fd
                .register_ioevent(fd, addr, NoDatamatch)
                .map_err(|e| vm::HypervisorVmError::RegisterIoEvent(e.into()))
        }
    }

    /// Unregisters an event from the address it was previously registered to.
    fn unregister_ioevent(&self, fd: &EventFd, addr: &IoEventAddress) -> vm::Result<()> {
        let addr = &mshv_ioctls::IoEventAddress::from(*addr);
        debug!("unregister_ioevent fd {} addr {:x?}", fd.as_raw_fd(), addr);

        self.fd
            .unregister_ioevent(fd, addr, NoDatamatch)
            .map_err(|e| vm::HypervisorVmError::UnregisterIoEvent(e.into()))
    }

    /// Creates a guest physical memory region.
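    ///
    /// # Examples
    ///
    /// A sketch; `host_addr` stands for a hypothetical mmap'd host address,
    /// and the guest address and size are illustrative (not run as a doc-test):
    ///
    /// ```ignore
    /// let region = vm.make_user_memory_region(
    ///     0,             // slot (unused by MSHV)
    ///     0x1_0000_0000, // guest_phys_addr
    ///     0x20_0000,     // memory_size (2 MiB)
    ///     host_addr,     // userspace_addr
    ///     false,         // readonly
    ///     false,         // log_dirty_pages
    /// );
    /// vm.create_user_memory_region(region).unwrap();
    /// ```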
    fn create_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Keep track of the slots regardless of whether the region is read-only.
        // For read-only regions the hypervisor can still enable dirty-bit
        // tracking, but a VM exit happens before the dirty bits are set.
        self.dirty_log_slots.write().unwrap().insert(
            user_memory_region.guest_pfn,
            MshvDirtyLogSlot {
                guest_pfn: user_memory_region.guest_pfn,
                memory_size: user_memory_region.size,
            },
        );

        self.fd
            .map_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::CreateUserMemory(e.into()))?;
        Ok(())
    }

    /// Removes a guest physical memory region.
    fn remove_user_memory_region(&self, user_memory_region: UserMemoryRegion) -> vm::Result<()> {
        let user_memory_region: mshv_user_mem_region = user_memory_region.into();
        // Remove the corresponding entry from "self.dirty_log_slots" if needed
        self.dirty_log_slots
            .write()
            .unwrap()
            .remove(&user_memory_region.guest_pfn);

        self.fd
            .unmap_user_memory(user_memory_region)
            .map_err(|e| vm::HypervisorVmError::RemoveUserMemory(e.into()))?;
        Ok(())
    }

    fn make_user_memory_region(
        &self,
        _slot: u32,
        guest_phys_addr: u64,
        memory_size: u64,
        userspace_addr: u64,
        readonly: bool,
        _log_dirty_pages: bool,
    ) -> UserMemoryRegion {
        let mut flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE;
        if !readonly {
            flags |= HV_MAP_GPA_WRITABLE;
        }

        mshv_user_mem_region {
            flags,
            guest_pfn: guest_phys_addr >> PAGE_SHIFT,
            size: memory_size,
            userspace_addr,
        }
        .into()
    }

    fn create_passthrough_device(&self) -> vm::Result<VfioDeviceFd> {
        let mut vfio_dev = mshv_create_device {
            type_: mshv_device_type_MSHV_DEV_TYPE_VFIO,
            fd: 0,
            flags: 0,
        };

        self.create_device(&mut vfio_dev)
            .map_err(|e| vm::HypervisorVmError::CreatePassthroughDevice(e.into()))
    }

    ///
    /// Constructs a routing entry.
    ///
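    /// # Examples
    ///
    /// A sketch, assuming `cfg` is an `InterruptSourceConfig::MsiIrq(..)` built
    /// by the VMM and `gsi` is the allocated GSI (not run as a doc-test):
    ///
    /// ```ignore
    /// let entry = vm.make_routing_entry(gsi, &cfg);
    /// vm.set_gsi_routing(&[entry]).unwrap();
    /// ```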
    fn make_routing_entry(&self, gsi: u32, config: &InterruptSourceConfig) -> IrqRoutingEntry {
        match config {
            InterruptSourceConfig::MsiIrq(cfg) => mshv_msi_routing_entry {
                gsi,
                address_lo: cfg.low_addr,
                address_hi: cfg.high_addr,
                data: cfg.data,
            }
            .into(),
            _ => {
                unreachable!()
            }
        }
    }

    fn set_gsi_routing(&self, entries: &[IrqRoutingEntry]) -> vm::Result<()> {
        let mut msi_routing =
            vec_with_array_field::<mshv_msi_routing, mshv_msi_routing_entry>(entries.len());
        msi_routing[0].nr = entries.len() as u32;

        let entries: Vec<mshv_msi_routing_entry> = entries
            .iter()
            .map(|entry| match entry {
                IrqRoutingEntry::Mshv(e) => *e,
                #[allow(unreachable_patterns)]
                _ => panic!("IrqRoutingEntry type is wrong"),
            })
            .collect();

        // SAFETY: msi_routing initialized with entries.len() and now it is being turned into
        // entries_slice with entries.len() again. It is guaranteed to be large enough to hold
        // everything from entries.
        unsafe {
            let entries_slice: &mut [mshv_msi_routing_entry] =
                msi_routing[0].entries.as_mut_slice(entries.len());
            entries_slice.copy_from_slice(&entries);
        }

        self.fd
            .set_msi_routing(&msi_routing[0])
            .map_err(|e| vm::HypervisorVmError::SetGsiRouting(e.into()))
    }

    ///
    /// Start logging dirty pages
    ///
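    /// # Examples
    ///
    /// A sketch of a typical dirty-page tracking cycle (e.g. during live
    /// migration); the GPA range is illustrative (not run as a doc-test):
    ///
    /// ```ignore
    /// vm.start_dirty_log().unwrap();
    /// // ... let the guest run ...
    /// let bitmap = vm.get_dirty_log(0, 0x0, 0x10_0000).unwrap(); // one bit per page
    /// vm.stop_dirty_log().unwrap();
    /// ```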
    fn start_dirty_log(&self) -> vm::Result<()> {
        self.fd
            .enable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StartDirtyLog(e.into()))
    }

    ///
    /// Stop logging dirty pages
    ///
    fn stop_dirty_log(&self) -> vm::Result<()> {
        let dirty_log_slots = self.dirty_log_slots.read().unwrap();
        // Before disabling dirty page tracking we need to set the dirty bits
        // in the hypervisor; this is a requirement of the Microsoft Hypervisor.
        for (_, s) in dirty_log_slots.iter() {
            self.fd
                .get_dirty_log(s.guest_pfn, s.memory_size as usize, DIRTY_BITMAP_SET_DIRTY)
                .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
        }
        self.fd
            .disable_dirty_page_tracking()
            .map_err(|e| vm::HypervisorVmError::StopDirtyLog(e.into()))?;
        Ok(())
    }

    ///
    /// Get dirty pages bitmap (one bit per page)
    ///
    fn get_dirty_log(&self, _slot: u32, base_gpa: u64, memory_size: u64) -> vm::Result<Vec<u64>> {
        self.fd
            .get_dirty_log(
                base_gpa >> PAGE_SHIFT,
                memory_size as usize,
                DIRTY_BITMAP_CLEAR_DIRTY,
            )
            .map_err(|e| vm::HypervisorVmError::GetDirtyLog(e.into()))
    }

    /// Retrieve guest clock.
    #[cfg(target_arch = "x86_64")]
    fn get_clock(&self) -> vm::Result<ClockData> {
        Ok(ClockData::Mshv)
    }

    /// Set guest clock.
    #[cfg(target_arch = "x86_64")]
    fn set_clock(&self, _data: &ClockData) -> vm::Result<()> {
        Ok(())
    }

    /// Downcast to the underlying MshvVm type
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Initialize the SEV-SNP VM
    #[cfg(feature = "sev_snp")]
    fn sev_snp_init(&self) -> vm::Result<()> {
        self.fd
            .set_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_ISOLATION_STATE,
                hv_partition_isolation_state_HV_PARTITION_ISOLATION_SECURE as u64,
            )
            .map_err(|e| vm::HypervisorVmError::InitializeSevSnp(e.into()))
    }

    ///
    /// Imports isolated pages; these pages will be used
    /// for the PSP (Platform Security Processor) measurement.
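    ///
    /// # Examples
    ///
    /// A sketch (requires the `sev_snp` feature); `page_type`, `page_size` and
    /// `guest_pfns` are placeholders for values produced by the IGVM loader
    /// (not run as a doc-test):
    ///
    /// ```ignore
    /// vm.import_isolated_pages(page_type, page_size, &guest_pfns).unwrap();
    /// ```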
    #[cfg(feature = "sev_snp")]
    fn import_isolated_pages(
        &self,
        page_type: u32,
        page_size: u32,
        pages: &[u64],
    ) -> vm::Result<()> {
        if pages.is_empty() {
            return Ok(());
        }

        let mut isolated_pages =
            vec_with_array_field::<mshv_import_isolated_pages, u64>(pages.len());
        isolated_pages[0].num_pages = pages.len() as u64;
        isolated_pages[0].page_type = page_type;
        isolated_pages[0].page_size = page_size;
        // SAFETY: isolated_pages initialized with pages.len() and now it is being turned into
        // pages_slice with pages.len() again. It is guaranteed to be large enough to hold
        // everything from pages.
        unsafe {
            let pages_slice: &mut [u64] = isolated_pages[0].page_number.as_mut_slice(pages.len());
            pages_slice.copy_from_slice(pages);
        }
        self.fd
            .import_isolated_pages(&isolated_pages[0])
            .map_err(|e| vm::HypervisorVmError::ImportIsolatedPages(e.into()))
    }

    ///
    /// Completes the isolated import, telling the hypervisor that
    /// importing the pages to guest memory is complete.
    ///
    #[cfg(feature = "sev_snp")]
    fn complete_isolated_import(
        &self,
        snp_id_block: IGVM_VHS_SNP_ID_BLOCK,
        host_data: [u8; 32],
        id_block_enabled: u8,
    ) -> vm::Result<()> {
        let mut auth_info = hv_snp_id_auth_info {
            id_key_algorithm: snp_id_block.id_key_algorithm,
            auth_key_algorithm: snp_id_block.author_key_algorithm,
            ..Default::default()
        };
        // Each of the r and s components is 576 bits long
        auth_info.id_block_signature[..SIG_R_COMPONENT_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_key_signature.r_comp.as_ref());
        auth_info.id_block_signature
            [SIG_R_COMPONENT_SIZE_IN_BYTES..SIG_R_AND_S_COMPONENT_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_key_signature.s_comp.as_ref());
        auth_info.id_key[..ECDSA_CURVE_ID_SIZE_IN_BYTES]
            .copy_from_slice(snp_id_block.id_public_key.curve.to_le_bytes().as_ref());
        auth_info.id_key[ECDSA_SIG_X_COMPONENT_START..ECDSA_SIG_X_COMPONENT_END]
            .copy_from_slice(snp_id_block.id_public_key.qx.as_ref());
        auth_info.id_key[ECDSA_SIG_Y_COMPONENT_START..ECDSA_SIG_Y_COMPONENT_END]
            .copy_from_slice(snp_id_block.id_public_key.qy.as_ref());

        let data = mshv_complete_isolated_import {
            import_data: hv_partition_complete_isolated_import_data {
                psp_parameters: hv_psp_launch_finish_data {
                    id_block: hv_snp_id_block {
                        launch_digest: snp_id_block.ld,
                        family_id: snp_id_block.family_id,
                        image_id: snp_id_block.image_id,
                        version: snp_id_block.version,
                        guest_svn: snp_id_block.guest_svn,
                        policy: get_default_snp_guest_policy(),
                    },
                    id_auth_info: auth_info,
                    host_data,
                    id_block_enabled,
                    author_key_enabled: 0,
                },
            },
        };
        self.fd
            .complete_isolated_import(&data)
            .map_err(|e| vm::HypervisorVmError::CompleteIsolatedImport(e.into()))
    }
}